mirror of https://github.com/rclone/rclone.git synced 2026-01-28 07:13:39 +00:00

Compare commits


1644 Commits

Author SHA1 Message Date
Nick Craig-Wood
bede3a5d48 fs: dump config for an Fs before creation - FIXME TESTING DO NOT MERGE
See: https://forum.rclone.org/t/authenticating-to-box-using-connection-strings-and-config-file-fails/23333/7
2021-04-08 12:39:03 +01:00
Nick Craig-Wood
f52ae75a51 rclone authorize: Send and receive extra config options to fix oauth
Before this change any backends which required extra config in the
oauth phase (like the `region` for zoho) didn't work with `rclone
authorize`.

This change serializes the extra config and passes it to `rclone
authorize` and returns new config items to be set from rclone
authorize.

`rclone authorize` will still accept its previous configuration
parameters for use with old rclones.

Fixes #5178
2021-04-08 12:34:15 +01:00
Nick Craig-Wood
9d5c5bf7ab fs: add Options.NonDefault to read options which aren't at their default #5178 2021-04-08 12:34:15 +01:00
Nick Craig-Wood
53573b4a09 configmap: Add Encode and Decode methods to Simple for command line encoding #5178 2021-04-08 12:34:15 +01:00
Nick Craig-Wood
3622e064f5 configmap: Add priorities to configmap Setters #5178 2021-04-08 12:34:15 +01:00
Nick Craig-Wood
6d28ea7ab5 fs: factor config override detection into its own function #5178 2021-04-08 12:34:15 +01:00
Nick Craig-Wood
b9fd02039b authorize: refactor to use new config interfaces #5178 2021-04-08 12:34:15 +01:00
Nick Craig-Wood
1a41c930f3 configmap: add ClearSetters to get rid of all setters #5178 2021-04-08 12:34:15 +01:00
albertony
ddb7eb6e0a docs: fixed some typos 2021-04-08 10:19:03 +02:00
buengese
c114695a66 zoho: do not ask for mountpoint twice when using headless setup 2021-04-08 00:23:27 +02:00
Nick Craig-Wood
fcba51557f dropbox: set visibility in link sharing when --expire is set
Note that due to a bug in the dropbox SDK you'll need to set --expire
to access this.

See: https://github.com/dropbox/dropbox-sdk-go-unofficial/issues/75
See: https://forum.rclone.org/t/rclone-link-dropbox-permissions/23211
2021-04-07 13:58:37 +01:00
Nick Craig-Wood
9393225a1d link: use "off" value for unset expiry 2021-04-07 13:58:37 +01:00
albertony
3d3ff61f74 docs: minor cleanup of space around code section 2021-04-07 08:47:29 +02:00
albertony
d98f192425 docs: WinFsp 2021 is out of beta 2021-04-07 08:13:40 +02:00
Nick Craig-Wood
54771e4402 sync: fix incorrect error reported by graceful cutoff - fixes #5203
Before this change, a sync which was finished with a graceful transfer
cutoff could return "context canceled" instead of the correct error.

This fixes the problem by ignoring "context canceled" errors if we
have done a graceful stop.
2021-04-06 13:08:42 +01:00
Nick Craig-Wood
dc286529bc drive: fix backend copyid of google doc to directory - fixes #5196
Before this change the google doc was being copied to the directory
without an extension.
2021-04-06 11:46:52 +01:00
Nick Craig-Wood
7dc7c021db sftp: fix Update ReadFrom failed: failed to send packet: EOF errors
In

a3fcadddc8 sftp: close idle connections after --sftp-idle-timeout (1m by default)

Idle SFTP connections were closed after 1 minute. However, due to the
way SFTP sessions are multiplexed over a single SSH connection, this
meant that if uploads or downloads went on for more than one minute
they failed with "EOF" errors as their underlying connection was
closed.

This fixes the problem by not clearing idle connections if there are
any transfers in progress.

Fixes #5197
2021-04-06 10:01:49 +01:00
Nick Craig-Wood
fe1aa13069 sftp: revert sftp library to v1.12.0 from v1.13.0 to fix performance regression #5197
This reverts the library update done in this commit.

713f8f357d sftp: fix "file not found" errors for read once servers

Reverting this commit triples transfer performance to a far-away SFTP server.

See: https://github.com/pkg/sftp/issues/426
2021-04-06 10:01:49 +01:00
Nick Craig-Wood
5fa8e7d957 Add Nick Gaya to contributors 2021-04-06 10:01:49 +01:00
Nick Gaya
9db7c51eaa sync: don't warn about --no-traverse when --files-from is set 2021-04-05 20:36:39 +01:00
Ivan Andreev
3859fe2f52 cmd/version: print os/version, kernel and bitness (#5204)
Related to #5121

Note: OpenBSD support is still a stub. This will be fixed after the upstream PR is resolved
https://github.com/shirou/gopsutil/pull/993
2021-04-05 21:53:09 +03:00
buengese
0caf417779 zoho: fix error when region isn't set 2021-04-05 15:11:30 +02:00
Ivan Andreev
9eab258ffb build: add build tag noselfupdate
Allow downstream packaging to build rclone without selfupdate command:
$ go build -tags noselfupdate

Fixes #5187
2021-04-04 11:22:09 +03:00
Nick Gaya
7df57cd625 contributing.md: update setup instructions for go1.16 2021-04-04 09:10:43 +01:00
Nick Gaya
1fd9b483c8 onedrive: add list_chunk option
Add --onedrive-list-chunk option similar to existing options for azureblob, drive, and s3.

Suggested as a workaround for a OneDrive pagination bug

See: https://forum.rclone.org/t/unexpected-duplicates-on-onedrive-with-0s-in-filename/23164/8
2021-04-04 09:08:16 +01:00
Ivan Andreev
93353c431b selfupdate: don't detect FUSE if build is static
Before this patch selfupdate detected ANY build with the cmount tag as a build
having libFUSE capabilities. However, only dynamic builds really have it.
The official Linux builds are static and have the cmount tag as of the time
of this writing. This results in an inability to update official Linux binaries.
This patch fixes that. The build can be fixed independently.
2021-04-03 21:54:15 +03:00
Nick Craig-Wood
886dfd23e2 fichier: check if more than one upload link is returned #5152 2021-04-03 15:00:50 +01:00
Nick Craig-Wood
116a8021bb drive: switch to the Drives API for looking up shared drives - fixes #3139
Before this change rclone used the deprecated teamdrives API. This
change uses the new drives API (which seems to be the teamdrives API
renamed).
2021-04-03 14:21:20 +01:00
Nick Craig-Wood
9e2fbe0f1a install.sh: fix macOS arm64 download - fixes #5183 2021-03-31 21:48:31 +01:00
Nick Craig-Wood
6d65d116df Start v1.56.0-DEV development 2021-03-31 19:51:43 +01:00
Ivan Andreev
edaeb51ea9 backlog: ticket templates should recommend updating rclone
Aligns the Bug and Feature GitHub templates with the rclone forum
and instructs the submitter to proactively update rclone.
2021-03-31 19:13:50 +01:00
Nick Craig-Wood
6e2e2d9eb2 Version v1.55.0 2021-03-31 19:12:08 +01:00
Nick Craig-Wood
20e15e52a9 vfs: fix Create causing windows explorer to truncate files on CTRL-C CTRL-V
Before this fix, doing CTRL-C and CTRL-V on a file in Windows explorer
caused the **source** and the destination to be truncated to 0.

This is because Windows opens the source file with Create with flags
`O_RDWR|O_CREATE|O_EXCL` but doesn't write to it - it only reads from
it. Rclone was taking the call to Create as a signal to always make a
new file, but this is incorrect.

This fix reads an existing file from the directory if it exists when
Create is called rather than always creating a new one. This fixes the
problem.

Fixes #5181
2021-03-31 14:48:02 +01:00
Nick Craig-Wood
d0f8b4f479 fs/cache: fix recreation of backends after they have expired
Before this change, on the first attempt to create a backend we used a
non-canonicalized string. When the backend expired the second attempt
to create it would use the canonicalized string (because it was in the
remap cache) which would fail because it was now `name{XXXX}:`

This change makes sure that whenever we create a backend we always use
the non-canonicalized string.

See: https://forum.rclone.org/t/connection-string-inconsistencies-on-beta/23171
2021-03-30 18:46:30 +01:00
Nick Craig-Wood
58d82a5c73 rc: allow fs= params to be a JSON blob 2021-03-30 17:07:27 +01:00
Nick Craig-Wood
c0c74003f2 fs/cache: add --fs-cache-expire-duration to control the fs cache
This commit makes the previously statically configured fs cache configurable.

It introduces two parameters `--fs-cache-expire-duration` and
`--fs-cache-expire-interval` to control the caching of the items.

It also adds new interfaces to lib/cache to set these.
2021-03-30 12:46:47 +01:00
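As an illustration (the values here are placeholders, not from the commit), the new flags apply to any command that keeps backends cached, e.g. a long-running rc server:

    rclone rcd --fs-cache-expire-duration 30m --fs-cache-expire-interval 1m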
Nick Craig-Wood
60bc7a079a rc: factor rc.Error out of rcserver for re-use in librclone #4891 2021-03-30 12:46:05 +01:00
Nick Craig-Wood
20c5ca08fb test_all: fix crash when using -clean 2021-03-29 23:12:53 +01:00
Nick Craig-Wood
fc57648b75 lib/rest: fix multipart uploads stopping on context cancel
Before this change when the context was cancelled (due to
--max-duration for example) this could deadlock when uploading
multipart uploads.

This change fixes the problem by introducing another go routine to
monitor the context and close the pipe with an error when the context
errors.
2021-03-29 19:09:47 +01:00
Nick Craig-Wood
8c5c91e68f lib/readers: add NewContextReader to error on context errors 2021-03-29 19:09:47 +01:00
Nick Craig-Wood
9dd39e8524 Add x0b to contributors 2021-03-29 19:09:47 +01:00
albertony
9c9186183d docs: add short description of configuration file format (#5142)
Fixes #572
2021-03-27 17:26:01 +01:00
Nick Craig-Wood
2ccf416e83 build: add version to android builds and fix upload 2021-03-26 09:18:54 +00:00
x0b
5577c7b760 build: replace xgo with NDK for Android builds 2021-03-26 09:18:54 +00:00
Nick Craig-Wood
f6dbb98a1d cmount: update cgofuse to the latest version to bring in macfuse 4 fix 2021-03-25 09:02:17 +00:00
Nick Craig-Wood
d042f3194f b2: fix html files downloaded via cloudflare
When reading files from B2 via cloudflare using --b2-download-url
cloudflare strips the Content-Length headers (presumably so it can
inject stuff into the body).

This caused rclone to think the file was corrupted as the length
didn't match.

The patch uses the old length read from the listing if there is no
Content-Length.

See: https://forum.rclone.org/t/b2-cloudflare-error-directory-not-found/23026
2021-03-24 17:06:59 +00:00
Nick Craig-Wood
524cd327e6 build: update notes on how to build the release manually with docker 2021-03-24 14:22:27 +00:00
Nick Craig-Wood
b8c1cf7451 union: fix initialisation broken in refactor - fixes #5139
This commit broke the initialisation of the union backend

f17d7c0012 union: refactor to use fspath.SplitFs instead of fs.ParseRemote #4996

This patch fixes it.
2021-03-24 09:47:38 +00:00
Nick Craig-Wood
0fa68bda02 fspath: fix path parsing on Windows - fixes #5143
In this commit

8a46dd1b57 fspath: Implement a connection string parser #4996

The parsing code was re-written. This didn't quite work as before,
failing to adjust local paths on Windows when it should.

This patch fixes the problem and implements tests for it.
2021-03-24 09:47:03 +00:00
Nick Craig-Wood
1378bfee63 box: fix transfers getting stuck on token expiry after API change
Box recently changed their API, changing the case of returned API items

> On May 10th, 2021, as part of our continued infrastructure upgrade,
> Box's API response headers will standardize to return in a case
> insensitive manner, in line with industry best practices and our API
> documentation. Applications that are using these headers, such as
> "location" and "retry-after", will need to verify that their
> applications are checking for these headers in a case-insensitive
> fashion.

Rclone was reading the raw headers from the `http.Header` and not
using the `Get` accessor method which meant that it was sensitive to
case changes.

This fixes the problem by using the `Get` accessor method.

See: https://forum.rclone.org/t/box-backend-incompatible-with-box-api-changes-being-deployed/22972
2021-03-24 09:45:17 +00:00
nguyenhuuluan434
d6870473a1 swift: implement copying large objects 2021-03-24 08:56:39 +00:00
albertony
12cd322643 crypt: log hash ok on upload 2021-03-23 18:36:51 +01:00
Ivan Andreev
1406b6c3c9 install.sh: fail on download errors
This patch makes install.sh always run curl with flag "-f"
so it fails on download errors.
2021-03-23 11:29:00 +03:00
Ivan Andreev
088a83872d install.sh: fix some shellcheck warnings 2021-03-23 11:29:00 +03:00
Nick Craig-Wood
cb46092883 lib/atexit: unregister interrupt handler once it has fired so users can interrupt again 2021-03-23 08:03:00 +00:00
Nick Craig-Wood
a2cd5d8fa3 lib/atexit: fix deadlock calling Finalise while Run is running 2021-03-23 08:03:00 +00:00
Ivan Andreev
1fe2460e38 selfupdate: abort if updating would discard fuse semantics 2021-03-22 22:55:24 +03:00
Ivan Andreev
ef5c212f9b version: show build tags and type of executable
This patch modifies the output of `rclone version`.
The `os/arch` line is split into `os/type` and `os/arch`.
The `go version` line is now tagged as `go/version` for consistency.

Additionally the `go/linking` line tells whether rclone
was linked as a static or dynamic executable.
The new `go/tags` line shows a space separated list of build tags.

The info about linking and build tags is also added to the output
of the `core/version` RC endpoint.
2021-03-22 22:55:24 +03:00
Nick Craig-Wood
268a7ff7b8 rc: add a full set of stats to core/stats
This patch adds the missing stats to the output of core/stats

- totalChecks
- totalTransfers
- totalBytes
- eta

This now includes enough information to rebuild the normal stats
output from rclone including percentage completions and ETAs.

Fixes #5116
2021-03-22 10:10:36 +00:00
Nick Craig-Wood
b47d6001a9 vfs: fix directory renaming by renaming dirs cached in memory
Before this change, if a directory was renamed and it or any of its
children had virtual entries, they weren't flushed.

The consequence of this was that the directory path got out of sync with
the actual position of the directory in the tree, leading to listings
of the old directory rather than the new one.

The fix renames any directories remaining after the ForgetAll to have
the correct path which fixes the problem.

See: https://forum.rclone.org/t/after-a-directory-renmane-using-mv-files-are-not-visible-any-longer/22797
2021-03-22 09:07:01 +00:00
Nick Craig-Wood
a4c4ddf052 vfs: rename files in cache and cancel uploads on directory rename
Before this change rclone did not cancel uploads or rename the
cached files in the directory cache when a directory was renamed.

This caused issues with uploads arriving in the wrong place on bucket
based file systems.

See: https://forum.rclone.org/t/after-a-directory-renmane-using-mv-files-are-not-visible-any-longer/22797
2021-03-22 09:07:01 +00:00
Nick Craig-Wood
4cc2a7f342 mount: fix caching of old directories after renaming them
It was discovered `rclone mount` (but not `rclone cmount`) cached
directories after rename which it shouldn't have done.

This caused IO errors when trying to access files in renamed
directories on bucket based file systems.

This turned out to be the kernel caching the directories as bazil/fuse
sets their expiry time to 60s for some reason.

This fix invalidates the relevant kernel cache entries for the
directories which fixes the problem.

Fixes: #4977
See: https://forum.rclone.org/t/after-a-directory-renmane-using-mv-files-are-not-visible-any-longer/22797
2021-03-22 09:07:01 +00:00
Nick Craig-Wood
c72d2c67ed vfs: add debug dump function to dump the state of the VFS cache 2021-03-22 09:06:44 +00:00
Nick Craig-Wood
9deab5a563 dropbox: raise priority of rate limited message to INFO to make it more noticeable
If you exceed rate limits, dropbox tells you to wait for 300 seconds -
this is rather a long time for the user to be waiting for rclone to
finish, so emit a NOTICE level log instead of a DEBUG.
2021-03-22 09:04:25 +00:00
buengese
da5b0cb611 zoho: add forgotten setupRegion() to NewFs
- this finally fixes regions other than eu
2021-03-21 02:15:22 +01:00
buengese
0187bc494a zoho: replace client id 2021-03-21 02:15:22 +01:00
Ivan Andreev
2bdbf00fa3 selfupdate: add instructions on reverting the update (#5141) 2021-03-18 23:11:16 +03:00
Nick Craig-Wood
9ee3ad70e9 sftp: fix SetModTime stat failed: object not found with --sftp-set-modtime=false
Some sftp servers don't allow the user to access the file after upload.

In this case the error message indicates that using
--sftp-set-modtime=false would fix the problem. However it doesn't
because SetModTime does a stat call which can't be disabled.

    Update SetModTime failed: SetModTime stat failed: object not found

After upload this patch checks for an `object not found` error if
set_modtime == false and ignores it, returning the expected size of
the object instead.

It also makes SetModTime do nothing if set_modtime = false

https://forum.rclone.org/t/sftp-update-setmodtime-failed/22873
2021-03-18 16:31:51 +00:00
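For illustration, the workaround mentioned in the error message looks like this (the remote name is a placeholder):

    rclone copy --sftp-set-modtime=false file.txt sftp-remote:dir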
albertony
ce182adf46 copyurl: add option to print resulting auto-filename (#5095)
Fixes #5094
2021-03-18 10:04:59 +01:00
albertony
97fc3b9046 rc: avoid +Inf value for speed in core/stats (#5134)
Fixes #5132
2021-03-18 10:02:30 +01:00
Nick Craig-Wood
e59acd16c6 drive: remove duplicated Context(ctx) calls
These were added by accident in

d9959b0271 drive: pass context on to drive SDK - this will help with cancellation

Which added lots of new Context() calls but duplicated some existing
ones.
2021-03-17 16:46:58 +00:00
albertony
acfd7e2403 docs: add note about limitation with pattern-list to filtering docs (#5118)
Fixes #5112
2021-03-17 14:34:46 +01:00
Nick Craig-Wood
f47893873d fs: fix failed token refresh on mounts created via the rc
Users have noticed that backends created via the rc have been failing
to refresh their tokens with this error:

    Token refresh failed try 1/5: context canceled

This is because the rc server cancels the context used to make the
backend when the request has finished. This same context is used to
refresh the token and the oauth library checks to see if the context
has been cancelled.

This patch creates a new context for the cached backends and copies
the global and filter config into the new context.

See: https://forum.rclone.org/t/google-drive-token-refresh-failed/22283
2021-03-16 16:29:22 +00:00
Nick Craig-Wood
b9a015e5b9 s3: fix --s3-profile which wasn't working - fixes #4757 2021-03-16 16:25:07 +00:00
Nick Craig-Wood
d72d9e591a ftp: retry connections and logins on 421 errors #3984
Before this we just failed if the ftp connection or login failed.

This change adds a pacer just for the ftp connect and retries if the
connection failed to Dial or the login returns a 421 error.
2021-03-16 16:17:22 +00:00
Nick Craig-Wood
df451e1e70 ftp: add --ftp-close-timeout flag for use with awkward ftp servers #3984 2021-03-16 16:17:22 +00:00
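A possible invocation of the new flag (the timeout value and remote name are illustrative):

    rclone lsd --ftp-close-timeout 30s ftp-remote: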
Nick Craig-Wood
d9959b0271 drive: pass context on to drive SDK - this will help with cancellation 2021-03-16 16:17:22 +00:00
Nick Craig-Wood
f2c0f82fc6 backends: Add context checking to remaining backends #4504
This is a follow up to 4013bc4a4c which missed some backends.

It adds a ctx parameter to shouldRetry and checks it.
2021-03-16 16:17:22 +00:00
albertony
f76c6cc893 docs: describe how to bypass loading of config file
Fixes #5125
2021-03-16 14:25:00 +00:00
Nick Craig-Wood
5e95877840 vfs: fix modtime set if --vfs-cache-mode writes/full and no write
When using --vfs-cache-mode writes or full, if a file was opened with
write intent, the modtime was set, and the file was then closed without
being modified, the modtime would never be written back to storage.

The sequence of events

- app opens file with write intent
- app does set modtime
- rclone sets the modtime on the cache file, but not the remote file
  because it is open for write and can't be set yet
- app closes the file without changing it
- rclone doesn't upload the file because the file wasn't changed so
  the modtime doesn't get updated

This fixes the problem by making sure any unapplied modtime changes
are applied even if the file is not modified when being closed.

Fixes #4795
2021-03-16 13:36:48 +00:00
Nick Craig-Wood
8b491f7f3d vfs: fix modtimes changing by fractional seconds after upload #4763
Before this change, rclone would return the modification times of the
cache file or the pending modtime which would be more accurate than
the modtime that the backend was capable of.

This meant that the modtime would change slightly when the item was
actually uploaded.

For example modification times on Google Drive would be rounded to the
nearest millisecond.

This fixes the VFS layer to always return modtimes directly from an
object stored on the remote, or rounded to the precision that the
remote is capable of.
2021-03-16 13:31:47 +00:00
Nick Craig-Wood
aea8776a43 vfs: fix modtimes not updating when writing via cache - fixes #4763
This reads modtime from a dirty cache item if it exists. This mirrors
the way reading the size works.

This fixes the mod time not updating when the file is written, only
when the writeback completes.

See: https://forum.rclone.org/t/rclone-mount-and-changing-timestamps-after-writes/22629
2021-03-16 13:31:47 +00:00
Nick Craig-Wood
c387eb8c09 vfs: don't set modification time if it was already correct
Before this change, rclone would set the modification time of an
object after it had been uploaded. However with --vfs-cache-mode
writes and above, the modification time of the object is already
correct as the cache backing file gets set with the correct
modification time before upload.

Setting the modification time causes another version to be created on
backends such as S3 so it should be avoided if possible.

This change checks to see if the modification time needs changing and
only sets it if necessary.

See: https://forum.rclone.org/t/produce-2-versions-when-overwrite-an-object-in-min-io/19634
2021-03-16 13:31:47 +00:00
Nick Craig-Wood
a12b2746b4 fs: make sure backends with additional config have a different name #4996
Backends for which additional config is detected (in the config string
or on the command line or as environment variables) will gain a suffix
`{XXXX}` where `XXXX` is a base64-encoded MD5 hash of the config
string.

This fixes backend caching with config string remotes.

This much requested feature now works properly:

    rclone copy -vv drive,shared_with_me:file.txt drive:
2021-03-15 19:22:07 +00:00
Nick Craig-Wood
3dbef2b2fd fs: make sure we don't save on the fly remote config to the config file #4996 2021-03-15 19:22:07 +00:00
Nick Craig-Wood
f111e0eaf8 fs: enable configmap to be able to tell values set vs config file values #4996
This adds AddOverrideGetter and GetOverride methods to configmap and
uses them in fs.ConfigMap.

This enables us to tell which values have been set and which are just
read from the config file or at their defaults.

This also deletes the unused AddGetters method in configmap.
2021-03-15 19:22:07 +00:00
Nick Craig-Wood
96207f342c configmap: add consistent String() method to configmap.Simple #4996 2021-03-15 19:22:07 +00:00
Nick Craig-Wood
e25ac4dcf0 fs: Use connection string config as highest priority config #4996 2021-03-15 19:22:07 +00:00
Nick Craig-Wood
28f6efe955 cmd: refactor to use fspath.SplitFs instead of fs.ParseRemote #4996 2021-03-15 19:22:07 +00:00
Nick Craig-Wood
f17d7c0012 union: refactor to use fspath.SplitFs instead of fs.ParseRemote #4996 2021-03-15 19:22:07 +00:00
Nick Craig-Wood
3761cf68b4 chunker: refactor to use fspath.SplitFs instead of fspath.Parse #4996 2021-03-15 19:22:07 +00:00
Nick Craig-Wood
71554c1371 fspath: factor Split into SplitFs and Split #4996 2021-03-15 19:22:07 +00:00
Nick Craig-Wood
8a46dd1b57 fspath: Implement a connection string parser #4996
This is implemented as a state machine parser so it can emit sensible
error messages.

It does not use the connection strings elsewhere in rclone yet - see
subsequent commits.

An optional fuzzer is implemented for the Parse function.
2021-03-15 19:22:07 +00:00
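For illustration, these are the kinds of forms the parser is meant to accept (remote, backend and parameter names are placeholders):

    remote:path/to/dir
    remote,param=value,param2=value2:path/to/dir
    :backend,param=value:path/to/dir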
Nick Craig-Wood
3b21857097 config: clear fs cache of stale entries when altering config - fixes #4811
Before this change if a config was altered via the rc then when a new
backend was created from that config, if there was a backend already
running from the old config in the cache then it would be used instead
of creating a new backend with the new config, thus leading to
confusion.

This change flushes the fs cache of any backends based off a config
when that config is changed over the rc.
2021-03-15 19:22:07 +00:00
Nick Craig-Wood
a10fbf16ea fs/cache: add ClearConfig method to clear all remotes based on Config #4811 2021-03-15 19:22:07 +00:00
Nick Craig-Wood
f4750928ee lib/cache: add Delete and DeletePrefix methods #4811 2021-03-15 19:22:07 +00:00
Nick Craig-Wood
657be2ace5 rc: add fscache/clear and fscache/entries to control the fs cache #4811 2021-03-15 19:22:07 +00:00
Nick Craig-Wood
feaaca4987 fs/cache: add Entries() for finding how many entries in the fscache #4811 2021-03-15 19:22:07 +00:00
Nick Craig-Wood
ebd9462ea6 union: fix union attempting to update files on a read only file system
Before this change, when using an all create method with one of the
upstreams being read only, if there was an existing file on the read
only remote, it was impossible to update it.

This change detects that situation and creates the file on a
read/write upstream. This file will shadow the file on the read only
upstream. If it is deleted the read only upstream file will be visible
again.

Fixes #4929
2021-03-15 19:22:07 +00:00
Nick Craig-Wood
6b9e4f939d union: fix crash when using epff policy - fixes #5000
Before this fix using the epff policy could double close a channel.

The fix refactors the code to make that impossible and cancels any
running queries when the first query is found.
2021-03-15 19:22:07 +00:00
Nick Craig-Wood
687a3b1832 vfs: fix data race discovered by the race detector
This fixes a place where we read from item.o without the item.mu held.
2021-03-15 19:22:07 +00:00
Nick Craig-Wood
25d5ed763c fs: make sync and operations tests use context instead of global variables 2021-03-15 19:22:07 +00:00
Nick Craig-Wood
5e038a5e1e lib/file: retry preallocate on EINTR
Before this change, sometimes preallocate failed with EINTR which
rclone ignored.

Retrying the syscall is the correct thing to do and seems to make
preallocate 100% reliable.
2021-03-15 19:22:07 +00:00
Nick Craig-Wood
4b4e531846 build: add missing BUILD_FLAGS to compile_only to speed up other_os build
Before this we were building all architectures unnecessarily in the
compile_all step for the other_os build. They are built elsewhere so
we don't need to build them here too.

This fix adds the missing BUILD_FLAGS which excludes the other builds
and should speed up the workflow.
2021-03-15 19:22:07 +00:00
Nick Craig-Wood
89e8fb4818 local: don't ignore preallocate disk full errors
See: https://forum.rclone.org/t/input-output-error-copying-to-cifs-mount-disk-space-filled/22163
2021-03-15 19:22:07 +00:00
Nick Craig-Wood
b9bf91c510 lib/file: don't run preallocate concurrently
This seems to cause file systems to get the amount of free space
wrong.
2021-03-15 19:22:06 +00:00
Nick Craig-Wood
40b58d59ad lib/file: make pre-allocate detect disk full errors and return them 2021-03-15 19:22:06 +00:00
Nick Craig-Wood
4fbb50422c drive: don't stop server side copy if couldn't read description
Before this change we would error out of a server side copy if we
couldn't read the description for a google doc.

This patch just logs an ERROR message and carries on.

See: https://forum.rclone.org/t/rclone-google-drive-to-google-drive-migration-for-multiple-users/19024/3
2021-03-15 19:22:06 +00:00
Nick Craig-Wood
8d847a4e94 lib/atexit: fix occasional failure to unmount with CTRL-C #4957
Before this change CTRL-C could come in to exit rclone which would
start the atexit actions running. The Fuse unmount then signals rclone
to exit which wasn't waiting for the already running atexit actions to
complete.

This change makes sure that if the atexit actions are started they
are run to completion.
2021-03-15 19:22:06 +00:00
Nick Craig-Wood
e3e08a48cb Add Manish Kumar to contributors 2021-03-15 19:22:06 +00:00
Manish Kumar
ff6868900d azureblob: add container public access level support. Fixes #5045 2021-03-15 17:18:47 +00:00
albertony
aab076029f local: make nounc advanced option except on windows 2021-03-15 17:10:27 +00:00
Nick Craig-Wood
294f090361 fs: make sure --low-level-retries, --checkers, --transfers are > 0 2021-03-15 17:05:35 +00:00
Nick Craig-Wood
301e1ad982 fs: fix crash when --low-level-retries=0 - fixes #5024 2021-03-15 17:05:35 +00:00
Nick Craig-Wood
3cf6ea848b config: remove log.Fatal and replace with error passing where possible 2021-03-14 16:03:35 +00:00
Nick Craig-Wood
bb0b6432ae config: --config "" or "/notfound" for in memory config only #4996
If `--config` is set to an empty string or the special value `/notfound`
then rclone will keep the config file in memory only.
2021-03-14 16:03:35 +00:00
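For example, a one-off command can then run without reading or writing any config file (the on-the-fly :local remote here is purely illustrative):

    rclone --config "" lsjson :local:/tmp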
Nick Craig-Wood
46078d391f config: make config file reads reload the config file if needed #4996
Before this change the config file needed to be explicitly reloaded.
This coupled the config file implementation with the backends
needlessly.

This change stats the config file to see if it needs to be reloaded on
every config file operation.

This allows us to remove calls to

- config.SaveConfig
- config.GetFresh

This now makes the only needed interface to the config file be
that provided by configmap.Map when rclone is not being configured.

This also adds tests for configfile
2021-03-14 16:03:35 +00:00
Nick Craig-Wood
849bf20598 build: disable IOS builds for the time being - see #5124 2021-03-13 22:07:46 +00:00
Ivan Andreev
e91f2e342a docs: mention rclone selfupdate in quickstart (#5122) 2021-03-13 23:02:40 +03:00
Nick Craig-Wood
713f8f357d sftp: fix "file not found" errors for read once servers - fixes #5077
It introduces a new flag --sftp-disable-concurrent-reads to stop the
problematic behaviour in the SFTP library for read-once servers.

This upgrades the sftp library to v1.13.0 which has the fix.
2021-03-13 15:38:38 +00:00
Evan Harris
83368998be docs: Updated sync and dedupe command docs #4429 2021-03-13 15:01:32 +00:00
Nick Craig-Wood
4013bc4a4c Fix excessive retries missing --max-duration timeout - fixes #4504
This change checks the context whenever rclone might retry, and
doesn't retry if the current context has an error.

This fixes the pathological behaviour of `--max-duration` refusing to
exit because all the context deadline exceeded errors were being
retried.

This unfortunately meant changing the shouldRetry logic in every
backend and doing a lot of context propagation.

See: https://forum.rclone.org/t/add-flag-to-exit-immediately-when-max-duration-reached/22723
2021-03-13 09:25:44 +00:00
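The flag whose behaviour this fixes is used like this (remote names and the duration are placeholders):

    rclone sync --max-duration 1h source:path dest:path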
Nick Craig-Wood
32925dae1f Add Lucas Messenger to contributors 2021-03-13 09:25:44 +00:00
Nick Craig-Wood
6cc70997ba Add Naveen Honest Raj to contributors 2021-03-13 09:25:44 +00:00
buengese
d260e3824e docs: cleanup optional feature table 2021-03-12 09:20:01 +00:00
Lucas Messenger
a5bd26395e hdfs: fix permissions for when directory is created 2021-03-12 09:15:14 +00:00
Ivan Andreev
6fa74340a0 cmd: rclone selfupdate (#5080)
Implements self-update command
Fixes #548
Fixes #5076
2021-03-11 22:39:30 +03:00
Saksham Khanna
4d8ef7bca7 cmd/dedupe: make largest directory primary to minimize data moved (#3648)
This change makes dedupe recursively count elements in same-named directories
and make the largest one primary. This minimizes the amount of data
moved (or at least the number of API calls) when dedupe merges them.
It also adds a new fs.Object interface `ParentIDer` with a `ParentID` function and
implements it for the drive and opendrive backends. This function returns the
parent directory ID for objects on filesystems that allow same-named dirs.
We use it to correctly count the sizes of same-named directories.

Fixes #2568

Co-authored-by: Ivan Andreev <ivandeex@gmail.com>
2021-03-11 20:40:29 +03:00
Nick Craig-Wood
6a9ae32012 config: split up main file more and move tests into correct packages
This splits config.go into ui.go for the user interface functions and
authorize.go for the implementation of `rclone authorize`.

It also moves the tests into the correct places (including one from
obscure which was in the wrong place).
2021-03-11 17:29:26 +00:00
Nick Craig-Wood
a7fd65bf2d config: move account initialisation out into accounting
Before this change the initialisation for the accounting package was
done in the config package for some strange historical reason.
2021-03-11 17:29:26 +00:00
Nick Craig-Wood
1fed2d910c config: make config file system pluggable
If you are using rclone as a library you can decide to use the rclone
config file system or not by calling

    configfile.LoadConfig(ctx)

If you don't you will need to set `config.Data` to an implementation
of `config.Storage`.

Other changes
- change interface of config.FileGet to remove unused default
- remove MustValue from config.Storage interface
- change GetValue to return string or bool like elsewhere in rclone
- implement a default config file system which panics with a helpful error
- implement getWithDefault to replace the removed MustValue
- don't embed goconfig.ConfigFile so we can change the methods
2021-03-11 17:29:26 +00:00
Fionera
c95b580478 config: Wrap config library in an interface 2021-03-11 17:29:26 +00:00
Nick Craig-Wood
2be310cd6e build: fix dependencies for docs build 2021-03-11 17:29:26 +00:00
Naveen Honest Raj
02a5d350f9 rcd: Added systemd notification during the 'rclone rcd' command call. This also fixes #5073.
Signed-off-by: Naveen Honest Raj <naveendurai19@gmail.com>
2021-03-11 17:12:14 +00:00
albertony
18cd2064ec mount: docs: add note about volume path syntax on windows 2021-03-11 17:09:22 +00:00
buengese
59ed70ca91 fichier: implement public link 2021-03-11 00:44:26 +01:00
Nick Craig-Wood
6df56c55b0 Changelog updates from Version v1.54.1 2021-03-08 11:06:11 +00:00
Nick Craig-Wood
94e34cb783 build: fix nfpm install by using the released binary 2021-03-07 16:42:22 +00:00
Robert Thomas
c3e2392f2b dropbox: fix polling support for scoped apps - fixes #5089 (#5092)
This fixes the polling implementation for Dropbox, particularly
when using a scoped app. This also adds a lower end check for the
timeout, as I forgot to include that in the original implementation.
2021-03-05 17:44:47 +00:00
Nick Craig-Wood
f7e3115955 s3: fix Wasabi HEAD requests returning stale data by using only 1 transport
In this commit

fc5b14b620 s3: Added `--s3-disable-http2` to disable http/2

We created our own transport so we could disable http/2. However the
added function is called twice meaning that we create two HTTP
transports. This didn't happen with the original code because the
default transport is cached by fshttp.

Rclone normally does a PUT followed by a HEAD request to check an
upload has been successful.

With the two transports, the PUT and the HEAD were being done on
different HTTP transports. This means that it wasn't re-using the same
HTTP connection, so the HEAD request showed the previous object value.
This caused rclone to declare the upload was corrupted, delete the
object and try again.

This patch makes sure we only create one transport and use it for both
PUT and HEAD requests which fixes the problem with Wasabi.

See: https://forum.rclone.org/t/each-time-rclone-is-run-1-3-fails-2-3-succeeds/22545
2021-03-05 15:34:56 +00:00
Nick Craig-Wood
e01e8010a0 Add Maxwell Calman to contributors 2021-03-05 15:34:56 +00:00
Ivan Andreev
75056dc9b2 ftp: update dependency jlaffaye/ftp (#5097) 2021-03-05 15:58:04 +03:00
Ivan Andreev
7aa7acd926 address stringent ineffectual assignment check in golangci-lint (#5093) 2021-03-04 14:26:48 +03:00
Nick Craig-Wood
0ad38dd6fa dropbox,ftp,onedrive,yandex: make --timeout 0 work properly
See: https://forum.rclone.org/t/an-issue-about-ftp-backend-in-2-different-systems/22551
2021-03-01 12:08:58 +00:00
Maxwell Calman
9cc8ff4dd4 chunker: partially implement no-rename transactions (#4675)
Some storage providers e.g. S3 don't have an efficient rename operation.
Before this change, when chunker finished an upload, the server-side copy
and delete operations that renamed temporary chunks to their final names
could take a significant amount of time.
This PR records transaction identifier (versioning) in the metadata of
chunker composite objects striving to remove the need for rename
operations on such backends.
This approach will be triggered by the new "transactions" configuration
option, which can be "rename" (the default) or "norename".
We implement the new approach for uploads (Put operations).
The chunker Move operation still uses the rename operation of
underlying backend. Filling this gap is left for a later PR.

Co-authored-by: Ivan Andreev <ivandeex@gmail.com>
2021-02-28 10:49:17 +00:00
Nick Craig-Wood
b029fb591f s3: fix failed to create file system with folder level permissions policy
Before this change, if a folder level access permissions policy was in
use, with a trailing `/` marking the folders, then rclone would HEAD the
path without a trailing `/` to work out if it was a file or a folder.
This returned a permission denied error, which rclone returned to the
user.

    Failed to create file system for "s3:bucket/path/": Forbidden: Forbidden
        status code: 403, request id: XXXX, host id:

Previous to this change

53aa03cc44 s3: complete sse-c implementation

rclone would assume any errors when HEAD-ing the object implied it
didn't exist and this test would not fail.

This change reverts the functionality of the test to work as it did
before, meaning any errors on HEAD will make rclone assume the object
does not exist and the path is referring to a directory.

Fixes #4990
2021-02-24 20:35:44 +00:00
Nick Craig-Wood
95e9c4e7f1 Add georne to contributors 2021-02-24 20:35:44 +00:00
Nick Craig-Wood
c40bafb72c Add tYYGH to contributors 2021-02-24 20:35:44 +00:00
Nick Craig-Wood
eac77b06ab Add Romeo Kienzler to contributors 2021-02-24 20:35:44 +00:00
Yaroslav Halchenko
0355d6daf2 CONTRIBUTING.md: recommend to push feature branch with -u + minor tuneups 2021-02-24 20:24:59 +00:00
buengese
c4b8df6903 fichier: implement copy & move 2021-02-24 21:05:41 +01:00
Ivan Andreev
0dd3ae5e0d Add Robert Thomas to contributors 2021-02-24 19:40:54 +03:00
Robert Thomas
e5aa92c922 dropbox: add polling support - fixes #2949
This implements polling support for the Dropbox backend. The Dropbox SDK dependency had to be updated due to an auth issue, which was fixed on Jan 12 2021. A secondary internal Dropbox service was created to handle unauthorized SDK requests, as is necessary when using the ListFolderLongpoll function/endpoint. The config variable was renamed to cfg to avoid potential conflicts with the imported config package.
2021-02-24 09:33:31 +00:00
Ivan Andreev
f6265fbeff Add pvalls to contributors 2021-02-24 03:35:24 +03:00
Ivan Andreev
1397b85214 Add Georg Neugschwandtner to contributors 2021-02-24 03:28:15 +03:00
Ivan Andreev
86a0dae632 Add Rauno Ots to contributors 2021-02-24 03:27:16 +03:00
Ivan Andreev
076ff96f6b webdav: check that purged directory really exists (#2921)
Sharepoint 2016 returns status 204 to the purge request
even if the directory to purge does not really exist.
This change adds an extra check to detect this condition
and returns a proper error code.
2021-02-23 23:27:30 +00:00
Ivan Andreev
985011e73b webdav: fix sharepoint-ntlm error 401 for parallel actions (#2921)
The go-ntlmssp NTLM negotiator has to try various authentication methods.
Intermediate responses from Sharepoint have status code 401, only the
final one is different. When rclone runs a large operation in parallel
goroutines according to --checkers or --transfers, one of the threads can
receive an intermediate 401 response targeted at another one and return
the 401 authentication error to the user.
This patch fixes that.
2021-02-23 23:27:30 +00:00
Ivan Andreev
9ca6bf59c6 webdav: enforce encoding to fix errors with sharepoint-ntlm (#2921)
On-premises Sharepoint returns HTTP errors 400 or 500 in
reply to attempts to use file names with special characters
like hash, percent, tilde, invalid UTF-7 and so on.
This patch activates transparent encoding of such characters.
2021-02-23 23:27:30 +00:00
georne
e5d5ae9ab7 webdav: disable HTTP/2 for NTLM authentication (#2921)
As per Microsoft documentation, Windows authentication
(NTLM/Kerberos/Negotiate) is not supported with HTTP/2.
This patch disables transparent HTTP/2 support when the
vendor setting is "sharepoint-ntlm". Otherwise connections
to IIS/10.0 can fail with HTTP_1_1_REQUIRED.

Co-authored-by: Georg Neugschwandtner <georg.neugschwandtner@gmx.net>
2021-02-23 23:27:30 +00:00
Ivan Andreev
ac6bb222f9 webdav: improve terminology in sharepoint-ntlm docs (#2921)
The most popular keyword for Sharepoint in-house or company
installations is "On-Premises".
"Microsoft OneDrive account" is in fact just a Microsoft account.

Co-authored-by: Georg Neugschwandtner <georg.neugschwandtner@gmx.net>
2021-02-23 23:27:30 +00:00
Alex Chen
62d5876eb4 webdav: make sharepoint-ntlm docs more consistent (#2921)
Clarify difference between Sharepoint Online and
hosted Sharepoint with NTLM authentication.
2021-02-23 23:27:30 +00:00
Rauno Ots
9808a53416 webdav: add support for sharepoint with NTLM authentication (#2921)
Add a new option "sharepoint-ntlm" for the vendor setting.
Use it when your hosted Sharepoint is not tied to the OneDrive
accounts and uses NTLM authentication.
Also add documentation and an integration test.

Fixes: #2171
2021-02-23 23:27:30 +00:00
pvalls
cc08f66dc1 docs: singular/plural duplicity for MByte{s} 2021-02-23 11:34:32 +00:00
pvalls
6b8da24eb8 docs: uppercase for MBytes
MBytes is written as Mbytes and MBytes interchangeably.
Use uppercase consistently across all docs.md
2021-02-23 11:34:32 +00:00
buengese
333faa6c68 zoho: fix custom client id's 2021-02-23 11:27:05 +00:00
Nick Craig-Wood
1b92e4636e rc: implement passing filter config with _filter parameter 2021-02-23 10:54:40 +00:00
Nick Craig-Wood
c5a299d5b1 rc: fix options/local to return the filter options 2021-02-23 10:33:03 +00:00
Nick Craig-Wood
04a8859d29 cmount: fix mount dropping on macOS by setting --daemon-timeout 10m
Previously rclone set --daemon-timeout to 15m by default. However
osxfuse seems to be ignoring that value since it is above the maximum
value of 10m. This is conjecture since the source of osxfuse is no
longer available.

Setting the value to 10m seems to resolve the problem.

See: https://forum.rclone.org/t/rclone-mount-frequently-drops-when-using-plex/22352
2021-02-21 12:56:19 +00:00
Nick Craig-Wood
4b5fe3adad delete,rmdirs: make --rmdirs obey the filters
See: https://forum.rclone.org/t/a-problem-with-rclone-delete-from-list/22143
2021-02-19 10:32:28 +00:00
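Illustrative usage (the filter and remote are placeholders); with this change the directory removal pass also obeys the filters:

    rclone delete --rmdirs --include "*.bak" remote:path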
edwardxml
7db68b72f1 docs: directory filter rules 2021-02-18 12:11:56 +01:00
edwardxml
9c667be2a1 docs: remove dead link from rc.md (#5038) 2021-02-18 01:37:17 +03:00
tYYGH
c0cf54067a vfs: --vfs-used-is-size to report used space using recursive scan (#4043)
Some backends, most notably S3, do not report the amount of bytes used.
This patch introduces a new flag that, instead of relying on the
backend, uses a recursive scan similar to `rclone size` to compute the total
used space. However, this is inefficient and should be used as a last resort.

Co-authored-by: Yves G <theYinYeti@yalis.fr>
2021-02-17 23:36:13 +03:00
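Illustrative usage on a mount (the mount point and remote are placeholders):

    rclone mount --vfs-used-is-size remote: /mnt/remote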
Romeo Kienzler
297ca23abd docs: fix typo in crypt.md (#5037) 2021-02-17 19:11:57 +03:00
Nick Craig-Wood
d809930e1d union: fix mkdir at root with remote:/
Before this fix, if you specified remote:/ then the union backend
would fail to notice the root directory existed.

This was fixed by stripping the trailing / from the root.

See: https://forum.rclone.org/t/upgraded-from-1-45-to-1-54-now-cant-create-new-directory-within-union-mount/22284/
2021-02-17 12:11:34 +00:00
Nick Craig-Wood
fdc0528bd5 Add Dmitry Chepurovskiy to contributors 2021-02-17 12:11:34 +00:00
Nick Craig-Wood
a0320d6e94 Add Vesnyx to contributors 2021-02-17 12:11:34 +00:00
Nick Craig-Wood
89bf036e15 Add K265 to contributors 2021-02-17 12:11:34 +00:00
Dmitry Chepurovskiy
1605f9e14d s3: Fix shared_credentials_file auth
The S3 backend shared_credentials_file option wasn't working, either as a
config option or as a command line option. This was because
shared_credentials_file_provider works as part of a chain provider, but if
the user hadn't specified access_token and access_key we set the
credentials field to nil, even though it may contain actual credentials
obtained from the ChainProvider.

The AWS_SHARED_CREDENTIALS_FILE env variable worked, as far as I
understood, because the aws_sdk code handles it as one of the default auth
options when there are no configured credentials.
2021-02-17 12:04:26 +00:00
albertony
cd6fd4be4b mount: docs: document the new FileSecurity option in WinFsp 2021 (#5002) 2021-02-17 03:44:28 +03:00
Vesnyx
4ea7c7aa47 crypt: add option to not encrypt data #1077 (#2981)
Co-authored-by: Ivan Andreev <ivandeex@gmail.com>
2021-02-17 03:40:37 +03:00
Ivan Andreev
5834020316 docker.bash: work correctly with multi-ip containers (#5028)
Currently if the container under test has multiple IP addresses,
the `docker_ip` function from `docker.sh` will return gibberish.
This patch makes it return the first address found.
Additionally, I apply shellcheck on `docker.sh`.
2021-02-17 03:38:02 +03:00
Ivan Andreev
f5066a09cd build: replace go 1.16-rc1 by 1.16.x (#5036) 2021-02-17 03:37:30 +03:00
edwardxml
863bd93c30 docs: fix broken link in sftp page
A spare line break had crept in, breaking the link format.
2021-02-16 23:24:11 +01:00
edwardxml
d96af3b005 docs: convert bogus example link to code
Convert the bogus example Plex URL from a URL that is automatically linked to code format that hopefully isn't.
2021-02-16 23:20:49 +01:00
edwardxml
3280ceee3b docs: badly formed link
Fix for a badly formed link created in an earlier rewrite
2021-02-16 23:16:03 +01:00
K265
930bca2478 feat: add multiple paths support to --compare-dest and --copy-dest flag 2021-02-16 18:17:04 +00:00
edwardxml
23b12c39bd Docs: Zoho WorkDrive authorisation reword
Mainly the reference to firewalls didn't make sense. Tried to make it more precise. Kept the "z" spelling in "authorize".
2021-02-16 18:07:55 +00:00
Nick Craig-Wood
9d37c208b7 vfs: document that simultaneous usage with the same cache shouldn't be used
Fixes #2227
2021-02-16 17:15:05 +00:00
Nick Craig-Wood
c81311722e ftp: close idle connections after --ftp-idle-timeout (1m by default)
This fixes a problem where ftp backends live on forever when using
the rc and use more and more connections.
2021-02-16 12:39:05 +00:00
Nick Craig-Wood
843ddd9136 ftp: implement Shutdown method 2021-02-16 12:39:05 +00:00
Nick Craig-Wood
a3fcadddc8 sftp: close idle connections after --sftp-idle-timeout (1m by default)
This fixes a problem where sftp backends live on forever when using
the rc and use more and more connections.

Fixes #4883
2021-02-16 12:39:05 +00:00
Nick Craig-Wood
a63e1f1383 Add Miron Veryanskiy to contributors 2021-02-16 12:39:05 +00:00
Nick Craig-Wood
5b84adf3b9 test: add "rclone test histogram" for file name distribution stats 2021-02-13 14:24:43 +00:00
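Presumed usage of the new command (the remote is a placeholder):

    rclone test histogram remote:path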
Nick Craig-Wood
f890965020 test: add makefiles test command (converted from script) 2021-02-13 14:24:43 +00:00
Nick Craig-Wood
f88a5542cf test: move test commands under "rclone test" and make them visible 2021-02-13 14:24:43 +00:00
Miron Veryanskiy
fd94b3a473 docs: replace #file-caching with #vfs-file-caching
The documentation had dead links pointing to #file-caching. They've been
moved to point to #vfs-file-caching.
2021-02-13 12:56:25 +00:00
Nick Craig-Wood
2aebeb6061 accounting: fix --bwlimit when up or down is off - fixes #5019
Before this change the core bandwidth limit was limited to the upload or
download value if the other value was off.

This fix only applies a core bandwidth limit when both values are set.
2021-02-13 12:45:12 +00:00
Nick Craig-Wood
e779cacc82 fshttp: fix bandwidth limiting after bad merge
Reapply missing bwlimiting which was inserted in

0a932dc1f2 Add --bwlimit for upload and download #1873

But accidentally removed when merging

edfe183ba2 fshttp: add DSCP support with --dscp for QoS with differentiated services
2021-02-13 12:45:12 +00:00
Nick Craig-Wood
37e630178e dropbox: add scopes to oauth request and optionally "members.read"
This change adds the scopes rclone wants during the oauth request.
Previously rclone left these blank to get a default set.

This allows rclone to add the "members.read" scope which is necessary
for "impersonate" to work, but only when it is in use as it require
authorisation from a Team Admin.

See: https://forum.rclone.org/t/dropbox-no-members-read/22223/3
2021-02-13 12:35:24 +00:00
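For illustration, impersonation is requested via the existing dropbox impersonate option; the flag form and value below are assumptions, not from the commit:

    rclone lsd --dropbox-impersonate user@example.com dropbox: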
Nick Craig-Wood
2cdc071b85 Add Ankur Gupta to contributors 2021-02-13 12:35:24 +00:00
Nick Craig-Wood
496e32fd8a Add cynthia kwok to contributors 2021-02-13 12:35:24 +00:00
Nick Craig-Wood
bf3ba50a0f Add David Sze to contributors 2021-02-13 12:35:24 +00:00
Nick Craig-Wood
22c226b152 Add Alexey Tabakman to contributors 2021-02-13 12:35:23 +00:00
Klaus Post
5ca7f1fe87 encoder/filename: Wrap scsu package 2021-02-12 11:39:39 +00:00
Klaus Post
f14220ef1e encoder/filename: Add 2 more tables and tests. 2021-02-12 11:39:39 +00:00
Klaus Post
424aaac2e1 encoder/filename: Add SCSU as tables
Instead of only adding SCSU, add it as an existing table.

Allow direct SCSU and add a, perhaps, reasonable table as well.

Add byte interfaces that don't base64 encode the URL as well, with `EncodeBytes` and `DecodeBytes`.

Fuzz tested and decode tests added.
2021-02-12 11:39:39 +00:00
Ankur Gupta
47b69d6300 operations: Made copy and sync operations obey a RetryAfterError 2021-02-11 17:47:34 +00:00
cynthia kwok
c0c2505977 build: add an rclone user to the Docker image but don't use it by default
partially addresses #4831

Co-authored-by: cynful <cynful@users.noreply.github.com>
2021-02-11 17:45:44 +00:00
David Sze
2d7afe8690 local: Add flag --no-preallocate - #3207
Some virtual filesystems (such as Google Drive File Stream) may
incorrectly set the actual file size equal to the preallocated space,
causing checksum and file size checks to fail.

This flag can be used to disable preallocation for local backends of
this type.
2021-02-11 17:25:28 +00:00
Nick Craig-Wood
92187a3b33 cmount: fix unicode issues with accented characters on macOS
This adds

    -o modules=iconv,from_code=UTF-8,to_code=UTF-8-MAC

to the mount options if it isn't already present, which fixes mounting
issues on macOS with accented characters in the Finder.
2021-02-11 15:13:19 +00:00
Nick Craig-Wood
53aa4b87fd b2: fix failed to create file system with application key limited to a prefix
Before this change, if an application key limited to a prefix was in
use, with trailing `/` marking the folders then rclone would HEAD the
path without a trailing `/` to work out if it was a file or a folder.
This returned a permission denied error, which rclone returned to the
user.

    Failed to create file system for "b2:bucket/path/":
        failed to HEAD for download: Unknown 401  (401 unknown)

This change assumes any errors on HEAD will make rclone assume the
object does not exist and the path is referring to a directory.

See: https://forum.rclone.org/t/b2-error-on-application-key-limited-to-a-prefix/22159/
2021-02-11 15:13:19 +00:00
Max Sum
edfe183ba2 fshttp: add DSCP support with --dscp for QoS with differentiated services 2021-02-10 18:29:18 +00:00
edwardxml
dfc63eb8f1 docs: update filtering docs
Typos from prior major rewrite
2021-02-10 18:21:41 +00:00
edwardxml
f21f2529a3 docs: fix nesting of brackets and backticks in ftp docs 2021-02-10 18:18:01 +00:00
edwardxml
1efb543ad8 docs: add a Windows example to the filtering docs
Add an example pinched from rclone forum

https://forum.rclone.org/t/need-help-to-understand-filtering-commands/22196

Credit to @asdffdsa
2021-02-10 18:09:48 +00:00
edwardxml
92e36fcfc5 docs: update filtering time formats
Correction per @x0b from 
https://forum.rclone.org/t/max-age-min-age-rfc3339-format-rejected/22204
2021-02-10 18:08:25 +00:00
Alexey Tabakman
bf8542c670 docs: update information about Disk-O: desktop client #4988 (#4988) 2021-02-09 21:23:45 +03:00
albertony
cc5a1e90d8 mount: improved handling of relative paths on windows 2021-02-08 20:55:23 +00:00
albertony
b39fa54ab2 mount: allow mounting to root directory on windows 2021-02-08 20:55:23 +00:00
Nick Craig-Wood
f1147fe1dd rc: sync,copy,move: document createEmptySrcDirs parameter - fixes #4489 2021-02-08 12:25:40 +00:00
Nick Craig-Wood
8897377a54 filter: Make --exclude "dir/" equivalent to --exclude "dir/**"
Rclone uses directory exclusions to cut down the listing it has to do,
so before this fix `--exclude dir/` would make sure nothing in `dir/`
was scanned, **except** if --fast-list was used, in which case only
the directory was excluded and everything within it was included.

This is rather unexpected, so this patch makes `--exclude dir/` be
equivalent to `--exclude dir/**`, meaning that excluding a directory
excludes it and its contents.

We can't do the same for --include without changing the semantics of
filtering slightly.

Fixes #3375
2021-02-07 17:29:16 +00:00
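In other words, these two invocations now behave identically whether or not --fast-list is in use (paths are placeholders):

    rclone copy --exclude "dir/" source:path dest:path
    rclone copy --exclude "dir/**" source:path dest:path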
Nick Craig-Wood
f50b4e51ed build: make a macOS ARM64 build to support Apple Silicon - Fixes #4786
- add `-macos-sdk` and `-macos-arch` to adjust CGO_CFLAGS and CGO_LDFLAGS
    - select macOS SDK 11.1 and arch arm64 when building
- add -cgo-cflags and -cgo-ldflags to set CGO_CFLAGS and CGO_LDFLAGS
    - add back /usr/local to pickup fuse headers and library
- add `-env` to cross-compile
- add macOS/arm64 to download matrix
2021-02-07 14:59:53 +00:00
Nick Craig-Wood
f135acbdfb build: install macfuse 4.x instead of osxfuse 3.x
osxfuse has been renamed to macfuse, but brew is still picking up the old
version under the old name.

This corrects the name to macfuse which brings in v4.x which should
support Apple Silicon.
2021-02-07 14:59:53 +00:00
Nick Craig-Wood
cdd99a6f39 fs/accounting: fix occasionally failing test on macOS 2021-02-07 14:59:53 +00:00
Nick Craig-Wood
6ecb5794bc rc: add _config parameter to set global config for just this rc call 2021-02-07 14:56:41 +00:00
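A sketch of how the parameter might be used over the rc (the option name and value are illustrative, not taken from the commit):

    rclone rc --loopback options/local _config='{"CheckSum": true}'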
Nick Craig-Wood
9a21aff4ed rc: add options/local to see the options configured in the context 2021-02-07 14:56:41 +00:00
Nick Craig-Wood
8574a7bd67 rc: factor async/sync job handing into rc/jobs from rc/rcserver
This fixes async jobs with `rclone rc --loopback` which isn't very
important but sets the stage for _config setting.
2021-02-07 14:56:41 +00:00
Nick Craig-Wood
a0fc10e41a rc: factor out duplicate code in job creation 2021-02-07 14:56:41 +00:00
Nick Craig-Wood
ae3963e4b4 fs: Add string alternatives for setting options over the rc
Before this change options were read and set in native format. This
means for example nanoseconds for durations or an integer for
enumerated types, which isn't very convenient for humans.

This change enables these types to be set with a string with the
syntax as used in the command line instead, so `"10s"` rather than
`10000000000` or `"DEBUG"` rather than `8` for log level.
2021-02-07 14:56:41 +00:00
Nick Craig-Wood
e32f08f37b drive: refer to Shared Drives instead of Team Drives 2021-02-07 12:30:21 +00:00
Nick Craig-Wood
fea4b753b2 Add Alex JOST to contributors 2021-02-07 12:30:21 +00:00
Nick Craig-Wood
b2b5b7598c build: ensure go1.16 go gets the correct versions of tools
go1.16 uses GO111MODULE=on by default which meant we were picking up
v1 of nfpm instead of the intended v2.
2021-02-04 17:34:30 +00:00
Nick Craig-Wood
5f943aabc8 build: Use GO386=softfloat instead of deprecated GO386=387 for 386 builds 2021-02-04 16:37:23 +00:00
Nick Craig-Wood
84c785bc36 build: use go1.16-rc1 as default compiler 2021-02-04 16:08:51 +00:00
Nick Craig-Wood
993146375e build: update all dependencies 2021-02-03 21:34:38 +00:00
Nick Craig-Wood
bbe791a886 swift: update github.com/ncw/swift to v2.0.0
The update to v2 of the swift library introduces a context parameter
to each function. This required a lot of mostly mechanical changes
adding context parameters.

See: https://github.com/ncw/swift/issues/159
See: https://github.com/ncw/swift/issues/161
2021-02-03 20:23:37 +00:00
Nick Craig-Wood
1545ace8f2 build: remove go1.13 build constraints 2021-02-03 17:46:08 +00:00
Nick Craig-Wood
bcac8fdc83 Use http.NewRequestWithContext where possible after go1.13 minimum version 2021-02-03 17:41:27 +00:00
Nick Craig-Wood
15e1a6bee7 build: raise minimum go version to go1.13 2021-02-03 17:41:06 +00:00
Nick Craig-Wood
9710ded60f b2: automatically raise upload cutoff to avoid spurious error
Before this change, if --b2-chunk-size was raised above 200M then this
error would be produced:

    b2: upload cutoff: 200M is less than chunk size 1G

This change automatically raises --b2-upload-cutoff to be the value
of --b2-chunk-size if it is below it, which stops this error being
generated.

Fixes #4475
2021-02-03 16:29:32 +00:00
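So, illustratively, a command like this no longer produces the error; the cutoff is raised to match the chunk size (values and bucket are placeholders):

    rclone copy --b2-chunk-size 1G bigfile b2:bucket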
Nick Craig-Wood
5f3672102c s3: add --s3-no-head to reducing costs docs - Fixes #2163 2021-02-03 16:18:29 +00:00
Nick Craig-Wood
644cc69108 build: update GitHub release tool to use gh and put a link to changelog
Fixes #4994
2021-02-03 14:44:40 +00:00
Nick Craig-Wood
1415666074 lsjson: fix unterminated JSON in the presence of errors
See: https://forum.rclone.org/t/rclone-lsjson-invalid-json-produced-no-at-the-end/22046
2021-02-02 17:46:56 +00:00
Alex JOST
bae550c71e docs: Changelog: Correct link to digitalis.io 2021-02-02 17:26:41 +00:00
Nick Craig-Wood
beff081abb Start v1.55.0-DEV development 2021-02-02 16:30:58 +00:00
Nick Craig-Wood
7f5ee5d81f Version v1.54.0 2021-02-02 14:17:09 +00:00
Nick Craig-Wood
8b41dfa50a s3: add --s3-no-head parameter to minimise transactions on upload
See: https://forum.rclone.org/t/prevent-head-on-amazon-s3-family/21935
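
An illustrative invocation (remote and paths are placeholders):

    rclone copy --s3-no-head /local/dir s3remote:bucket/path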
2021-02-02 10:07:48 +00:00
Nick Craig-Wood
0d8bcc08da Revert "fs/accounting: make edge bandwidth limiters have smaller bursts to make smoother" #4987
This reverts commit 463a18aa07.

In practice this caused Windows to have a max bandwidth limit of 8
MiB/s. This is because of the limited resolution (1ms) of the Windows
timer.

This needs fixing in a different way.

See: https://forum.rclone.org/t/bug-report-for-v1-54-0-beta-5133/22015
2021-02-01 22:23:50 +00:00
buengese
d3b7f14b66 bump sgzip to v1.1.0 - fixes #4970 2021-02-01 18:34:42 +00:00
Nick Craig-Wood
f66928a846 drive: fix copyid command with a bare filename: can't use empty string as a path
Before this change, running

    rclone backend copyid drive: ID file.txt

failed with the error

    command "copyid" failed: failed copying "ID" "file.txt": can't use empty string as a path

This fixes the problem.
2021-02-01 15:17:25 +00:00
Nick Craig-Wood
3b1122c888 azureblob: require go1.14+ to compile due to SDK changes 2021-01-30 18:01:12 +00:00
Nick Craig-Wood
463a18aa07 fs/accounting: make edge bandwidth limiters have smaller bursts to make smoother
This change decreases the edge limiter burst size which dramatically
increases the smoothness of the bandwidth limiting.

The core bandwidth limiter remains with a large burst so it isn't
affected by double rate limiting on the edge limiters.

See: #4395
See: https://forum.rclone.org/t/bwlimit-is-not-really-smooth/20947
2021-01-30 17:39:30 +00:00
Nick Craig-Wood
0a932dc1f2 Add --bwlimit for upload and download #1873 2021-01-30 17:39:30 +00:00
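A sketch of the syntax this --bwlimit change enables, assuming the
upload limit comes before the colon and the download limit after
(remote and values are placeholders):

    rclone copy --bwlimit 10M:100k /local/dir remote:path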
Nick Craig-Wood
8856e0e559 fshttp: Obey bwlimit in http Transport - fixes #4395
This change uses the bwlimit code to apply limits to the receive and
transmit data functions in the HTTP Transport.

This means that all HTTP transactions will have limiting applied -
this includes listings for example.

For HTTP based transports this makes the limiting in Accounting
redundant and possibly counterproductive.
2021-01-30 17:39:30 +00:00
Nick Craig-Wood
3b6df71838 accounting: refactor bwlimit code to allow for multiple slots 2021-01-30 17:39:30 +00:00
Nick Craig-Wood
31de631b22 vendor: run go mod tidy 2021-01-30 17:28:27 +00:00
Nick Craig-Wood
189ef5f257 azureblob: fix memory usage by upgrading the SDK and implementing a TransferManager
In the Azure SDK there was a bug which caused excessive memory use
when doing repeated transfers:

https://github.com/Azure/azure-storage-blob-go/issues/233

This patch updates the SDK to v0.13.0 which allowed us to implement a
custom TransferManager which integrates with rclone's memory pool.

This fixes the excessive memory consumption.

See: https://forum.rclone.org/t/ask-for-settings-recommendation-for-azureblob/21505/
2021-01-30 17:26:59 +00:00
Nick Craig-Wood
2f67681e3b cmount: don't attempt to unmount if fs has been destroyed already #4957 2021-01-30 17:19:28 +00:00
Nick Craig-Wood
41127965b0 fstest: add Onedrive Business and Onedrive China to the integration tests 2021-01-30 16:32:32 +00:00
Nick Craig-Wood
8171671d82 Add Bob Pusateri to contributors 2021-01-30 16:24:54 +00:00
Nick Craig-Wood
75617c0c3b Add Pau Rodriguez-Estivill to contributors 2021-01-30 16:24:54 +00:00
Nick Craig-Wood
8b9d23916b Add Nicolas Rueff to contributors 2021-01-30 16:24:54 +00:00
Bob Pusateri
e43b79e33d azureblob: add examples for access tier
Azure Blob access tier values are case-sensitive, though this is
not indicated anywhere in the documentation. Adding examples
with proper casing.
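
An illustrative example with the expected casing (remote and container
names are placeholders):

    rclone copy --azureblob-access-tier Cool /local/dir azremote:container/path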
2021-01-30 16:21:34 +00:00
albertony
459cc70a50 vfs: fix invalid cache path on windows when using :backend: as remote
The initial ':' is included in the ad-hoc remote name, but is an illegal character
in Windows paths. It is replaced with '^', which is legal in filesystems but illegal
in regular remote names, so name conflicts are avoided.

Fixes #4544
2021-01-30 16:18:15 +00:00
Ivan Andreev
20578f3f89 Add Sakuragawa Misty to contributors 2021-01-29 23:05:49 +03:00
NyaMisty
15da53696e onedrive: add support for china region operated by 21vianet #4963 (#4963)
fixes #3804
obsoletes #3973
obsoletes #4072
2021-01-29 23:04:21 +03:00
Ivan Andreev
2bddba118e fstest: apply shellcheck on rclone-serve.bash (#4975) 2021-01-29 19:07:17 +03:00
Ivan Andreev
c7e5976e11 build: replace go 1.16-beta1 by 1.16-rc1 (#4974) 2021-01-29 19:05:46 +03:00
Pau Rodriguez-Estivill
f0bf9cfda1 drive: add xdg office icons to xdg desktop files 2021-01-28 17:12:48 +00:00
Nguyễn Hữu Luân
671dd047f7 swift: ensure partially uploaded large files are uploaded unless --swift-leave-parts-on-error
This makes sure that partially uploaded large files are removed
unless the `--swift-leave-parts-on-error` flag is supplied.

- refactor swift.go
- add unit test for swift with chunk
- add unit test for large object with fail case
- add "-" to white list char during encode.
2021-01-28 17:09:41 +00:00
negative0
6272ca74bc plugins: Move plugins cache path initialization to initPluginsOrError.
Fixes #4951.
2021-01-28 16:58:23 +00:00
Nicolas Rueff
f5af761466 gphotos: new flag --gphotos-include-archived - fixes #4728
New flag --gphotos-include-archived to download and view archived media when needed.
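
A sketch of how the flag might be used (remote and album names are
placeholders):

    rclone copy --gphotos-include-archived gphotos:album/MyAlbum /local/dir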
2021-01-28 16:51:31 +00:00
Nick Craig-Wood
06f1c0c61c move: fix data loss when moving the same object
This checks to see if the IDs of the source and destination objects
are the same before deleting one of them and potentially causing data loss.

See: https://forum.rclone.org/t/files-deleted-and-not-moved/21830
2021-01-28 16:14:16 +00:00
Nick Craig-Wood
e6a9f005d6 sftp: implement --sftp-use-fstat
See: https://forum.rclone.org/t/sftp-fails-to-sync-to-local-failed-to-copy-file-does-not-exist/21759
2021-01-28 16:07:26 +00:00
Nick Craig-Wood
8f6f4b053c obscure: make rclone obscure - ignore newline at end of line
See: https://forum.rclone.org/t/authentification-issues-with-webdav-server/21891
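
For example, the trailing newline added by echo is now ignored when
piping a password in:

    echo "secretpassword" | rclone obscure -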
2021-01-28 15:54:41 +00:00
Nick Craig-Wood
fe15a2eeeb Add Riccardo Iaconelli to contributors 2021-01-28 15:54:41 +00:00
Nick Craig-Wood
019667170f Add Zach Kipp to contributors 2021-01-28 15:54:41 +00:00
albertony
7a496752f3 fs: add support for flag --no-console on windows to hide the console window 2021-01-27 18:44:35 +00:00
Yury Stankevich
b569dc11a0 hdfs: support kerberos authentication #42 2021-01-27 18:16:58 +00:00
Riccardo Iaconelli
df4e6079f1 local: new flag --local-zero-size-links to fix sync on some virtual filesystems
Assume the Stat size of links is zero (and read them instead)

On some virtual filesystems (such as LucidLink), reading a link size via a
Stat call always returns 0.
However, on unix it reads as the length of the text in the link. This may
cause errors like this when syncing:

    Failed to copy: corrupted on transfer: sizes differ 0 vs 13

Setting this flag causes rclone to read the link and use that as the size of
the link instead of 0 which in most cases fixes the problem.

Fixes #4950

Signed-off-by: Riccardo Iaconelli <riccardo@kde.org>
2021-01-27 18:13:16 +00:00
Zach Kipp
6156f90601 Fix test failure in different local time zones
TestParseDuration relied on an elapsed time calculation which
would vary based on the system local time. Fix the test by not relying
on the system time location. Also make the test more deterministic
by injecting time in tests rather than using system time.

Fixes #4529.
2021-01-27 15:05:35 +00:00
Louis Koo
cdaea62932 s3: fix copy multipart with v2 auth failing with 'SignatureDoesNotMatch'
Signed-off-by: zhuc <zhucan.k8s@gmail.com>
2021-01-27 14:43:02 +00:00
Nick Craig-Wood
78afe01d15 filefabric: fix finding directories in a case insensitive way #4830 2021-01-27 14:28:17 +00:00
Nick Craig-Wood
4eac88babf premiumizeme: fix finding directories in a case insensitive way #4830 2021-01-27 14:28:17 +00:00
Nick Craig-Wood
b4217fabd3 opendrive: fix finding directories in a case insensitive way #4830 2021-01-27 14:28:17 +00:00
Nick Craig-Wood
92b9dabf3c fstests: only test with ASCII uppercase for case insensitive tests
Upper/lower case for multilingual is provider dependent so we don't go
there in the tests.
2021-01-27 14:28:17 +00:00
Antoine GIRARD
4323ff8a63 docs: fix broken link in serve sftp/ftp #4968 2021-01-27 13:41:32 +03:00
Nick Craig-Wood
3e188495f5 sugarsync: fix finding directories in a case insensitive way #4830 2021-01-26 14:48:33 +00:00
Nick Craig-Wood
acb9e17eb3 box: fix finding directories in a case insensitive way #4830 2021-01-26 14:48:33 +00:00
Nick Craig-Wood
c8ab4f1d02 fstests: add a test for finding a directory in a case insensitive way #4830 2021-01-26 14:48:06 +00:00
Nick Craig-Wood
e776a1b122 fstests: don't run encoding tests on -short 2021-01-26 14:46:23 +00:00
Nick Craig-Wood
c57af26de9 docs: add note about --check-first being useful with --order-by 2021-01-26 12:12:26 +00:00
Nick Craig-Wood
7d89912666 Add lluuaapp to contributors 2021-01-26 12:12:26 +00:00
Martin Michlmayr
cd075f1703 docs: fix markup of arguments #4276
Command line arguments have to be marked as code.
2021-01-25 22:40:46 +03:00
lluuaapp
35b2ca642c b2: fixed possible crash when accessing Backblaze b2 remote 2021-01-25 17:48:40 +00:00
edwardxml
127f48e8ad docs: Rewrite rclone filtering documentation
This is an attempt at rewriting the rclone filter documentation page.

I have drawn largely from what appears to be the strong original
structure of the page; existing text, and forum comments.

The term flag is used throughout rather than differentiating `--`
options with more complex arguments. That diverges from some standard
practice but is consistent with messages in the rclone binary and `go`
documentation.

The term directory not folder is used throughout.

I tried referring to objects more broadly rather than files and it
just did not seem to work. Apart from a note at the top the
explanations refer entirely to paths, directories and files. My
justification is that bucket store users understand the concept of
files. Not all users of directory aware storage are so familiar with
objects, keys and metadata.

Many of the changes I have made involve moving issues into what seemed
to me to be more relevant parts of the original page structure. I
still find the content repetitious and overly long but that may be
inevitable when users can only be expected to read the section of the
page they think most relevant.

I have eliminated the rsync section from the original structure. It is
hard enough explaining how rclone filters work without also setting
out how they do not. Comment on sync is instead relegated to a
paragraph in the directory filter section.

The structure of the page is intended to work with a hugo toc card
from html Header2 to Header3.

My original intention was to establish a separate examples section. I
have instead retained examples in each section, added to them and
tried to make clear what is documentation and what example.

The changes draw on Github and Forum issues too numerous to mention.
for instance:

https://forum.rclone.org/t/certain-exclusion-flags-seem-to-be-ignored/20049/2

I am **especially** grateful for
https://forum.rclone.org/t/object-key-remote-directory-filter-clarification/20386/2
for making sense of directory filters for me.

@ncw has a fun (and useful) online filter app at
https://filterdemo.rclone.org/ I have not referred to it at this stage
though I particularly like the fact that it is tied to the same
codebase as an rclone version.

I have added cautions about mixing the `--filter...` flags with
`--exclude...` or `--include...`. The same issues seem to arise as
already recognised between the latter two.

The formal summary of glob syntax introduced at the top of the page is
shamelessly stolen from https://godoc.org/github.com/gobwas/glob

I have tried not to alter too many header descriptions and thereby
break existing links to them.

The reference to 'lass' in the example has been retained to confuse
all those not of Scottish or Yorkshire heritage.

Some of my activity was to remove ambiguity and I anticipate
suggestions to roll that back where it has become overly complex.

I tried particularly to bring together and make clear material about
directory filters. It was previously scattered throughout the page and
I couldn't understand it. I am particularly grateful for the
explanations I received about directory filters though any remaining
errors are entirely my own.

Removed erroneous references to non existent `--filter...` flags.

In some ways the best person to write this page would be one with no
knowledge whatsoever of how rclone filters work. The further I got
into it the better qualified I found myself to be.

E&OE
2021-01-22 16:59:22 +00:00
Nick Craig-Wood
3e986cdf54 dedup: add warning if used on a remote which can't have duplicate names 2021-01-22 15:25:35 +00:00
Nick Craig-Wood
b80d498304 vfs: fix file leaks with --vfs-cache-mode full and --buffer-size 0
Before this change using --vfs-cache-mode full and --buffer-size 0
together caused the vfs downloader to open more and more downloaders.

This is fixed by introducing a minimum size of 1M for the window to
look for an existing downloader.

Fixes #4892
2021-01-21 18:35:04 +00:00
Nick Craig-Wood
757e696a6b Add Sơn Trần-Nguyễn to contributors 2021-01-21 18:35:04 +00:00
Nick Craig-Wood
e3979131f2 Add CokeMine to contributors 2021-01-21 18:35:04 +00:00
Nick Craig-Wood
a774f6bfdb qingstor: fix rclone cleanup
This patch changes to using the default page limit for listing
unfinished multipart uploads rather than 1000. 1000 is the maximum
specified in the docs, but setting anything larger than 200 gives an
error.
2021-01-21 17:35:31 +00:00
Nick Craig-Wood
d7cd35e2ca qingstor: fix error propagation in CleanUp
Before this change errors cleaning multiple buckets were passing silently
2021-01-21 17:35:31 +00:00
Aleksandar Jankovic
38e70f1797 rc/jobs: add listener for finished jobs
Add a jobs.OnFinish method to register a listener that will trigger when a job
is finished.

Includes fix for stopping listeners.
2021-01-21 13:43:41 +00:00
albertony
3b49440c25 crypt: docs: extended description 2021-01-21 13:40:12 +00:00
Sơn Trần-Nguyễn
7c0287b824 docs: fix references to upstreams in union example 2021-01-21 13:33:16 +00:00
edwardxml
f97c2c85bd docs: update ftp with note about active mode
See: https://forum.rclone.org/t/copy-or-sync-from-ftp-server-results-in-source-directory-not-found/20636/4
2021-01-21 13:21:31 +00:00
Nick Craig-Wood
14c0d8a93e cryptdecode: fix formatting 2021-01-21 10:39:51 +00:00
Evan Harris
768ad4de2a docs: Updated crypt/cryptdecode docs with additional info 2021-01-21 09:55:20 +00:00
CokeMine
817987dfc4 docs: Update Onedrive max file size limit 2021-01-21 09:50:48 +00:00
buengese
eb090d3544 compress: check type assertion in SetTier - fixes #4941 2021-01-20 22:59:14 +00:00
Nick Craig-Wood
4daf8b7083 Changelog updates from Version v1.53.4 2021-01-20 22:34:04 +00:00
Nick Craig-Wood
0be69018b8 drive: log that emptying the trash can take some time - fixes #4915 2021-01-19 18:09:36 +00:00
Nick Craig-Wood
9b9ab5f3e8 gcs: Fix Entry doesn't belong in directory "" (same as directory) - ignoring
This change allows directory markers to be non-zero in size.

See: https://forum.rclone.org/t/public-gcs-bucket-and-entry-doesnt-belong-in-directory-same-as-directory/21753/
2021-01-19 16:50:37 +00:00
Nick Craig-Wood
072464cbdb gcs: fix anonymous client to use rclone's HTTP client 2021-01-19 16:50:37 +00:00
Nick Craig-Wood
b0491dec88 Add new email address for buengese to contributors 2021-01-19 16:50:28 +00:00
Nick Craig-Wood
ccfefedb47 Add Patrik Nordlén to contributors 2021-01-19 16:48:37 +00:00
Nick Craig-Wood
2fffcf9e7f Add Janne Johansson to contributors 2021-01-19 16:48:37 +00:00
buengese
a39a5d261c docs/compress: put a warning at the top 2021-01-18 21:42:58 +01:00
buengese
45b57822d5 compress: improve testing 2021-01-18 21:42:58 +01:00
buengese
d8984cd37f compress: correctly handle wrapping of remotes without PutStream
Also fixes ObjectInfo wrapping for Hash and Size - fixes #4928
2021-01-18 21:42:58 +01:00
Patrik Nordlén
80e63af470 jottacloud: Add support for Telia Cloud (#4930) 2021-01-17 02:38:57 +01:00
Janne Johansson
db2c38b21b docs: fix small spelling nit, file -> find in FAQ 2021-01-13 17:36:57 +00:00
Nick Craig-Wood
cef51d58ac jottacloud: fix token refresh failed: is not a regular file error
Before this change the jottacloud token renewer would run and give the
error:

    Token refresh failed: is not a regular file

This is because the refresh runs on the root and it isn't a file.

This was fixed by ignoring that specific error.

See: https://forum.rclone.org/t/jottacloud-crypt-3-gb-copy-runs-for-a-week-without-completing/21173
2021-01-12 17:09:44 +00:00
Nick Craig-Wood
e0b5a13a13 jottacloud: fix token renewer to fix long uploads
See: https://forum.rclone.org/t/jottacloud-crypt-3-gb-copy-runs-for-a-week-without-completing/21173
2021-01-11 16:44:11 +00:00
Nick Craig-Wood
de21356154 Add Denis Neuling to contributors 2021-01-11 16:44:11 +00:00
Ivan Andreev
35a4de2030 chunker: fix case-insensitive NewObject, test metadata detection #4902
- fix test case FsNewObjectCaseInsensitive (PR #4830)
- continue PR #4917, add comments in metadata detection code
- add warning about metadata detection in user documentation
- change metadata size limits, make room for future development
- hide critical chunker parameters from command line
2021-01-10 22:29:24 +03:00
Ivan Andreev
847625822f chunker: improve detection of incompatible metadata #4917
Before this patch chunker required that there be at least one
data chunk to start checking for a composite object.

Now if chunker finds at least one potential temporary or control
chunk, it marks the found files as a suspected composite object.
When rclone later tries a concrete operation on the object,
it performs the postponed metadata read and decides: is this a native
composite object, incompatible metadata or just garbage.
2021-01-10 21:55:15 +03:00
Nick Craig-Wood
3877df4e62 s3: update help for --s3-no-check-bucket #4913 2021-01-10 17:54:19 +00:00
Denis Neuling
ec73d2fb9a azure-blob-storage: utilize streaming capabilities - #1614 2021-01-10 17:02:42 +00:00
Nick Craig-Wood
a7689d7023 rcserver: fix 500 error when marshalling errors from core/command
Before this change attempting to return an error from core/command
failed with a 500 error and a message about unmarshalable types.

This is because it was attempting to marshal the input parameters,
which get _response added to them, and that contains an unmarshalable
field.

This was fixed by using the original parameters in the error response
rather than the one modified during the error handling.

This also adds end to end tests for the streaming facilities as used
in core/command.
2021-01-10 16:34:46 +00:00
Nick Craig-Wood
847a44e7ad fs/rc: add Copy method to rc.Params 2021-01-10 16:34:46 +00:00
Nick Craig-Wood
b3710c962e rc: fix core/command giving 500 internal error - fixes #4914
Before this change calling core/command gave the error

    error: response object is required expecting *http.ResponseWriter value for key "_response" (was *http.response)

This was because the http.ResponseWriter is an interface not an object.

Removing the `*` fixes the problem.

This also indicates that this bit of code wasn't properly tested.
2021-01-10 16:34:46 +00:00
Nick Craig-Wood
35ccfe1721 Add kice to contributors 2021-01-10 16:34:46 +00:00
kice
ef2bfb9718 onedrive: Support addressing site by server-relative URL (#4761) 2021-01-09 03:26:42 +08:00
Nick Craig-Wood
a97effa27c build: add go1.16beta1 to the build matrix 2021-01-08 12:22:37 +00:00
Nick Craig-Wood
01adee7554 build: raise minimum go version to go1.12 2021-01-08 12:17:09 +00:00
Alex Chen
78a76b0d29 onedrive: remove % and # from the set of encoded characters (#4909)
This fixes #4700, fixes #4184, fixes #2920.
2021-01-08 12:07:17 +00:00
Nick Craig-Wood
e775328523 ftp,sftp: Make --tpslimit apply - fixes #4906 2021-01-08 10:29:57 +00:00
Nick Craig-Wood
50344e7792 accounting: factor --tpslimit code into accounting from fshttp 2021-01-08 10:29:57 +00:00
Nick Craig-Wood
d58fdb10db onedrive: enhance link creation with expiry, scope, type and password
This change makes the --expire flag in `rclone link` work.

It also adds the new flags

    --onedrive-link-type
    --onedrive-link-scope
    --onedrive-link-password

See: https://forum.rclone.org/t/create-share-link-within-the-organization-only/21498
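
An illustrative combination of the new flags (remote, path and flag
values are placeholders):

    rclone link --expire 1d --onedrive-link-scope organization --onedrive-link-type view remote:path/to/file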
2021-01-08 09:22:50 +00:00
Nick Craig-Wood
feaacfd226 hdfs: correct username parameter in integration tests 2021-01-08 09:05:25 +00:00
Nick Craig-Wood
e3c238ac95 build: add -buildmode to cross-compile.go
This builds on

768e4c4735 build: Temporary fix for Windows build errors

But passes the -buildmode flag down to the cross-compile.go command
too.
2021-01-08 08:58:50 +00:00
Nick Craig-Wood
752997c5e8 Add Yury Stankevich to contributors 2021-01-08 08:58:50 +00:00
Yury Stankevich
71edc75ca6 HDFS (Hadoop Distributed File System) implementation - #42
This includes an HDFS docker image to use with the integration tests.

Co-authored-by: Ivan Andreev <ivandeex@gmail.com>
Co-authored-by: Nick Craig-Wood <nick@craig-wood.com>
2021-01-07 09:48:51 +00:00
Ivan Andreev
768e4c4735 build: Temporary fix for Windows build errors
Applies a temporary fix similar to https://github.com/grafana/grafana/pull/28557
before go 1.15.6+ fixes https://github.com/golang/go/issues/40795
2021-01-07 09:48:51 +00:00
Nick Craig-Wood
c553ad5158 serve sftp: fix authentication on one connection blocking others - fixes #4882
Before this change, if one connection was authenticating this would
block any others from authenticating.

This was due to ssh.NewServerConn not being called in a go routine
after the Accept call.

This is fixed by running the ssh authentication in a go routine.

Thanks to @FiloSottile for advice on how to fix this.

See: https://github.com/golang/go/issues/43521
2021-01-06 15:34:07 +00:00
Alex Chen
c66b901320 onedrive: (business only) workaround to replace existing file on server-side copy (#4904) 2021-01-06 10:50:37 +08:00
Nick Craig-Wood
dd67a3d5f5 operations: add size if known to skipped items and JSON log - fixes #4624 2021-01-05 19:44:25 +00:00
Nick Craig-Wood
e972f2c98a log: make it easier to add parameters to JSON logging 2021-01-05 19:44:25 +00:00
Nick Craig-Wood
acbcb1ea9d Add Ilyess Bachiri to contributors 2021-01-05 19:44:25 +00:00
Nick Craig-Wood
d4444375ac Add Kerry Su to contributors 2021-01-05 19:44:25 +00:00
Cnly
00bf40a8ef onedrive: fix server-side copy completely disabled on OneDrive for Business
This fixes a little problem in PR #4903, which is a fix for #4342
2021-01-06 02:57:51 +08:00
Ilyess Bachiri
5d1f947f32 docs: fix a typo in the dedupe docs 2021-01-05 15:46:57 +00:00
Alex Chen
b594cb9430 onedrive: fall back to normal copy if server-side copy unavailable (#4903)
Fixes #4342 by:
* Disabling server-side copy if either drive isn't OneDrive Personal
* Falling back to normal copy if server-side copy fails
2021-01-05 21:26:00 +08:00
Kerry Su
add7a35e55 b2: docs for download_url with private buckets
The current authentication scheme works without creating
a public download endpoint for a private bucket as in the B2 official blog.
On the contrary, if the existing authorization header gets duplicated
in the Cloudflare Workers script, one might receive 401 Unauthorized errors.
2021-01-02 11:33:48 +00:00
Nick Craig-Wood
2af7b61fc3 cmd: fix wording for no retries message
See: https://forum.rclone.org/t/immutable-should-set-retries-1-when-source-and-dest-are-different/21326/
2021-01-02 11:20:02 +00:00
Nick Craig-Wood
cb97c2b0d3 azureblob: fix crash on startup
This was introduced by accidental code deletion in

08b9ede217 azureblob: add support for managed identities
2020-12-31 18:39:09 +00:00
Nick Craig-Wood
35da38e93f operations: fix --immutable error message
This was accidentally changed in

53a1a0e3ef accounting: add reference to completed transfers
2020-12-31 18:16:51 +00:00
Nick Craig-Wood
963c0f28b9 sync: Only print "There was nothing to transfer" if no errors
See: https://forum.rclone.org/t/immutable-should-set-retries-1-when-source-and-dest-are-different/21326
2020-12-31 18:16:51 +00:00
Nick Craig-Wood
b3815dc0c2 sync: fix --immutable errors retrying many times
See: https://forum.rclone.org/t/immutable-should-set-retries-1-when-source-and-dest-are-different/21326
2020-12-31 18:16:51 +00:00
Nick Craig-Wood
8053fc4e16 fs: correct default implementation of fs.CountError 2020-12-31 18:16:51 +00:00
buengese
66c3f2f31f new backend: zoho workdrive - fixes #4533 2020-12-30 17:56:08 +00:00
Nick Craig-Wood
62c9074132 compress: fix icon in docs 2020-12-30 17:56:08 +00:00
Nick Craig-Wood
a854cb9617 webdav: add "Depth: 0" to GET requests to fix bitrix
See: https://forum.rclone.org/t/bitrix24-de-remote-support/21112/
2020-12-30 10:14:50 +00:00
Nick Craig-Wood
fbf9942fe7 Add Ingo Weiss to contributors 2020-12-30 10:14:50 +00:00
Nick Craig-Wood
f425950a52 fs: Always show stats when using --dry-run or --interactive #4624 2020-12-29 21:11:12 +00:00
Ingo Weiss
1d40bc1901 fs: Accumulate stats when using --dry-run
Fixes #4624
2020-12-29 21:11:12 +00:00
Nick Craig-Wood
ba51409c3c sftp: implement keyboard interactive authentication - fixes #4177
Some ssh servers are set up with keyboard interactive authentication
which previously the sftp backend was ignoring.
2020-12-29 19:48:09 +00:00
Nick Craig-Wood
a64fc05385 Add Benjamin Gustin to contributors 2020-12-29 19:48:09 +00:00
Benjamin Gustin
4d54454900 fs/log: don't compile systemd log integration for non unix systems 2020-12-28 23:07:12 +00:00
Nick Craig-Wood
5601652d65 ncdu: add ! (error) and . (unreadable) file flags to go with e (empty) 2020-12-28 17:25:46 +00:00
Adam Plánský
b218bc5bed ncdu: add empty folder flag into ncdu browser
Implemented empty folder flag for the ncdu browser interface. If there is
an empty folder in the list the flag e is prepended before the size. If there
is no empty folder this flag is omitted. It has the same behaviour as the
original ncdu browser. (https://dev.yorhel.nl/ncdu/man)
2020-12-28 17:25:46 +00:00
Nick Craig-Wood
65eee674b9 webdav: fix Open Range requests to fix 4shared mount
Before this change the webdav backend didn't truncate Range requests
to the size of the object. Most webdav providers are OK with this (it
is RFC compliant), but it causes 4shared to return 500 internal error.

Because Range requests are used in mounting, this meant that mounting
didn't work for 4shared.

This change truncates the Range request to the size of the object.

See: https://forum.rclone.org/t/cant-copy-use-files-on-webdav-mount-4shared-that-have-foreign-characters/21334/
2020-12-28 15:45:40 +00:00
Nick Craig-Wood
72eb74e94a Add Claudio Bantaloukas to contributors 2020-12-28 15:45:40 +00:00
Nick Craig-Wood
6bfec25165 Add Mitsuo Heijo to contributors 2020-12-28 15:45:39 +00:00
Nick Craig-Wood
1c61d51448 Add Brad Ackerman to contributors 2020-12-28 15:45:39 +00:00
Claudio Bantaloukas
f7fe1d766b cmd/ncdu: highlight read errors instead of aborting - fixes #4014
When a directory cannot be walked because of a permissions error
(or any error for that matter), ncdu mode keeps track of the error
and highlights directories that could not be read.

Previously, the error would cause ncdu to abort.

Now, directories with unreadable sub-directories are displayed in yellow and
a message warns that the total may be underestimated.

Unreadable directories themselves are displayed in red along with the error message
2020-12-28 14:08:12 +00:00
albertony
55aec19389 mount: docs: add section about windows filesystem permissions 2020-12-28 13:59:34 +00:00
albertony
9db51117dc vfs: docs: add note on os specific option 2020-12-28 13:59:34 +00:00
albertony
a9c9467210 vfs: docs: add note about use of --transfers in section about performance 2020-12-28 13:59:34 +00:00
albertony
f50e15c77c mount: docs: add note about mounted file system size 2020-12-28 13:59:34 +00:00
albertony
e3191d096f mount: just set default options without checking if customized by user, because it will be overridden anyway 2020-12-28 13:59:34 +00:00
albertony
07c40780b3 mount: also detect if uid or gid are set in same option string: -o uid=123,gid=456 2020-12-28 13:59:34 +00:00
albertony
67b82b4a28 mount: docs: update documentation according to new syntax on windows 2020-12-28 13:59:34 +00:00
albertony
5f47e1e034 mount: cleanup os specific option handling and documentation 2020-12-28 13:59:34 +00:00
albertony
e92cb9e8f8 mount: more user friendly mounting as network drive on windows
Add --network-mode option to activate mounting as a network drive without having to set a volume prefix.
Add support for automatic drive letter assignment (not specific to network drive mounting).
Allow a full network share UNC path in --volname, which will also implicitly activate network drive mounting.
Allow a full network share UNC path as the mountpoint, which will also implicitly activate network drive mounting; the specified path will be used as the volume prefix and the remote will be mounted on an automatically assigned drive letter instead.
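
Two illustrative invocations (remote name, drive letter and share path
are placeholders):

    rclone mount remote:path/to/files X: --network-mode
    rclone mount remote:path/to/files \\cloud\remote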
2020-12-28 13:59:34 +00:00
Mitsuo Heijo
9ea990d5a2 azureblob: update azure-storage-blob-go to v0.12.0
See https://github.com/Azure/azure-storage-blob-go/blob/master/ChangeLog.md#version-0120
2020-12-28 13:29:38 +00:00
Brad Ackerman
08b9ede217 azureblob: add support for managed identities
Fixes #3213
2020-12-28 13:23:35 +00:00
Nguyễn Hữu Luân
6342499c47 swift: fix deletion of parts of Static Large Object (SLO)
Before this change, deleting SLO objects could leave the parts of the object behind.
2020-12-28 13:21:11 +00:00
Nick Craig-Wood
f347a198f7 azureblob: delete archive tier blobs before update if --azureblob-archive-tier-delete
Before this change, attempting to update an archive tier blob failed
with a 409 error message:

    409 This operation is not permitted on an archived blob.

This change detects if we are overwriting a blob and either generates
the error (if `--azureblob-archive-tier-delete` is not set):

    can't update archive tier blob without --azureblob-archive-tier-delete

Or deletes the blob first before uploading it again (if
`--azureblob-archive-tier-delete` is set).

Fixes #4819
2020-12-28 12:31:24 +00:00
Nick Craig-Wood
060642ad14 flags: improve error message when reading environment vars #4888
The message now includes the flag name to help the user work out what
is happening.

    Invalid value for environment variable "RCLONE_VERSION" when setting default
    for --version: strconv.ParseBool: parsing "yes": invalid syntax
2020-12-28 12:26:23 +00:00
Nick Craig-Wood
629c0d0f65 serve http: fix serving files of unknown length
Before this change files of unknown length were always
returned as 0 length files.

This change serves them correctly, but does not support Range:
requests on them.

See: https://forum.rclone.org/t/serve-http-behavior-when-the-size-is-unknown/21319
2020-12-27 22:01:41 +00:00
Nick Craig-Wood
f7404f52e7 azureblob: fix crash when listing outside a SAS URL's root - fixes #4851
Before this change if you attempted to list a remote set up with a SAS
URL outside its container then it would crash the Azure SDK.

A check is done to make sure the root is inside the container when
starting the backend which is usually enough, but when two SAS URL
based remotes are mounted in a union, the union backend attempts to
read paths outside the named container. This was causing a mysterious
crash in the Azure SDK.

This fixes the problem by checking to see if the container in the
listing is the one in the SAS URL before listing the directory and
returning directory not found if it isn't.
2020-12-27 15:55:00 +00:00
Nick Craig-Wood
74a321e156 Add gtorelly to contributors 2020-12-27 15:55:00 +00:00
Nick Craig-Wood
fce885c0cd Add Milly to contributors 2020-12-27 15:55:00 +00:00
Nick Craig-Wood
4028a245b0 Add kelv to contributors 2020-12-27 15:55:00 +00:00
Nick Craig-Wood
c5b07a6714 Add lostheli to contributors 2020-12-27 15:55:00 +00:00
Nick Craig-Wood
b0965bf34f Add Nathan Collins to contributors 2020-12-27 15:54:33 +00:00
Nick Craig-Wood
1eaca9fb45 Add Bob Bagwill to contributors 2020-12-27 15:54:33 +00:00
gtorelly
d833e49db9 docs: Update onedrive setup guide
I followed this guide and successfully set up OneDrive on rclone, but there is a detail which stumped me for some time.
You cannot simply copy and paste `http://localhost:53682/` from this document to Microsoft's website; you must type it, otherwise it is not recognized as a valid address. I edited the line with this information.
2020-12-27 15:47:56 +00:00
Milly
3aee544cee docs: fix invalid option name in example of crypt config 2020-12-27 15:46:23 +00:00
kelv
9e87f5090f s3: add requester pays option - fixes #301 2020-12-27 15:43:44 +00:00
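An illustrative use of the new option against a Requester Pays bucket
(remote and bucket names are placeholders):

    rclone ls --s3-requester-pays s3remote:requester-pays-bucket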
lostheli
c8cfa43ccc Add a download flag to hashsum and related commands to force rclone to download and hash files locally
This commit modifies the operations.hashSum function by adding an alternate code path. This code path is triggered by passing downloadFlag = True. When activated, rclone will download files from the remote and hash them locally. downloadFlag = False preserves the existing behavior of using the remote to retrieve the hash.

This commit modifies HashLister to support the new hashSum method as well as consolidating the roles of HashLister, HashListerBase64, Md5sum, and Sha1sum.  The printing of hashes from the function defined in HashLister has been revised to work with --progress.  There are light changes to operations.syncFprintf and cmd.startProgress for this.

The unit test operations_test.TestHashSums is modified to support this change and test the download functionality.

The command functions hashsum, md5sum, sha1sum, and dbhashsum are modified to support this change.  A download flag has been added and an output-file flag has been added.  The output-file flag writes hashes to a file instead of stdout to avoid the need to redirect stdout.
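
An illustrative invocation combining the new flags (remote and output
file name are placeholders):

    rclone hashsum MD5 remote:path --download --output-file hashes.md5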
2020-12-27 15:40:44 +00:00
negative0
ed7af3f370 plugins: Create plugins files only if webui is enabled. Fixes #4592. May fix #4600. 2020-12-27 15:05:41 +00:00
Nathan Collins
be19d6a403 fshttp: prevent overlap of HTTP headers in logs 2020-12-27 12:44:46 +00:00
Bob Bagwill
46858ee6fe docs: fix typo in FAQ 2020-12-27 12:43:30 +00:00
Nick Craig-Wood
a94e4d803b build: update nfpm syntax to fix build of .deb/.rpm packages 2020-12-26 18:36:16 +00:00
Nick Craig-Wood
dcbe62ab0a build: fix brew install --cask syntax for macOS build 2020-12-26 17:23:43 +00:00
Nick Craig-Wood
121b981b49 build: revert GitHub actions brew fix since this is now fixed
Revert "build: work around GitHub actions brew problem"

This reverts commit a2fa1370c5.
2020-12-26 16:32:26 +00:00
Nick Craig-Wood
73bb9322f5 rc: prefer actual listener address if using ":port" or "addr:0" only
Before this change rclone would turn `localhost:8888` into
`127.0.0.1:8888` which apparently does not work with some browsers.

See: https://github.com/rclone/rclone-webui-react/issues/117
2020-12-26 16:28:54 +00:00
Nick Craig-Wood
bdc2278a30 alias: fix tests after parsing of ... change #4862
This was broken in ea8d13d841

    fs: Fix parsing of .. when joining remotes
2020-12-21 18:23:16 +00:00
Nick Craig-Wood
ea8d13d841 fs: Fix parsing of .. when joining remotes - Fixes #4862
Before this fix setting an alias of `s3:bucket` then using `alias:..`
would use the current working directory!

This fix corrects the path parsing. This parsing is also used in
wrapping backends like crypt, chunker, union etc.

It does not allow looking above the root of the alias, so `alias:..`
now lists `s3:bucket` as you might expect if you did `cd /` then
`ls ..`.
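
A sketch of the new behaviour, assuming an alias remote configured as:

    [myalias]
    type = alias
    remote = s3:bucket

Then `rclone lsd myalias:..` lists `s3:bucket` rather than the current
working directory.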
2020-12-18 13:06:39 +00:00
Nick Craig-Wood
e45716cac2 mount: add "." and ".." to directories to match cmount and expectations
See: https://forum.rclone.org/t/empty-directorys-size-for-a-mounted-crypt-remote/21077
2020-12-17 12:14:22 +00:00
Nick Craig-Wood
c98dd8755c log: fix enabling systemd logging when using --log-file
This also moves all the systemd logging decisions to fs/log
2020-12-17 11:55:27 +00:00
Nick Craig-Wood
5ae5e1dd56 docs: add an extra paragraph with links to rclone rc and the HTTP API
See: https://forum.rclone.org/t/rcd-endpoint-documenation/20949
2020-12-11 10:58:59 +00:00
Nick Craig-Wood
4f8ee736b1 vfs: make cache dir absolute before using it to fix path too long errors
If --cache-dir is passed in as a relative path, then rclone will not
be able to turn it into a UNC path under Windows, which means that
file names longer than 260 chars will fail when stored in the cache.

This patch makes the --cache-dir path absolute before using it.

See: https://forum.rclone.org/t/handling-of-long-paths-on-windows-260-characters/20913
2020-12-11 10:00:51 +00:00
Nick Craig-Wood
816e68a274 Add Laurens Janssen to contributors 2020-12-11 10:00:51 +00:00
Laurens Janssen
6ab6c8eefa gcs: Storage class object header support - fixes #3043 2020-12-10 20:06:49 +00:00
Nick Craig-Wood
cb16f42075 b2: Make NewObject use less expensive API calls
Before this change when NewObject was called the b2 backend would list
the directory that the object was in, in order to find it.

Unfortunately list calls are Class C transactions and cost more.

This patch switches to using HEAD requests instead. These are Class B
transactions. It is then necessary to parse the headers from response
back into the data that we get from the listing. However B2 returns
exactly the same data, just in a different form.

Rclone will use the old directory listing method when looking for
files with versions as these can't be found via a HEAD request.

This change will particularly benefit --files-from, rclone serve
restic but most operations will see some benefit.
2020-12-09 20:00:22 +00:00
Nick Craig-Wood
7ae84a3c91 Add James Lim to contributors 2020-12-09 20:00:22 +00:00
James Lim
2fd543c989 azure: add support for service principals - fixes #3230
Before: users can only connect to Azure blob containers using the access keys
from the storage account.

After: users can additionally choose to connect to Azure blob containers
using service principals. This uses OAuth2 under the hood to exchange
a client ID and client secret for a short-lived access token.

Ref:
- https://github.com/rclone/rclone/issues/3230
- https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-app?tabs=dotnet#well-known-values-for-authentication-with-azure-ad
- https://docs.microsoft.com/en-us/azure/developer/go/azure-sdk-authorization#available-authentication-types-and-methods
- https://gist.github.com/ItalyPaleAle/ec6498bfa81a96f9ca27a2da6f60a770
2020-12-09 17:52:15 +00:00
Nick Craig-Wood
50cf97fc72 sugarsync: fix NewObject for files that differ in case #4830 2020-12-07 17:38:22 +00:00
Nick Craig-Wood
4acd68188b box: fix NewObject for files that differ in case #4830 2020-12-07 17:38:22 +00:00
Nick Craig-Wood
b81b6da3fc fstest: add test for reading object in a case insensitive way #4830 2020-12-07 17:38:22 +00:00
Nick Craig-Wood
56ad6aac4d test_all: remove duplicated config for filefabric backend 2020-12-07 17:38:22 +00:00
Caleb Case
1efb8ea280 backend/tardigrade: Upgrade to uplink v1.4.1
Uplink v1.4.1 provides two important improvements for rclone:

* Fix for a connection handling issue where an open project could
  potentially become unusable because the underlying connection had
  failed.
* Fix for concurrent use issue in drpc.
2020-12-06 11:45:47 +00:00
Matteo Pietro Dazzi
9cfc01f791 build: upgrade docker buildx action 2020-12-06 11:43:34 +00:00
Nick Craig-Wood
86014cebd7 dedupe: add --dedupe-mode list to just list dupes, changing nothing 2020-12-02 16:52:12 +00:00
Nick Craig-Wood
507f861c67 dedupe: add --by-hash to dedupe on hash not file name - fixes #1674 2020-12-02 16:52:12 +00:00
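An illustrative invocation combining the two new dedupe flags to report
duplicates by hash without changing anything (remote is a placeholder):

    rclone dedupe --by-hash --dedupe-mode list remote:path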
Nick Craig-Wood
e073720a8f dropbox: enable short lived access tokens #4792
Starting September 30th, 2021, the Dropbox OAuth flow will no longer
return long-lived access tokens. It will instead return short-lived
access tokens, and optionally return refresh tokens.

This patch adds the token_access_type=offline parameter which causes
dropbox to return short lived tokens now.
2020-12-02 16:50:16 +00:00
Nick Craig-Wood
ce7cdadb71 Add zhucan to contributors 2020-12-02 16:50:16 +00:00
zhucan
a223b78872 fs: support multi-threads to head dst object
Signed-off-by: zhuc <zhucan.k8s@gmail.com>
2020-12-02 16:26:37 +00:00
buengese
d5181118cc compress: finish docs 2020-12-02 16:30:02 +01:00
buengese
886b3abac1 compress: fix broken tests 2020-12-02 16:30:02 +01:00
Nick Craig-Wood
250f8d9371 drive: allow shortcut resolution and creation to be retried
This was an oversight in the original code - these operations should
always have been retriable.
2020-12-02 15:28:38 +00:00
Anagh Kumar Baranwal
8a429d12cf s3: Added error handling for error code 429 indicating too many requests
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2020-12-01 18:13:31 +00:00
Nick Craig-Wood
8bf4697dc2 serve http/webdav: redirect requests to the base url without the /
When using `--baseurl` before this patch, if a request was made to the
base URL without a trailing / then rclone would return a 404 error.

Unfortunately GVFS / Nautilus makes the request without the /
regardless of what the user put in.

This patch redirects the request to the base URL with a /. So if the
user was using `--baseurl rclone` then a request to
http://localhost/rclone would be redirected with a 308 response to
http://localhost/rclone/
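
An illustrative setup (remote and base URL are placeholders):

    rclone serve webdav remote:path --baseurl rclone

A request to the base URL without the trailing / then receives the 308
redirect described above.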

Fixes #4814
2020-12-01 18:00:07 +00:00
Nick Craig-Wood
584523672c dropbox: test file name length before upload to fix upload loop
Before this change rclone would upload the whole of multipart files
before receiving a message from dropbox that the path was too long.

This change hard codes the 255 rune limit and checks that before
uploading any files.

Fixes #4805
2020-12-01 17:56:36 +00:00
Nick Craig-Wood
a9585efd64 dropbox: make malformed_path errors from too long files not retriable
Before this change, rclone would retry files with filenames that were
too long again and again.

This change recognises the malformed_path error that is returned and
marks it not to be retried which stops unnecessary retrying of the file.

See #4805
2020-12-01 17:56:36 +00:00
Nick Craig-Wood
f6b1f05e0f dropbox: tidy repeated error message 2020-12-01 17:56:36 +00:00
Nick Craig-Wood
cc8538e0d1 gcs: fix server side copy of large objects - fixes #3724
Before this change rclone was using the copy endpoint to copy large objects.

This can fail for large objects with this error:

    Error 413: Copy spanning locations and/or storage classes could
    not complete within 30 seconds. Please use the Rewrite method

This change makes Copy use the Rewrite method as suggested by the
error message which should be good for any size of copy.
2020-11-30 16:20:30 +00:00
Nick Craig-Wood
f7d9b15707 cmount: don't call host.Umount if a signal has been received
Before this change cgofuse and libatexit would race to see who could
unmount the file system with unpredictable results. On Linux it could
report an error or not, depending.

This change checks to see if umount is being called from a signal and
if so leaves the unmounting to cgofuse/libfuse.

See #4804
2020-11-29 17:44:00 +00:00
Nick Craig-Wood
83406bc473 atexit: add Signalled() function - set if called from a signal #4804 2020-11-29 17:44:00 +00:00
Nick Craig-Wood
1cfce703b2 mountlib: make sure we don't call umount more than once #4804
Before this change when using CTRL-C with rclone cmount the
mount would be unmounted twice.
2020-11-29 17:44:00 +00:00
Nick Craig-Wood
3b24a4cada yandex: set Features.WriteMimeType=false as Yandex ignores mime types
Yandex appears to ignore mime types set as part of the PUT request or
as part of a PATCH request.

The docs make no mention of being able to set a mime type, so set
WriteMimeType=false indicating the backend can't set mime types on
uploaded files.
2020-11-29 17:22:43 +00:00
Nick Craig-Wood
135adb426e filefabric: set Features.Read/WriteMimeType as both supported 2020-11-29 17:22:43 +00:00
Nick Craig-Wood
987dac9fe5 fichier: set Features.ReadMimeType=true as Object.MimeType is supported 2020-11-29 17:22:43 +00:00
Nick Craig-Wood
7fde48a805 dropbox: set Features.ReadMimeType=false as Object.MimeType not supported 2020-11-29 17:22:43 +00:00
Nick Craig-Wood
ce9028bb5b chunker: set Features.ReadMimeType=false as Object.MimeType not supported 2020-11-29 17:22:43 +00:00
buengese
52688a63c6 jottacloud: don't erroneously report support for writing mime types - fixes #4817 2020-11-29 18:11:43 +01:00
Durval Menezes
8904e81cdf Fixed verbal tense
"which are uploaded" -> "which were uploaded" (minor peeve)
2020-11-29 17:32:49 +03:00
Nick Craig-Wood
bcbe393af3 sftp: implement Shutdown method 2020-11-27 17:35:01 +00:00
Nick Craig-Wood
47aada16a0 fs: add Shutdown optional method for backends 2020-11-27 17:35:01 +00:00
Nick Craig-Wood
c22d04aa30 filter: deglobalise to put filter config into the context #4685 2020-11-27 17:28:42 +00:00
Nick Craig-Wood
354b4f19ec rc: add context to flag Reload function #4685 2020-11-27 17:28:42 +00:00
Durval Menezes
0ed1857fa9 webdav: updated docs to show streaming to nextcloud is working 2020-11-27 16:57:43 +00:00
Nick Craig-Wood
dfadd98969 azureblob,memory,pcloud: fix setting of mime types
Before this change the backend was reading the mime type of the
destination object instead of the source object when uploading.

This change fixes the problem and introduces an integration test for
it.

See: https://forum.rclone.org/t/is-there-a-way-to-get-rclone-copy-to-preserve-metadata/20682/2
2020-11-27 14:40:05 +00:00
edwardxml
19a8b66cee docs: update rclone about docs
Create a full loop of documentation for rclone about, backends overview
and individual backend pages.

Discussion:
https://github.com/rclone/rclone/pull/4774 relates

Previously pull was requested, in part, under ref
https://github.com/rclone/rclone/pull/4801

Notes:
Introduce a tentative draft see-link format at the end of sections to try
rather than lots of in-para links.

Update about.go incl link to list of backends not supporting about.

In list of backends not supporting about, include link to about command
reference.

I appreciate there may be decisions to make going forward about whether
command links should be code formatted, and using proper pretty url
links, but I have fudged that for now.

Update backend pages that do not support about with wording used
previously for ftp - it is in passive voice but I can live with it. (my
own wording and fault). The note is applied to a limitations section. If
one does not already exist it is created (even if there are other
limitations with their own sections)
2020-11-27 14:08:52 +00:00
Anagh Kumar Baranwal
07dee18d6b cmount: Add optional brew tag to throw an error when using mount in the
binaries installed via Homebrew - Fixes #4775

Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2020-11-26 22:56:59 +00:00
Maciej Zimnoch
70e8b11805 accounting: fix data race in Transferred()
startedTransfers is accessed by multiple threads, and it wasn't
protected by the mutex call in Transferred() func.

Fixes #4799
2020-11-26 22:54:38 +00:00
Nick Craig-Wood
9d574c0d63 fshttp: read config from ctx not passed in ConfigInfo #4685 2020-11-26 16:40:12 +00:00
Nick Craig-Wood
2e21c58e6a fs: deglobalise the config #4685
This is done by making fs.Config private and attaching it to the
context instead.

The Config should be obtained with fs.GetConfig and fs.AddConfig
should be used to get a new mutable config that can be changed.
2020-11-26 16:40:12 +00:00
Nick Craig-Wood
506342317b s3: update docs with a Reducing Costs section - Fixes #2889 2020-11-26 15:05:26 +00:00
Nick Craig-Wood
979bb07c86 filefabric: Implement the Enterprise File Fabric backend
Missing features
- M-Stream support
- Oauth-like flow (soon being changed to oauth)
2020-11-25 21:11:29 +00:00
Nick Craig-Wood
dfeae0e70a Revert "sharefile: fix backend due to API swapping integers for strings"
The API seems to have reverted to what it was before

This reverts commit 095c7bd801.
2020-11-25 20:52:57 +00:00
Nick Craig-Wood
f43a9ac17e pcloud: only use SHA1 hashes in EU region
Apparently only SHA1 hashes are supported in the EU region for
pcloud. This has been confirmed by pCloud support. The EU regions also
support SHA256 hashes which we don't support yet.

https://forum.rclone.org/t/pcloud-to-local-no-hashes-in-common/19440
2020-11-25 20:46:38 +00:00
Nick Craig-Wood
c3ac9319f4 build: attempt to fix docker build by upgrading ilteoood/docker_buildx 2020-11-25 13:01:59 +00:00
Nick Craig-Wood
76ee3060d1 s3: Add MD5 metadata to objects uploaded with SSE-AWS/SSE-C
Before this change, small objects uploaded with SSE-AWS/SSE-C would
not have MD5 sums.

This change adds metadata for these objects in the same way that the
metadata is stored for multipart uploaded objects.

See: #1824 #2827
2020-11-25 12:28:02 +00:00
Nick Craig-Wood
4bb241c435 s3: store md5 in the Object rather than the ETag
This enables us to set the md5 to cache it.

See: #1824 #2827
2020-11-25 12:28:02 +00:00
Nick Craig-Wood
a06f4c2514 s3: fix hashes on small files with aws:kms and sse-c
If rclone is configured for server side encryption - either aws:kms or
sse-c (but not sse-s3) then don't treat the ETags returned on objects
as MD5 hashes.

This fixes being able to upload small files.

Fixes #1824
2020-11-25 12:28:02 +00:00
Nick Craig-Wood
53aa03cc44 s3: complete sse-c implementation
This now can complete all operations with SSE-C enabled.

Fixes #2827
See: https://forum.rclone.org/t/issues-with-aws-s3-sse-c-getting-strange-log-entries-and-errors/20553
2020-11-25 12:28:02 +00:00
Nick Craig-Wood
1ce0b45965 build: Stop tagged releases making a current beta - fixes #4789
Before this change stable releases updated the current beta which mean
confusingly the current beta release would jump backwards from
1.54.0-beta to 1.53.3-beta say.

This commit stops any tagged build making a current beta release. They
will still make beta releases, they just won't update the
rclone*current*.zip and version.txt files.

This also means that a .0 release will not make a current beta like it
does at the moment.
2020-11-25 09:48:29 +00:00
Nick Craig-Wood
7078311a84 compress: add integration tests 2020-11-23 18:02:22 +00:00
Nick Craig-Wood
ef9b717961 Add Marcin Zelent to contributors 2020-11-23 18:02:22 +00:00
Nick Craig-Wood
09246ed9d5 Add Deepak Sah to contributors 2020-11-23 18:02:22 +00:00
Ankur Gupta
33ea55efed fs: parseduration: fixed tests to use UTC time 2020-11-23 17:11:56 +00:00
albertony
79474b2e4c docs: update documentation of commands delete,purge,rmdir,rmdirs 2020-11-23 17:10:08 +00:00
edwardxml
fb001b6c01 docs: improve "rclone about" docs and relate to union mfs limitation in ftp remote
See: https://forum.rclone.org/t/rclone-union-mfs-most-free-space-not-working-for-ftp/20346
2020-11-23 16:34:27 +00:00
Marcin Zelent
2896f51a22 docs: Fixed links to VFS File Caching section 2020-11-23 16:30:58 +00:00
Deepak Sah
5b9115d87a serve ftp: add options to enable TLS - fixes #3640 2020-11-23 16:07:51 +00:00
Nick Craig-Wood
211b08f771 Changelog updates from Version v1.53.3 2020-11-19 17:57:27 +00:00
Nick Craig-Wood
f0905499e3 random: seed math/rand in one place with crypto strong seed #4783
This shouldn't be read as encouraging the use of math/rand instead of
crypto/rand in security sensitive contexts, rather as a safer default
if that does happen by accident.
2020-11-18 17:48:44 +00:00
Nick Craig-Wood
7985df3768 random: fix incorrect use of math/rand instead of crypto/rand CVE-2020-28924
For implications see the linked issue.

Fixes #4783
2020-11-18 12:03:01 +00:00
Nick Craig-Wood
095c7bd801 sharefile: fix backend due to API swapping integers for strings
For some reason the API started returning some integers as strings in
JSON. This is probably OK in Javascript but it upsets Go.

This is easily fixed with the `json:"name,size"` struct tag.
2020-11-13 14:37:43 +00:00
Nick Craig-Wood
23469c9c7c ftp: add --ftp-disable-msld option to ignore MLSD for really old servers
This is useful for servers which advertise MLSD (eg some versions of
Serv-U) but don't support it properly.

See: https://forum.rclone.org/t/double-folder-names-on-target-destination-paths-ftp/18822
See: https://github.com/jlaffaye/ftp/pull/196
2020-11-13 11:25:34 +00:00
Nick Craig-Wood
2347762b0d vfs: fix "file already exists" error for stale cache files
Before this change if a file was uploaded through a mount, then
deleted externally, trying to upload that file again could give EEXIST
"file already exists".

This was because the file already existing in the cache was confusing
rclone into thinking it already had the file.

The fix is to check that if rclone has a stale cache file then to
ignore it in this situation.

See: https://forum.rclone.org/t/rclone-cant-reuse-filenames/20400
2020-11-13 10:32:21 +00:00
buengese
636fb5344a drive: implement CleanUp workaround for team drives - fixes #2418 2020-11-13 03:30:28 +01:00
buengese
aaa8b7738a compress: initial documentation 2020-11-13 02:31:59 +01:00
buengese
bc4282e49e compress: added experimental compression remote - implements #2098, #1356, #675
This remote implements transparent compression using gzip. It uses JSON as the format for storing metadata.

Co-authored-by: id01 <gaviniboom@gmail.com>
2020-11-13 02:31:59 +01:00
buengese
2812816142 hash: add MultiHasher.Sum() to retrieve a single specific hash 2020-11-13 02:31:59 +01:00
Nick Craig-Wood
ceeac84cfe serve restic: implement object cache
This caches all the objects returned from the List call. This makes
opening them much quicker so speeds up prune and restores. It also
uses fewer transactions. It can be disabled with
`--cache-objects=false`.

This was discovered when using the B2 backend when the budget was
being blown on list object calls which can be avoided with a bit of
caching.

For a typical 1 million file backup for a laptop or server this will only
use a small amount more memory.
2020-11-12 17:58:46 +00:00
Nick Craig-Wood
83d48f65b6 Add Manish Gupta to contributors 2020-11-12 17:58:46 +00:00
Manish Gupta
95d0410baa local: continue listing files/folders when a circular symlink is detected
Before this change a circular symlink would cause rclone to error out from the listings.

After this change rclone will skip a circular symlink and carry on the listing,
producing an error at the end.

Fixes #4743
2020-11-12 11:32:55 +00:00
albertony
2708a7569e mount: docs: make note about mounting as network drive less confusing 2020-11-11 20:37:57 +00:00
Nick Craig-Wood
45e8bea8d0 testserver: Make Test{FTP,SFTP,Webdav}Rclone run the current rclone
Before this change the tests were run against the previous stable
rclone/rclone docker image.

This unfortunately masked errors in the integration test server.

This change uses the currently installed rclone to run "rclone serve
ftp" etc. This is installed out of the current code by the integration
test server so will make a better test.
2020-11-10 18:01:15 +00:00
Nick Craig-Wood
f980f230c5 vfs: fix virtual entries causing deleted files to still appear
Before this change, if a file was created on a remote but deleted
externally from that remote then there was potential for the delete to
never be noticed.

The sequence of events was:

- Create file on VFS - creates virtual directory entry
- File deleted externally to remote before the directory refreshed
- Now the file has a virtual add but is not in the listings so will never disappear

This patch fixes it by removing all virtual directory entries except
the following when the directory is re-read.

- On remotes which can't have empty directories: virtual directory
  adds are not flushed. These will remain virtual as long as the
  directory is empty.

- For virtual file add: files that are in the process of being
  uploaded are not flushed

This patch also adds the distinction between virtually added files and
directories.

It also refactors the virtual directory logic to make it easier to follow.

Fixes #4446
2020-11-10 16:47:25 +00:00
Nick Craig-Wood
e204f89685 servetest: add -sub-run flag for running a subset of the backend tests
Use like this (eg in cmd/serve/sftp)

    go test -v -run TestSftp/Normal -sub-run "TestIntegration/FsMkdir/FsPutFiles/FsDirMove"
2020-11-10 16:47:25 +00:00
Nick Craig-Wood
f7efce594b config: add context.Context #3257 #4685
This add config to the Config callback in the backends and the related
config functions.
2020-11-09 18:05:54 +00:00
Nick Craig-Wood
1fb6ad700f accounting: add context.Context #3257 #4685 2020-11-09 18:05:54 +00:00
Nick Craig-Wood
e3fe31f7cb fs: add context.Context to fs.GetModifyWindow #3257 #4685 2020-11-09 18:05:54 +00:00
Nick Craig-Wood
8b96933e58 fs: Add context to fs.Features.Fill & fs.Features.Mask #3257 #4685 2020-11-09 18:05:54 +00:00
Nick Craig-Wood
d69b96a94c test: Add context to mockfs.NewFs #3257 #4685 2020-11-09 18:05:54 +00:00
Nick Craig-Wood
d846210978 fs: Add context to NewFs #3257 #4685
This adds a context.Context parameter to NewFs and related calls.

This is necessary as part of reading config from the context -
backends need to be able to read the global config.
2020-11-09 18:05:54 +00:00
Anagh Kumar Baranwal
30c8b1b84f fs: Fix nil pointer on copy & move operations directly to remote
Fix the copy and move operations that broke in 127f0fc when copying directly
to a remote without a specific destination.

Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2020-11-09 12:36:10 +00:00
Nick Craig-Wood
43e0929339 vfs: fix vfs/refresh calls with fs= parameter
Before this change rclone gave an error when the fs parameter was
provided.

This change removes the fs parameter from the parameters once it has
been read which avoids the error.

See: https://forum.rclone.org/t/precaching-with-vfs-refresh-fails-with-an-error-when-having-multiple-cloud-drives/20267
2020-11-07 14:26:33 +00:00
albertony
6c70c42577 jottacloud: docs: add heading to the example section so that it does not seem to be part of the legacy setup section 2020-11-06 17:52:50 +00:00
Adam Plánský
cd2c06f2a7 testserver: speed up seafile integration test
Before every Seafile integration test we run the Seafile server in a Docker
environment. We don't have to sleep for 60 seconds to have everything ready before running the integration test: we can assume everything is ready once the Seafile web server returns status code 200. The Seafile Dockerfile (https://github.com/haiwen/seafile-docker/blob/master/image/seafile/Dockerfile) runs scripts/start.py, which always waits for MySQL via wait_for_mysql() before calling init_seafile_server() (https://github.com/haiwen/seafile-docker/blob/master/scripts/start.py)
2020-11-03 16:31:39 +00:00
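
The readiness check amounts to polling the web server until it answers
200; a small sketch with a placeholder URL and timings:

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    // waitForHTTP polls url until it returns 200 OK or the timeout passes.
    func waitForHTTP(url string, timeout time.Duration) error {
        deadline := time.Now().Add(timeout)
        for time.Now().Before(deadline) {
            if resp, err := http.Get(url); err == nil {
                resp.Body.Close()
                if resp.StatusCode == http.StatusOK {
                    return nil
                }
            }
            time.Sleep(time.Second)
        }
        return fmt.Errorf("%s not ready after %v", url, timeout)
    }

    func main() {
        // placeholder address for the Seafile container's web front end
        fmt.Println(waitForHTTP("http://127.0.0.1:8088/", 60*time.Second))
    }
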
Nick Craig-Wood
af55a74bd2 stats: add counter for deleted directories - fixes #4676 2020-11-03 11:47:00 +00:00
Nick Craig-Wood
d00c126cef operations: fix --cutoff-mode hard not cutting off immediately
This failure was noted on the integration tests server.

The fix was to be more careful about which error message was emitted
with which --cutoff-mode
2020-11-02 17:13:19 +00:00
Nick Craig-Wood
bedf6e90d2 onedrive: warn on gateway timeout errors
It seems that when doing chunked uploads to onedrive, if the chunks
take more than 3 minutes or so to upload then they may time out with
a 504 Gateway Timeout error.

This change produces an error (just once) suggesting lowering
`--onedrive-chunk-size` or decreasing `--transfers`.

This is easy to replicate with:

    rclone copy -Pvv --bwlimit 0.05M 20M onedrive:20M

See: https://forum.rclone.org/t/default-onedrive-chunk-size-does-not-work/20010/
2020-11-02 16:53:35 +00:00
Nick Craig-Wood
e8c84d8b53 Add Adam Plánský to contributors 2020-11-02 16:53:35 +00:00
Adam Plánský
f89ff3872d ncdu: add toggle option for average size in directory - key 'a'
Add a toggle option to show the average size in a directory. This toggle
function is for ncdu and is bound to key 'a'.
2020-10-30 15:33:54 +00:00
Adam Plánský
127f0fc64c operations: move and copy log name of the destination object in verbose
If an object is moved or copied, rclone in verbose mode prints the name of the
destination object to the info log.
2020-10-30 15:31:54 +00:00
edwardxml
0cfa89f316 docs: ftp: put limitations in a single section
The topic is mostly about limitations, so all of these are grouped together with a section hyperlink near the top of the page. The intention is to avoid potential duplication and make it more straightforward (there is one place and it is essentially just a list, so the wording doesn't need to be elegant) to add notes about limitations in future, harvested from rclone forum postings.

Minor wording alterations that are not intended to change the meaning
2020-10-30 14:59:36 +00:00
Nick Craig-Wood
bfcd4113c3 mount: implement mknod to make NFS file creation work - fixes #2115
It turns out that NFS calls mknod in FUSE even though we have create
defined. This was causing EIO errors when creating files.

This patch fixes it by implementing mknod. The way it is implemented
means that to write to an NFS file system you'll need --vfs-cache-mode
writes.
2020-10-29 15:12:36 +00:00
Nick Craig-Wood
0e7fc7613f mount: make mount be cmount under macOS #4393
This also adds an alias to the mount command so it responds as `rclone
cmount` as well as `rclone mount`.
2020-10-29 13:34:39 +00:00
Nick Craig-Wood
8ac2f52b6e mount: disable bazil/fuse based mount on macOS #4393
The library is no longer supported on macOS.
2020-10-29 13:34:39 +00:00
Nick Craig-Wood
1973fc1ecc azureblob: update lib from v0.10.0 to v0.11.0 and fix API breakage
See: https://github.com/Azure/azure-storage-blob-go/issues/226
2020-10-29 13:34:39 +00:00
Nick Craig-Wood
7c39a13281 build: update all dependencies 2020-10-29 13:34:39 +00:00
Nick Craig-Wood
c5c503cbbe Add Adam Plánský to contributors 2020-10-29 13:34:39 +00:00
Josh Soref
d09488b829 docs: update: add Tencent
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-28 18:16:23 +00:00
Josh Soref
0a6196716c docs: style: avoid double-nesting parens
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-28 18:16:23 +00:00
Josh Soref
8bc9b2b883 docs: grammar: examples are examples
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-28 18:16:23 +00:00
Josh Soref
a15f50254a docs: grammar: if, then
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-28 18:16:23 +00:00
Josh Soref
5d4f77a022 docs: grammar: Oxford comma
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-28 18:16:23 +00:00
Josh Soref
a089de0964 docs: grammar: uncountable: links
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-28 18:16:23 +00:00
Josh Soref
3068ae8447 docs: grammar: count agreement: files
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-28 18:16:23 +00:00
Josh Soref
67ff153b0c docs: grammar: article: a-file
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-28 18:16:23 +00:00
Josh Soref
3e1cb8302a docs: spelling: etc.
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-28 18:16:23 +00:00
Josh Soref
e4a87f772f docs: spelling: e.g.
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-28 18:16:23 +00:00
Josh Soref
d4f38d45a5 docs: spelling: high-speed
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-28 18:16:23 +00:00
Josh Soref
bbe7eb35f1 docs: spelling: server-side
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-28 18:16:23 +00:00
edwardxml
87e54f2dde ftp: update wording for flags
Minor wording change to the help for the explicit and implicit FTPS flags, making them more consistent with each other. Add an 's' to 'request' because only one 'client' is mentioned.
2020-10-28 15:45:52 +00:00
edwardxml
3f3afe489f docs: mount: minor changes to wording 2020-10-28 15:43:49 +00:00
edwardxml
70b21d9c87 docs: Update mount docs
Eliminate repeated word 'mode'
2020-10-28 15:42:33 +00:00
Adam Plánský
e00bf3d723 ncdu: add sort by average size in directory
Add keyboard shortcut 'A' which sorts by average size in the directory.

If files/folders have the same avgSize, sort by actual size

Fixes: #4699
2020-10-27 13:28:38 +00:00
Nick Craig-Wood
605f2b819a build: fix nfpm install 2020-10-27 13:15:21 +00:00
Nick Craig-Wood
bf2b975359 build: update nfpm and github-release install method to go modules 2020-10-26 19:07:59 +00:00
Nick Craig-Wood
00a5086ff2 Remove accidentally committed binary and fix formatting
These were both committed in

b7253fc1c1 mount: docs: add note that allow-root and allow-other is not relevant on windows
2020-10-26 19:07:42 +00:00
Ivan Andreev
be6a888e50 chunker: skip long local hashing, hash in-transit (fixes #4021)
PR 4614
2020-10-26 20:18:07 +03:00
Ivan Andreev
dad8447423 mailru: avoid prehashing of large local files
PR 4617
2020-10-26 20:16:52 +03:00
Ivan Andreev
65ff109065 mailru: accept special folders eg camera-upload
Fixes #4025
PR 4690
2020-10-26 20:04:31 +03:00
albertony
b7253fc1c1 mount: docs: add note that allow-root and allow-other is not relevant on windows 2020-10-26 16:21:43 +00:00
Nick Craig-Wood
d143f576c6 Changelog updates from Version v1.53.2 2020-10-26 15:44:26 +00:00
Nick Craig-Wood
a152351a71 build: stop using set-env and set-path in the GitHub actions
A security problem was discovered when using set-env and
set-path. These have been deprecated by GitHub and a new mechanism
introduced.

This patch switches to using the new mechanism which will stop GitHub
warning about the use of the old mechanism.

See: https://github.com/actions/toolkit/security/advisories/GHSA-mfwh-5m23-j46w
2020-10-26 11:19:06 +00:00
Nick Craig-Wood
a2fa1370c5 build: work around GitHub actions brew problem
Brew was failing with

    fatal: 'origin' does not appear to be a git repository
    fatal: Could not read from remote repository.

See: https://github.com/actions/virtual-environments/issues/1811
See: https://github.com/actions/virtual-environments/issues/1869
2020-10-25 18:26:01 +00:00
Nick Craig-Wood
bed83b0b64 test: add ListRetries config parameter to integration tests
Occasionally the b2 tests fail because the integration tests don't
retry hard enough with their new setting of -list-retries 3. Override
this setting to 5 for the b2 tests only.
2020-10-25 18:10:50 +00:00
Nick Craig-Wood
cf0bdad5de union: create root directories if none exist
This fixes the TestUnion: integration test if the /tmp/union[123] dirs
don't exist.
2020-10-25 18:10:49 +00:00
Nick Craig-Wood
85d35ef03c test: remove TestS3Ceph: and TestSwiftCeph: from integration tests
Unfortunately we don't have access to this server any more
2020-10-25 18:10:49 +00:00
Nick Craig-Wood
514d10b314 Add Ingo to contributors 2020-10-25 18:10:49 +00:00
Ingo
5164c3d2d0 genautocomplete: add support to output to stdout 2020-10-22 17:28:33 +01:00
albertony
ffdd0719e7 jottacloud: avoid double url escaping of device/mountpoint - fixes #4697 2020-10-20 17:43:49 +02:00
Nick Craig-Wood
4e2b5389d7 check: make the error count match up in the log message
Before this change we counted the final summary error as an error,
producing confusing log messages like:

    Failed to check with 54 errors: last error was: 53 differences found

This change marks the summary error as already being counted, so the
error message becomes:

    Failed to check with 53 errors: last error was: 53 differences found

This change also returns a listing failure in preference to a summary error.

See: https://forum.rclone.org/t/slow-checksum-validation/19763/22
2020-10-15 12:57:48 +01:00
Nick Craig-Wood
dc4e63631f Add David to contributors 2020-10-15 12:57:48 +01:00
Nick Craig-Wood
275bf456d3 Add Josh Soref to contributors 2020-10-15 12:57:48 +01:00
Nick Craig-Wood
7dfa871095 Add Dan Hipschman to contributors 2020-10-15 12:57:48 +01:00
Nick Craig-Wood
70cc88de22 Add Ameer Dawood to contributors 2020-10-15 12:57:48 +01:00
Nick Craig-Wood
4bc0f46955 Add Dov Murik to contributors 2020-10-15 12:57:48 +01:00
Anagh Kumar Baranwal
5b09599a23 drive: Added flag --drive-stop-on-download-limit to stop transfers when the download limit is exceeded
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2020-10-14 15:51:01 +01:00
David
f4dd8e3fe8 dedupe: minor clarification in docs
dedupe will not delete equal files if they are located in another folder.
2020-10-14 15:31:47 +01:00
Josh Soref
d0888edc0a Spelling fixes
Fix spelling of: above, already, anonymous, associated,
authentication, bandwidth, because, between, blocks, calculate,
candidates, cautious, changelog, cleaner, clipboard, command,
completely, concurrently, considered, constructs, corrupt, current,
daemon, dependencies, deprecated, directory, dispatcher, download,
eligible, ellipsis, encrypter, endpoint, entrieslist, essentially,
existing writers, existing, expires, filesystem, flushing, frequently,
hierarchy, however, implementation, implements, inaccurate,
individually, insensitive, longer, maximum, metadata, modified,
multipart, namedirfirst, nextcloud, obscured, opened, optional,
owncloud, pacific, passphrase, password, permanently, persimmon,
positive, potato, protocol, quota, receiving, recommends, referring,
requires, revisited, satisfied, satisfies, satisfy, semver,
serialized, session, storage, strategies, stringlist, successful,
supported, surprise, temporarily, temporary, transactions, unneeded,
update, uploads, wrapped

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-14 15:21:31 +01:00
Nick Craig-Wood
51a230d7fd fs: Implement UnWrapObjectInfo for getting original object out of src objects 2020-10-14 15:20:06 +01:00
Anagh Kumar Baranwal
fc5b14b620 s3: Added --s3-disable-http2 to disable http/2
Fixes #4673

Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2020-10-13 17:11:22 +01:00
Stephen Harris
bbddadbd04 sftp: remember entered password in AskPass mode
As reported in

  https://github.com/rclone/rclone/issues/4660#issuecomment-705502792

After switching to a password callback function, if the ssh connection
aborts and needs to be reconnected then the user is re-prompted for their
password.  Instead we now remember the password they entered and just give
that back.  We do lose the ability for them to correct mistakes, but that's
the situation from before switching to callbacks.  We keep the benefits
of not asking for passwords until the SSH connection succeeds (right
known_hosts entry, for example).

This required a small refactor of how `f := &Fs{}` was built, so we can
store the saved password in the Fs object
2020-10-13 16:53:11 +01:00
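
The shape of the change is easy to show with a closure that caches the
first answer; an illustrative sketch, not the backend code:

    package main

    import "fmt"

    // passwordCallback returns a function that asks for the password once
    // and replays the saved answer on later reconnects.
    func passwordCallback(ask func() string) func() (string, error) {
        saved := ""
        return func() (string, error) {
            if saved == "" {
                saved = ask() // prompt the user only on the first call
            }
            return saved, nil
        }
    }

    func main() {
        prompts := 0
        cb := passwordCallback(func() string { prompts++; return "hunter2" })
        cb() // initial connection
        cb() // reconnection after the SSH connection aborts
        fmt.Println("prompts:", prompts) // prompts: 1
    }
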
Nick Craig-Wood
7428e47ebc local: fix sizes and syncing with --links option on Windows - fixes #4581
Before this change rclone returned the size from the Stat call of the
link. On Windows this reads as 0 always, however on unix it reads as
the length of the text in the link. This caused errors like this when
syncing:

    Failed to copy: corrupted on transfer: sizes differ 0 vs 13

This change causes Windows platforms to read the link and use that as
the size of the link instead of 0 which fixes the problem.
2020-10-13 16:29:56 +01:00
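
The core of the fix can be illustrated with the standard library: read
the link target and use its text length rather than trusting the Stat
size, which reads as 0 on Windows. A sketch, not the backend code:

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        if err := os.Symlink("target file.txt", "example.link"); err != nil {
            fmt.Println("symlink:", err)
            return
        }
        defer os.Remove("example.link")

        fi, _ := os.Lstat("example.link")
        target, _ := os.Readlink("example.link")

        // On Windows fi.Size() for a symlink reads as 0, so use the
        // length of the link target text instead.
        fmt.Println("stat size:", fi.Size(), "target length:", len(target))
    }
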
Nick Craig-Wood
72083c65ad cmd: make backend env vars show in help as the defaults for backend flags
Before this change

    RCLONE_DRIVE_CHUNK_SIZE=111M rclone help flags | grep drive-chunk-size

Would show the default value, not the setting of RCLONE_DRIVE_CHUNK_SIZE
as the non backend flags do.

This change makes it work as expected by setting the default of the
option to the environment variable.

Fixes #4659
2020-10-13 15:43:58 +01:00
Dan Hipschman
70f92fd6b3 crypt: small simplification, no functionality change 2020-10-12 17:20:39 +01:00
Nick Craig-Wood
a86cedbc24 vfs: Fix --no-modtime to not attempt to set modtimes (as documented)
See: https://forum.rclone.org/t/rclone-mount-with-azure-blob-archive-tier/19414
2020-10-09 17:01:16 +01:00
Nick Craig-Wood
0906f8dd3b onedrive: fix disk usage for sharepoint
Some onedrive sharepoints appear to return all 0s for quota

    "quota":{"deleted":0,"remaining":0,"total":0,"used":0}

This commit detects this and returns unknown for all quota parts.

See: https://forum.rclone.org/t/zero-size-volume-when-mounting-onedrive-sharepoint/19597
2020-10-09 14:11:56 +01:00
buengese
664213cedb jottacloud: remove clientSecret from config when upgrading to token based authentication - #4645 2020-10-08 11:51:17 +02:00
Ameer Dawood
75a7226174 mount: docs: correction of repeated word 2020-10-07 14:25:31 +01:00
Stephen Harris
9e925becb6 sftp: defer asking for user passwords until the SSH connection succeeds
Issue: 4660
    https://github.com/rclone/rclone/issues/4660

Unexpected side effect: a wrong password allows for the user to retry!
2020-10-07 12:01:17 +01:00
Anagh Kumar Baranwal
e3a5bb9b48 s3: Add missing regions for AWS
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2020-10-06 16:54:42 +01:00
Dov Murik
b7eeb0e260 docs: Box: explain about the backslash-like unicode character
Add the full name of the backslash-lookalike unicode character.
2020-10-06 16:47:49 +01:00
Nick Craig-Wood
84d64ddabc Add LaSombra to contributors 2020-10-06 16:42:28 +01:00
Nick Craig-Wood
6c9f92aee6 Add Hekmon to contributors 2020-10-06 16:42:28 +01:00
Nick Craig-Wood
893297760b Add gyutw to contributors 2020-10-06 16:42:28 +01:00
Leo Luan
c5c56cda02 vfs: Add a missed update of used cache space
The missed update can cause incorrect before-cleaning cache stats
and a premature condition broadcast in purgeOld before the cache
space use is reduced below the quota.
2020-10-06 16:35:23 +01:00
Leo Luan
2295123cad vfs: Add exponential backoff during ENOSPC retries
Add an exponentially increasing delay during retries upon ENOSPC errors
to avoid exhausting the 10 retries too soon when the cache space
recovered from item resets is not yet available from the file system
or is consumed by other large cache writes.
2020-10-06 16:35:23 +01:00
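
A minimal sketch of an exponentially backed-off retry loop of that shape
(illustrative names only):

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    var errNoSpace = errors.New("no space left on device (ENOSPC)")

    // withBackoff retries op with an exponentially increasing delay so the
    // fixed retry budget isn't exhausted before cache space is recovered.
    func withBackoff(retries int, initial time.Duration, op func() error) error {
        delay := initial
        var err error
        for i := 0; i < retries; i++ {
            if err = op(); !errors.Is(err, errNoSpace) {
                return err
            }
            time.Sleep(delay)
            delay *= 2
        }
        return err
    }

    func main() {
        attempts := 0
        err := withBackoff(10, time.Millisecond, func() error {
            attempts++
            if attempts < 4 {
                return errNoSpace
            }
            return nil
        })
        fmt.Println(attempts, err) // 4 <nil>
    }
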
Leo Luan
ff0280c0cb vfs: Fix missed concurrency control between some item operations and reset
Item reset is invoked by the cache cleaner for synchronous recovery
from ENOSPC errors. The reset operation removes the cache file and
closes/reopens the downloaders.  Although most parts of reset and
other item operations are done with the item mutex held, the mutex
is released during fd.WriteAt and downloaders calls. We used preAccess
and postAccess calls to serialize Reset, ReadAt, and Open, but missed
some other item operations. The patch adds preAccess/postAccess
calls in Sync, Truncate, Close, WriteAt, and rename.
2020-10-06 16:35:23 +01:00
Leo Luan
64d736a57b vfs: Fix a race condition in retryFailedResets
A failed item reset is saved in the errItems for retryFailedResets
to process.  If the item gets closed before the retry, the item may
have been removed from the c.item array. Previous code did not
account for this condition. This patch adds a check for the
existence of the retry items in retryFailedResets.
2020-10-06 16:35:23 +01:00
Leo Luan
5f1d5a1897 vfs: Fix a deadlock vulnerability in downloaders.Close
The downloaders.Close() call acquires the downloaders' mutex before
calling the wait group wait and the main downloaders thread has a
periodical (5 seconds interval) call to kick its waiters and the
waiter dispatch function tries to get the mutex. So a deadlock can
occur if the Close() call starts, gets the mutex, while the main
downloader thread already got the timer's tick and proceeded to
call kickWaiters. The deadlock happens when the Close call gets
the mutex between the timer's kick and the main downloader thread
gets the mutex first. So it's a pretty short period of time and
it probably explains why the problem has not surfaced, maybe
something like tens of nanoseconds out of 5 seconds (~10^^-8).
It took 5 days of continued stressing the Close calls for the
deadlock to appear.
2020-10-06 16:35:23 +01:00
LaSombra
aac2406e19 cmd: add --progress-terminal-title to print ETA to terminal title
Adds a flag, --progress-terminal-title, that when used with --progress,
will print the string `ETA: %s` to the terminal title.

This also adds WriteTerminalTitle to lib/terminal
2020-10-06 16:34:26 +01:00
Stephen Harris
6dc28ef50a sftp: Allow user to optionally check the server's host key to add security
Based on Issue 4087
  https://github.com/rclone/rclone/issues/4087

Current behaviour is insecure.  If the user specifies this value then we
switch to validating the server hostkey and so can detect server changes
or MITM-type attacks.
2020-10-06 16:27:42 +01:00
Hekmon
66def93373 mount cmd: update systemd status with cache stats 2020-10-06 16:21:30 +01:00
Hekmon
c58023a9ba enhance systemd integration
* log level identification
* manual activation with flag
* automatic systemd launch detection
2020-10-06 16:21:30 +01:00
buengese
3edc9ff0b0 jottacloud: remove DirMove workaround as it's not required anymore - also fixes #4655 2020-10-05 20:13:05 +02:00
edwardxml
8e8ae1edc7 crypt: update docs
Mostly proposed changes to tense, clarity and point of view.

There is still some duplication, and further examples would be beneficial.
2020-10-05 17:19:00 +01:00
Nick Craig-Wood
20b00db390 operations: fix spurious "--checksum is in use but the source and destination have no hashes in common"
Before this change rclone would emit the message

    --checksum is in use but the source and destination have no hashes in common; falling back to --size-only

When the source or destination hash was missing as well as when the
source and destination had no hashes in common.

This first case is very confusing for users when the source and
destination do have a hash in common.

This change fixes that and makes sure the error message is not emitted
on missing hashes even when there is a hash in common.

See: https://forum.rclone.org/t/source-and-destination-have-no-hashes-in-common-for-unencrypted-drive-to-local-sync/19531
2020-10-05 16:07:05 +01:00
Nick Craig-Wood
db4bbf9521 operations: fix use of --suffix without --backup-dir
As part of the original work adding this feature it was overlooked
that this didn't actually work for full rclone copy/sync.

This commit fixes the problem and adds a test to make sure it stays
working.

See: https://forum.rclone.org/t/suffix-not-working-on-folder-upload-via-ssh-sftp/19526
2020-10-04 16:40:20 +01:00
Nick Craig-Wood
2b7994e739 operations: add logs when need to upload files to set mod times #1505 2020-09-29 17:04:29 +01:00
gyutw
e7fbdac8e0 fichier: increase maximum file size from 100GB to 300GB - fixes #4634 2020-09-28 20:27:17 +02:00
Nick Craig-Wood
41ec712aa9 ftp,sftp: fix docs for usernames
- factor env.CurrentUser out of backend/sftp
- Use env.CurrentUser in ftp and sftp
- fix docs to have correct username
2020-09-27 11:44:05 +01:00
Stephen Harris
17acae2b00 sftp: allow cert based auth via optional pubkey
Discussion at
  https://forum.rclone.org/t/ssh-certificate-based-authentication-does-not-work/19222

Basically we allow the user to specify their own public key cert rather
than letting the SSH client extract the pubkey from the private key.
This allows certificate based authentication to work.
2020-09-27 11:10:13 +01:00
Nick Craig-Wood
57261c7e97 mount: docs: remove incorrect statement about --vfs-cache-mode full
See: https://forum.rclone.org/t/is-this-documentation-correct/19376
2020-09-27 11:04:59 +01:00
Ivan Andreev
d8239e0194 mailru: remove deprecated protocol quirks 2020-09-26 15:38:32 +03:00
Ivan Andreev
004c3796de chunker: disable ListR to fix missing files on GDrive (workaround #3972) 2020-09-26 15:19:16 +03:00
Ivan Andreev
18c7549770 mailru: fix invalid timestamp on corrupted files (fixes #4229) 2020-09-26 15:12:30 +03:00
Nick Craig-Wood
e5190f14ce drive: implement "rclone backend copyid" command for copying files by ID
This allows files to be copied by ID from google drive. These can be
copied to any rclone remote and if the remote is a google drive then
server side copy will be attempted.

Fixes #3625
2020-09-25 17:53:51 +01:00
Nick Craig-Wood
433b73a5a8 accounting: stabilize display order of transfers on Windows
Before this change we sorted transfers in the stats list solely on
time started. However if --check-first was in use then lots of
transfers could be started in the same millisecond. Because Windows
time resolution is only 1mS this caused the entries to sort equal and
bounce around in the list.

This change fixes the sort so that if the time is equal it uses the
name which should stabilize the order.

Fixes #4599
2020-09-24 19:10:37 +01:00
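
The tie-break is a one-line change to the comparison function; a sketch
with assumed field names:

    package main

    import (
        "fmt"
        "sort"
        "time"
    )

    type transfer struct {
        name      string
        startedAt time.Time
    }

    func main() {
        now := time.Now().Truncate(time.Millisecond) // Windows-like 1ms resolution
        ts := []transfer{{"b", now}, {"a", now}, {"c", now.Add(time.Second)}}

        // Sort by start time, falling back to name when times are equal so
        // the display order is stable even with coarse timestamps.
        sort.Slice(ts, func(i, j int) bool {
            if ts[i].startedAt.Equal(ts[j].startedAt) {
                return ts[i].name < ts[j].name
            }
            return ts[i].startedAt.Before(ts[j].startedAt)
        })
        fmt.Println(ts[0].name, ts[1].name, ts[2].name) // a b c
    }
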
Nick Craig-Wood
ab88a3341f Add Russell Cattelan to contributors 2020-09-24 19:10:37 +01:00
Nick Craig-Wood
181da3ce9b Add Christopher Stewart to contributors 2020-09-24 19:10:37 +01:00
Russell Cattelan
b14a58c9b8 cmd/mount2: fix the swapped UID / GID values 2020-09-23 23:06:33 +01:00
buengese
60cc2cba1f sftp: always convert the checksum to lower case - fixes #4518 2020-09-22 03:15:09 +02:00
Ivan Andreev
c797494d88 Merge pull request #4608 from ivandeex/pr-chunker-crypt
chunker: fix upload over crypt (fixes #4570)
2020-09-18 17:58:44 +03:00
Ivan Andreev
e2a57182be mailru: re-enable fixed chunker tests
This reverts commit 9d3d397f50.
2020-09-18 17:56:34 +03:00
Ivan Andreev
8928441466 mailru: fix range requests after june changes on server 2020-09-18 17:56:34 +03:00
Ivan Andreev
0e8965060f mailru: fix uploads after recent changes on server
similar fix: 5efa9958f1
2020-09-18 17:56:34 +03:00
Christopher Stewart
f3cf6fcdd7 s3: fix spelling mistake
Fix spelling mistake "patific" => "pacific"
2020-09-18 12:03:13 +01:00
Nick Craig-Wood
18ccf0f871 vfs: detect and recover from a file being removed externally from the cache
Before this change if a file was removed from the cache while rclone
is running then rclone would not notice and proceed to re-create it
full of zeros.

This change notices when files that we expect to have data in go missing,
and if they do it logs an ERROR and recovers.

Deleting files from the cache manually while rclone is running isn't
recommended!

See: https://forum.rclone.org/t/corrupted-data-streaming-after-vfs-meta-files-removed/18997
Fixes #4602
2020-09-18 10:30:02 +01:00
Nick Craig-Wood
313647bcf3 Add Muffin King to contributors 2020-09-18 10:30:02 +01:00
Muffin King
61fe068c90 seafile: fix accessing libraries > 2GB on 32 bit systems - fixes #4588 2020-09-15 21:55:10 +02:00
Nick Craig-Wood
5c49096e11 accounting: fix incorrect speed and transferTime in core/stats
Before this change the code which summed up the existing transfers
over all the stats groups forgot to add the old transfer time and old
transfers in.

This meant that the speed and elapsedTime got increasingly inaccurate
over time due to the transfers being culled from the list but their
time not being accounted for.

This change adds the old transfers into the sum which fixes the
problem.

This was only a problem over the rc.

Fixes #4569
2020-09-15 12:01:18 +01:00
Nick Craig-Wood
a73c78545d Changelog updates from Version v1.53.1 2020-09-13 10:27:24 +01:00
Nick Craig-Wood
e0fd560711 build: update not-in-stable after semver changes 2020-09-12 12:49:16 +01:00
Nick Craig-Wood
6a56ac1032 vfs,local: Log an ERROR if we fail to set the file to be sparse
See: https://forum.rclone.org/t/rclone-1-53-release/18880/73
2020-09-11 15:36:47 +01:00
Nick Craig-Wood
96299629b4 Add wjielai to contributors 2020-09-11 15:36:38 +01:00
Nick Craig-Wood
75de30cfa8 sync: fix --cutoff-mode soft & cautious so it doesn't end the transfer early
Before this fix --cutoff-mode soft and cautious would emit a Fatal
error which stopped the sync immediately.

This fix introduces a new error which is checked in the sync error
processing which stops the sync gracefully.

Fixes #4576
2020-09-09 12:53:21 +01:00
buengese
233bed6a73 dropbox: implement IDer - fixes #2928 2020-09-08 19:04:32 +02:00
buengese
b3964efe4d docs/dropbox: update docs with information regarding the new flags to access shared files and folders 2020-09-08 19:02:35 +02:00
buengese
575f061629 dropbox: add support for viewing shared files and folders 2020-09-08 19:02:35 +02:00
Evan Harris
640d7d3b4e opendrive: Do not retry 400 errors
This type of error is unlikely to be an error that can be resolved by a retry,
and is triggered in #2296 by files with a timestamp before the unix epoch.
2020-09-08 17:15:35 +01:00
Evan Harris
e92294b482 docs: Updated mount command to reflect that it requires Go 1.13 or newer 2020-09-08 16:40:43 +01:00
wjielai
22937e8982 docs: add Tencent COS to s3 provider list - fixes #4468
* add Tencent COS to s3 provider list.

Co-authored-by: wjielai <wjielai@tencent.com>
2020-09-08 16:34:25 +01:00
edwardxml
c3d1474eb9 docs: Add full stops for consistency in rclone --help
closes #4560 closes #4561 closes #4562 closes #4563 closes #4564
2020-09-08 16:26:09 +01:00
Nick Craig-Wood
e2426ea87b docs: note --log-file does append 2020-09-08 16:13:33 +01:00
Nick Craig-Wood
e58a61175f build: fix architecture name in ARMv7 build - fixes #4571
After introducing the arm-v7 build we are accidentally making debs
and rpms with the architecture arm-v7.

This fixes the problem by stripping the version off.
2020-09-08 16:10:52 +01:00
Nick Craig-Wood
05bea46c3e accounting: remove new line from end of --stats-one-line display 2020-09-08 16:10:52 +01:00
Chaitanya Bankanhal
c8a719ae0d webui: Prompt user for updating webui if an update is available 2020-09-07 16:45:00 +01:00
Tim Gallant
c3884aafd9 drive: adds special oauth help test - fixes #4555 2020-09-07 12:48:46 +01:00
Nick Craig-Wood
0a9785a4ff build: don't explicitly set ARM version to fix ARMv5 build #4553
This partially reverts commit f71f6c57d7.
2020-09-07 12:39:26 +01:00
Nick Craig-Wood
8140f67092 check: fix docs
See: https://forum.rclone.org/t/possible-issue-with-documention/18926
2020-09-07 12:10:52 +01:00
Nick Craig-Wood
4a001b8a02 check: add back missing --download flag - fixes #4565
This was accidentally removed when refactoring check and cryptcheck in

8b6f2bbb4b check,cryptcheck: add reporting of filenames for same/missing/changed #3264
2020-09-05 09:29:35 +01:00
Nick Craig-Wood
525433e6dd build: fix "Illegal instruction" error for ARMv6 builds - fixes #4553
Before this change we used `go build -i` to build the releases in parallel.

However this causes the ARMv6 and ARMv7 build to get mixed up somehow,
causing an illegal instruction when running rclone binaries on ARMv6.

See go bug: https://github.com/golang/go/issues/41223

This removes the -i which should have no effect on build times on the
CI and appears to fix the problem.
2020-09-04 16:30:50 +01:00
Nick Craig-Wood
f71f6c57d7 build: explicitly set ARM version to fix build #4553 2020-09-04 16:30:50 +01:00
albertony
e35623c72e docs/jottacloud: mention that uploads from local disk will not need to cache files to disk for md5 calculation 2020-09-04 00:04:09 +02:00
Nick Craig-Wood
344bce7e2a docs: fix formatting of rc docs page
See: https://forum.rclone.org/t/rclone-1-53-release/18880/24
2020-09-03 11:53:24 +01:00
Nick Craig-Wood
3a4322a7ba build: update build for stable branch 2020-09-03 11:30:23 +01:00
Nick Craig-Wood
27b9ae4fc3 vfs: fix spurious error "vfs cache: failed to _ensure cache EOF"
Before this change the error message was produced for every file which
was confusing users.

After this change we check for EOF and return from ReadAt at that
point.

See: https://forum.rclone.org/t/rclone-1-53-release/18880/10
2020-09-03 10:25:00 +01:00
Nick Craig-Wood
7e2488af10 build: include vendor tar ball in release and fix startdev 2020-09-02 17:53:05 +01:00
Nick Craig-Wood
41ecb586c4 Start v1.54.0-DEV development 2020-09-02 17:52:58 +01:00
Nick Craig-Wood
510ac341e1 Version v1.53.0 2020-09-02 17:00:18 +01:00
Nick Craig-Wood
358e2b2665 Revert "docs: make the website navbar stick to top"
Unfortunately this breaks the anchor links - the titles are under the
navbar, so revert this for the moment.

This reverts commit cdfb3f7194.
2020-09-02 16:43:05 +01:00
Nick Craig-Wood
3305079a03 vfs: fix typos in help 2020-09-02 14:24:44 +01:00
Nick Craig-Wood
6ed8471a37 Add Sam Edwards to contributors 2020-09-02 14:12:22 +01:00
Nick Craig-Wood
dc7ce37c32 Add WarpedPixel to contributors 2020-09-02 14:12:22 +01:00
themylogin
57c10babfe drive: Remove --drive-alternate-export in favor of exportLinks
A Google engineer confirms that the new official API should work properly:
https://issuetracker.google.com/issues/36761333#comment8
2020-09-02 12:16:25 +01:00
Sam Edwards
23b2c58018 vfs: Quiet removeNotInUse logging to debug when not removing 2020-09-02 11:55:20 +01:00
Evan Harris
78abd21eec docs: Cleaned up and corrected bugs docs verbiage 2020-09-02 11:53:52 +01:00
Evan Harris
841edc729c docs: Updated --track-renames docs 2020-09-02 11:52:01 +01:00
Evan Harris
b03fcbcc12 docs: Updated overview with Opendrive support of dupes 2020-09-02 11:45:11 +01:00
WarpedPixel
b60ac7b66a onedrive: document workaround for file size download errors #2036 2020-09-02 11:44:05 +01:00
Nick Craig-Wood
725ae91387 s3: reduce the default --s3-copy-cutoff to < 5GB
The maximum value for --s3-copy-cutoff should be 5 GiB as tested
with AWS S3.

However b2 have implemented this as 5 GB rather than 5 GiB, so having the
default at 5 GiB breaks the b2 server side copy of a large file by
default.

This patch sets the default to 4768 MiB which is slightly less than
5GB.

This should have very little effect on anything.

In future rclone could lower this limit further if Copy can multithread.

See: https://forum.rclone.org/t/copying-files-within-a-b2-bucket/16680/76
2020-09-01 18:53:29 +01:00
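
For reference, 4768 MiB = 4768 × 1,048,576 bytes = 4,999,610,368 bytes, which is
just under the 5 GB (5,000,000,000 byte) limit b2 uses while staying close to the
5 GiB (5,368,709,120 byte) AWS maximum.
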
Nick Craig-Wood
b7dd3ce608 s3: preserve metadata when doing multipart copy
Before this change the s3 multipart server side copy was not
preserving the metadata of the object. This was most noticeable
because the modtime was not preserved.

This change fetches the metadata from the object before starting the
copy and overwrites it if required.

It will also mean any other metadata is preserved.

See: https://forum.rclone.org/t/copying-files-within-a-b2-bucket/16680/70
2020-09-01 18:39:30 +01:00
Nick Craig-Wood
70c8566cb8 fs: Pin created backends until parents are finalized
This attempts to solve the backend lifecycle problem by

- Pinning backends mentioned on the command line into the cache
  indefinitely

- Unpinning backends when the containing structure (VFS, wrapping
  backend) is destroyed

See: https://forum.rclone.org/t/rclone-rc-backend-command-not-working-as-expected/18834
2020-09-01 18:21:03 +01:00
Nick Craig-Wood
0d066bdf46 alias,cache,chunker,crypt: make any created backends be cached to fix rc problems
Before this change, when the above backends created a new backend they
didn't put it into the backend cache.

This meant that rc commands acting on those backends did not work.

This was fixed by making sure the backends use the backend cache.

See: https://forum.rclone.org/t/rclone-rc-backend-command-not-working-as-expected/18834
2020-09-01 18:21:03 +01:00
Nick Craig-Wood
3affc2e066 fspath: make JoinRootPath convert backslashes to slashes on Windows
The function is used for constructing remotes which may contain
backslashes on Windows.
2020-09-01 18:21:03 +01:00
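
A toy version of the idea (not the real fspath code): normalise
backslashes to slashes before joining so Windows-style inputs join
cleanly.

    package main

    import (
        "fmt"
        "path"
        "strings"
    )

    // joinRootPath normalises backslashes to slashes before joining, so
    // Windows-style inputs join cleanly on any platform.
    func joinRootPath(elem ...string) string {
        for i := range elem {
            elem[i] = strings.ReplaceAll(elem[i], `\`, `/`)
        }
        return path.Join(elem...)
    }

    func main() {
        fmt.Println(joinRootPath(`C:\Users\me`, `data\file.txt`))
        // C:/Users/me/data/file.txt
    }
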
Nick Craig-Wood
23c826db52 union: fix writing with the all policy - fixes #4534
Before this change writing with the all policy deadlocked while
uploading.

This change fixes the problem by fixing the multi reader, closing the
pipes at the correct time with the correct error. This is factored
into a new function as it was used twice.

This patch also adds a new test which tests the all policies.
2020-09-01 18:21:03 +01:00
Nick Craig-Wood
1ae36a4e32 Add Lucas Kanashiro to contributors 2020-09-01 18:21:03 +01:00
Nick Craig-Wood
bc969ad244 Add Egor Margineanu to contributors 2020-09-01 18:21:03 +01:00
Nick Craig-Wood
d7ac1f5b0e Add Aaron Gokaslan to contributors 2020-09-01 18:21:03 +01:00
Nick Craig-Wood
5bf53fe3ac test_all: only run the backend tests for 1fichier
Running all the tests for 1fichier takes too long due to the directory
reading rate limiter.

The backend tests do complete in a reasonable time (21 mins).
2020-09-01 16:13:47 +01:00
Nick Craig-Wood
9cc17cec9a swift: fix missing hash from object returned from upload
Before this fix we were reading the hash from the upload using the
string "ETag", however Go's net/http canonicalises the header key into
"Etag" so we were in fact always reading an empty string.

This bug was introduced in

aeea4430d5 swift: efficiency: slim Object and reduce requests on upload

It was spotted by the integration tests.

The fix was just to use the canonical form "Etag" instead of "ETag".
2020-09-01 16:04:32 +01:00
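
The canonicalisation can be seen directly with the standard library,
which is why indexing the header map with "ETag" always came back empty:

    package main

    import (
        "fmt"
        "net/http"
        "net/textproto"
    )

    func main() {
        // Header keys are canonicalised, so "ETag" is stored as "Etag".
        fmt.Println(textproto.CanonicalMIMEHeaderKey("ETag")) // Etag

        h := http.Header{}
        h.Set("ETag", `"abc123"`)
        fmt.Println(h["Etag"])     // ["abc123"]
        fmt.Println(h["ETag"])     // [] - direct map access with the wrong case misses
        fmt.Println(h.Get("ETag")) // "abc123" - Get canonicalises for you
    }
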
Nick Craig-Wood
e2816629d0 fstest: fix unwrapping tests for bucket based remotes
TestIntegration/FsRmdirNotFound was failing on crypt wrapping a bucket based remote.

This was spotted by the integration tests.
2020-09-01 15:43:41 +01:00
Nick Craig-Wood
3f0d54daae crypt: fix purge bug introduced by refactor #1891
In this commit

a2afa9aadd fs: Add directory to optional Purge interface

We failed to encrypt the directory name so the Purge failed.

This was spotted by the integration tests.
2020-09-01 15:16:14 +01:00
Aaron Gokaslan
7dcbebf9bc jottacloud: rename unused variable to _ in jottacloud.go 2020-08-31 18:11:36 +01:00
Nick Craig-Wood
c31defbbd3 fs: add debug to show when a backend is being created
See: https://forum.rclone.org/t/rclone-rc-backend-command-not-working-as-expected/18834/
2020-08-31 14:51:06 +01:00
Nick Craig-Wood
e54ce35019 build: change beta numbering to be semver compatible - Fixes #4516
From now on the betas will be numbered for the version that they will
become, so:

v1.53.0-beta.NNNN.CCCCC

Where N is the commit number and C is the commit hash. When released this will
become v1.53.0 and the beta will become v1.54.0-beta.NNN.CCCCC.

The commit number is the count of commits since the root of the
tree, because we can no longer use the git version numbers since the
last tag.

This will simplify building the stable branch but that release
procedure hasn't been revised yet.

This commit also injects the name of the branch for the beta builds
into the download path.
2020-08-31 13:55:04 +01:00
Nick Craig-Wood
75d54d720c version: replace internal code with github.com/coreos/go-semver
We were already importing go-semver so it makes sense to remove the
duplicated semver parsing code and just use go-semver
2020-08-31 13:55:04 +01:00
Nick Craig-Wood
cc0421cb9e rc/webgui: skip AddPlugin and RemovePlugin tests if download fails 2020-08-31 13:45:06 +01:00
Nick Craig-Wood
9c01ac9894 rc/webgui: improve error handling on web fetches 2020-08-31 13:45:06 +01:00
Chaitanya Bankanhal
20300d1f61 plugins: Change failing plugin test to new repo rclone/rclone-test-plugin 2020-08-31 13:45:06 +01:00
Chaitanya Bankanhal
6231beefc5 webui: Fix broken webui because of plugins redirection 2020-08-31 13:45:06 +01:00
Nick Craig-Wood
068cfdaa00 drive: fix "panic: send on closed channel" when recycling dir entries
In this commit:

cbf3d43561 drive: fix missing items when listing using --fast-list / ListR

We introduced a bug where under specific circumstances it could cause
a "panic: send on closed channel".

This was caused by:

- rclone engaging the workaround from the commit above
- one of the listing routines returning an error
- this caused the `in` channel to be closed to stop the readers
- however the workaround was recycling stuff into the `in` channel at the time
- hence the panic on closed channel

This fix factors out the sending to the `in` channel into `sendJob`
and calls this both from the master go routine and the list
runners. `sendJob` detects the `in` channel being closed properly and
also deals correctly with contention on the `in` channel.

Fixes #4511
2020-08-31 11:41:15 +01:00
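
The general pattern behind a fix like this is to stop senders with a
separate quit signal rather than ever sending on a channel that may be
closed; a generic sketch (not the drive backend code):

    package main

    import (
        "fmt"
        "sync"
    )

    // sendJob either delivers the job or gives up cleanly if quit has been
    // closed, so recycled jobs never hit a closed channel.
    func sendJob(in chan<- string, quit <-chan struct{}, job string) bool {
        select {
        case in <- job:
            return true
        case <-quit:
            return false
        }
    }

    func main() {
        in := make(chan string)
        quit := make(chan struct{})
        var wg sync.WaitGroup

        wg.Add(1)
        go func() { // a list runner recycling work
            defer wg.Done()
            for i := 0; i < 3; i++ {
                if !sendJob(in, quit, fmt.Sprintf("dir%d", i)) {
                    fmt.Println("stopped: listing aborted")
                    return
                }
            }
        }()

        fmt.Println(<-in) // consume one job...
        close(quit)       // ...then abort the listing instead of closing `in`
        wg.Wait()
    }
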
Nick Craig-Wood
7d62d1fc97 Add aus to contributors 2020-08-31 11:41:15 +01:00
Nick Craig-Wood
e13ac28b8d Add Leo Luan to contributors 2020-08-31 11:41:15 +01:00
Lucas Kanashiro
b30ee57cd9 backend/local/aaaa: remove this unneeded file
This file was introduced as part of f39655093 probably by
mistake. There is no reference for this file in the local
backend directory.

Fixes #4536
2020-08-30 22:35:58 +01:00
Egor Margineanu
921e384c4d s3: update IBM COS endpoints - fixes #4522 2020-08-30 17:21:11 +01:00
Aaron Gokaslan
bf685f600e webgui: fixes previously unhandled error in JSON marshall in fs/rc/webgui/plugins.go:writeToFile 2020-08-30 17:15:03 +01:00
aus
b6d3cad70e sftp: add options for subsystem and server_command - fixes #1801 2020-08-25 21:38:13 +01:00
Leo Luan
c665201b85 vfs: support synchronous cache space recovery upon ENOSPC
This patch provides support for synchronous cache space recovery
to allow read threads to recover from ENOSPC errors when cache space
can be recovered from cache items that are not in use or are safe to be
reset/emptied.

The patch complements the existing cache cleaning process in two ways.

Firstly, the existing cache cleaning process is time-driven and runs
periodically. The cache space can run out while the cache cleaner
thread is still waiting for its next scheduled run. In this case the
IO threads encountering ENOSPC return an internal error to the
applications even when cache space could be recovered to avoid the
error. This patch addresses the problem by having the read threads
kick the cache cleaner thread in this condition to recover cache
space, preventing unnecessary ENOSPC errors from being seen by the
applications.

Secondly, this patch enhances the cache cleaner to support cache
item reset. Currently the cache purge process removes cache
items that are not in use. This may not be sufficient when the
total size of the working set exceeds the cache directory's
capacity. Like in the current code, this patch starts the purge
process by removing cache files that are not in use. Cache items
whose access times are older than vfs-cache-max-age are removed first.
After that, other not-in-use items are removed in LRU order until
vfs-cache-max-size is reached. If the vfs-cache-max-size (the quota)
is still not reached at this time, this patch adds a cache reset
step to reset/empty cache files that are still in use but not
dirtied.  This enables application processes to continue without
seeing an error even when the working set depletes the cache space
as long as there is not a large write working set hoarding the
entire cache space.

By design this patch does not add ENOSPC error recovery for write
IOs. Rclone does not empty a write cache item until the file data
is written back to the backend upon close. Allowing more cache
space to be consumed by dirty cache items when the cache space is
already running low would increase the risk of exhausting the cache
space in a way that the vfs mount becomes unreadable.
2020-08-25 21:12:06 +01:00
Chaitanya Bankanhal
d6996e3347 plugins: Add url query params to regex for referrer path 2020-08-24 10:56:04 +01:00
Chaitanya Bankanhal
dffcc99373 plugins: Create availablePlugins config file if it does not exist. 2020-08-24 10:56:04 +01:00
Chaitanya Bankanhal
09b79679cd plugins: restructure and add tests for pluginsctl/* calls 2020-08-24 10:56:04 +01:00
Chaitanya Bankanhal
cf68e61f40 Add redirection for plugin urls 2020-08-24 10:56:04 +01:00
Chaitanya Bankanhal
22674d1146 plugins: Add reverse proxy pluginsHandler for serving plugins 2020-08-24 10:56:04 +01:00
Chaitanya Bankanhal
f9ee0dc3f2 plugins: allow installation and use of plugins and test plugins with rclone-webui 2020-08-24 10:56:04 +01:00
Chaitanya Bankanhal
65fa6a946a webui: Expose webui downloader and other utility for use with plugins 2020-08-24 10:56:04 +01:00
Chaitanya
4cf82118d9 rc: add plugins support 2020-08-24 10:56:04 +01:00
Chaitanya
5f56611a76 webgui: Move to new package fs/rc/webgui. 2020-08-24 10:56:04 +01:00
Nick Craig-Wood
0f7a2f0f3c fichier: Detect Flood detected: IP Locked error and sleep for 30s
This is in an attempt to make the integration tests pass.
2020-08-23 18:01:22 +01:00
Nick Craig-Wood
be2b310ace Add Jay McEntire to contributors 2020-08-23 18:01:22 +01:00
Jay McEntire
45afe97e8e drive: Added --drive-starred-only to only show starred files - fixes #3928 2020-08-21 17:30:41 +01:00
Nick Craig-Wood
fee8f21ce1 pcloud: Add example hostnames to configurator and more docs - Fixes #4493
When using `rclone authorize` the hostname doesn't get set in the
config file.

This commit allows it to be set in the configurator and gives the user
a hint that it needs setting.
2020-08-21 16:14:02 +01:00
Nick Craig-Wood
1abc252ed3 onedrive: document refresh token expiry - fixes #4512 2020-08-21 15:56:41 +01:00
Nick Craig-Wood
801a820c54 s3: fix detection of bucket existing
This reverts part of

151f03378f s3: fix upload of single files into buckets without create permission

This erroneously assumed that a HEAD request on a non existent object
would return "NotFound" if the bucket was found. In fact it returns
"NotFound" when the bucket isn't found also.

This will break the fix for #4297 - however that can be made to work
using the new --s3-assume-bucket-exists flag
2020-08-21 13:28:08 +01:00
Nick Craig-Wood
2bcc66c805 drive: fix duplication of Google docs on server side copy #4517
Before this change, rclone was looking for the file without the
extension to see if it existed which meant that it never did.

This change checks that the destination file exists first, before removing
the extension.
2020-08-20 20:19:33 +01:00
Nick Craig-Wood
b5ba077a2f drive: work around drive bug which didn't set modtime of copied docs
Google drive appears to no longer be copying the modification time of
google docs.

Setting the mod time immediately after the copy doesn't work either,
so this patch copies the object, waits for 1 second and then sets the
modtime.

Fixes #4517
2020-08-20 20:19:33 +01:00
Nick Craig-Wood
0931b84940 pcloud: Fix rclone link for files
This was only working for files in the root directory and wasn't
looking at the encoding.

This is fixed to use NewObject which takes both things into account
and it makes the share by ID instead of by path.

This problem was spotted by the integration tests.
2020-08-20 20:09:55 +01:00
Nick Craig-Wood
94a0991584 vfs: set the modtime of the cache file immediately
Before this change we set the modtime of the cache file when all
writers had finished.

This has the unfortunate effect that the file is uploaded with the
wrong modtime which means on backends which can't set modtimes except
when uploading files it is wrong.

This change sets the modtime of the cache file immediately in the
cache and in turn sets the modtime in the file info.
2020-08-20 16:24:04 +01:00
Nick Craig-Wood
9d3d397f50 test_all: disable chunker + mailru tests while mailru is broken #4376 2020-08-20 12:50:20 +01:00
Nick Craig-Wood
38e8415e77 test_all: remove Digital Ocean s3 integration tests due to excessive rate limiting
This is what I wrote to Digital Ocean support on July 10, 2020 - alas
it didn't result in the rate limits dropping, so reluctantly I'm going
to remove DO from the integration tests since they never pass and have
no hope of ever passing while this rate limit is in effect.

----

Somewhere towards the end of June 2020 or the start of July 2020 my
integration tests between rclone ( https://rclone.org ) and Digital
Ocean started failing.

I tried moving the tests to different regions (currently they are
using AMS1 because I'm in Europe) with no improvement.

Rclone seems to be hitting this rate limit as documented here:
https://www.digitalocean.com/docs/spaces/#limits

- 2 COPYs per 5 minutes on any individual object in a Space

Rclone creates small objects about 100 bytes in size and renames them
a few times - this involves using the COPY call as S3 does not have a
rename API. The tests do this more than twice per object so hit the 5
minute timeout I think. Rclone does exponential backoff and fails
after 10 retries, not having reached a 5 minute delay by then.

Having a 5 minute lockout on an S3 compatible API is surprising!

Rclone runs integration tests with about 30 other providers, none of which
have a rate limit like this.

I understand the need for a COPY rate limit as server side copying
large files can be resource intensive. However a 5 minute lockout for
copying 100 byte files seems excessive!

Might I humbly suggest that you reduce or eliminate this rate limit
for small files?

----

This was the reply

Unfortunately it is not possible to raise this limit or remove it
currently on our platform. I do see how this would interfere with type
of applications that need to copy many small files and will be happy
to take the feedback to our engineering team to see how we can improve
the spaces system in the future
2020-08-20 12:50:04 +01:00
Nick Craig-Wood
fb9edbe34e test_all: export more internal variables to index.json for analysis 2020-08-20 12:23:33 +01:00
Nick Craig-Wood
85f9bd1abf union: fix tests by looking for fs.ErrorDirNotFound found in Purge and About
Before this change we errored out if one upstream errored in Purge or
About.

This change checks for fs.ErrorDirNotFound and skips that backend in
this case.
2020-08-19 18:04:16 +01:00
Nick Craig-Wood
63e4d2952b fstests: Suggest that Purge on a nonexistent dir should return fs.ErrorDirNotFound 2020-08-19 18:03:42 +01:00
Nick Craig-Wood
52247e9a9f local: return fs.ErrorDirNot found from About and Purge
Before this a stat error was returned which wasn't very helpful.
2020-08-19 18:02:21 +01:00
Nick Craig-Wood
d2ad293fae vfs: fix rename tests by waiting for writes to complete
Before this change the background writing of the file was racing with
the test of the object on the remote.

This meant that the tests passed locally but failed on a lot of the
remotes.
2020-08-19 17:04:17 +01:00
Nick Craig-Wood
6082096f7e vfs: check file exists in cache before renaming/setmodtime/deleting
Before this change we didn't check the file exists before renaming it,
setting its modification time or deleting it. If the file isn't in the
cache we don't need to do the action since it has been done on the
actual object, so these errors were producing unecessary log messages.

This change checks to see if the file exists first before doing those
actions.
2020-08-19 17:01:59 +01:00
Nick Craig-Wood
9a6fcd035b vfs: file: fix some locking issues reading f.d without the lock
Before this change we were reading Files.d without the lock. This
isn't allowed as d can change when the file is renamed.
2020-08-19 17:01:33 +01:00
Nick Craig-Wood
47d08ac1f1 vfs: recommend --vfs-cache-mode writes on backends which can't stream 2020-08-18 17:33:27 +01:00
Nick Craig-Wood
c4c6a1ee7d Add Kaloyan Raev to contributors 2020-08-18 17:33:27 +01:00
Anagh Kumar Baranwal
29d6358f34 docs: Updated docker docs regarding usage of the RC API from outside the
container

Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2020-08-17 17:11:22 +01:00
Chaitanya Bankanhal
6308153ae7 rc: pass response writer when needsResponse is set instead of pointer
rc: Fix failing tests for *http.ResponseWriter
2020-08-17 17:09:31 +01:00
Chaitanya Bankanhal
a9713cd0ed core/command: Add streaming output for long running commands. 2020-08-17 17:09:31 +01:00
Chaitanya Bankanhal
1cae4152f9 rc: add NeedsResponse for rc calls 2020-08-17 17:09:31 +01:00
Nick Craig-Wood
4884bee8ba core/command: pretend to be "rclone version" to make tests pass 2020-08-17 17:09:31 +01:00
Chaitanya Bankanhal
54fc2821cd core/command: Add version command instead of ls 2020-08-17 17:09:31 +01:00
Chaitanya Bankanhal
5549fd25fc core/command: Allow rc to execute rclone terminal commands.
Allow command parameter to be skipped.
2020-08-17 17:09:31 +01:00
Kaloyan Raev
3d5a63607e backend/tardigrade: Upgrade to uplink v1.2.0
Uplink v1.2.0 comes with two improvements related to rclone:
* Fix for resource leak in uploads.
* The socket dialer comes with better congestion control in some
environments. On Linux environments, if a congestion controller named
'ledbat' is installed, it will be used. Consider installing
https://github.com/silviov/TCP-LEDBAT
2020-08-13 16:32:18 +01:00
Klaus Post
cb7534dcdf lib: Add file name compression
Allows compressing short arbitrary strings and returns a string using base64 URL encoding.

A generator for tables is included and a few samples have been added. Add more to init.go.

Tested with fuzzing for crash resistance and symmetry, see fuzz.go
2020-08-13 16:14:11 +01:00
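
As a flavour of the output only (a toy using flate, not the table-based
scheme the library actually implements): compress the name and encode it
with URL-safe base64.

    package main

    import (
        "bytes"
        "compress/flate"
        "encoding/base64"
        "fmt"
    )

    func main() {
        name := "a fairly ordinary file name 2020-08-13.txt"

        var buf bytes.Buffer
        w, _ := flate.NewWriter(&buf, flate.BestCompression)
        w.Write([]byte(name))
        w.Close()

        // URL-safe base64 keeps the result usable as a path segment.
        fmt.Println(base64.RawURLEncoding.EncodeToString(buf.Bytes()))
    }
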
Nick Craig-Wood
770a6f2cad build: build with go1.15.x by default now that it is released 2020-08-12 09:51:22 +01:00
Nick Craig-Wood
aab9aa8a2e js: add experimental interface for integrating rclone into browsers
This works by compiling rclone to wasm and exporting the RC api to
javascript.
2020-08-10 17:32:21 +01:00
Nick Craig-Wood
3a14b1d5a9 build: make rclone build with wasm
Needed to drop
- azureblob backend
- cache backend
- qingstor backend
- cachestats command
- ncdu command
2020-08-10 17:32:21 +01:00
Nick Craig-Wood
ac044b1c54 Add Tim Gallant to contributors 2020-08-10 17:32:21 +01:00
Chaitanya Bankanhal
61c7ea4085 rc: fix rc/uploadfile only working for root of the fs 2020-08-10 17:09:46 +01:00
Nick Craig-Wood
01280798e9 build: drop macOS 386 build as it is no longer supported by go1.15
The go team made the decision to drop support for 32 bit macOS as 32
bit apps are no longer supported by macOS and 32 bit hardware hasn't
been produced by Apple for over 10 years.
2020-08-09 12:59:17 +01:00
Nick Craig-Wood
db56d30078 build: build with go1.15-rc2 2020-08-09 10:38:02 +01:00
Nick Craig-Wood
a00274d2ab build: update test builder to go1.15-rc2 2020-08-08 17:15:43 +01:00
Nick Craig-Wood
82975109af Start v1.52.3-DEV development 2020-08-08 10:35:06 +01:00
Tim Gallant
30eb094f28 oauthutil: adds SharedOptions for OAuth backends
1. adds SharedOptions data structure to oauthutil
2. adds config.ConfigToken option to oauthutil.SharedOptions
3. updates the backends that have oauth functionality

Fixes #2849
2020-08-07 16:32:01 +01:00
Nick Craig-Wood
b401a727f7 onedrive: add --onedrive-no-versions flag to remove old versions - fixes #4106 2020-08-07 15:58:30 +01:00
Nick Craig-Wood
8eb16ce89c onedrive: implement rclone cleanup #4106 2020-08-07 15:58:30 +01:00
Nick Craig-Wood
8e7eb37456 drive: implement backend command untrash
rclone backend untrash drive:directory

This was based on: https://gitlab.com/B4dM4n/drive-untrash

See: https://forum.rclone.org/t/rclone-teamdrive-undelete/18278/3
2020-08-07 11:10:37 +01:00
Nick Craig-Wood
4d7f91309b vfs: fix download threads timing out
Before this fix, download threads would fill up the buffer and then
time out even though data was still being read from them. If the client
was streaming slower than network speed this caused the downloader to
stop and be restarted continuously. This caused more potential for
skips in the download and unnecessary network transactions.

This patch fixes that behaviour - as long as a downloader is being
read from more often than once every 5 seconds, it won't timeout.

This was done by:

- kicking the downloader whenever ensureDownloader is called
- making the downloader loop if it has already downloaded past the maxOffset
- making setRange() always kick the downloader
2020-08-06 17:26:18 +01:00
Nick Craig-Wood
109b695621 vfs: add --vfs-read-ahead parameter for use with --vfs-cache-mode full
This parameter causes extra read-ahead over --buffer-size which is not
buffered in memory but on disk.
2020-08-06 17:26:18 +01:00
Nick Craig-Wood
177d2f2f79 build: add script for torturing the VFS 2020-08-06 17:26:18 +01:00
Nick Craig-Wood
f5439ddc54 accounting: fix deadlock in stats printing
The deadlock was caused in transfermap.go by calling mu.RLock() in one
function then calling it again in a sub function. Normally this is
fine, however this leaves a window where mu.Lock() can be called. When
mu.Lock() is called it doesn't allow the second mu.RLock() and
deadlocks.

    Thread 1                   Thread 2
    String():mu.RLock()
                               del():mu.Lock()
    sortedSlice():mu.RLock()                     - DEADLOCK

Lesson learnt: don't try using locks recursively ever!

This patch fixes the problem by removing the second mu.RLock(). This
was done by factoring the code that was calling it into the
transfermap.go file so all the locking can be seen at once which was
ultimately the cause of the problem - the code which used the locks
was too far away from the rest of the code using the lock.

This problem was introduced in:

bfa5715017 fs/accounting: sort transfers by start time

Which hasn't been released in a stable version yet
2020-08-05 17:13:00 +01:00
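
The fixed shape is to take the lock once at the top level and have
helpers assume it is already held; an illustrative sketch with made-up
names:

    package main

    import (
        "fmt"
        "sort"
        "sync"
    )

    type transferMap struct {
        mu    sync.RWMutex
        items map[string]int
    }

    // sortedSliceLocked assumes the caller already holds mu (read or write),
    // so it never tries to take the lock again.
    func (tm *transferMap) sortedSliceLocked() []string {
        names := make([]string, 0, len(tm.items))
        for name := range tm.items {
            names = append(names, name)
        }
        sort.Strings(names)
        return names
    }

    func (tm *transferMap) String() string {
        tm.mu.RLock() // single RLock for the whole operation
        defer tm.mu.RUnlock()
        return fmt.Sprint(tm.sortedSliceLocked())
    }

    func main() {
        tm := &transferMap{items: map[string]int{"b.txt": 1, "a.txt": 2}}
        fmt.Println(tm) // [a.txt b.txt]
    }
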
Nick Craig-Wood
324077fb48 swift: fix update multipart object removing all of its own parts
After uploading a multipart object, rclone deletes any unused parts.

Probably as part of the listing unification, the detection of the
parts belonging to the current upload was failing, so calling Update
was deleting the parts of the current object.

This change fixes the detection and deletes all the old parts but none
of the new ones now.

Fixes #4075
2020-08-03 14:45:03 +01:00
Nick Craig-Wood
f50ab981f7 drive: stop using root_folder_id as a cache #4419
Prior to this change rclone cached the looked-up root_folder_id in
the root_folder_id config variable.

This has caused a lot of confusion and a few attempts at workarounds
and ultimately was a mistake.

This reverts rclone attempting to cache anything in root_folder_id and
returns that variable to be entirely user modified.

It gives a little hint in the debug that rclone could be sped up
slightly by setting it, but it is up to the user to think about
whether that would be OK or not.

    Google drive root '': root_folder_id = "XXX" - save this in the config to speed up startup

It does not change root_folder_id itself, leaving this to the user.

See: https://forum.rclone.org/t/rclone-gdrive-no-longer-returning-anything/17215
2020-08-02 11:47:07 +01:00
Nick Craig-Wood
0c620ad076 Add David Ibarra to contributors 2020-08-02 11:47:07 +01:00
David Ibarra
49cf2eb7e4 cmd/obscure: Allow obscure command to accept password on STDIN
`rclone obscure` currently only accepts a command line argument of `password` to generate
an obfuscated password. This is an issue since generating obfuscated passwords programmatically
requires sending the plain text password as a shell argument, which can cause problems if the
password contains shell characters, or if the password is from an untrusted source.

This patch opens up STDIN so that developers can pipe a password directly
to `rclone obscure`, which increases safety and convenience.
2020-08-02 11:32:47 +01:00
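Assuming the command reads the password from standard input when given `-` as its argument (the usage later documented for this feature), a script can avoid putting the plain text password on the command line:

    echo "secretpassword" | rclone obscure -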
Nick Craig-Wood
a2afa9aadd fs: Add directory to optional Purge interface - fixes #1891
- add a directory to the optional Purge interface
- fix up all the backends
- add an additional integration test to test for the feature
- use the new feature in operations.Purge

Many of the backends had been prepared in advance for this so the
change was trivial for them.
2020-07-31 17:43:17 +01:00
Nick Craig-Wood
c2f3949ded Add tyhuber1 to contributors 2020-07-30 16:44:13 +01:00
tyhuber1
bf355c4527 local: Add --local-no-set-modtime option to prevent modtime changes
If this option is enabled, rclone will not set modtime of uploaded files and
the backend will return ModTimeNotSupported as its Precision.

Normally rclone updates the modification time of files after they are done
uploading. This can cause permissions issues on Linux platforms when
rclone is copying to a CIFS mount where the user rclone is running as
does not own the uploaded file. If this option is enabled,
rclone will no longer update the modtime after copying a file.

See: https://forum.rclone.org/t/chtimes-error-on-local-mounted-copy/17784
2020-07-30 16:43:17 +01:00
Nick Craig-Wood
3daa63cae8 mount: fix volume name broken in recent refactor 2020-07-29 14:23:00 +01:00
Nick Craig-Wood
4441e012cf vfs: fix saving from chrome without --vfs-cache-mode writes #4293
Due to Chrome's rather complicated use of file handles when saving
files from the download windows, rclone was attempting to truncate a
closed file.

The file appeared closed due to the handling of 0 length files.

This patch removes the check for the file being closed in the
WriteFileHandle.Truncate call. This is safe because the only action
this method takes is to emit an error message if the file is the wrong
size.

See: https://forum.rclone.org/t/google-drive-cannot-save-files-directly-from-browser-to-gdrive-mounted-path/17992/
2020-07-28 17:18:31 +01:00
Nick Craig-Wood
122a47fba6 accounting: Allow transfers to be canceled with context #3257
This makes all transfers cancelable even if the backend doesn't
support context as all transfers are done using the Accounting
framework.
2020-07-28 16:41:17 +01:00
Nick Craig-Wood
421585dd72 accounting: add context to Account and propagate changes #3257
This is preparation for getting the Accounting to check the context,
but first we need to get it in place. Since this is one of those
changes that makes lots of noise, this is in a separate commit.
2020-07-28 16:41:17 +01:00
Nick Craig-Wood
0bab9903ee drive: factor creation of the Fs so it can be re-used in team drive listing 2020-07-28 16:24:00 +01:00
Nick Craig-Wood
700deb0a81 drive: add rclone backend drives to list shared drives (teamdrives)
See: https://forum.rclone.org/t/google-drive-remotes-team-drive-list-commend/17595
2020-07-28 16:24:00 +01:00
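An illustrative invocation of the new backend command (the remote name `drive:` is made up):

    rclone backend drives drive: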
Nick Craig-Wood
1222b78ec4 cmount: add support for reading unknown length files using direct IO
This means that on Linux and OSX at least reading a google doc from a
mount will behave sensibly.
2020-07-28 16:23:11 +01:00
Nick Craig-Wood
0ee16b51c4 mount: On Windows don't add -o uid/gid=-1 if user supplies -o uid/gid.
Before this change if the user supplied `-o uid=XXX` then rclone would
write `-o uid=-1 -o uid=XXX`, thus duplicating the uid value.

After this change rclone doesn't write the default `-1` version.

This fix affects `uid` and `gid`.

See: https://forum.rclone.org/t/issue-with-rclone-mount-and-resilio-sync/14730/27
2020-07-28 16:22:29 +01:00
Nick Craig-Wood
26001d520a fs: add --bwlimit-file flag to limit speeds of individual file transfers 2020-07-28 11:46:24 +01:00
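A hedged example combining the new per-file limit with the existing global --bwlimit (values and paths are illustrative):

    rclone sync /local/dir remote:backup --bwlimit 10M --bwlimit-file 1M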
David
8bf265c775 box: allow authentication with access token - fixes #4114 2020-07-28 11:43:44 +01:00
Nick Craig-Wood
62f0bbb598 dedupe: Make it obey the --size-only flag for duplicate detection #4321 2020-07-28 11:40:37 +01:00
Nick Craig-Wood
d5f4c74697 s3: implement cleanup and backend command to list & remove multipart uploads
This implements `rclone cleanup` to remove multipart uploads over 24
hours old. It also implements the backend command
`list-multipart-uploads` to see which ones are available and `cleanup`
to delete them with a configurable expiry interval.

See #4302
2020-07-28 11:37:46 +01:00
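A sketch of the commands described above, against an illustrative bucket; the `-o max-age=24h` option is an assumption about how the configurable expiry interval is passed:

    rclone backend list-multipart-uploads s3:bucket
    rclone cleanup s3:bucket
    # with an explicit expiry interval (option name assumed)
    rclone backend cleanup -o max-age=24h s3:bucket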
Nick Craig-Wood
8f42532b6d sync: add --track-renames-strategy leaf
See: https://forum.rclone.org/t/how-to-minimize-bandwith-w-r-t-renames-during-sync/16928/22
2020-07-28 11:34:27 +01:00
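Illustrative usage (paths made up); note that --track-renames itself still has to be enabled for the strategy to take effect:

    rclone sync /local/dir remote:backup --track-renames --track-renames-strategy leaf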
Nick Craig-Wood
2288a5c617 s3: implement profile and shared_credentials_file options
It is impossible to use two different profiles at the same time -
these config vars enable that.

See: https://forum.rclone.org/t/s3-source-destination-named-profile/17417
2020-07-28 11:32:32 +01:00
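A hedged sketch of two remotes using different profiles from the same shared credentials file (remote names, profile names and paths are illustrative; env_auth = true is assumed so that rclone reads the credentials file):

    [s3-prod]
    type = s3
    provider = AWS
    env_auth = true
    profile = prod
    shared_credentials_file = ~/.aws/credentials

    [s3-backup]
    type = s3
    provider = AWS
    env_auth = true
    profile = backup
    shared_credentials_file = ~/.aws/credentials

A copy such as `rclone copy s3-prod:bucket s3-backup:bucket` can then authenticate each side with its own profile.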
Nick Craig-Wood
957311f479 b2: fix transfers when using download_url
Before this fix, if an object had ID set and download_url was in use,
downloading the object would give this error:

    failed to open for download: bucket example_bucket does not have file: /b2api/v1/b2_download_file_by_id (404 not_found)

After this fix we only download by ID if download_url is not set

See: https://forum.rclone.org/t/correct-format-for-rclone-b2-download-url-variable/15498
2020-07-28 11:30:01 +01:00
Nick Craig-Wood
2cc381b91d build: disable lib/plugin under gccgo to make rclone build with gccgo 2020-07-28 09:56:31 +01:00
Nick Craig-Wood
f406dbbb4d s3: add --s3-no-check-bucket for minimising rclone transactions and perms
Fixes #4449
2020-07-27 17:49:40 +01:00
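Illustrative use, for credentials that can write objects but not create buckets (bucket and paths made up):

    rclone copy /local/file.txt s3:existing-bucket/path --s3-no-check-bucket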
Nick Craig-Wood
3b2322285a Add kcris to contributors 2020-07-27 17:49:40 +01:00
kcris
47d093e863 drive: update docs to show use of sharing with a user instead of impersonate 2020-07-27 17:10:28 +01:00
Nick Craig-Wood
b2ae94de5b mount: fix mount flags not working
This was broken in the recent refactor.

See: https://forum.rclone.org/t/issue-with-allow-other-in-beta/18133
2020-07-27 15:24:28 +01:00
Nick Craig-Wood
4afea1ebaf docs: update install from source instructions
This has changed post Go modules.

In particular it recommends against the go get `-u` flag.

See: https://forum.rclone.org/t/install-from-source-go-get-errors/18114
2020-07-27 11:47:46 +01:00
Nick Craig-Wood
711736054f Add Jack to contributors 2020-07-26 12:07:04 +01:00
Jack
d64212d902 serve/restic: expose interfaces so that rclone can be used as a library from within restic
This patch enables rclone to be used as a library from within restic

- exposes NewServer
- exposes Server
- implements http.RoundTripper

Co-authored-by: Jack Deng <jackdeng@gmail.com>
2020-07-26 12:06:47 +01:00
Chaitanya Bankanhal
8913679d88 accounting: Fix elapsed time not showing actual time since beginning
This fixes the elapsed time display in the statistics output in the rc and in the log messages.
2020-07-26 11:59:50 +01:00
Nick Craig-Wood
4f9a80e2d3 build: actions update, cache, go1.15-rc1 build
- Use cache to store package versions
- Update actions/setup-go to v2
- Add go1.15-rc1 build
- Make separate build step
- stop downloading code into special path
- leave adding ~/go/bin to PATH to action/setup-go
- remove docker build from xgo as we are building rclone anyway
- remove modules setting since it is now always on
- use ./... instead of listing files in tests
2020-07-25 18:52:33 +01:00
Nick Craig-Wood
aa93b39d9b build: fix tests on go1.15
go1.15 introduced a stricter policy for what you can convert with
`string()` and now `go vet` warns if you try to do `string(int)`.

See: https://github.com/golang/go/issues/32479
2020-07-25 18:51:28 +01:00
Nick Craig-Wood
101f82c6b3 drive: drop "Disabling ListR" messages down to debug
This was causing unnecessary anguish for users since these messages are
harmless and really only interesting for debugging.

See: https://forum.rclone.org/t/rclone-gdrive-error/18098
2020-07-25 16:50:55 +01:00
Nick Craig-Wood
d35673efc6 webdav: fix directory creation with 4shared - fixes #4428
When we run MKCOL on 4shared on a directory that already exists, this
returns a 409/Conflict error. However this error code usually means
that the intermediate collections need creating.

The actual error code to return when trying to create a directory that
already exists isn't specified in the RFC, only that an error MUST be
returned and there are already 3 statuses checked in the code.

However using 409 makes rclone's usual strategy for making directories
fail and return the 409 error.

This patch tries the MKCOL and if it returns an unrecognised error
code, then calls PROPFIND on the directory to discover whether the
directory really exists or not.

This should also cover other WebDAV servers returning other error
messages we haven't accounted for in the code yet.
2020-07-24 17:26:42 +01:00
Nick Craig-Wood
3286d1992b mount: warn macOS users that mount implementation is changing #4393 2020-07-24 15:41:31 +01:00
Nick Craig-Wood
4ac662d144 cmount: fix macOS losing directory contents #4393
Before this change when reading directories we would use the directory
handle and the Readdir(-1) call on the directory handle. This worked
fine for the first read, but if the directory was read again on the
same handle Readdir(-1) returns nothing (as per its design).

It turns out that macOS leaves the directory handle open and just
re-reads the data from it, so this problem causes directories to start
out full then subsequently appear empty.

macOS/OSXFUSE is passing an offset of 0 to the Readdir call telling
rclone to seek in the directory, but we've told FUSE that we can't
seek by always returning ofst=0 in the fill function.

This fix works around the problem by reading the directory from the
path each time, ignoring the actual handle. This should be no less
efficient.

We will return an ESPIPE if the offset is ever non-zero.

There are possible corner cases reading deleted directories which this
ignores.
2020-07-24 15:38:08 +01:00
Nick Craig-Wood
d73a418a55 cmount: always supply stat information in Readdir
It is cheap to make the stat information here - we give FUSE a file
type to look at least.
2020-07-24 15:12:05 +01:00
Nick Craig-Wood
306a3e0cd7 cmount: catch panics in initialization and turn into error messages 2020-07-24 15:12:05 +01:00
Nick Craig-Wood
975a53c9e3 build: enable cmount on macOS #4393 2020-07-24 15:12:05 +01:00
Nick Craig-Wood
78fdc5805b vendor: Update github.com/billziss-gh/cgofuse to v1.4.0 #4393 2020-07-24 15:12:05 +01:00
Nick Craig-Wood
8f9d5af26d cache: remove mount tests as they aren't being run and cause maintenance issues
Before this change the cache backend contained its own routines for
mounting and testing on that mount.

These tests are never run on the CI and cause a maintenance burden.

This commit removes the tests.
2020-07-24 11:57:49 +01:00
Nick Craig-Wood
6ff5787b40 mount: add VFS and Mount options to mount/listmounts 2020-07-24 10:48:51 +01:00
Nick Craig-Wood
3c1c6d2f01 mount: add mountOpt to mount/mount rc 2020-07-24 10:48:51 +01:00
Nick Craig-Wood
0272a7f405 mount: change interface of mount commands to take mount options
This is in preparation of being able to pass mount options to the rc
command "mount/mount"
2020-07-24 10:48:51 +01:00
Nick Craig-Wood
e1d34ef427 mount: factor Mount into mountlib and tidy signal handling
This factors common code from mount, cmount and mount2 into mountlib.

It also uses atexit for unregistering the mount.
2020-07-23 13:08:38 +01:00
Nick Craig-Wood
26b4698212 mount: make mount/mount remote control take vfsOpt option
See: https://forum.rclone.org/t/passing-mount-options-like-vfs-cache-mode-when-using-rclone-rc-mount-mount/17863
2020-07-23 12:30:41 +01:00
Nick Craig-Wood
2871268505 mount: change interface of mount commands to take VFS
This is in preparation of being able to pass options to the rc command
"mount/mount"
2020-07-23 12:30:41 +01:00
Nick Craig-Wood
744828a4de rc: allow JSON parameters to simplify command line usage
If the parameter being passed is an object then it can be passed as a
JSON string rather than using the `--json` flag which simplifies the
command line.

rclone rc operations/list fs=/tmp remote=test opt='{"showHash": true}'

Rather than

rclone rc operations/list --json '{"fs": "/tmp", "remote": "test", "opt": {"showHash": true}}'
2020-07-22 18:40:52 +01:00
Nick Craig-Wood
ff84351655 operations: factor Check and related functions into its own files 2020-07-21 22:08:13 +01:00
Nick Craig-Wood
8b6f2bbb4b check,cryptcheck: add reporting of filenames for same/missing/changed #3264
See: https://forum.rclone.org/t/rclone-check-v-doesnt-show-once-per-minute-update-counts/17402
2020-07-21 22:08:13 +01:00
Nick Craig-Wood
d2efb4b29b ftp: add support for --dump bodies and --dump auth
See: https://forum.rclone.org/t/rclone-copy-gives-error-connection-reset-by-peer-using-ftp/17934/27
2020-07-21 16:26:31 +01:00
Nick Craig-Wood
db56b1bfec serve/ftp: use refactored goftp.io/server library for binary shrink
This uses the refactored goftp library which doesn't include the minio
driver. This reduces the binary size by 1.5MB

See: https://gitea.com/goftp/server/pulls/120
2020-07-21 16:23:55 +01:00
Nick Craig-Wood
990a33b393 build: go mod tidy 2020-07-21 16:19:24 +01:00
Nick Craig-Wood
664c658da6 ftp: Update github.com/jlaffaye/ftp to fix interop with pure-ftpd
See: https://github.com/jlaffaye/ftp/pull/190
2020-07-21 16:17:37 +01:00
Nick Craig-Wood
d1617ce7ce Stop doing vendoring - fixes #4032 2020-07-21 16:09:53 +01:00
Nick Craig-Wood
2b50d44a2f Remove vendor directory #4032 2020-07-21 16:09:53 +01:00
Nick Craig-Wood
ddfde68140 vfs: fix directory locking caused by slow directory listings
Before this fix we took the directory lock to read the ModTime of the
directory. This was causing locking on directories which were being
re-read from the backend.

This commit gives the modtime its own lock so it can be read even when
the directory is being updated.

See: https://forum.rclone.org/t/high-cpu-load-with-rclone-mount/17604
2020-07-18 09:08:18 +01:00
Nick Craig-Wood
811b30d116 sync: fix deadlock with --track-renames-strategy modtime - fixes #4427
Before this change we could exit the popRenameMap function with the
lock held.

This fixes the problem by defer-ring the unlock.

See: https://forum.rclone.org/t/track-renames-strategy-modtime-doesnt-work/16992
2020-07-17 17:09:58 +01:00
Nick Craig-Wood
bcd362fcd5 accounting: fix documentation for speed/speedAvg
Fix the documentation for the very confusingly names speed and
speedAvg stats items.

See: https://github.com/rclone/rclone-webui-react/issues/99
2020-07-17 15:35:15 +01:00
Nick Craig-Wood
1fafcd4d28 Add Dmitry Ustalov to contributors 2020-07-15 23:15:06 +01:00
Nick Craig-Wood
2807a85f68 Add Morten Linderud to contributors 2020-07-15 23:15:06 +01:00
Nick Craig-Wood
39515acf68 Add Kevin to contributors 2020-07-15 23:15:06 +01:00
Dmitry Ustalov
aaed74fe4e docs: workaround and policy for Google Drive API
* workaround for Google Drive API
* mention the use of Google User Data
* unified wording for user data policy
2020-07-15 23:14:39 +01:00
Nick Craig-Wood
126efaadcc vfs: fix renamed files not being uploaded with --vfs-cache-mode minimal
Before this change files that were in the cache and renamed with
--vfs-cache-mode minimal weren't renamed at all.

This fixes the problem and adds tests for all the different
combinations of cache modes and in and out of the cache.
2020-07-15 16:22:12 +01:00
Nick Craig-Wood
2adc057d95 vfs: fix very high load caused by slow directory listings
In this commit (released in v1.52.0)

6ca7198f mount: fix disappearing cwd problem

SetSys was introduced to cache node lookups.

Unfortunately taking the vfs.(*Dir) lock in SetSys causes any FUSE
operations on a directory to pile up behind slow directory listings.

In some situations this leads to very high load.

This commit fixes it by using atomic operations to read and write the
Sys value, making it independent of the lock.

See: https://forum.rclone.org/t/high-cpu-load-with-rclone-mount/17604
See: #4104
2020-07-15 16:22:12 +01:00
Nick Craig-Wood
59770a4953 ftp: add note to docs about home vs root directory selection
See: https://forum.rclone.org/t/update-docs-for-ftp-path-absolute-vs-relative/17875
2020-07-15 15:27:47 +01:00
Morten Linderud
67098511db make_manual: Support SOURCE_DATE_EPOCH
The documentation contains an embedded datetime which does not read
SOURCE_DATE_EPOCH. This makes the documentation unreproducible when a
distribution tries to recreate a previous build of the package.

This patch ensures we attempt to read the environment variable before
defaulting to the current build time.

Signed-off-by: Morten Linderud <morten@linderud.pw>
2020-07-15 13:23:08 +01:00
Kevin
07b2ce4ab2 Avoid comma rendered in URL in onedrive.md (#4438)
Removed comma from the end of the Azure AD Applications List Blade URL since it was not resolving and customers were opening up support tickets with the Microsoft Azure AD team.
2020-07-15 15:55:00 +08:00
Nick Craig-Wood
80d2f38192 s3: fix bucket Region auto detection when Region unset in config #2915
Previous to this fix if Region was not set and Endpoint was not set
then we set the endpoint to "https://s3.amazonaws.com/".

This is unnecessary because if the Region alone isn't set then we set
it to "us-east-1" which has the same endpoint.

Having the endpoint set breaks the bucket region auto detection with
the error "Failed to update region for bucket: can't set region to
"xxx" as endpoint is set".

This fix removes that check.
2020-07-10 17:16:59 +01:00
Nick Craig-Wood
0792f4722c swift: fix purge not deleting directory markers
At some point Purge stopped deleting directory markers. We don't have
an integration test for this so it went unnoticed.

This patch fixes the problem but doesn't introduce an integration test
as we don't have a framework for making directory markers yet.
2020-07-10 15:16:11 +01:00
Nick Craig-Wood
db37360a1d swift: fix dangling large objects breaking the listing
Before this change, large objects which had had their contents deleted
would return "Object not found" and break the listing.

This change makes these objects appear as 0 sized entities so they can
be listed and deleted.
2020-07-10 11:03:08 +01:00
Nick Craig-Wood
44ff766f98 mkdir: warn when using mkdir on remotes which can't have empty directories
It is a source of confusion for users that `rclone mkdir` on a remote
which can't have empty directories such as s3/b2 does nothing.

This emits a warning at NOTICE level if the user tries to mkdir a
directory not at the root for a remote which can't have empty
directories.

See: https://forum.rclone.org/t/mkdir-on-b2-consider-adding-some-output/17689
2020-07-08 17:55:58 +01:00
Max Sum
bfa5715017 fs/accounting: sort transfers by start time 2020-07-07 15:42:06 +01:00
Max Sum
e2183ad661 fs/accounting: use transferMap instead of stringSet 2020-07-07 15:42:06 +01:00
Nick Craig-Wood
e2201689cf build: add ARMv7 to the supported builds - fixes #748 2020-07-07 12:13:20 +01:00
Nick Craig-Wood
0f72aa8a5f docs: add section about config variable precedence
See: https://forum.rclone.org/t/precedence-rules-for-config/17707
2020-07-07 11:12:35 +01:00
Nick Craig-Wood
b2f4f52b64 vfs cache: make logging consistent and remove some debug logging 2020-07-06 17:32:53 +01:00
Nick Craig-Wood
c65ed26a7e vfs: vfscache: Fix renaming of items while they are being uploaded
Previous to the fix, if an item was being uploaded and it was renamed,
the upload would fail with missing checksum errors.

This change cancels any uploads in progress if the file is renamed.
2020-07-06 17:32:53 +01:00
Nick Craig-Wood
df5dbaf49b vfs: writeback: add Rename call for renaming items in the writeback queue 2020-07-06 17:32:53 +01:00
Nick Craig-Wood
80fe1f16db b2: note that b2's encoding now allows \ but rclone's hasn't changed
See: https://forum.rclone.org/t/why-are-there-error-messages-about-non-existing-files-in-the-log/17608
2020-07-06 16:28:30 +01:00
Nick Craig-Wood
f524a4c1cc vendor: drop unused github.com/djherbis/times 2020-07-06 14:29:42 +01:00
Nick Craig-Wood
c61c3cddbd Add Evan Harris to contributors 2020-07-06 14:29:42 +01:00
Nick Craig-Wood
51767aee23 Add Garrett Squire to contributors 2020-07-06 14:29:42 +01:00
Evan Harris
cd3d7e2dca docs: Update install.md to reflect minimum Go version
Fixes #3765
2020-07-06 13:42:47 +01:00
Garrett Squire
4f7f5404ce build: fix file handle leak in GitHub release tool
This is a small patch to remove a defer statement found in a for loop.
It instead closes the file after it is done copying the bytes from the
tar file reader.
2020-07-04 10:51:37 +01:00
Nick Craig-Wood
d4b2709fb0 pcloud: fix oauth on European region "eapi.pcloud.com"
Pcloud appears to have opened up a new region and they are returning
the hostname in the oauth callback, thus

    GET /?code=XXX&locationid=1&hostname=api.pcloud.com&state=XXX HTTP/1.1
    GET /?code=XXX&locationid=2&hostname=eapi.pcloud.com&state=XXX HTTP/1.1

This isn't documented yet, however pCloud have confirmed that this is
the correct interpretation.

Rclone now reads the "hostname" parameter in the oauth callback and
stores it in the config file. It uses it for all subsequent API calls.
2020-07-03 20:38:42 +01:00
Nick Craig-Wood
e6fdc3a932 drive: make dangling shortcuts appear in listings
Previous to this a dangling shortcut would error the directory
listing.

This patch makes dangling shortcuts appear as 0 sized objects in the
directory listing so they can be deleted. These objects can't be read
though.
2020-07-02 22:12:44 +01:00
Nick Craig-Wood
63ebe4ca8d vfs: Reduce logging of metadata expiry to debug 2020-07-02 14:52:12 +01:00
Nick Craig-Wood
8d5bc7f28b fs/cache: fix moveto/copyto remote:file remote:file2
Before this change, if the cache was given a source `remote:file` it
stored `remote:` with the error `fs.ErrorIsFile` attached. This meant
that if `remote:` was subsequently looked up it would return the
`fs.ErrorIsFile` error.

This broke `moveto remote:file remote:file2` as moveto would lookup
`remote:` from the second argument and erroneously get the
`fs.ErrorIsFile` error.

This likely broke other commands too.

This was broken in

4c9836035 fs/cache: Add Pin and Unpin and canonicalised lookup

Which was released in v1.52.0

The fix is to make a new cache entry for `remote:` with no error
attached in the case that the original call returned `fs.ErrorIsFile`.
2020-07-02 10:55:36 +01:00
Nick Craig-Wood
50e36fb482 onedrive: Fix reverting to Copy when Move would have worked
For some objects the onedrive backend has been doing a server side
copy and a delete when a server side move would have worked OK.

This was caused by not detecting the home drive correctly (when it was
an empty string) and assuming that these transfers were cross drive.

This is fixed by canonicalizing drive IDs before comparing them.
2020-07-02 10:55:36 +01:00
Nick Craig-Wood
a1c5e76c27 Add Kai Lüke to contributors 2020-07-02 10:55:36 +01:00
Kai Lüke
54f2587c1e gcs: add support for anonymous access
Currently credentials are required to download a public bucket file
which is not really necessary and makes automated usage more complex.
Add a new option "anonymous" which when enabled configures the gcs
backend to use an anonymous HTTP client. This of course only works
for read access; trying to write will lead to errors like this:
"googleapi: Error 401: Anonymous caller does not not have
storage.objects.create access to the Google Cloud Storage object.",
as expected. By default the anonymous access option is disabled so that
the GCS Application Default Credentials are still used by default as
before and an error is given if they can't be found.
2020-07-01 20:54:49 +01:00
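A minimal sketch of a remote using the new option (remote and bucket names are illustrative):

    [gcspublic]
    type = google cloud storage
    anonymous = true

A read such as `rclone copy gcspublic:some-public-bucket/file.txt /tmp/` should then work without credentials, while writes fail as described above.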
Nick Craig-Wood
99c293a403 log: fix --use-json-log going to stderr not --log-file on Windows - fixes #4367 2020-07-01 20:47:37 +01:00
Nick Craig-Wood
fefcbf60fa sftp: use the absolute path instead of the relative path
Before this change rclone used the relative path from the current
working directory.

It appears that WS FTP doesn't like this and the openssh sftp tool
also uses absolute paths which is a good reason for switching to
absolute paths.

This change reads the current working directory at startup and bases
all file requests from there.

See: https://forum.rclone.org/t/sftp-ssh-fx-failure-directory-not-found/17436
2020-06-30 16:07:23 +01:00
Nick Craig-Wood
96c2fdb445 vfs: update VFS help 2020-06-30 12:03:39 +01:00
Nick Craig-Wood
8301a72453 vfs: Fix over downloading with --vfs-cache-mode full and --buffer-size 0
This was caused by the signal to stop buffering being ignored when
there was no buffer!

This is fixed by explicitly checking for no buffering and stopping.
2020-06-30 12:03:39 +01:00
Nick Craig-Wood
05ddef117a accounting: add HasBuffer method to Account 2020-06-30 12:03:39 +01:00
Nick Craig-Wood
15402e46c9 vfs: Add recovered items on cache reload to directory listings
Before this change, if we restarted an upload after a restart then the
file would get uploaded but never added to the directory listings.

This change makes sure we add virtual items to the directory cache
when reloading the cache so that they show up properly.
2020-06-30 12:03:39 +01:00
Nick Craig-Wood
939860eb85 vfs: vfscache make TestCacheCleaner test more reliable 2020-06-30 12:03:39 +01:00
Nick Craig-Wood
530dc77cde vfs: Fix race condition in vfscache 2020-06-30 12:03:39 +01:00
Nick Craig-Wood
5db15cb157 vfs: make dir.ForgetAll and friends not forget virtual entries
Before this change dir.ForgetAll and vfs/forget would forget about
virtual directory entries.

This change preserves them.
2020-06-30 12:03:39 +01:00
Nick Craig-Wood
06a12f5e27 vfs: stop virtual directory entries dropping out of the directory cache
Rclone adds virtual directory entries to the directory cache when it
creates a file or directory.

Before this change these dropped out of the directory cache when the
directory cache was reloaded. This meant that when the directory cache
expired:

- On bucket based backends, empty directories would disappear
- When using VFS writeback, files in the process of uploading would disappear

This is fixed by keeping track of the virtual entries in each
directory. The virtual entries are removed when they become real - ie
the object is read back from the listing.

This also keeps tracks of deletes in the same way so if a file is
deleted, it will not re-appear when the directory cache is reloaded if
the deletion hasn't finished yet.
2020-06-30 12:03:39 +01:00
Nick Craig-Wood
143abe39f2 vfs: add tests for downloaders 2020-06-30 12:03:39 +01:00
Nick Craig-Wood
ee04732cbb vfs: factor writeback and downloaders into their own packages 2020-06-30 12:03:39 +01:00
Nick Craig-Wood
79455cc71e vfs: downloaders: remove unused osPath 2020-06-30 12:03:39 +01:00
Nick Craig-Wood
042e5fe097 vfs: downloader: limit the reader to 10 errors before giving up 2020-06-30 12:03:39 +01:00
Nick Craig-Wood
d273a9d82d vfs: remove items from writeback when dirty, don't just cancel the upload
This stops open items continually trying to be uploaded
2020-06-30 12:03:39 +01:00
Nick Craig-Wood
3eded3c4ac vfs: remove workaround Sleep() calls from tests 2020-06-30 12:03:39 +01:00
Nick Craig-Wood
20f4fda3c9 local: fix race conditions updating and reading Object metadata 2020-06-30 12:03:39 +01:00
Nick Craig-Wood
ed32a759ed vfs: add test for writeBack.cancelUpload 2020-06-30 12:01:36 +01:00
Nick Craig-Wood
ef2d036884 vfs: make writeback heap sort in insertion order if expiry times equal
This makes the tests 100% consistent on platforms which have a lower
resolution timer like Windows.
2020-06-30 12:01:36 +01:00
Nick Craig-Wood
746c41f527 vfs: fix race in writeback tests 2020-06-30 12:01:36 +01:00
Nick Craig-Wood
b0fb457746 vfs: add tests for writeback 2020-06-30 12:01:36 +01:00
Nick Craig-Wood
b9ff495483 vfs: writeback - stop the timer explicitly on transfers exceeded 2020-06-30 12:01:36 +01:00
Nick Craig-Wood
8506066926 vfs: use call after functions in writeback to simplify code
This also fixes a bug in the uploader which didn't restart the timer
when the queue was empty.
2020-06-30 12:01:36 +01:00
Nick Craig-Wood
43018973ac vfs: decouple writeback from Item so it can be tested 2020-06-30 12:01:36 +01:00
Nick Craig-Wood
7e4ba54608 vfs: allow ReadAt and WriteAt to run concurrently with themselves
This should help with throughput on mounts and help when multiple
readers have the file open.

See: https://forum.rclone.org/t/concurrent-read-accesses-on-the-same-file-through-rclone-vfs-mount/17192
2020-06-30 12:01:36 +01:00
Nick Craig-Wood
2f66355f20 vfs: re-use existing VFS if possible 2020-06-30 12:01:36 +01:00
Nick Craig-Wood
7781ea8d59 vfs: add an optional fs parameter to vfs rc methods
Before this change we initialized the rc for a single VFS. However
rclone can have multiple VFSes in use now so this is no longer
adequate.

This change adds an optional fs parameter to all the VFS methods to
disambiguate VFSes when there is more than one in use.

It also adds a method vfs/list to show all the active VFSes.

This adds outline tests for the rc commands which didn't have tests
before.
2020-06-30 12:01:36 +01:00
Nick Craig-Wood
ce065614e2 Revert "mount2,cmount: skip unreliable tests #4171"
The VFS is now reliable enough so that the mount tests don't fail.

This reverts commit 4808958f93.
2020-06-30 12:01:36 +01:00
Nick Craig-Wood
fa472a340e vfs: fix writeback deadlocks and other bugs
- fix deadlock when cancelling upload
- fix double upload and panic after cancelled upload
- fix cancellation strategy of uploading files
    - don't cancel uploads if we don't modify the file
    - cancel uploads if we do modify the file
- fix deadlock between Item and writeback
- fix confusion about whether writeback item was being uploaded
- fix corner cases in cancelling uploads and removing files
2020-06-30 12:01:36 +01:00
Nick Craig-Wood
279a516c53 vfs: add tests and fix bugs for vfscache.Item
Item
- Remove unused method getName
- Fix Truncate on unopened file
- Fix bug when downloading segments to fill out file on close
- Fix bug when WriteAt extends the file and we don't mark space as used

downloader
- Retry failed waiters every 5 seconds
2020-06-30 12:01:36 +01:00
Nick Craig-Wood
9ac5c6de14 vfs: cache: rework file downloader
- Download to multiple places at once in the stream
- Restart as necessary
- Timeout unused downloaders
- Close reader if too much data skipped
- Only use one file handle as use item for writing
- Implement --vfs-read-chunk-size and --vfs-read-chunk-limit
- fix deadlock between asyncbuffer and vfs cache downloader
- fix display of stream abandoned error which should be ignored
2020-06-30 12:01:36 +01:00
Nick Craig-Wood
58a7faa281 vfs: Make tests run reliably
On file Remove
- cancel any writebacks in progress
- ignore error message deleting non existent file if file was in the
  process of being uploaded

Writeback
- Don't transfer the file if it has disappeared in the meantime
- Take our own copy of the file name to avoid deadlocks
- Fix delayed retry logic
- Wait for upload to finish when cancelling upload

Fix race condition in item saving

Fix race condition in vfscache test

Make sure we delete the file on the error path - this makes cascading
failures much less likely
2020-06-30 12:01:36 +01:00
Nick Craig-Wood
496a87a665 vfs: restart pending uploads on restart of the cache 2020-06-30 12:01:36 +01:00
Nick Craig-Wood
e4e53a2e61 vfs: add --vfs-writeback option to delay writes back to cloud storage
This is enabled by default and can be disabled with --vfs-writeback 0
2020-06-30 12:01:36 +01:00
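For illustration (mount point and delay are made up), the write-back delay can be tuned per mount, or disabled as noted above:

    rclone mount remote: /mnt/remote --vfs-cache-mode writes --vfs-writeback 30s
    # upload immediately on close instead of delaying
    rclone mount remote: /mnt/remote --vfs-cache-mode writes --vfs-writeback 0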
Nick Craig-Wood
28255f1bac vfs: fix errors when using > 260 char files in the cache in Windows
This makes the cache use UNC paths on Windows. This stops the cache
exploding when using > 260 character paths
2020-06-30 12:01:36 +01:00
Nick Craig-Wood
917cb4acb3 vfs: implement partial reads in --vfs-cache-mode full
This allows reads to only read part of the file and it keeps on disk a
cache of what parts of each file have been loaded.

File data itself is kept in sparse files.
2020-06-30 12:01:36 +01:00
Nick Craig-Wood
d84527a730 lib/ranges: ranges library for dealing with partially downloaded files 2020-06-30 12:01:36 +01:00
Nick Craig-Wood
7d0783aad5 lib/readers: add Seek method to PatternReader 2020-06-30 12:01:36 +01:00
Nick Craig-Wood
7622506fe2 local: factor UNCPath into lib/file 2020-06-30 12:01:36 +01:00
Nick Craig-Wood
ae8bbc63da cache: export Canonicalize method for external use 2020-06-30 12:01:36 +01:00
Nick Craig-Wood
79f5d940cf fs: add Fingerprint to detect changes in an object 2020-06-30 12:01:36 +01:00
Nick Craig-Wood
25662b9e05 fstest: add ability for mock objects and filesystems to have hashes 2020-06-30 12:01:36 +01:00
Nick Craig-Wood
c820576329 fs: define SlowModTime and SlowHash features in the relevant backends 2020-06-30 12:01:36 +01:00
Nick Craig-Wood
af601575cb serve ftp: Use new facilities in goftp to fix and simplify auth proxy #4394
- Use Driver.CheckPasswd instead of server.CheckPasswd
- Make server.CheckPasswd return an error
- Remove awful findID to find parent function hack
- Remove Driver.Init as it is no longer called
- Fix backwards incompatible PublicIp -> PublicIP change

See: https://gitea.com/goftp/server/issues/117
2020-06-30 09:34:13 +01:00
Nick Craig-Wood
c7eae60944 vendor: update goftp.io/server to v0.3.5-master to fix auth proxy #4394
See upstream issue: https://gitea.com/goftp/server/issues/117
2020-06-30 09:16:43 +01:00
Nick Craig-Wood
0afd5a2204 build: drop xgo builds for macOS, Linux and Windows
The xgo builds for macOS, Linux and Windows are used for testing - the
actual builds are built on the correct platform.

Since the darwin build has stopped working, this is a good opportunity to
remove these builds as they really are only for testing.

The Android and iOS builds will continue to be built by xgo

See: https://github.com/billziss-gh/cgofuse/issues/47
2020-06-29 14:51:16 +01:00
Nick Craig-Wood
92cb21f0f2 serve ftp: Add error message if auth proxy fails #4394 2020-06-29 14:45:39 +01:00
Nick Craig-Wood
0031130111 serve ftp: don't compile on < go1.13 after dependency update 2020-06-29 14:45:39 +01:00
Nick Craig-Wood
2a3b377d34 azureblob: don't compile on < go1.13 after dependency update 2020-06-29 14:45:39 +01:00
Nick Craig-Wood
2aed3bf9ab vendor: roll back bazil.org/fuse to the last version which supports macOS #4393
Roll back the bazil.org/fuse update to give us some time to explore
alternatives for macOS.

See upstream issue: https://github.com/bazil/fuse/issues/224
2020-06-29 14:45:02 +01:00
Nick Craig-Wood
ec4e0e4d58 vendor: revert goftp.io/server from v0.3.4 to v0.3.3 to fix auth proxy #4394
See upstream issue: https://gitea.com/goftp/server/issues/117
2020-06-29 14:45:02 +01:00
Nick Craig-Wood
696d012c05 vendor: update all dependencies 2020-06-29 14:44:57 +01:00
Nick Craig-Wood
61ff7306ae crypt: add --crypt-server-side-across-configs flag
This can be used for changing filename encryption mode without
re-uploading data.

See: https://forum.rclone.org/t/revert-filename-encryption-method/17454/
2020-06-27 11:40:15 +01:00
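A hedged sketch of the intended use (remote names are illustrative): with `oldcrypt:` and `newcrypt:` configured as crypt remotes that wrap the same underlying remote but use different filename_encryption settings, the data can be copied server side rather than re-uploaded:

    rclone sync oldcrypt: newcrypt: --crypt-server-side-across-configs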
Nick Craig-Wood
0bcf4769fe local: make --local-no-updated provide a consistent view of the objects
Before this change the --local-no-updated flag would not error if the
files changed in size during the transfer. The file could still be
read beyond the size advertised though which caused problems with
certain backends.

After this change we attempt to provide a consistent view of the file
once it has been opened.

Once the file has had stat() called on it for the first time we

- Only transfer the size that stat gave
- Only checksum the size that stat gave
- Don't update the stat info for the file

This means that files that are being extended can be transferred - rclone
will transfer the length it saw the first time it listed the file.

See: https://forum.rclone.org/t/transport-connection-broken/16494/21
2020-06-27 10:00:43 +01:00
Nick Craig-Wood
0bfbecf9cb build: add btest Makefile target for pasteable download message 2020-06-26 16:26:29 +01:00
David
9058ec32e1 s3: Use regional s3 us-east-1 endpoint 2020-06-26 16:25:52 +01:00
Nick Craig-Wood
61e4b4db42 drive: Allow the use of --drive-impersonate with the root_folder_id "appDataFolder"
In this commit

5c5ad6220 drive: fix --drive-impersonate with cached root_folder_id

We disabled the use of root_folder_id with --drive-impersonate to fix
a problem with a cached root_folder_id giving the wrong results.

This, alas, broke one user's setup with a root_folder_id of
appDataFolder. Since this is identifiable and definitely couldn't have
been cached, we can safely skip this check in this case.

See: https://forum.rclone.org/t/rclone-gdrive-no-longer-returning-anything/17215/10
2020-06-25 21:43:11 +01:00
Nick Craig-Wood
fd7c63bc78 s3: add backend restore command to restore objects from GLACIER
See: https://forum.rclone.org/t/rclone-settier-fails-with-scaleway-entitytoolarge/17384
2020-06-25 21:33:23 +01:00
Nick Craig-Wood
49a7d08a40 qingstor: cancel in progress multipart uploads on rclone exit #4300 2020-06-25 15:22:53 +01:00
Nick Craig-Wood
2c10ce64aa onedrive: rework cancel of multipart uploads on rclone exit #4300
This now uses the atexit.OnError framework rather than a home grown one.
2020-06-25 15:22:53 +01:00
Nick Craig-Wood
a41a294e1d box: cancel in progress multipart uploads and copies on rclone exit #4300 2020-06-25 15:22:53 +01:00
Nick Craig-Wood
47b17dc1bb b2: cancel in progress multipart uploads and copies on rclone exit #4300 2020-06-25 15:22:53 +01:00
Nick Craig-Wood
5f75444ef6 s3: cancel in progress multipart uploads and copies on rclone exit #4300 2020-06-25 12:55:56 +01:00
Nick Craig-Wood
54fda3422e atexit: implement OnError for cancelling multipart uploads 2020-06-25 12:55:56 +01:00
Nick Craig-Wood
fcc2db8093 doc: disable smart typography (eg en-dash) in MANUAL.* and man page
Before this change MANUAL.html and rclone.1 would show flags like –addr

Now it shows --addr which is easy to copy and paste.

This was pointed out in #4362
2020-06-25 12:20:01 +01:00
Nick Craig-Wood
89b7ffbd5c Add Tim Burke to contributors 2020-06-25 12:20:01 +01:00
Nick Craig-Wood
ada43b0e58 Add Petri Salminen to contributors 2020-06-25 12:20:01 +01:00
Tim Burke
5050c33162 dlna: Mark flags in docs as code
Otherwise, we get en dashes in the man page, making args more difficult
to copy/paste to a command line.

Before:

    Use –addr to specify ...

After:

    Use --addr to specify ...
2020-06-25 12:18:54 +01:00
Harry
4e8fda228d docs: Updated OneDrive max file size 2020-06-25 12:04:52 +01:00
Petri Salminen
cdfb3f7194 docs: make the website navbar stick to top
Navbar will remain visible on the top of the screen, even when
the user has scrolled down.

This makes it easier to navigate the site quickly.
2020-06-25 11:47:56 +01:00
Chaitanya Bankanhal
a2dd23efd3 rc: Add tests for operations/uploadfile
rc: Go import file rc_test.go
2020-06-25 11:38:24 +01:00
Chaitanya Bankanhal
fa43d02874 rc: Add operations/uploadfile to upload a file through rc using encoding multipart/form-data 2020-06-25 11:38:24 +01:00
Chaitanya
d0de39ebcd rc: add NeedsRequest to call. 2020-06-25 11:38:24 +01:00
Nick Craig-Wood
2121c0fa23 dircache: factor DirMove code out of backends into dircache
Before this change there was lots of duplicated code in all the
dircache using backends to support DirMove.

This change factors this code into the dircache library.
2020-06-25 09:41:36 +01:00
Nick Craig-Wood
a8652e2252 dircache: simplify interface, fix corner cases and apply to backends
Dircache was changed to:

- Remove special cases for the root directory
- Remove Fatal errors
- Call FindRoot on behalf of the user wherever possible
- Bring up to modern Go standards

Backends were changed to:

- Remove calls to FindRoot
- Change calls to FindRootAndPath to FindPath
- Don't make special cases for the root

This fixes several corner cases, for example removing a non existent
directory if FindRoot hasn't been called.
2020-06-25 09:41:36 +01:00
Nick Craig-Wood
81151523af drive: fix shortcut tests 2020-06-24 15:52:02 +01:00
Nick Craig-Wood
3e82771413 Start v1.52.2-DEV development 2020-06-24 14:35:12 +01:00
Nick Craig-Wood
9445b12328 check: make it show stats by default 2020-06-24 11:23:34 +01:00
Nick Craig-Wood
4bb103ef43 build: speed up tidy-beta script by doing fewer directory traversals 2020-06-24 10:01:24 +01:00
Nick Craig-Wood
0dba7b8a46 swift: speed up deletes by not retrying segment container deletes
Before this fix rclone would continually try to delete non empty
segment containers which made deleting lots of files very slow.

This fix makes rclone just try the delete once and then carry on which
was the original intent of the code before the retry logic got put in.
2020-06-24 10:01:24 +01:00
buengese
e247811db5 jottacloud: remove debug Printf accidentally left in 2020-06-23 13:16:23 +02:00
buengese
6768f999ed docs/overview: pcloud now supports link sharing 2020-06-21 17:22:56 +02:00
buengese
ce767bc3cf pcloud: implement PublicLink 2020-06-21 17:22:56 +02:00
Caleb Case
e780cda1d4 backend/tardigrade: Upgrade to uplink v1.1.1
This fixes issue #4370 by restoring the correct error response.
2020-06-20 16:44:06 +01:00
Nick Craig-Wood
a55d882b7b webdav: Fix free/used display for rclone about/df for certain backends - fixes #4348
Before this change if the server sent us xml like this

```
<D:propstat>
<D:prop>
<g0:quota-available-bytes/>
<g0:quota-used-bytes/>
</D:prop>
<D:status>HTTP/1.1 404 Not Found</D:status>
</D:propstat>
```

Rclone would read the empty XML items as containing 0

After this fix we make sure that we have a value before using it.
2020-06-20 15:15:15 +01:00
Nick Craig-Wood
5c5ad62208 drive: fix --drive-impersonate with cached root_folder_id
Before this fix rclone v1.51 and 1.52 would incorrectly use the cached
root_folder_id when the --drive-impersonate flag was in use. This
meant that rclone could be looking up the wrong directory ID with
unpredictable results - usually all files apparently being missing.

This fix makes rclone look up the root_folder_id always when using
--drive-impersonate. It does this by clearing the root_folder_id and
making a NOTICE message that it is ignoring the cached value.

It also stops rclone caching the root_folder_id when using
--drive-impersonate.

See: https://forum.rclone.org/t/rclone-gdrive-no-longer-returning-anything/17215
2020-06-20 15:01:37 +01:00
Nick Craig-Wood
62a1a561cf build: test-repeat.sh add -tag to buildflags 2020-06-20 14:52:04 +01:00
Nick Craig-Wood
ce394426b0 check: fix successful retries with --download counting errors
See: https://forum.rclone.org/t/tons-of-data-corruption-after-rclone-copy-to-mega-can-rclone-correct-it/17017
2020-06-20 14:52:04 +01:00
buengese
6606602f1e docs/box: add some info regarding the CleanUp implementation 2020-06-18 23:39:59 +02:00
buengese
b6b8958fb4 box: implement CleanUp - fixes #4326 2020-06-18 23:39:59 +02:00
Nick Craig-Wood
d8eea0e397 build: run gofmt -s to simplify the code: suggested by Go Report Card 2020-06-18 18:45:39 +01:00
Nick Craig-Wood
df9c930581 dropbox: fix public link by removing expires parameter
Adding the expires parameter gives settings_error/not_authorized/.. errors.

The expires setting isn't in the documentation so this commit removes
it for now.
2020-06-18 18:40:33 +01:00
Nick Craig-Wood
85bcacac90 s3: Cap expiry duration to 1 Week and return error when sharing dir 2020-06-18 17:50:50 +01:00
Nick Craig-Wood
4b4ee72796 fstest: fix PublicLink tests to send non zero expiry and work with s3 2020-06-18 17:50:12 +01:00
Nick Craig-Wood
40611fc4fc check: retry downloads if they fail when using the --download flag
See: https://forum.rclone.org/t/tons-of-data-corruption-after-rclone-copy-to-mega-can-rclone-correct-it/17017/7
2020-06-18 16:16:19 +01:00
Nick Craig-Wood
7c4ba9fcb2 check: Fix misleading message which printed errors instead of differences
See: https://forum.rclone.org/t/tons-of-data-corruption-after-rclone-copy-to-mega-can-rclone-correct-it/17017/7
2020-06-18 16:16:19 +01:00
Nick Craig-Wood
a1c9612d75 build: fix windows/386 icon/version embedding #4304
Make sure we install the goversioninfo binary for the running architecture,
otherwise we don't get a binary.
2020-06-18 16:08:38 +01:00
Nick Craig-Wood
33c8709439 build: fix windows/amd64 build and icon/version embedding #4304
The parameters were being passed to goversioninfo in the wrong order
so that the 64 bit .syso was actually a 32 bit .syso, which caused the
linker to fail.
2020-06-18 16:08:38 +01:00
Nick Craig-Wood
5e6f4ab281 drive: fix creating a directory inside a shortcut
See: https://forum.rclone.org/t/cant-create-new-directory-on-google-drive-remote/17208
2020-06-17 11:32:28 +01:00
Nick Craig-Wood
3efdf5e095 operations: implement --refresh-times flag to set modtimes on hashless backends 2020-06-17 10:48:13 +01:00
Nick Craig-Wood
d174b97af7 errors: add WSAECONNREFUSED and more to the list of retriable Windows errors
This adds the missing WSAECONNREFUSED error to the list of errors we
can retry under Windows.

> Connection refused.  No connection could be made because the target
> computer actively refused it.

It also adds any relevant errors I could see in the error code list.

See: https://forum.rclone.org/t/failing-to-upload-large-file-to-b2/17085
2020-06-17 10:46:22 +01:00
Nick Craig-Wood
fff8822239 Add jtagcat to contributors 2020-06-17 10:46:22 +01:00
Nick Craig-Wood
7cfe3760f4 Add Matteo Pietro Dazzi to contributors 2020-06-17 10:46:22 +01:00
NoLooseEnds
298bd640f3 build: fix custom timezone in Docker image
Added the tzdata package to fix this; see the forum post below.

See: https://forum.rclone.org/t/rclone-in-docker-uses-tz-as-utc-not-as-provided/17173
2020-06-17 10:43:03 +01:00
buengese
945a37d0d2 docs/jottacloud: add some info about the setup for whitelabel version 2020-06-16 18:15:49 +02:00
Chaitanya Bankanhal
68afa28b27 rc: Add mount to list if mount point was successfully created 2020-06-16 15:17:55 +01:00
jtagcat
d6a9017298 fs: fix formatting of errInvalidCharacters error message
errInvalidCharacters: 'and space .' -> 'and space.' Removes the stray space between the word 'space' and the period.

Co-authored-by: jtagcat <gitlab@c7.ee>
2020-06-16 15:08:09 +01:00
jtagcat
da862f82cf docs: add how to squash to contributing
Co-authored-by: jtagcat <gitlab@c7.ee>
2020-06-16 15:06:03 +01:00
jtagcat
f8b6727190 docs: document valid remote names 2020-06-16 15:02:15 +01:00
jtagcat
2d88d24881 config: reject remote names starting with a dash. (#4261) 2020-06-16 15:00:34 +01:00
Matteo Pietro Dazzi
62650a3eb3 serve dlna: Fix file list on Samsung Series 6+ TVs
This fixes the command "serve dlna" in order correctly show the list
of files on Samsung TV models starting from Series 6.
2020-06-16 14:56:02 +01:00
buengese
2c4f7b61c1 jottacloud: switch to new api root - fixes #4295
- also implement a very ugly workaround for the DirMove failures
2020-06-16 15:44:34 +02:00
Nick Craig-Wood
a3f6fe5287 dedupe: fix logging to be easier to understand #4321 2020-06-16 11:41:56 +01:00
Nick Craig-Wood
8d85c51a28 Add Heiko Bornholdt to contributors 2020-06-16 11:41:56 +01:00
Heiko Bornholdt
17d5a72416 ftp: add explicit tls support
Add support for explicit FTP over TLS.

Fixes #4100
2020-06-16 09:13:50 +01:00
Heiko Bornholdt
c4ce260b49 vendor: update jlaffaye/ftp 2020-06-16 09:13:50 +01:00
Nick Craig-Wood
4808958f93 mount2,cmount: skip unreliable tests #4171 2020-06-15 21:34:37 +01:00
Nick Craig-Wood
b58bb03e95 test: Don't run unreliable tests on CI #4171 2020-06-15 21:34:37 +01:00
Nick Craig-Wood
ba7fbfa8a7 testy: test utility functions 2020-06-15 21:34:37 +01:00
Nick Craig-Wood
117ff1d781 serve sftp: fix race in the tests #4171 2020-06-15 21:34:33 +01:00
Nick Craig-Wood
160c97da13 build: re-add accidentally deleted ci_upload 2020-06-15 20:21:20 +01:00
Nick Craig-Wood
0760bc09aa build: fix Windows exe info insertion
The goversioninfo tool wasn't being installed in the correct place.

This also gets rid of the old Travis and Appveyor stuff from the
Makefile
2020-06-15 19:29:43 +01:00
Nick Craig-Wood
5ca82e2f05 Add Vincent Feltz to contributors 2020-06-14 10:11:13 +01:00
Nick Craig-Wood
746a6ef8d3 Add Zac Rubin to contributors 2020-06-14 10:11:13 +01:00
Gary Kim
763944f673 rcd: fix incorrect prometheus metrics - fixes #4341
This was caused by using the stats group from the context passed in by the rcd
rather than the global stats group.

Signed-off-by: Gary Kim <gary@garykim.dev>
2020-06-14 10:09:24 +01:00
Vincent Feltz
f4d7e41f24 s3: add Scaleway provider - fixes #4338 2020-06-13 11:55:37 +01:00
Zac Rubin
f9306218f8 sftp: Fix SSH key PEM loading
For SSH authentication, `key_pem` should both override `key_file`
and not require other SSH authentication methods to be set.

Prior to this fix, rclone would attempt to use an ssh-agent
when `key_pem` was the only SSH authentication method set.

Fixes #4240
2020-06-12 22:46:33 +01:00
Nick Craig-Wood
fb06427c69 sync: fix --track-renames-strategy modtime
Before this change `--track-renames-strategy` was broken. The hashing
method it used could declare times that were very close together to be
different.

The time hash was discarded and instead we check the modification time
window on every hash match.

Provided that the user doesn't use `--track-renames-strategy` on a
huge number of identically sized files this will perform just fine.

See: https://forum.rclone.org/t/track-renames-strategy-modtime-doesnt-work/16992/5
2020-06-12 15:38:35 +01:00
Nick Craig-Wood
93bd601149 touch: add ability to set nanosecond resolution times 2020-06-12 15:38:35 +01:00
Nick Craig-Wood
848c5b78e1 drive: fix not being able to delete a directory with a trashed shortcut
When we resolve the shortcut we now propagate the trashed status of
the shortcut into the resolved item which fixes the issue.
2020-06-12 15:10:35 +01:00
buengese
84d5df3c84 jottacloud: bring back legacy authentication for use with whitelabel versions - fixes #4299 2020-06-12 12:08:27 +02:00
Nick Craig-Wood
63e6d9d2d1 serve webdav,serve restic: Fix flags so they use environment variables
See: https://forum.rclone.org/t/serve-restic-append-only-environment-variable/17050
2020-06-11 19:28:51 +01:00
albertony
6a2b7b97d7 build: Add file properties and icon to Windows executable (fixes #4304) 2020-06-11 09:26:14 +01:00
Chaitanya
d8d19072c5 mount: Add call for unmount all
mount: handle locking through single mutex.
2020-06-10 22:19:34 +01:00
Chaitanya
830ab37371 rc: Add mount list option for listing current mounts 2020-06-10 22:19:34 +01:00
Nick Craig-Wood
7e48ee8758 cache: fix dedupe on caches wrapping drives - fixes #4320
This implements the MergeDirs optional method.
2020-06-10 21:52:52 +01:00
Nick Craig-Wood
d55053098f check: make check do --checkers files concurrently - fixes #4318 2020-06-10 17:20:54 +01:00
Nick Craig-Wood
63cf0b1cdd check: make check command obey --dry-run/-i/--interactive - fixes #4325 2020-06-10 17:20:54 +01:00
Nick Craig-Wood
5866b1b017 bin: test-repeat.sh - script to run tests many times with individual logs 2020-06-10 17:08:31 +01:00
Nick Craig-Wood
8493f3939c bin: not-in-stable.go - script to help with merging fixes to the stable branch 2020-06-10 17:07:43 +01:00
Nick Craig-Wood
095f4e9b9d build: fix docker release build action 2020-06-10 17:00:33 +01:00
Nick Craig-Wood
a1382a03aa Start v1.52.1-DEV development 2020-06-10 16:49:55 +01:00
Nick Craig-Wood
844b903595 docs: promote the use of -i/--interactive and "rclone sync -i" everywhere #1574 2020-06-10 12:33:53 +01:00
Nick Craig-Wood
a3b3e1f646 tree: remove -i shorthand for --noindent as it conflicts with -i/--interactive 2020-06-10 12:33:53 +01:00
Nick Craig-Wood
b23cf58a41 operations: Add skip all, do all, quit operations to --interactive - fixes #3886
This also adds SkipDestructive into all the remaining places --dry-run
was used and adds documentation.
2020-06-10 12:33:53 +01:00
fishbullet
ba5eb230fb operations: interactive mode -i/--interactive for destructive operations #3886 2020-06-10 12:33:53 +01:00
Nick Craig-Wood
2ea15a72bc s3: fix --header-upload - Fixes #4303
Before this change we were setting the headers on the PUT
request for normal and multipart uploads. For normal uploads this caused the error

    403 Forbidden: There were headers present in the request which were not signed

After this fix we set the headers in the object upload request itself
as the s3 SDK expects.

This means that we only support a limited range of headers

- Cache-Control
- Content-Disposition
- Content-Encoding
- Content-Language
- Content-Type
- X-Amz-Tagging
- X-Amz-Meta-

Note that the last of those is for setting custom metadata in the form
"X-Amz-Meta-Key: value".

This now works for multipart uploads and single part uploads

See also #59
2020-06-10 12:28:48 +01:00
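An illustrative upload using headers from the supported list above (bucket, path and header values are made up):

    rclone copy /local/file.txt s3:bucket/path \
        --header-upload "Cache-Control: max-age=3600" \
        --header-upload "X-Amz-Meta-Source: laptop"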
Nick Craig-Wood
b5c654a100 lib/structs: factor reflection based structure manipulation into a library 2020-06-10 12:28:48 +01:00
Nick Craig-Wood
6807b0e42f Add Kamil Trzciński to contributors 2020-06-10 12:16:27 +01:00
Cenk Alti
16422a6b78 putio: fix panic on Object.Open #4315 2020-06-10 12:16:09 +01:00
Rob Calistri
b2ded6212b vfs: Change modtime of file before upload to current
Previously, files inherited the directory modtime before they were uploaded. This changes that to use the current time instead.
2020-06-10 12:10:50 +01:00
Nick Craig-Wood
88df5927f9 vfs: funnel all read/write calls through ReadAt/WriteAt
This is in preparation for partial reads for read/write files
2020-06-09 18:07:41 +01:00
Nick Craig-Wood
8c37262e05 vfs: don't use embedded methods for read/write handles for clarity 2020-06-09 18:07:23 +01:00
Nick Craig-Wood
3c14a893fb asyncreader: Make StopBuffer as well as Abandon and fix confusion in callers 2020-06-09 18:05:12 +01:00
Nick Craig-Wood
05bc19c331 vfs: Remove unneeded locking from read write handle String() 2020-06-09 18:04:50 +01:00
Caleb Case
40fe97e946 backend/tardigrade: Set UserAgent to rclone
This provides two things:

* It gives Storj insight into which uplink clients are using the
  network.
* It facilitates rclone's participation in the Tardigrade Open Source
  Partner Program https://tardigrade.io/partner/
2020-06-09 14:20:28 +01:00
Kamil Trzciński
7458d37d2a s3: add max_upload_parts support - fixes #4159
* s3: add `max_upload_parts` support

This allows configuring the maximum number of chunks used to upload a file:

- Support Scaleway which has a limit of 1k chunks currently
- Reduce cost on S3, where each request costs money, at the expense of memory used

Co-authored-by: Nick Craig-Wood <nick@craig-wood.com>
2020-06-08 18:22:34 +01:00
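For example, capping the number of chunks for a Scaleway remote (remote name and sizes are illustrative, and the flag spelling --s3-max-upload-parts is assumed to follow the usual --s3-&lt;option&gt; pattern):

    rclone copy /local/big.iso scaleway:bucket --s3-chunk-size 50M --s3-max-upload-parts 1000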
Nick Craig-Wood
c4110780bf lib/file: fix SetSparse on Windows 7 which fixes downloads of files > 250MB
Before this change we passed both lpOverlapped and lpBytesReturned as NULL.

> If lpOverlapped is NULL, lpBytesReturned cannot be NULL. Even when
> an operation produces no output data, and lpOutBuffer can be NULL,
> the DeviceIoControl function makes use of the variable pointed to by
> lpBytesReturned. After such an operation, the value of the variable
> is without meaning.

After this change we set lpBytesReturned to a valid pointer.

See: https://forum.rclone.org/t/errors-when-downloading-any-file-over-250mb-from-google-drive-windows-sparse-files/16889
2020-06-06 13:13:15 +01:00
Nick Craig-Wood
d729004554 Add Roman Kredentser to contributors 2020-06-05 14:51:52 +01:00
Roman Kredentser
c0521791db s3: implement link sharing with PublicLink 2020-06-05 14:51:05 +01:00
Roman Kredentser
55ad1354b6 link: Add --expire and --unlink flags
This adds expire and unlink fields to the PublicLink interface.

This fixes up the affected backends and removes unlink parameters
where they are present.
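
Illustrative usage (`remote:` is a placeholder):

    rclone link --expire 1d remote:path/to/file   # link that expires in one day
    rclone link --unlink remote:path/to/file      # remove existing public links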
2020-06-05 14:51:05 +01:00
Nick Craig-Wood
fb61ed8506 b2: Implement server side copy for files > 5GB - fixes #3991
This factors copy out of SetModTime and Copy so it can be called from
both places.

This also reworks all the multipart uploading to use sync.Errgroup and
memory pooling like the other backends. This makes it more memory
efficient and handles errors better.

See: https://forum.rclone.org/t/copying-files-within-a-b2-bucket/16680/10
2020-06-05 13:27:53 +01:00
Nick Craig-Wood
4c7f7582fd obscure: write more help as we are referencing it elsewhere 2020-06-05 12:48:28 +01:00
Caleb Case
a4f1f3d4e8 backend/tardigrade: Upgrade uplink to v1.0.7
This fixes a regression in the rclone tests from the v1.0.6 upgrade of
uplink. The failure was due to an improperly converted error resulting
in the wrong type of error.
2020-06-05 10:51:33 +01:00
Nick Craig-Wood
973e3d6a7b backends: make sure backends expand ~ and environment vars in file names they use
See: https://forum.rclone.org/t/relative-path-in-rclone-config-service-account-json/16693
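
A minimal sketch (remote name and path are placeholders; quoting stops the shell expanding the ~ so rclone does it):

    rclone lsd gdrive: --drive-service-account-file "~/keys/service-account.json"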
2020-06-03 17:39:08 +01:00
Nick Craig-Wood
b62d08d136 config: set RCLONE_CONFIG_DIR for use in config files and subprocesses
See: https://forum.rclone.org/t/relative-path-in-rclone-config-service-account-json/16693
2020-06-03 17:39:08 +01:00
Nick Craig-Wood
50e31c6636 vfs: fix OS vs Unix path confusion - fixes ChangeNotify on Windows
See: https://forum.rclone.org/t/windows-mount-polling-not-recognising-all-changes-made-by-another-box/16708
2020-06-03 17:05:58 +01:00
Nick Craig-Wood
151f03378f s3: fix upload of single files into buckets without create permission
Before this change, attempting to upload a single file into an s3
bucket which did not have create permission gave AccessDenied: Access
Denied error when it tried to create the bucket.

This was masked until e2bf91452a was
fixed.

This fix marks the bucket as OK if a fetch on an object indicates it
is OK. This stops rclone thinking it has to create the bucket in the
first place.

Fixes #4297
2020-06-02 14:33:21 +01:00
Nick Craig-Wood
26fb9007da build: fix xgo build after go1.14 go.mod update
Before this change xgo was getting added to go.mod - the build then failed with

    go: inconsistent vendoring in /usr/src/rclone:
    github.com/karalabe/xgo@v0.0.0-20191115072854-c5ccff8648a7: is explicitly required in go.mod, but not marked as explicit in vendor/modules.txt

This change gets xgo in GOPATH mode to avoid it getting added to go.mod
2020-06-02 13:41:54 +01:00
Nick Craig-Wood
3b20335d2a docs: remove leading slash in page reference in footer when present 2020-06-02 12:09:45 +01:00
Nick Craig-Wood
8d55367a6a build: set user_allow_other in /etc/fuse.conf in the Docker image
This allows non root mounts to use the --allow-other flag

See: https://forum.rclone.org/t/trying-utilize-docker-for-the-first-time-having-some-issues-with-an-rclone-mount-user-allow-other-error-etc-fuse-conf-has-been-updated-to-allow/16393
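
A minimal sketch of such a mount (remote name and paths are placeholders; see the Docker docs for the full set of flags):

    docker run --rm \
        --volume ~/.config/rclone:/config/rclone \
        --volume /mnt/data:/data:shared \
        --device /dev/fuse --cap-add SYS_ADMIN \
        rclone/rclone mount remote:path /data --allow-other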
2020-06-01 21:33:48 +01:00
Nick Craig-Wood
187ee62e3d Add edwardxml to contributors 2020-06-01 21:33:48 +01:00
Nick Craig-Wood
10e2ec1fbb Add Matteo Pietro Dazzi to contributors 2020-06-01 21:33:48 +01:00
edwardxml
83999cd1d1 docs: minor tense, punctuation, brevity and positivity changes for the home page 2020-06-01 16:32:27 +01:00
Nick Craig-Wood
fef90ef0a9 build: update Docker build workflows
- prune docker images to ones we normally build binaries for
- add fixed versions
- add fetch-depth to fetch the tags so the version number is correct
- rename the job names
2020-06-01 16:13:23 +01:00
Matteo Pietro Dazzi
72ae5626b0 build: Build Docker images with GitHub actions 2020-06-01 16:13:23 +01:00
Nick Craig-Wood
eee28d0d39 build: remove quicktest from Dockerfile
This is making docker builds take too long and it isn't the place of
the Docker file to be running unit tests.
2020-06-01 16:13:23 +01:00
Nick Craig-Wood
b59999dd59 build: update go.mod to go1.14 to enable -mod=vendor build
When the main module contains a top-level vendor directory and its
go.mod file specifies go 1.14 or higher, the go command now defaults
to -mod=vendor for operations that accept that flag.
2020-06-01 16:13:23 +01:00
Nick Craig-Wood
e62c032184 docs: remove manually set dates and use git dates instead 2020-06-01 13:07:46 +01:00
Nick Craig-Wood
1635b37ff1 docs: Add link to source and modified time to footer of every page 2020-06-01 13:07:46 +01:00
Nick Craig-Wood
8774381e2e cmd: Note commands which need obscured input in the docs - fixes #4252 2020-05-31 12:59:09 +01:00
Nick Craig-Wood
cbfe7a405b Add Alex Guerrero to contributors 2020-05-31 12:59:09 +01:00
Alex Guerrero
80391fbcd4 dropbox: Add copyright detector info in limitations section in the docs
Dropbox files protected by copyright can't be synced and result in
an error. This change documents that in Dropbox's limitations
section to warn rclone users and explain why this error occurs,
since by default the Dropbox API doesn't give enough info.

Proposed by the original repo owner in this comment:
https://github.com/rclone/rclone/issues/2301#issuecomment-388291079

Co-authored-by: Nick Craig-Wood <nick@craig-wood.com>
2020-05-31 12:22:54 +01:00
Nick Craig-Wood
cbf3d43561 drive: fix missing items when listing using --fast-list / ListR
This is caused by a bug in Google drive where, in some circumstances
querying for "(A in parents) or (B in parents)" returns nothing
whereas querying for "A in parents" and "B in parents" separately
works fine.

This has been reported here:

https://issuetracker.google.com/issues/149522397

This workaround detects this condition by seeing if a listing for more
than one directory at once returns nothing.

If it does then it retries each one individually.

This can potentially have a false positive if the user has multiple
empty directories which are queried at once. The consequence of this
will be that ListR is disabled for a while until the directories are
found to be actually empty in which case it will be re-enabled.

Fixes #3114 and Fixes #4289
2020-05-31 11:44:15 +01:00
Caleb Case
e7bd392a69 backend/tardigrade: Upgrade to uplink v1.0.6
This fixes an important bug with listing that affects users with more
than 500 objects in a listing operation.
2020-05-29 18:00:08 +01:00
Nick Craig-Wood
764b90a519 Update release notes with Docker build instructions 2020-05-28 13:12:13 +01:00
Nick Craig-Wood
d785942ed5 mountlib: fix rc tests when mount does not work
This fixed the Docker 1.52 build
2020-05-28 13:11:42 +01:00
Nick Craig-Wood
1cceadaf7c Start v1.52.0-DEV development 2020-05-27 18:36:32 +01:00
Nick Craig-Wood
6882aeff97 Version v1.52.0 2020-05-27 17:31:10 +01:00
Nick Craig-Wood
a0922643e6 build: fix build after nfpm change to drop bindir 2020-05-27 17:31:09 +01:00
Nick Craig-Wood
4d7254f88f Add funding links to rclone repo 2020-05-27 17:31:09 +01:00
Nick Craig-Wood
8499090038 docs: Updates to the front page from Edward Barker 2020-05-27 17:31:09 +01:00
Nick Craig-Wood
ca0c4c7585 docs: update index and donate pages after review 2020-05-27 17:31:09 +01:00
Nick Craig-Wood
5aec01100e docs: make primary buttons rclone colors 2020-05-27 17:31:09 +01:00
Nick Craig-Wood
4d6af44045 docs: update all auto generated docs 2020-05-27 17:31:09 +01:00
Nick Craig-Wood
256d17eba8 docs: markdownify the strings in the commands index 2020-05-27 17:31:09 +01:00
Nick Craig-Wood
5777f4481f docs: add missing shortcode 2020-05-27 17:31:09 +01:00
Martin Michlmayr
6e945fba82 docs: wrap code example into <code> 2020-05-27 17:31:09 +01:00
Martin Michlmayr
ce7047d88a docs: fix cosmetic issues 2020-05-27 17:31:09 +01:00
Martin Michlmayr
4a35100130 docs: fix typos 2020-05-27 17:31:09 +01:00
Martin Michlmayr
ef7662d2fa docs: fix cosmetic issue in menu
The menu items shouldn't end with a full stop since that looks
weird and is just clutter.
2020-05-27 17:31:09 +01:00
Nick Craig-Wood
16e89706d3 docs: add a status notice to the cache backend 2020-05-27 17:31:09 +01:00
Nick Craig-Wood
41a58d7f6e docs: tweaks to index and donate page 2020-05-27 17:31:09 +01:00
Nick Craig-Wood
2a7f3eecf4 docs: reduce height of provider entries 2020-05-27 17:31:09 +01:00
Nick Craig-Wood
41e12114a8 docs: fix validation errors 2020-05-27 17:31:09 +01:00
Nick Craig-Wood
1b12d5d346 docs: move most of the chrome into baseof.html as per a modern hugo install 2020-05-27 17:31:09 +01:00
Nick Craig-Wood
74b8cbfb84 docs: set unsafe HTML parsing to false and fix raw HTML insertion
This means that markdown files can't contain <thing> any more.
2020-05-27 17:31:09 +01:00
Nick Craig-Wood
06427371eb docs: add "make validate_website" and fix validation errors 2020-05-27 17:31:09 +01:00
Nick Craig-Wood
d08e1616a7 docs: add autogenerated date to footer 2020-05-27 17:31:09 +01:00
Nick Craig-Wood
79b011305e docs: increase space around titles for better visual feel 2020-05-27 17:31:09 +01:00
Nick Craig-Wood
050879f0ca docs: make command docs titles be one higher 2020-05-27 17:31:09 +01:00
Nick Craig-Wood
0a74f8022e docs: tweak rendering of tables to match the new theme 2020-05-27 17:31:09 +01:00
Nick Craig-Wood
0db20c9c56 docs: Remove spurious styles from donate page
Correct link when not running on real URL
2020-05-27 17:31:09 +01:00
Nick Craig-Wood
33e9db9745 docs: Use versioned css/jss 2020-05-27 17:31:09 +01:00
Nick Craig-Wood
d0145d4359 docs: rename css and js includes to have their version numbers in 2020-05-27 17:31:09 +01:00
Nick Craig-Wood
80dab10ec9 docs: Fix generated docs
In distributed docs
- Make img tags absolute
- Add donate link in docs
- Fix provider table
2020-05-27 17:31:08 +01:00
Nick Craig-Wood
022cda3109 docs: update provider list on front page 2020-05-27 17:31:08 +01:00
Nick Craig-Wood
4030672b66 docs: Add upload_test_website target to Makefile 2020-05-27 17:31:08 +01:00
Nick Craig-Wood
348379625c docs: update donations page 2020-05-27 17:31:08 +01:00
Nick Craig-Wood
ab4a2275eb docs: add copy to clipboard javascript
add popper library for popup menus
2020-05-27 17:31:08 +01:00
Nick Craig-Wood
c8683fc916 docs: bring README up to date 2020-05-27 17:31:08 +01:00
Nick Craig-Wood
273ee0d696 docs: remove unused page types 2020-05-27 17:31:08 +01:00
Nick Craig-Wood
6d19bbba73 docs: add description to commands and index page 2020-05-27 17:31:08 +01:00
Nick Craig-Wood
d6c31b51c6 docs: new content for index and donate pages 2020-05-27 17:31:08 +01:00
Nick Craig-Wood
af19f924ff docs: layout tweaks 2020-05-27 17:31:08 +01:00
Nick Craig-Wood
80572c544b docs: more config fixes for hugo 0.7 2020-05-27 17:31:08 +01:00
Nick Craig-Wood
11f8cb32d1 docs: new flags 2020-05-27 17:31:08 +01:00
Nick Craig-Wood
62781d0925 docs: fix layouts 2020-05-27 17:31:08 +01:00
Nick Craig-Wood
2bd786a452 docs: fix rc docs and update anchors for new Hugo version 2020-05-27 17:31:08 +01:00
Nick Craig-Wood
038648aaca docs: rename about.md to _index.md after Hugo upgrade
Use the official way of including markdown content into index.html
2020-05-27 17:31:08 +01:00
Nick Craig-Wood
0e3ac6b13c docs: updated donate page 2020-05-27 17:31:08 +01:00
Nick Craig-Wood
1e274b01fe docs: fix layouts after bootstrap upgrade
- use bootstrap recommended viewport
2020-05-27 17:31:08 +01:00
Nick Craig-Wood
c4f6439715 docs: update Hugo config after updating to 0.69.2 2020-05-27 17:31:08 +01:00
Nick Craig-Wood
cdecb44789 docs: update bootstrap to 4.4.1 and jQuery to 3.5.1 2020-05-27 17:31:08 +01:00
calisro
8af3f61b6e doc: fix shortcut creation language (#4281) 2020-05-27 11:23:27 -04:00
Martin Michlmayr
48eef2fb3c docs: add missing word 2020-05-26 13:49:09 +01:00
Martin Michlmayr
7a5b531bd0 docs: fix cosmetics issues 2020-05-26 13:49:09 +01:00
Nick Craig-Wood
78ca08ba8a pcloud: fix initial config "Auth state doesn't match" message #4210
pCloud should be passing back the state parameter that rclone passed
in on config but it seems to have got lost somewhere.

This sets a work-around for the pCloud backend allowing an empty state
parameter.

See: https://forum.rclone.org/t/cannot-connect-to-pcloud/16592
See: https://forum.rclone.org/t/cannot-create-pcloud-config-file-on-osx/16583
2020-05-26 11:27:01 +01:00
Nick Craig-Wood
49ba4eeb86 oauthutil: tidy interface to Config to add Options struct
The interface was getting so that a new function was needed for every
Config variant. Adding an Options struct fixes this.
2020-05-26 11:27:01 +01:00
Nick Craig-Wood
c08617c70f box: Calculate Free amount in About call 2020-05-25 16:47:34 +01:00
Nick Craig-Wood
1bd3365868 vfs: fix TestVFSStatfs with known total, used and unknown free 2020-05-25 16:46:56 +01:00
Nick Craig-Wood
31f21551bf mount: change maximum leaf name length to 1024 bytes - fixes #3884
This limit was previously set to 4k in 59026c4761, however leaf
names above 1k now produce an IO error.

WinFSP seems to have its own method for dropping too long file names
above 255 long.
2020-05-25 15:41:11 +01:00
Nick Craig-Wood
baaec5b126 testserver: don't allow starting of an empty remote 2020-05-25 15:26:06 +01:00
Martin Michlmayr
2b72c7f709 docs: move link to correct location
The link is for WinFsp, so link from "WinFsp" rather than "open
source".
2020-05-25 12:04:34 +01:00
Martin Michlmayr
f204d95a8b docs: fix range
This is a range so there should be two dots (as in the other ranges).
2020-05-25 11:25:23 +01:00
Martin Michlmayr
7a5b814e59 docs: fix cosmetic issues 2020-05-25 11:25:23 +01:00
Martin Michlmayr
aa6c06d751 docs: use code elements more consistently 2020-05-25 11:25:23 +01:00
Martin Michlmayr
ffb031a71b docs: link to usage section of docs
Since the page refers to the "Usage" section, let's actually link
to that specific section.
2020-05-25 11:25:23 +01:00
Martin Michlmayr
041b201abd doc: fix typos throughout docs and code 2020-05-25 11:23:58 +01:00
Nick Craig-Wood
9db8ecbc32 box: implement About to read size used - fixes #4264 2020-05-23 18:46:44 +01:00
Nick Craig-Wood
518d39815c sync,copy,move: add --check-first to do all checking before starting transfers
See: https://forum.rclone.org/t/rclone-sync-doing-transfer-and-checking-in-paralel/16352/
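
Illustrative usage (placeholder paths):

    rclone sync /local/dir remote:backup --check-first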
2020-05-22 17:50:07 +01:00
Nick Craig-Wood
147f97d1f7 sync: allow --max-backlog to be -ve meaning as large as possible 2020-05-22 17:50:07 +01:00
Nick Craig-Wood
7f44735709 Add Daniel Slyman to contributors 2020-05-22 17:50:07 +01:00
Martin Michlmayr
db9d94f5de doc: improve wording 2020-05-20 15:54:51 +01:00
Martin Michlmayr
a36ef8582f doc: use consistent capitalization 2020-05-20 15:54:51 +01:00
Martin Michlmayr
f34a40a709 swift: fix cosmetic issue in error message 2020-05-20 15:54:51 +01:00
Martin Michlmayr
4aee962233 doc: fix typos throughout docs and code 2020-05-20 15:54:51 +01:00
Fred
5f71d186b2 seafile: implement 2FA 2020-05-20 15:46:35 +01:00
Daniel Slyman
56c9fdb53c docs: update file size limitations for OneDrive Personal 2020-05-20 08:11:25 +01:00
Animosity022
47474687eb Update drive.md
Changing the 'and' to 'or'.
2020-05-19 21:03:08 -04:00
Animosity022
abe753ca86 Update drive.md
Just adding notes for the different scenarios: a Google Suite user or a vanilla Google account.
2020-05-19 21:02:11 -04:00
Nick Craig-Wood
cf5d0f5c1f Revert "drive: server side copy docs use default description if empty"
This reverts commit 9e4b68a364.

This does not work as intended - it only changes docs files and to
make it change drive files would take an extra roundtrip.

I think the semantics of server side copy are now correct - additional
features should be added with a new flag.

See #4230
2020-05-19 16:48:02 +01:00
Nick Craig-Wood
4d431e94b9 oauth2: try to make token expiry messages more helpful - fixes #4250
See also: #4251
2020-05-19 16:19:35 +01:00
Nick Craig-Wood
bdafbad61e cache: fix tests writing to empty path
This meant the tests were writing to the current directory instead of
a temporary directory.
2020-05-19 16:01:35 +01:00
Nick Craig-Wood
eb6e9b194a fspath: Stop empty strings being a valid path - fixes #4239
Before this change you could use "" as a valid remote, so `rclone lsf
""` would work. This was treated as the current directory.

This is unexpected and creates a footgun for scripting when an empty
variable is passed to rclone by accident.

This fix returns the error "can't use empty string as a path" instead
of allowing it.
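
A sketch of the scripting footgun this guards against (hypothetical variable name):

    # If $DEST is accidentally empty this now fails with
    # "can't use empty string as a path" instead of listing the current directory
    rclone lsf "$DEST"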
2020-05-19 12:34:23 +01:00
Nick Craig-Wood
ecdfd80459 Add Brandon McNama to contributors 2020-05-19 12:22:59 +01:00
Nick Craig-Wood
a268eedb1d Add Martin Michlmayr to contributors 2020-05-19 12:22:59 +01:00
Brandon McNama
19ff7c9302 cache: Fix Server Side Copy with Temp Upload
When wrapping a backend that supports Server Side Copy (e.g. `b2`, `s3`)
and configuring the `tmp_upload_path` option, the `cache` backend would
erroneously report that Server Side Copy/Move was not supported, causing
operations such as file moves to fail. This change fixes this issue
under these circumstances such that Server Side Copy will now be used
when the wrapped backend supports it.

Fixes #3206
2020-05-19 12:17:40 +01:00
Martin Michlmayr
fb169a8b54 doc: fix typos throughout docs 2020-05-19 12:02:44 +01:00
calisro
bcbfad1482 sftp: added --sftp-pem-key to support inline key files 2020-05-19 11:55:38 +01:00
Thibault Molleman
8c37ae8f5c docs: drive: google console doesn't list 'other' anymore
The option of 'other' seems to be gone from the

https://console.developers.google.com/apis/credentials/oauthclient

page. It only lists these now:

- Web application
- Android
- Chrome app
- iOS
- TVs and Limited Input devices
- Desktop app
- Universal Windows Platform (UWP)
2020-05-19 11:52:23 +01:00
Nick Craig-Wood
610f40f700 local: implement --local-no-sparse flag for disabling sparse files #2469
This also introduces a one time warning for sparse files and updates
the docs to warn about them.
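
Illustrative usage of the new flag (placeholder paths):

    rclone copy remote:path /local/path --local-no-sparse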
2020-05-19 10:16:43 +01:00
Nick Craig-Wood
5cb2a2fa3c lib/file: add Implemented constants 2020-05-19 10:15:20 +01:00
Nick Craig-Wood
919a180ad2 config: make config show take "remote:" as well as "remote" 2020-05-18 19:53:13 +01:00
Nick Craig-Wood
951099dbed vfs: change default --vfs-read-wait to 20ms
In my testing with local and remote storage this is a good compromise
between delaying the seeks and failing to wait for in sequence reads.

See: https://forum.rclone.org/t/constantly-high-iowait-add-log/14156/40
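
The default can still be overridden per mount, for example (placeholder remote):

    rclone mount remote: /mnt/remote --vfs-read-wait 20ms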
2020-05-18 18:09:23 +01:00
Nick Craig-Wood
0f9267d5fc vfs: factor waiting code from read and writes into common function 2020-05-18 18:09:23 +01:00
Nick Craig-Wood
3de9bd9d04 vfs: fix hang in read wait code - Fixes #4039
Before this fix, rclone would sometimes hang in vfs.readAt().

This was due to a race condition causing rclone to miss the timeout
signal.

This was fixed by a small amount of extra locking.

This very likely also fixes a number of "failed to wait for
in-sequence read" errors.
2020-05-18 18:09:23 +01:00
Nick Craig-Wood
57ee25d75a GitHub: enable forum link and disable blank issues 2020-05-18 18:08:24 +01:00
Brandon Philips
633f50cd3e googlephotos: create feature/favorites directory - Fixes #4189
Enable access to “Favorite” images on the Google Photos backend.

This adds a “feature/favorites” folder in the Google Photos backend
and uses the Feature Filter API:

https://developers.google.com/photos/library/reference/rest/v1/mediaItems/search#Filters
2020-05-18 17:55:16 +01:00
calisro
d04d4edc40 Update ftp.md (#4241) 2020-05-16 15:28:15 -04:00
Nick Craig-Wood
98c34e413d config: add --obscure and --no-obscure flags to config create/update
Before this change there was some ambiguity about whether passwords
were obscured on not passing them into config create or config update.

This change adds the --obscure and --no-obscure flags to make the
intent clear.

It also updates the remote control and the tests.

Fixes #3728
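
Illustrative usage (remote name, host and credentials are placeholders):

    rclone config create mysftp sftp host example.com user demo pass secret --obscure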
2020-05-15 16:41:37 +01:00
Nick Craig-Wood
c4bc249b66 Add Ben Zenker to contributors 2020-05-15 16:18:25 +01:00
Ben Zenker
899c8e0697 march: added flag to allow Unicode filenames to remain unique
If your filenames contain two near-identical Unicode characters,
rclone will normalize these, making them identical. This flag
gives you the ability to keep them unique. This might
create unintended side effects, such as duplicating files that
contain certain Unicode characters, when downloading them from
certain cloud providers to a macOS filesystem.

Fixes #4228
2020-05-15 12:28:01 +01:00
Nick Craig-Wood
4006345cfb mountlib: add tests for rc mount/mount and friends 2020-05-14 16:38:37 +00:00
Nick Craig-Wood
1319d7333c mountlib: add rc command mount/types and rename mountOption to mountType 2020-05-14 16:38:37 +00:00
Chaitanya
5f168b3b96 rc: add mount/mount command 2020-05-14 16:38:37 +00:00
Nick Craig-Wood
e4f1e19127 sftp: fix post transfer copies failing with 0 size when using set_modtime=false
Before this change we early exited the SetModTime call which means we
skipped reading the info about the file.

This change reads info about the file in the SetModTime call even if
we are skipping setting the modtime.

See: https://forum.rclone.org/t/sftp-and-set-modtime-false-error/16362
2020-05-14 17:30:01 +01:00
Nick Craig-Wood
4a1b644bfb azureblob: implement streaming of unknown sized files
See: https://forum.rclone.org/t/rclone-rcat-azure-blob-container-sas-token-403-error/16286/3
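
Illustrative usage, streaming from stdin (container and path are placeholders):

    tar czf - /data | rclone rcat azureblob:container/backup.tar.gz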
2020-05-14 11:56:15 +01:00
Nick Craig-Wood
8c9c86c3d6 putio: fix parsing of remotes with leading and trailing /
See: https://forum.rclone.org/t/unable-to-copy-from-remote-but-mount-works/16351/
2020-05-14 11:52:43 +01:00
Nick Craig-Wood
8a58e0235d s3: don't leak memory or tokens in edge cases for multipart upload 2020-05-14 07:48:18 +01:00
Nick Craig-Wood
52b7337d28 crypt: change backend encode/decode to output a plain list
This commit changes the output of the rclone backend encode crypt: and
decode commands to output a plain list of decoded or encoded file
names.

This makes the command much more useful for command line scripting.
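
Illustrative usage (the remote name and the encoded name shown are made up):

    rclone backend encode secret: file1.txt dir/file2.txt
    rclone backend decode secret: 0b2bn8lbhdpo8jhfa3ao7dnlbs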
2020-05-13 18:11:45 +01:00
Nick Craig-Wood
995cd0dc32 backend: add --json flag to always output JSON 2020-05-13 18:07:41 +01:00
Nick Craig-Wood
5eb558e058 backend: fix output of an array of strings 2020-05-13 17:55:56 +01:00
Max Sum
33d9310c49 union: enable ListR when upstreams contain local
Enable fast list functions for union backend when:

- at least one of the upstreams supports fast list
- upstreams only consist of backends that support fast list and local backend.

Fixes #3000
2020-05-13 13:10:35 +01:00
Nick Craig-Wood
aba89e2737 Add Caleb Case @calebcase as the tardigrade backend maintainer 2020-05-13 12:57:29 +01:00
Nick Craig-Wood
d685e7b4b5 bin: add check-merged.go to find local branches which might have been merged 2020-05-13 12:42:45 +01:00
Nick Craig-Wood
9e4b68a364 drive: server side copy docs use default description if empty
When server side copying Google docs files we attempt to preserve the
description.

This patch makes it so that we use the default description if the
original description was empty.

See: 6fdd7149c1 (commitcomment-38008638)
2020-05-13 12:31:37 +01:00
Nick Craig-Wood
044a3b3920 fserrors: Make "tls: use of closed connection" a retriable error
This has happened when uploading very large files to B2. It is
probably a bug in the go runtime but we'll attempt to work-around it
here.

See: https://forum.rclone.org/t/large-file-upload-to-backblaze-failed/16128/5
2020-05-13 11:42:37 +01:00
Nick Craig-Wood
d342f9f942 azureblob: fix permission error on SAS URL limited to container
Before this change, for some operations, eg rcat or copyto (of a file)
rclone would attempt to create the container when using a SAS URL
limited to a container.

After this change we assume the container does not need creating when
using a container SAS URL.

See: https://forum.rclone.org/t/rclone-rcat-azure-blob-container-sas-token-403-error/16286
2020-05-13 09:11:51 +01:00
Nick Craig-Wood
e91b509578 fs: allow --min-age/--max-age to take a date as well as a duration
Fixes #4211
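
Illustrative usage (placeholder remote):

    rclone ls remote: --min-age 2020-01-02   # files modified before 2 Jan 2020
    rclone ls remote: --max-age 24h          # files modified in the last 24 hours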
2020-05-12 17:49:33 +01:00
Nick Craig-Wood
8ddb3fbb2e drive: fix using list recursive on shortcuts to directories 2020-05-12 17:08:05 +01:00
Nick Craig-Wood
b91e01fd22 drive: strip trailing slashes in shortcut command #4098
This also fixes a typo in the name of the function, and allows making
shortcuts from the root directory which are useful in cross drive
shortcut creation.

This also adds a basic suite of tests for creating, listing and removing
shortcuts.
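
Illustrative usage (item names are placeholders; see the drive backend docs for the exact syntax):

    rclone backend shortcut drive: source_item destination_shortcut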
2020-05-12 17:08:05 +01:00
Nick Craig-Wood
177195aeeb accounting: fix race clearing stats
This race was introduced by

10a6a92e52 accounting: reset bytes read during copy retry
2020-05-12 17:02:32 +01:00
Nick Craig-Wood
cb5979a468 accounting: factor stats into its own structure
This makes it very obvious which mutex to take for accessing the
values.
2020-05-12 17:02:32 +01:00
Nick Craig-Wood
32507774de Add Caleb Case to contributors 2020-05-12 17:01:26 +01:00
Caleb Case
0ce662faad Tardigrade Backend 2020-05-12 15:56:50 +00:00
Caleb Case
03b629064a Tardigrade Backend: Dependencies 2020-05-12 15:56:50 +00:00
albertony
962fbc8257 jottacloud: update docs regarding cleanup, removed remains from old auth, and added warning about special mountpoints. 2020-05-11 11:41:17 +02:00
Nick Craig-Wood
bd4b91bd57 test_all: revert running fichier tests one at a time
This didn't achieve the objective of getting the tests to run clean
and makes them take 16 hours!

This reverts commit 97f6f8fe19.
2020-05-11 08:51:33 +01:00
Ankur Gupta
10a6a92e52 accounting: reset bytes read during copy retry - fixes #4178
During a copy/sync command, if an operation fails due to a network
issue and is retried, the underlying io.Reader is re-initialised,
but the stats for bytes already read are not reset, leading to incorrect
stats. THis was fixed by resetting the bytes read when an Account is
re-initialized.
2020-05-10 17:58:22 +00:00
Max Sum
54b16bd054 union: implement ListR 2020-05-10 17:57:03 +00:00
Max Sum
f21e97001b union: fix server-side copy 2020-05-10 17:56:18 +00:00
Nick Craig-Wood
1f005a82ad http: add missing comment to pacify linter 2020-05-10 18:53:38 +01:00
Nick Craig-Wood
bb65974e2f drive: implement backend shortcut command for creating shortcuts #4098 2020-05-09 15:16:15 +01:00
Nick Craig-Wood
bc0f487369 drive: look for dirs as well as files on NewObject
This means that we can return ErrorNotAFile when there is an object
with the same name as a directory rather than potentially creating a
duplicate name.
2020-05-09 15:16:15 +01:00
Nick Craig-Wood
e103c4c26a Add Maxime Suret to contributors 2020-05-09 15:16:15 +01:00
Maxime Suret
79d29bb41e serve sftp: add support for multiple host keys by repeating --key flag 2020-05-09 14:43:17 +01:00
calisro
c80b6d96dd http: improved directory listing with new template from Caddy project
This includes a new directory listing template which was originally
from the Caddy project (used with permission and copyright attribution).

This is used whenever we serve directory listings so `rclone serve
http`, `rclone serve webdav` and `rclone rcd --rc-serve`

This also modifies the tests so they work with the original template which
is easier to debug.
2020-05-08 16:15:21 +01:00
Nick Craig-Wood
97f6f8fe19 test_all: run fichier tests one at a time
This is in attempt to get the tests to run cleanly without hitting the
rate limiter.
2020-05-07 20:39:54 +01:00
Nick Craig-Wood
94920d39ae test_all: increase the test timeout to 60m from 30m
Some tests are failing at 30m - 60m doesn't seem unreasonable
2020-05-07 20:38:40 +01:00
Nick Craig-Wood
9403bd2990 test_all: allow -list-retries to be overridden on the command line 2020-05-07 20:36:45 +01:00
Nick Craig-Wood
e098924e61 fstest: change -list-retries default to 3 from 6
The integration tests have got much better about retrying the tests,
and we aren't testing ACD any more so we don't need it this high.
2020-05-07 20:28:41 +01:00
Nick Craig-Wood
437f9e2cef Add Sébastien Gross to contributors 2020-05-07 11:25:09 +01:00
Sébastien Gross
395f259978 cmd: when running --password-command allow use of stdin
Bind rclone's standard input to the password command's standard input. This
allows providing the password from a pipe and collecting it using cat.

The typical use case is when rclone is on a remote server with an
encrypted configuration. This solves the environment variable
issue (#3368) and avoids storing the password on the remote host.

Now the following chain is allowed:

    echo 'secret' | ssh host.example.com \
       sudo -u rclone \
       rclone --config /path/to/rclone.conf \
       --password-command 'cat' ls remote:

Signed-off-by: Sébastien Gross <seb•ɑƬ•chezwam•ɖɵʈ•org>

Co-authored-by: Sébastien Gross <seb•ɑƬ•chezwam•ɖɵʈ•org>
2020-05-07 11:02:52 +01:00
Nick Craig-Wood
b78adc9f03 Add Fred @creativeproject as the seafile backend maintainer 2020-05-07 10:18:10 +01:00
Nick Craig-Wood
b7c21310b4 Add Fred to contributors 2020-05-06 18:33:44 +01:00
Fred
c754e89906 seafile: New backend for seafile server 2020-05-06 17:33:22 +00:00
Fred
62cfe3f384 vendor: add github.com/coreos/go-semver 2020-05-06 17:33:22 +00:00
Nick Craig-Wood
afde340c9e gcs: fix --header-upload - #59
Before this change we were setting the headers on the PUT request. However this isn't where GCS needs them.

After this fix we set the headers in the object upload request itself.

This means that we only support a limited range of headers

- Cache-Control
- Content-Disposition
- Content-Encoding
- Content-Language
- Content-Type
- X-Goog-Meta-

Note that the last of those is for setting custom metadata in the form
"X-Goog-Meta-Key: value".
2020-05-06 17:34:23 +01:00
Nick Craig-Wood
d0ad83de4b Add ElonH to contributors 2020-05-06 17:34:23 +01:00
ElonH
d119bfd934 rcd: disable duplicate log
If running `rclone rcd --rc-user=admin --rc-pass=admin
--rc-allow-origin="*"`, lots of duplicate warnings appear in the log:

Warning: Allow origin set to *. This can cause serious security problems.
Warning: Allow origin set to *. This can cause serious security problems.
....

This is not conducive to analyzing debugging info.

Therefore, let's show it only once.
2020-05-05 13:47:25 +00:00
Nick Craig-Wood
bdc91eda0f serve http: add Last-Modified headers to files and directories
This means that using `rclone serve http` preserves modification times
when used with the http backend.

Fixes #4201
2020-05-05 09:41:08 +01:00
Nick Craig-Wood
ef9e6794c2 docs: make serve http/webdav template docs into a table 2020-05-04 17:36:31 +00:00
calistri
4362ca7bb9 serve http, serve webdav: Added a --template flag for user defined markup 2020-05-04 17:36:31 +00:00
Nick Craig-Wood
dcf945ed58 docs: add bin/.ignored-emails for removing email addresses from authors.md
Remove an email as requested for Anagh Kumar Baranwal
2020-05-04 17:38:25 +01:00
Anagh Kumar Baranwal
a86196a156 drive: Added command to change service_account_file and chunk_size
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2020-05-04 16:23:33 +00:00
Anagh Kumar Baranwal
856c2b565f crypt: Added decode/encode commands to replicate functionality of cryptdecode
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2020-05-04 16:23:33 +00:00
Nick Craig-Wood
1a8c5708c5 vfs: ignore file not found errors from Hash in Read.Release
There is nothing we can do about this at this point, and this error can
happen when moving files, so we ignore it to clean up the logs.
2020-05-04 12:18:28 +01:00
Nick Craig-Wood
14cab0fff0 local: fix "file not found" errors on post transfer Hash calculation
Before this change the local backend was returning file not found
errors for post transfer hashes for files which were moved. This was
caused by the routine which checks for the object being changed.

After this change we ignore file not found errors while checking to
see if the object has changed. If the hash has to be computed then a
file not found error will be thrown when it is opened, otherwise the
cached hash will be returned.
2020-05-04 12:17:46 +01:00
Nick Craig-Wood
69888bf966 cmount: send a hint as to whether the filesystem is case insensitive or not 2020-05-04 11:38:07 +01:00
Nick Craig-Wood
d260238f99 cmount: use ReaddirPlus on Windows to improve directory listing performance
Before this change Windows would read a directory then immediately stat
every item in the directory.

After this change we return the stat information along with the
directory which stops so many callbacks.
2020-05-04 11:38:07 +01:00
Nick Craig-Wood
6ca7198f57 mount: fix disappearing cwd problem - fixes #4104
Before this change, the current working directory could disappear
according to the Linux kernel.

This was caused by mount returning different nodes with the same
information in.

This change uses vfs.Node.SetSys to cache the information so we always
return the same node.
2020-05-04 11:38:07 +01:00
Nick Craig-Wood
cfcdc85b26 vfs: Add SetSys() methods to Node to allow caching stuff on a node 2020-05-04 11:38:07 +01:00
Nick Craig-Wood
7f8d74e903 docs: refresh rclone authorize help 2020-05-04 10:40:26 +01:00
Nick Craig-Wood
f2b1fedc4f drive: follow shortcuts by default, skip with --drive-skip-shortcuts
Before this change rclone would skip all shortcuts with a message

    Ignoring unknown document type "application/vnd.google-apps.shortcut"

After this message rclone resolves the shortcuts by default to the
actual files that they point to. See the docs for more info.

The --drive-skip-shortcuts flag can be used to skip shortcuts.
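
Illustrative usage (placeholder remote):

    rclone lsf gdrive: --drive-skip-shortcuts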
2020-05-02 18:28:38 +01:00
Nick Craig-Wood
b03cad3cf6 vendor: update google.golang.org/api/drive to pull in shortcuts definition 2020-05-02 18:28:38 +01:00
Nick Craig-Wood
70db13e6e8 vfs: pin the Fs in use in the Fs cache
This means we can reliably look up the Fs from the cache when using
`backend/command`.
2020-05-01 17:11:45 +01:00
Nick Craig-Wood
4c98360356 fs/cache: Add Pin and Unpin and canonicalised lookup
Before this change we stored cached Fs under the config string the
user gave us. As the result of fs.ConfigString() can often be
different after the backend has canonicalised the paths this meant
that we could not look up backends in the cache reliably.

After this change we store cached Fs under their config string as
returned from fs.ConfigString(f) after the Fs has been created. We
also store a map of user to canonical names (where they are different)
so the users can look up Fs under the names they passed to rclone too.

This change along with Pin and Unpin is necessary so we can look up
the Fs in use reliably in the `backend/command` remote control
interface.
2020-05-01 17:11:45 +01:00
Nick Craig-Wood
42f9f7fb5d lib/cache: add Rename, Pin and Unpin 2020-05-01 17:11:45 +01:00
Nick Craig-Wood
ca1856724c fs: add ConfigString function to return a canonical config string 2020-05-01 17:11:45 +01:00
Nick Craig-Wood
b52a39a84e drive: fix merge breakage
In 2f5a2d3c48 an incorrect merge caused compilation to fail
2020-05-01 13:02:32 +01:00
Nick Craig-Wood
a97261c54f Add gitch1 to contributors 2020-05-01 13:02:32 +01:00
gitch1
0f5579c0ba docs : add --password-command Powershell Wiki link
Add a Windows PowerShell example wiki link to the --password-command doc.
2020-04-30 17:31:58 +01:00
Nick Craig-Wood
2f5a2d3c48 drive: Don't return nil Object with nil error from newObject* functions.
Before this change the newObject* functions could return object=nil
with err=nil.  The result of these functions are passed outside of the
backend code (eg in Copy, Move) and returning a nil object with a nil
error leads to crashes elsewhere as it breaks expectations.

After this change we return (nil, fs.ErrorObjectNotFound) in these
cases. In the one place this is actually needed internally (when turning
items into listings) we detect that error and use it to mean skip the
directory item.

This problem was noticed while testing the shortcuts code. It
shouldn't happen normally but it is conceivable it could.
2020-04-30 17:11:36 +01:00
Nick Craig-Wood
ba7f7c8319 build: add -trimpath to release build for reproducible builds 2020-04-30 12:43:40 +01:00
Nick Craig-Wood
77fb3c2511 vfs: bring DO NOT EDIT comments in line with "go help generate" 2020-04-30 12:24:44 +01:00
Nick Craig-Wood
74d9dabdff b2: force the case of the SHA1 to lowercase - fixes #4162
Apparently some tools (eg duplicati) upload the SHA1 in uppercase to
b2 to be stored in the `large_file_sha1` metadata. This patch forces
it to lower case.
2020-04-29 17:08:21 +01:00
Nick Craig-Wood
54edf38d0e Add Matan Rosenberg to contributors 2020-04-29 17:08:21 +01:00
Matan Rosenberg
22f06590f7 genautocomplete: add support for fish shell 2020-04-29 16:19:35 +01:00
Matan Rosenberg
3b4c24af4e vendor: update github.com/spf13/cobra to v1.0.0 2020-04-29 16:19:35 +01:00
Nick Craig-Wood
f37af9afec lsjson: Add --hash-type parameter and use it in lsf to speed up hashing
Before this change if you specified --hash MD5 in rclone lsf it would
calculate all the hashes and just return the MD5 hash which was very
slow on the local backend.

Likewise specifying --hash on rclone lsjson was equally slow.

This change introduces the --hash-type flag (and corresponding
internal parameter) so that the hashes required can be selected in
lsjson.

This is used internally in lsf when the --hash parameter is selected
to speed up the hashing by only hashing with the one hash specified.

Fixes #4181
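
Illustrative usage (placeholder remote):

    rclone lsjson --hash --hash-type MD5 remote:path
    rclone lsf --hash MD5 --format "hp" remote:path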
2020-04-29 16:09:45 +01:00
Nick Craig-Wood
a3f0992a22 Add Kush to contributors 2020-04-29 16:09:45 +01:00
Kush
f555873f18 delete: added --rmdirs flag to delete directories as well - fixes #4055
If you supply the --rmdirs flag with the delete command, it will also
remove all empty directories, leaving the root intact.
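
Illustrative usage (placeholder path):

    rclone delete remote:path/to/dir --rmdirs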
2020-04-29 12:15:30 +01:00
Nick Craig-Wood
1c8eab81a5 dbhashsum: hide the command now it is deprecated 2020-04-29 10:12:12 +01:00
Nick Craig-Wood
cbc5af329f cachestats: deprecate in favour of rclone backend stats cache: 2020-04-29 10:10:57 +01:00
Nick Craig-Wood
90d738b561 cache: implement rclone backend stats command 2020-04-29 10:10:57 +01:00
Nick Craig-Wood
d80fdad6da rc: implement backend/command for running backend commands remotely 2020-04-29 10:10:57 +01:00
Nick Craig-Wood
e2916f3a55 local: implement backend command "noop" for testing purposes 2020-04-29 10:10:57 +01:00
Nick Craig-Wood
1aa1a2c174 backend: add new backend command for backend specific commands
These commands are for implementing backend specific
functionality. They have documentation which is placed automatically
into the backend doc.

There is a simple test for the feature in the backend tests.
2020-04-29 10:10:57 +01:00
Nick Craig-Wood
195d152785 rc: add GetStructMissingOK 2020-04-29 09:42:31 +01:00
Nick Craig-Wood
1f61027f51 rc: add -o/--opt and -a/--arg for more structured input 2020-04-29 09:42:31 +01:00
Nick Craig-Wood
37a53570d4 azureblob: implement memory pooling to control memory use
This commit implements memory pooling to control excessive memory use
as was implemented in the s3 backend.
2020-04-28 17:47:10 +01:00
Nick Craig-Wood
ee7219aa20 azureblob: add --azureblob-disable-checksum flag 2020-04-28 17:47:10 +01:00
Nick Craig-Wood
b1d8da484b azureblob: retry InvalidBlobOrBlock error as it may indicate block concurrency problems
According to Microsoft support this error can be caused by

> A timing/concurrency issue where the PUT operations are happening
> about the same time for a single blob. The Put Block List operation
> writes a blob by specifying the list of block IDs that make up the
> blob. In order to be written as part of a blob, a block must have
> been successfully written to the server in a prior Put Block
> operation.
>
> Documentation reference:
>
> https://docs.microsoft.com/en-us/rest/api/storageservices/put-block
>
> This error can happen when doing concurrent upload commits after you
> have started the upload but before you commit. In that case, the
> upload fails. The application can retry this error or attempt some
> other recovery action based on the required scenario.

See: https://forum.rclone.org/t/error-while-syncing-with-azure-blob-storage-x-ms-error-code-invalidbloborblock/15561
2020-04-28 17:47:10 +01:00
Nick Craig-Wood
4e869e03f7 s3: improve docs for --s3-disable-checksum 2020-04-28 17:47:10 +01:00
Nick Craig-Wood
52c9647b06 b2: improve docs for --b2-disable-checksum 2020-04-28 17:47:10 +01:00
Nick Craig-Wood
7238ae18f9 Add Adam Stroud to contributors 2020-04-28 17:47:10 +01:00
Nick Craig-Wood
551a829eba googlephotos: don't put an image in error message - fixes #4144
For a certain class of broken or missing image Google Photos puts an
image in the error message.

Before this fix we blindly chucked it into the error message.

After this fix we replace it with some sensible text.
2020-04-28 16:51:47 +01:00
Adam Stroud
8e91f83174 googlecloudstorage: Add ARCHIVE storage class to help 2020-04-27 11:40:21 +01:00
buengese
7f776c64f0 fichier: implement custom pacer to deal with the new rate limiting 2020-04-26 20:38:56 +02:00
Nick Craig-Wood
8bf6ab2c52 accounting: fix race condition in tests 2020-04-24 12:32:09 +01:00
Nick Craig-Wood
75fc3fe389 fs: fix FixRangeOption so it doesn't add HTTPOptions in place of bad Ranges
Before this fix, FixRangeOption would substitute RangeOptions it
wanted to get rid of with an empty HTTPOption. This caused a problem
now that backends interpret HTTPOptions.

This fix substitutes those with NullOptions which aren't interpreted as
HTTPOptions. This patch also improves the unit tests.
2020-04-24 12:32:09 +01:00
Xiaoxing Ye
c4572ebc91 rc: fix misplaced http server config - fixes #4130 2020-04-23 20:22:47 +01:00
David
e56976839a jwtutil: Fix error handling 2020-04-23 17:52:14 +01:00
David
0c0ed2fe04 box: Remove unnecessary iat from jws claims 2020-04-23 17:52:14 +01:00
Nick Craig-Wood
ab6ed256e5 putio: add support for --header-upload and --header-download #59 2020-04-23 15:55:52 +01:00
Nick Craig-Wood
7c98ecd3ab putio: make downloading files use the rclone http Client
This fixes `--download-header` and these transactions being missed from
`--dump bodies` or `--tpslimit`
2020-04-23 15:48:30 +01:00
Nick Craig-Wood
f6346a4d29 fs: add --header flag to add options to every HTTP transaction #59 2020-04-23 15:24:21 +01:00
Nick Craig-Wood
b502a74cff gcs: add support for --header-upload and --header-download #59 2020-04-23 11:41:57 +01:00
Nick Craig-Wood
8e9c25063a swift: add support for --header-upload and --header-download #59 2020-04-23 11:34:36 +01:00
Nick Craig-Wood
1dced3b3c4 rcat: add support for --header-upload #59 2020-04-23 11:34:31 +01:00
Nick Craig-Wood
087bf1d584 cat: add support for --header-download #59 2020-04-23 11:34:24 +01:00
Nick Craig-Wood
e051a34fc1 dbhashsum: deprecate: use rclone hashsum DropboxHash instead 2020-04-23 11:13:13 +01:00
Nick Craig-Wood
f5455d865b accounting: check for max transfer in WriteTo
Before this change the max transfer tests were failing for remotes
which were using WriterTo.
2020-04-23 11:13:13 +01:00
Tim Gallant
b705ead3fd docs: adds --header-download and --header-upload 2020-04-23 11:07:21 +01:00
Tim Gallant
c390fc8100 onedrive: pass options to rest.Opts for Put and Update 2020-04-23 11:07:21 +01:00
Tim Gallant
14f6ce1e77 premiumizeme: pass options to rest.Opts for Put and Update 2020-04-23 11:07:21 +01:00
Tim Gallant
385542e2f9 sharefile: pass options to rest.Opts for Put and Update 2020-04-23 11:07:21 +01:00
Tim Gallant
fc946d0c44 fichier: pass options to rest.Opts for uploadFile 2020-04-23 11:07:21 +01:00
Tim Gallant
854c84d0ca pcloud: pass options to rest.Opts for Put and Update 2020-04-23 11:07:21 +01:00
Tim Gallant
90bd0eb44c webdav: pass options to rest.Opts for Put and Update 2020-04-23 11:07:21 +01:00
Tim Gallant
3130f870bb sugarsync: pass options to rest.Opts for Put and Update 2020-04-23 11:07:21 +01:00
Tim Gallant
51b617f601 yandex: pass options to rest.Opts for upload 2020-04-23 11:07:21 +01:00
Tim Gallant
011ca244b2 jottacloud: pass options to rest.Opts for Put and Update 2020-04-23 11:07:21 +01:00
Tim Gallant
9ea1361044 googlephotos: pass options to rest.Opts for Put and Update 2020-04-23 11:07:21 +01:00
Tim Gallant
776966e22c opendrive: pass options to rest.Opts for Put and Update 2020-04-23 11:07:21 +01:00
Tim Gallant
01cb256b84 box: pass options to rest.Opts for uploadPart 2020-04-23 11:07:21 +01:00
Tim Gallant
0b0163dde2 box: pass options to rest.Opts for upload 2020-04-23 11:07:21 +01:00
Tim Gallant
38123c70eb b2: pass options to rest.Opts for Update 2020-04-23 11:07:21 +01:00
Tim Gallant
5cb7229a16 s3: add support for HTTPOption 2020-04-23 11:07:21 +01:00
Tim Gallant
9bf3d3da4c fs: add UploadHeaders, DownloadHeaders to Update/Put/Open options 2020-04-23 11:07:21 +01:00
Tim Gallant
93caa459e3 fs/config: add header-download and header-upload flags 2020-04-23 11:07:21 +01:00
Nick Craig-Wood
f8039deb7c s3: fix detection of BucketAlreadyOwnedByYou and BucketAlreadyExists error
This was being silently ignored until this commit

e2bf91452a s3: report errors on bucket creation (mkdir) correctly
2020-04-22 18:14:03 +01:00
Nick Craig-Wood
86eaf43b00 vfs: fix tests for Statfs when running on backends with unknowns
This was broken in da41db4712
2020-04-22 18:14:03 +01:00
Nick Craig-Wood
8176202e6d Add Sunil Patra to contributors 2020-04-22 18:14:03 +01:00
Nick Craig-Wood
1b74879b8b Add David Bramwell to contributors 2020-04-22 18:14:03 +01:00
Sunil Patra
39319b4858 @Sunil-P
box: Added support for interchangeable root folder for Box backend - #3422
2020-04-22 17:00:13 +01:00
Sunil Patra
4af5c9aed7 pCloud: Added support for interchangeable root folder for pCloud backend - #3957 2020-04-22 16:58:01 +01:00
David Bramwell
8a3c4c6a7b box: add token renew function for jwt auth - Fixes #4901 2020-04-22 16:53:03 +01:00
Nick Craig-Wood
d22e6f5a96 cryptcheck: remove duplicated debug OK message 2020-04-22 11:33:48 +01:00
Nick Craig-Wood
1648c1a0f3 crypt: calculate hashes for uploads from local disk
Before this change crypt would not calculate hashes for files it was
uploading. This is because, in the general case, they have to be
downloaded, encrypted and hashed which is too resource intensive.

However this causes backends which need the hash first before
uploading (eg s3/b2 when uploading chunked files) not to have a hash
of the file. This causes cryptcheck to complain about missing hashes
on large files uploaded via s3/b2.

This change calculates hashes for the upload if the upload is coming
from a local filesystem. It does this by encrypting and hashing the
local file re-using the code used by cryptcheck. For a local disk this
is not a lot more intensive than calculating the hash.

See: https://forum.rclone.org/t/strange-output-for-cryptcheck/15437
Fixes: #2809
2020-04-22 11:33:48 +01:00
Nick Craig-Wood
44b1a591a8 crypt: get rid of the unused Cipher interface as it obfuscated the code 2020-04-22 11:33:48 +01:00
Nick Craig-Wood
bbb6f94377 fstest: create AssertTimeEqualWithPrecision from CheckTimeEqualWithPrecision 2020-04-22 11:33:00 +01:00
Nick Craig-Wood
3f654dac37 mount: map more rclone errors into file systems errors
This improves the error reporting, in particular for
fs.ErrorPermissionDenied which was being reported as an IO error.
2020-04-21 16:31:43 +01:00
Nick Craig-Wood
eed9c5738d vfs: factor the vfs cache into its own package 2020-04-20 10:42:33 +01:00
Nick Craig-Wood
fd39cbc193 vfstests: move functional tests from mountlib and make them work with VFS
The tests are now run for the mount commands and for the plain VFS.

This makes the tests much easier to debug when running with a VFS than
through a mount.
2020-04-20 10:42:33 +01:00
Nick Craig-Wood
b25f5eb0d1 serve sftp: use VFS utility functions instead of own copy 2020-04-19 15:40:55 +01:00
Nick Craig-Wood
0961763082 vfs: add utility methods to match os package 2020-04-19 15:40:55 +01:00
Nick Craig-Wood
49e5299a95 asyncreader: make ErrorStreamAbandoned public 2020-04-19 15:18:49 +01:00
Nick Craig-Wood
07908f3f54 vfs: bring open_tests.go generator back into line with the generated tests
In

54deb01f00 vfs: Make OpenFile and friends return EINVAL if O_RDONLY and O_TRUNC

The generated file open_test.go was edited directly without editing
the generator.

This commit brings the generator make_open_tests.go back into line
with that edit. It also makes it so `go generate` can be used to
regenerate the tests.
2020-04-19 15:18:49 +01:00
Nick Craig-Wood
fdada79ebf accounting: support WriterTo for less memory copying
This should reduce memory copying when the async buffer is in use and
improve speeds.
2020-04-19 15:18:49 +01:00
Nick Craig-Wood
7f15cc9556 operations: make ReOpen and NewReOpen public for re-use elsewhere 2020-04-19 15:18:49 +01:00
Nick Craig-Wood
cd3c699f28 lib/readers: factor ErrorReader from multiple sources 2020-04-19 15:18:49 +01:00
Nick Craig-Wood
36d2c46bcf local: factor PreAllocate and SetSparse to lib/file 2020-04-19 15:18:49 +01:00
Nick Craig-Wood
1f50b70919 vfs: consistently use f.Path() or f._path() in File logs to avoid deadlocks
Previously we were using f which calls f.String() which calls f.Path()
which can cause a deadlock if used carelessly.

This patch explicitly calls f.Path() or f._path() to bring attention
to the fact that there is a call to a method.
2020-04-19 15:16:43 +01:00
Nick Craig-Wood
19db0df639 vfs: stop reading Dir members from outside dir.go 2020-04-19 15:16:43 +01:00
Nick Craig-Wood
238f26cc90 vfs: stop reading File members from outside file.go
This also fixes locking for ReadFileHandle and WriteFileHandle
accessing File members
2020-04-19 15:16:43 +01:00
Nick Craig-Wood
268fcbb973 vfs: implement lock ordering between File and Dir to eliminate deadlocks
As part of this we take a copy of the directory path as calling
d.Path() violates the total locking order.

See the comment at the top of file.go for details
2020-04-19 15:16:43 +01:00
Nick Craig-Wood
1e4589db18 Add Martin Stone to contributors 2020-04-19 12:41:38 +01:00
Denis
31a1cc46b7 copyurl: add no-clobber flag - fixes #3950 2020-04-19 12:40:17 +01:00
Martin Stone
9d4b3580a5 docs: fix description of --quiet - fixes #4132 2020-04-16 18:12:17 +01:00
Nick Craig-Wood
b07bef2a6b Add Daven to contributors 2020-04-15 18:13:35 +01:00
Nick Craig-Wood
705e60d0ad Add Brandon Philips to contributors 2020-04-15 18:13:35 +01:00
Daven
4c258787b5 googlephotos: make the start year configurable - fixes #3630 2020-04-15 18:08:07 +01:00
Brandon Philips
58ea15078f Dockerfile: remove GOOS and GOARCH
GOOS and GOARCH being set like this makes it impossible to compile on
other archs.

For me GOARCH prevents compilation on my ARM machine.

For others GOOS will prevent building for Windows.

xref https://github.com/rclone/rclone/issues/4086
2020-04-15 17:09:51 +01:00
Dan Dascalescu
756d47fb50 copy: fix typo 2020-04-15 17:08:44 +01:00
Jon Fautley
53874bd8ee cmd: add --error-on-no-transfer option
This allows rclone to exit with a non-zero return code if no files are
transferred. This is useful when calling rclone as part of a workflow/script
pipeline as it allows the end user to stop processing if no files have been
transferred.

NB: Enabling this option will result in rclone exiting non-zero if there are no
transfers. Depending on how you're currently using rclone in your scripts,
this may break existing setups!
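
A minimal sketch of using it in a script (placeholder paths; see the exit-code docs for the exact code returned):

    rclone copy /src remote:dst --error-on-no-transfer \
        || echo "no files transferred (or the copy failed)"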
2020-04-15 17:06:40 +01:00
Nick Craig-Wood
e2bf91452a s3: report errors on bucket creation (mkdir) correctly
Before this fix errors on bucket creation were being silently
swallowed.

See: https://forum.rclone.org/t/rclone-with-brand-new-aws-account-for-s3/15590
2020-04-15 13:13:13 +01:00
Nick Craig-Wood
9eb17e4ade fs: fix typo in error message 2020-04-15 12:50:26 +01:00
Nick Craig-Wood
2c4aadb588 Revert "install.sh: create ~/.config/rclone directory"
This reverts commit d694bb30e5.

If it creates a config directory then it leaves it owned by root which
is very confusing for new users.
2020-04-11 18:40:46 +01:00
Nick Craig-Wood
424554bc85 fs: generalise machinery for putting extra values when using --use-json-log 2020-04-11 18:16:21 +01:00
reddi
12a208a880 fs: expand stats output for json log 2020-04-11 18:16:21 +01:00
Michał Matczuk
6893ce0bbf s3: do not resize buf on put to memBuf
This is handled by Pool implementation.
2020-04-11 16:35:48 +01:00
Michał Matczuk
399cf18013 s3: use single memory pool
Previously we had a map of pools for different chunk sizes.
In practice the mapping is not very useful and requires a lock.
Pools of a size other than ChunkSize can only happen when we have a huge file (over 10k * ChunkSize).
That would require a bunch of identically sized huge files.
In such a case most likely ChunkSize should be increased.

The mapping and its lock are replaced with a single initialised pool for ChunkSize; in other cases a pool is allocated and freed on a per-file basis.
2020-04-11 16:34:05 +01:00
buengese
64b5105edd jottacloud: implement cleanup 2020-04-11 16:42:25 +02:00
buengese
2c2f4a6a05 jottacloud: implement --jottacloud-trashed-only 2020-04-11 16:42:25 +02:00
Nick Craig-Wood
da41db4712 vfs,mount,cmount: report 1PB free for unknown disk sizes
Factor the logic into the VFS layer so we don't have to duplicate it
into mount and cmount.

See: https://forum.rclone.org/t/rclone-mount-question/15454/
2020-04-11 13:31:10 +01:00
Nick Craig-Wood
9f3449d944 Add Michael G to contributors 2020-04-11 13:29:13 +01:00
Michael G
ec8a884787 doc: Clarify 'key' option for host key on serve sftp
The option --key would set the sftp host key. It could be mistaken for a default-user-key. Instead, explicitly call it 'host key' to avoid confusion.
2020-04-10 15:23:58 +01:00
Nick Craig-Wood
fc663d98d1 New email for Ankur Gupta in contributors 2020-04-03 11:51:08 +01:00
Ankur Gupta
08c2cb784f filter: Added --files-from-raw flag
--files-from parses input files by ignoring comments starting with # and ;
and stripping whitespace from start and end of strings.

The --files-from-raw flag was added that reads every line from the file ignoring
comment characters and not stripping whitespace while maintaining
backwards compatibility.

Fixes #3762
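
Illustrative usage (file name and paths are placeholders):

    # every line of files.txt is taken verbatim, including '#', ';' and whitespace
    rclone copy /src remote:dst --files-from-raw files.txt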
2020-04-03 10:36:24 +01:00
Nick Craig-Wood
3911a49256 vfs: make File lock and Dir lock not overlap to avoid deadlock
This was caused by this commit which wasn't part of 1.51.0

3c91abce74 vfs: fix race condition caused by unlocked reading of Dir.path
2020-04-02 21:14:45 +01:00
Nick Craig-Wood
2a62471e4c Add Jack Anderson to contributors 2020-03-31 18:17:36 +01:00
Jack Anderson
815ae7df45 backend/s3: add SSE-C support for AWS, Ceph, and MinIO 2020-03-31 18:16:45 +01:00
Nick Craig-Wood
ff0a299bfb drive: don't delete files with multiple parents to avoid data loss
Rclone can't safely delete files with multiple parents without
PATCHing the parents list. This can be done, but since multiple
parents are going away to be replaced by drive shortcuts we return an
error for now.

See #4013
2020-03-31 17:28:32 +01:00
Nick Craig-Wood
5fa6a28f70 dedupe: Stop dedupe deleting files with identical IDs #4013
Before this change if there were two files with the same name and the
same ID in the same directory, dedupe would delete one of them but
since these are actually the same file (with the same ID) then both
files would be deleted leading to data loss.

This should never actually happen, however it did happen as part of a
bug introduced in rclone which was fixed by

dfc7215bf9 drive: fix duplicate items when using --drive-shared-with-me #4018

This change checks to see if any of the duplicates have the same ID
and if they do it refuses to delete them.
2020-03-31 17:28:26 +01:00
Nick Craig-Wood
9a5178be7a test_all: optionally run Cleanup before cleaning a remote
Add this to the QingStor remote to stop it running out of buckets
2020-03-31 15:56:58 +01:00
Nick Craig-Wood
66e08e0cc8 mount: warn if --allow-non-empty used on Windows and clarify docs 2020-03-31 12:16:03 +01:00
Nick Craig-Wood
b5f78cd7b4 b2: ignore directory markers at the root also
See: https://forum.rclone.org/t/issue-with-lsf-r-files-only-first-line-is-blank/15229/
2020-03-31 11:46:17 +01:00
Nick Craig-Wood
ef99ca68aa gcs: ignore directory markers at the root also
See: https://forum.rclone.org/t/issue-with-lsf-r-files-only-first-line-is-blank/15229/
2020-03-31 11:46:10 +01:00
Nick Craig-Wood
a5c2f2c138 s3: ignore directory markers at the root also
See: https://forum.rclone.org/t/issue-with-lsf-r-files-only-first-line-is-blank/15229/
2020-03-31 11:45:52 +01:00
Nick Craig-Wood
b2c9ef23fa sync: make --track-renames tests only check rename count if expecting renames 2020-03-31 10:58:49 +01:00
Nick Craig-Wood
5f9be3dd05 sync: make --track-renames tests less fragile by using rename stat
Before this change these tests attempted to measure transfers and
checks in lieu of having a rename statistic with a very complicated
heuristic.

The change switches over to using the rename statistic which should be
100% reliable.
2020-03-30 18:30:33 +01:00
Nick Craig-Wood
b5f1bebc52 fs: add renames statistic for file and directory renames 2020-03-30 18:22:28 +01:00
Nick Craig-Wood
ad9c7ff7ed sync: Fix incorrect "nothing to transfer" message using --delete-before
Before this change the first pass of --delete-before would output
"There was nothing to transfer" and then proceed to transfer things.

This makes sure the message isn't printed in the delete phase.

See: https://forum.rclone.org/t/incorrect-debug-output/15267
2020-03-30 16:45:02 +01:00
Nick Craig-Wood
1af9fcbbfa Add Samantha McVey to contributors 2020-03-28 18:33:34 +00:00
Samantha McVey
6765303de4 docs: demystify how crypt stores encryption password in config
Without explaining exactly how this is generated, it can be confusing
and worrying to not know how the password that encrypts your data is
stored.

This also brings peace of mind to the user that even though
the same password is obscured differently each time, all the data to
get back to the original password remains. Explaining how it works
is much better than asking the reader of the documentation to trust
a black-box, magical mechanism.
2020-03-26 17:14:45 +00:00
Nick Craig-Wood
304ee97944 Add harry to contributors 2020-03-26 16:23:46 +00:00
harry
d91a547d59 dropbox: make error insufficient space to be fatal 2020-03-26 16:19:50 +00:00
harry
7d9ca3998e drive: Extend --drive-stop-on-upload-limit to respond to teamDriveFileLimitExceeded.
Fixes #3979
2020-03-26 16:19:50 +00:00
harry
9aa32bc269 onedrive: make error quotaLimitReached to be fatal - Fixes #4089 2020-03-26 16:19:50 +00:00
Nick Craig-Wood
d9c8c47e02 onedrive: add missing drive on config - fixes #4068
Before this change we queried /me/drives for a list of the user's
drives and asked the user to choose. Sometimes this does not return
the user's main drive for reasons unknown.

After this change we query /me/drives first then /me/drive and add
that to the list of drives if it wasn't already there.
2020-03-24 08:44:10 +00:00
Nick Craig-Wood
243a868a5b touch: add --localtime flag to make --timestamp localtime not UTC
Fixes #4067
2020-03-23 17:12:56 +00:00
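A hedged usage sketch (the remote path and timestamp are made up, and the timestamp format is assumed to be the one --timestamp already accepts):

```
# Set the file time, interpreting the given timestamp as local time rather than UTC
rclone touch --timestamp 2020-03-23T17:00:00 --localtime remote:path/file.txt
```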
Nick Craig-Wood
6c351c15f8 Add Mark Spieth to contributors 2020-03-23 17:12:56 +00:00
Mark Spieth
45b63e2d45 oauth: Use custom http client so that --no-check-certificate is honored by oauth token fetch
Fixes #4085
2020-03-22 12:28:19 +00:00
Nick Craig-Wood
32df634cb6 sync: fix --track-renames-strategy modtime test on remotes which don't support modtime 2020-03-22 11:52:40 +00:00
Nick Craig-Wood
e569977c06 Add @Max-Sum as the union backend maintainer 2020-03-21 18:16:42 +00:00
Nick Craig-Wood
b49ab9f9c7 Add Max Sum to contributors 2020-03-21 18:13:05 +00:00
Max Sum
78a9e7440a union: Implement multiple writable remotes 2020-03-21 18:11:24 +00:00
Nick Craig-Wood
93f5125f51 sync: fix --track-renames-strategy tests
This commit corrects the logic for --track-renames-strategy which
broke the integration tests.

It also improves the parsing of the argument and adds a test for that.
2020-03-21 17:39:51 +00:00
Nick Craig-Wood
410e17a2ec Add Elan Ruusamäe to contributors 2020-03-21 17:39:51 +00:00
Elan Ruusamäe
23f7943645 Dropbox: Add info about required redirect url 2020-03-20 14:51:53 +00:00
Nick Craig-Wood
1108895180 Add Bernd Schoolmann to contributors 2020-03-20 14:01:02 +00:00
Bernd Schoolmann
158870bcdb fs: Add --track-renames-strategy for configurable matching criteria for --track-renames
This commit adds the `--track-renames-strategy` flag which allows the
user to choose the strategy for tracking renames when using the
`--track-renames` flag.

This can be "hash" or "modtime" or both currently.

This, when used with `--track-renames-strategy modtime` enables
support for tracking renames in encrypted remotes.

Fixes #3696
Fixes #2721
2020-03-20 13:04:56 +00:00
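For example, something along these lines (the local path and the crypt remote name are hypothetical):

```
# Track renames on a crypt remote by modification time instead of hash
rclone sync --track-renames --track-renames-strategy modtime /data secret:backup
```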
Nick Craig-Wood
36717c7d98 serve restic: fix tests after restic project removed vendoring 2020-03-18 16:50:01 +00:00
Fernando
1d3987bbbd docs: Fix typo and reword contact.md 2020-03-18 14:21:58 +00:00
Nick Craig-Wood
472d4799d1 qingstor: make rclone cleanup remove pending multipart uploads older than 24h 2020-03-18 12:49:21 +00:00
Nick Craig-Wood
84caf1e158 qingstor: try harder to cancel failed multipart uploads 2020-03-18 12:49:21 +00:00
Nick Craig-Wood
72eba4dbb6 Add greatroar to contributors 2020-03-18 12:49:21 +00:00
greatroar
0f20f23651 cache: move methods used for testing into test file 2020-03-16 18:41:32 +00:00
Nick Craig-Wood
47e2d5c415 config: fsync the config file after writing #3411
This should help with data integrity
2020-03-16 18:20:16 +00:00
Nick Craig-Wood
1e9b8e043a Add fishbullet to contributors 2020-03-16 17:17:08 +00:00
fishbullet
eb0fc21533 fs: filter flags ability to read from stdin - fixes #4034 2020-03-16 17:16:50 +00:00
Lars Lehtonen
a6a2eec392 backend/b2: remove unused largeUpload.clearUploadURL() 2020-03-16 17:11:19 +00:00
Nick Craig-Wood
c227a90b52 sync: implement --order-by xxx,mixed 2020-03-16 15:50:04 +00:00
Nick Craig-Wood
1e3d899db8 sync: replace container/heap with github.com/aalpar/deheap 2020-03-16 15:50:04 +00:00
Nick Craig-Wood
84369286df vendor: add github.com/aalpar/deheap 2020-03-16 15:50:04 +00:00
Nick Craig-Wood
4c82b1f3c6 operations: fix --max-transfer test with jottacloud
Jottacloud was deduplicating the uploads, so make a different upload
each time
2020-03-16 14:05:49 +00:00
Nick Craig-Wood
f94257115f operations: skip part of the --max-transfer test under chunker
This test relies on there being 1 file copied and chunker copies several
2020-03-16 14:05:05 +00:00
Nick Craig-Wood
77e94be280 onedrive: implement --onedrive-server-side-across-configs - fixes #4058 2020-03-15 21:10:23 +00:00
Nick Craig-Wood
37d5e75a56 operations: fix --max-transfer test to have a higher threshold
Before this change backends which introduce overhead (eg crypt) were
failing to upload the first file.

This change increases the threshold to 2k to allow the first file to
go through even with some overhead but the next file to definitely
fail.
2020-03-15 11:13:27 +00:00
Nick Craig-Wood
dc06973796 s3: use rclone's low level retries instead of AWS SDK to fix listing retries
In 5470d34740 "backend/s3: use low-level-retries as the number
of SDK retries" we switched over to using the AWS SDK low level
retries instead of rclone's low level retry logic.

This had the unfortunate effect that retrying listings to correct XML
Syntax errors failed on non-S3 backends such as CEPH. The AWS SDK was
also retrying the XML Syntax error request, which doesn't make sense.

This change turns off the AWS SDK retries in favour of just using
rclone's retry logic.
2020-03-14 18:04:24 +00:00
Nick Craig-Wood
b03462ab04 Add Patryk Jakuszew to contributors 2020-03-13 21:45:09 +00:00
Patryk Jakuszew
d4e87a841d fs/log: add support for syslog LOCAL facilities - fixes #4061 2020-03-13 21:44:52 +00:00
Nick Craig-Wood
6d0063d685 operations: Make --max-transfer more accurate
Before this change we checked the transfer was out of range only
before the Read call. This means that we returned all the data to the
reader before declaring an error. This means that some backends wrote
the file even though an error was returned.

This fix checks the transfer after the Read as well, and chops the
excess characters off the read data if we are over the limit so that
we don't ever deliver all the data.

This fixes the tests introduced as part of 6f1766dd9e and #2672
on backends other than local.
2020-03-13 16:40:38 +00:00
Nick Craig-Wood
6fdd7149c1 drive: don't overwrite the description on server side copy
See: https://forum.rclone.org/t/is-there-a-way-to-sync-while-keeping-file-description-on-the-destination/14609
2020-03-12 10:39:00 +00:00
Harry
fdb07f2f89 onedrive: Added maximum chunk size limit warning in the docs
If the chunk size is more than 250M (262,144,000 bytes) then the API throws the following error:

Microsoft.SharePoint.Client.InvalidClientQueryException: The request message is too big. The server does not allow messages larger than 262144000 bytes.
2020-03-10 15:14:08 +00:00
Nick Craig-Wood
a433698b00 Add Joachim Brandon LeBlanc to contributors 2020-03-10 12:00:30 +00:00
Anuar Serdaliyev
f14871caf7 accounting: Correct exitcode on Transfer Limit Exceeded flag. Fixes #3203
Before this change the exit code for transfer limit exceeded was
incorrect. This was because the `resolveExitCode` function unwraps the
error thus reading the underlying error which is not the same as the
error it was comparing to (`ErrorMaxTransferLimitReached`).

This change fixes it by splitting the error definition in two so that
when the Fatal error is unwrapped we match against
`ErrorMaxTransferLimitReached` however when we return the error we
return `ErrorMaxTransferLimitReachedFatal`.
2020-03-10 12:00:10 +00:00
Joachim Brandon LeBlanc
132ce94139 backend/s3: use the provided size parameter when allocating a new memory pool - fixes #4047 (#4049) 2020-03-09 16:56:21 +00:00
Nick Craig-Wood
a492c0fb0e local: speed up multi thread downloads by using sparse files on Windows
Before this change rclone didn't use sparse files on Windows. This
meant that when you downloaded a file with multithread download,
rclone wrote the entire file with zeros first if the first write was
not at the start of the file.

This change makes the file be sparse on Windows. Linux/macOS files
were already sparse.
2020-03-09 10:55:52 +00:00
Nick Craig-Wood
dfc7215bf9 drive: fix duplicate items when using --drive-shared-with-me #4018
Before this change shared with me items with multiple parents (ie most
of them that aren't in the root) would appear twice in the directory
listings.

This fixes the problem by doing an early exit for shared with me
items.
2020-03-07 16:46:53 +00:00
Nick Craig-Wood
38e59ebdf3 drive: fix missing files when using --fast-list and --drive-shared-with-me
This bug was introduced here by removing some necessary code detecting
shared with me items at the root with no parents.

4453fa4ba6 "drive: fix --fast-list when using appDataFolder"

This fix reverts that part of the patch.

Fixes #4018
2020-03-07 16:46:53 +00:00
Yves G
5ee24f804f webdav: report full and consistent usage with about
— allow either Used or Available to be ==0 (remote full or empty)
— compute Total if both values are received
2020-03-05 15:10:19 +00:00
Nick Craig-Wood
747edf42c1 azureblob: document container level SAS URL from root now needs container
In 8a0775ce3c which was released in v1.49.0 we inadvertently
stopped SAS URLs working from the root without a container name.

Previously to this change you could use `rclone mount azsas:` and it
would actually be equivalent to `rclone mount azsas:container`. After
this change, only `rclone mount azsas:container` will work, `rclone
mount azsas:` will have a directory in the root called "container".

After some discussion it was decided not to revert this change as the
current behaviour is more logical and in line with the similar
behaviour for the b2 backend.

Instead the documentation was updated to show exactly how container
level SAS URLs behave.

Fixes #4028
2020-03-05 14:56:36 +00:00
Nick Craig-Wood
ce23cb2093 Add evileye to contributors 2020-03-05 14:07:32 +00:00
evileye
6ff0bb825e mount: fix fail because of too long volume name - fixes #4026 2020-03-05 13:57:20 +00:00
Lars Lehtonen
fef2c6bf7a backend/s3: replace deprecated session.New() with session.NewSession() 2020-03-05 11:34:10 +00:00
Ishuah Kariuki
0c6f14c694 copy/sync: only create empty directories when they don't exist on the remote
Sync/copy now only creates empty directories when they don't exist on the remote (--create-empty-src-dirs flag) - fixes #2800
2020-03-03 16:24:22 +00:00
Nick Craig-Wood
1c800efbac Add Robert-André Mauchin to contributors 2020-03-03 12:41:08 +00:00
Robert-André Mauchin
e2e400e63c Use proper import path go.etcd.io/bbolt
Signed-off-by: Robert-André Mauchin <zebob.m@gmail.com>
2020-03-03 12:40:52 +00:00
Nick Craig-Wood
4d8d1e287b googlephotos: fix "concurrent map write" error - fixes #4003
This adds a bit of missed locking around the uploaded info to fix the
concurrent map write.

All the other accesses have locking - this one must have got missed.
2020-03-02 18:12:46 +00:00
Nick Craig-Wood
452fdbf1c1 Add Franklyn Tackitt to contributors 2020-03-02 17:31:23 +00:00
Nick Craig-Wood
51686bd1ef Add Shing Kit Chan to contributors 2020-03-02 17:31:23 +00:00
Gary Kim
38a4d50e73 rcd: Add Prometheus metrics support - fixes #3858
Signed-off-by: Gary Kim <gary@garykim.dev>
2020-03-01 09:58:34 +00:00
Gary Kim
3fd38cbe8d vendor: add github.com/prometheus/client_golang/prometheus
Signed-off-by: Gary Kim <gary@garykim.dev>
2020-03-01 09:58:34 +00:00
Franklyn Tackitt
2b3d13a841 fs: Use --cutoff-mode hard,soft,cautious instead of 3 --max-transfer-mode flags
Fixes #2672
2020-03-01 09:49:55 +00:00
Shing Kit Chan
6f1766dd9e fs: Add support for --max-transfer-cutoff modes #2672
This also adds max transfer cut off check for server side copies too
2020-03-01 09:49:55 +00:00
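A rough sketch of how the combined flags might be used (the paths and remote name are hypothetical):

```
# Stop starting new transfers once roughly 10G has been moved ("soft" mode);
# "hard" would instead stop the transfer session as soon as the limit is hit.
rclone copy --max-transfer 10G --cutoff-mode soft /data remote:backup
```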
Nick Craig-Wood
7d70eb0346 ftp: attempt to work-around pureftp sending spurious 150 messages
pureftpd has a bug where it sends messages like this

```
    150-Accepted data connection\r\n
        Response code: File status okay; about to open data connection (150)
        Response arg: Accepted data connection
    150 32768.0 kbytes to download\r\n
    150 0.014 seconds (measured here), 1665.27 Mbytes per second\r\n
```

The last `150` is treated as a new response - the previous `150` should have been `150-`.

This means that rclone sees the `150 0.014 seconds (measured here),
1665.27 Mbytes per second` as a reply to the next message and reports
it as an error.

This fix ignores that specific message when it is received in the
`Close` method. It dumps the FTP connection after as it is out of
sync.

See: #3984
Fixes #3445
2020-03-01 09:17:51 +00:00
Nick Craig-Wood
bae2644667 Add Yves G to contributors 2020-03-01 09:17:51 +00:00
Valeriy.Vyrva
f6f95822c1 doc: fix links in generated documentation 2020-03-01 09:14:37 +00:00
Yves G
b1b5e09081 vfs: make df output more consistent on a rclone mount.
When 2 values are known among vfs:{free,used,total}, compute the 3rd
2020-03-01 08:54:07 +00:00
Nick Craig-Wood
2b268f9724 build: fixup formatting after go1.14 go fmt changes 2020-02-28 16:58:33 +00:00
Nick Craig-Wood
7a5a74cecb crypt: clarify that directory_name_encryption depends on filename_encryption
See: https://forum.rclone.org/t/directory-name-encryption-is-set-to-always-false-when-choosing-filename-encryption-off/14600
2020-02-28 16:26:45 +00:00
Nick Craig-Wood
54a0c6b8ad Add valery1707 to contributors 2020-02-28 16:26:45 +00:00
valery1707
1ad23c4dc8 mailru: Describe 2FA requirements (#4015)
Fair enough
2020-02-28 16:54:09 +03:00
Dan Walters
7586a345ff dlna: cds: use modification time as date in dlna metadata
We haven't been outputting anything for this until now, which leads to my
Samsung showing an epoch/1970 date for all files.
2020-02-27 18:05:18 +01:00
Nick Craig-Wood
393b94bb70 vfs: add --vfs-read-wait and --vfs-write-wait flags
--vfs-read-wait duration    Time to wait for in-sequence read before seeking. (default 5ms)
    --vfs-write-wait duration   Time to wait for in-sequence write before giving error. (default 1s)

See: https://forum.rclone.org/t/constantly-high-iowait-add-log/14156
2020-02-27 16:12:33 +00:00
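A minimal sketch, assuming a mount point of /mnt/remote (the values are illustrative only):

```
# Raise the in-sequence wait times from their defaults (5ms read, 1s write)
rclone mount remote: /mnt/remote --vfs-read-wait 20ms --vfs-write-wait 2s
```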
Nick Craig-Wood
e3c11c9ca1 mount: add --async-read flag to disable asynchronous reads
See: https://forum.rclone.org/t/constantly-high-iowait-add-log/14156
2020-02-27 16:12:33 +00:00
Nick Craig-Wood
3c91abce74 vfs: fix race condition caused by unlocked reading of Dir.path 2020-02-27 15:50:41 +00:00
Nick Craig-Wood
87d856d71b cache: disable race tests until bbolt is fixed
bbolt fails with "unsafe pointer conversion" under the go1.14 race
detector.

Disable race tests until https://github.com/etcd-io/bbolt/issues/187
is fixed.
2020-02-27 08:05:28 +00:00
Nick Craig-Wood
3855c003ce build: update to use go1.14 for the build 2020-02-26 21:26:47 +00:00
Nick Craig-Wood
abb9f89f65 vendor: update all dependencies 2020-02-26 21:26:46 +00:00
Nick Craig-Wood
17b4058ee9 mount: constrain to go1.13 or above otherwise bazil.org/fuse fails to compile 2020-02-26 21:26:46 +00:00
Nick Craig-Wood
9663f9b2ab mount: ignore --allow-root flag with a warning as it has been removed upstream
For background see: https://github.com/bazil/fuse/issues/144
2020-02-26 21:11:25 +00:00
Nick Craig-Wood
d6e10dba33 docs: fix confusion over processor names in download table
See: https://forum.rclone.org/t/intel-processor-download-help/14558
2020-02-26 16:39:46 +00:00
Nick Craig-Wood
da5cbc194a ftp: fix lockup on Close failures when using concurrency limit #3984
Before this change if rclone failed to close a file download for some
reason it would leak a concurrency token. When all the tokens were
leaked then rclone would lock up.

This fix returns the concurrency token regardless of the error status.
2020-02-25 14:38:12 +00:00
Nick Craig-Wood
e8eb658ba5 ftp: fix lockup on failed upload when using concurrency limit #3984
Before this change if rclone failed to upload a file for some reason
it would leak a concurrency token. When all the tokens were leaked
then rclone would lock up.

The fix returns the concurrency token regardless of the error state.
2020-02-25 14:38:12 +00:00
Nick Craig-Wood
28f69f25a0 ftp: fix lockup when using concurrency limit on failed connections #3984
Before this change if rclone failed to make an FTP connection for some
reason it would leak a concurrency token. When all the tokens were
leaked then rclone would lock up.

The fix returns the concurrency token if creating the FTP connection
returns an error.
2020-02-25 14:38:12 +00:00
Nick Craig-Wood
07e4b9bb7f operations: fix multithread copy test to use the correct modify window
In bde0334bd8 "operations: fix setting the timestamp on Windows
for multithread copy" the test for multithread copy failed to take
into account the modify window of the remote under test.
2020-02-25 13:30:35 +00:00
Aleksandar Jankovic
708b967f15 backend/s3: fix multipart abort context
S3 couldn't abort a multipart upload when the context was canceled,
because the canceled context prevented the abort request from being sent.
2020-02-25 12:11:32 +01:00
Dan Walters
7e2568a312 dlna: cds: don't specify childCount at all when unknown
Basically, solving #3541 with a different approach - bringing in
the upstream upnpav module, and changing ChildCount from int to a
*int to avoid childCount="0" in the XML output when that value is
simply unknown.

Current approach is leading to some recursion issues and according
to the DLNA spec it shouldn't be necessary, anyway.
2020-02-25 08:41:00 +01:00
Nick Craig-Wood
bde0334bd8 operations: fix setting the timestamp on Windows for multithread copy
Before this fix we attempted to set the modification time on the file
when it was open. This works fine on Linux but not on Windows. The
test was also incorrect, testing the source file rather than the
destination file.

This closes the file before setting the modification time and fixes
the tests.

Fixes #3994
2020-02-24 17:30:09 +00:00
Aleksandar Janković
5470d34740 backend/s3: use low-level-retries as the number of SDK retries
Amazon S3 is built to handle different kinds of workloads.
In rare cases where S3 is not able to scale for whatever reason users
will face status 500 errors.
Main mechanism for handling these errors are retries.
Amount of needed retries varies for each different use case.

This change is making retries for s3 backend configurable by using
--low-level-retries option.
2020-02-24 16:43:44 +01:00
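A hedged example (the remote name, bucket and paths are hypothetical):

```
# Allow more low-level retries, which now also covers S3 listing retries
rclone copy --low-level-retries 20 /data s3remote:bucket/path
```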
Maciej Zimnoch
ac9cb50fdb backend/s3: use memory pool for buffer allocations
Currently each multipart upload allocated its own buffers, which were
discarded after the file upload. Subsequent files couldn't leverage
already allocated memory, which resulted in inefficient memory
management. This change introduces a backend memory pool keeping
memory chunks which can be used during object operations.

Fixes #3967
2020-02-24 13:32:32 +01:00
buengese
4a8b548add jottacloud: use RawURLEncoding when decoding base64 encoded login token - fixes #3945 2020-02-22 23:12:56 +01:00
Lars Lehtonen
481c8a40ea backend/azureblob: fmt nit 2020-02-20 15:50:53 +01:00
Lars Lehtonen
25ef3a281b backend/azureblob: remove unused Object.parseTimeString() 2020-02-20 15:50:53 +01:00
Lars Lehtonen
219bd97e8a backend/qingstor: lint fix 2020-02-14 18:11:01 +00:00
Lars Lehtonen
8b14cd24aa backend/qingstor: prune multiUploader.list() 2020-02-14 18:11:01 +00:00
Nick Craig-Wood
3893c14889 operations: make rcat obey --ignore-checksum 2020-02-14 12:47:11 +00:00
Nick Craig-Wood
c41fbc0f90 operations: move Rcat tests back to main test file now S3 prob is fixed 2020-02-14 12:26:52 +00:00
Nick Craig-Wood
f45425e5a9 operations: factor CommonHash out of Copy for re-use elsewhere 2020-02-14 12:12:10 +00:00
Nick Craig-Wood
bd9fd629bc test: add TestS3MinioEdge to test leading edge minio too #3934 2020-02-13 11:01:06 +00:00
Nick Craig-Wood
3b19f48929 testserver: add provider to TestS3Minio #3934
This is necessary to pass the TestIntegration/FsMkdir/FsEncoding
listing tests.
2020-02-13 11:01:06 +00:00
Outvi V
4996edc030 oauthutil: make instructions for rclone authorize more robust
It appends "--" to the rclone authorize command line before client_id,
in case that the client_id or client_secret has a prefix of "-"
(OneDrive's does), which affects the argument parsing.
2020-02-13 10:18:23 +00:00
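A sketch of what the generated instructions might look like, with placeholder credentials:

```
# The "--" stops a client_id or client_secret beginning with "-" being parsed as a flag
rclone authorize "onedrive" -- "YOUR_CLIENT_ID" "YOUR_CLIENT_SECRET"
```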
Michał Matczuk
964f1f6a7e fs/accounting: Restore "Max number of stats groups reached" log line
Changed log level to debug.
2020-02-12 21:21:25 +00:00
Michał Matczuk
e75c1f70bb backend/s3: Added 500 as retryErrorCode
The error code 500 Internal Error indicates that Amazon S3 is unable to handle the request at that time. The error code 503 Slow Down typically indicates that the requests to the S3 bucket are very high, exceeding the request rates described in Request Rate and Performance Guidelines.

Because Amazon S3 is a distributed service, a very small percentage of 5xx errors are expected during normal use of the service. All requests that return 5xx errors from Amazon S3 can and should be retried, so we recommend that applications making requests to Amazon S3 have a fault-tolerance mechanism to recover from these errors.

https://aws.amazon.com/premiumsupport/knowledge-center/http-5xx-errors-s3/
2020-02-12 11:43:18 +00:00
Michał Matczuk
19a4d74ee7 backend/s3: Fail fast multipart upload
When a part upload request fails, an error is returned and gCtx is cancelled.
This does not prevent other parts from being tried.
They immediately fail due to the canceled context, but are retried by rclone anyway...

Example AWS debug output

```
-----------------------------------------------------
2020/02/11 14:12:17 DEBUG: Retrying Request s3/UploadPart, attempt 4
2020/02/11 14:12:17 DEBUG: Request s3/UploadPart Details:
---[ REQUEST POST-SIGN ]-----------------------------
PUT /backuptest-rclone/huge/file.db?partNumber=11&uploadId=190939b4-3c43-4b98-ac11-92303e3f11b0 HTTP/1.1
Host: 192.168.100.99:9000
User-Agent: aws-sdk-go/1.23.8 (go1.13.1; linux; amd64)
Content-Length: 5242880
Authorization: AWS4-HMAC-SHA256 Credential=miniouser/20200211/us-east-1/s3/aws4_request, SignedHeaders=content-length;content-md5;expect;host;x-amz-content-sha256;x-amz-date, Signature=3fc03a01f651cec09b05290459e9ceb26db9a8aa00c4e1b16e8cf5617eb81da8
Content-Md5: XzY+DlipXwbL6bvGYsXftg==
Expect: 100-Continue
X-Amz-Content-Sha256: c036cbb7553a909f8b8877d4461924307f27ecb66cff928eeeafd569c3887e29
X-Amz-Date: 20200211T131217Z
Accept-Encoding: gzip

-----------------------------------------------------
http://192.168.100.99:9000/backuptest-rclone/huge/file.db?partNumber=11&uploadId=190939b4-3c43-4b98-ac11-92303e3f11b0
2020/02/11 14:12:17 DEBUG: Response s3/UploadPart Details:
---[ RESPONSE ]--------------------------------------
HTTP/1.1 500 InternalServerError
Content-Length: 0
-----------------------------------------------------
UploadPartWithContext() error InternalError: We encountered an internal error. Please try again
	status code: 500, request id: , host id:

2020/02/11 14:12:18 DEBUG ERROR: Request s3/UploadPart:
---[ REQUEST DUMP ERROR ]-----------------------------
context canceled
------------------------------------------------------
UploadPartWithContext() error RequestCanceled: request context canceled
caused by: context canceled
2020/02/11 14:12:20 DEBUG ERROR: Request s3/UploadPart:
---[ REQUEST DUMP ERROR ]-----------------------------
context canceled
------------------------------------------------------
UploadPartWithContext() error RequestCanceled: request context canceled
caused by: context canceled
2020/02/11 14:12:22 DEBUG ERROR: Request s3/UploadPart:
---[ REQUEST DUMP ERROR ]-----------------------------
context canceled
------------------------------------------------------
UploadPartWithContext() error RequestCanceled: request context canceled
caused by: context canceled
```

This adds a fail fast behaviour in case the context was cancelled.
2020-02-12 11:40:34 +00:00
Nick Craig-Wood
55b5eded23 Add Frederick Zhang to contributors 2020-02-12 11:33:56 +00:00
Lars Lehtonen
3dbcf0af2d backend/cache: Remove Unused Functions
This removes the unused functions run.writeRemoteRandomBytes(), run.writeObjectRandomBytes(), run.listPath(), Directory.parentRemote() and Persistent.dumpRoot().
2020-02-12 11:23:57 +00:00
Lars Lehtonen
4e1a511f88 vfs: explicitly ignore unused variables 2020-02-12 11:20:54 +00:00
Frederick Zhang
b71e1a16b1 docs: update account type during OneDrive app registration 2020-02-12 11:04:49 +00:00
Nick Craig-Wood
ec1271818f mount2: hide mount2 command for the moment 2020-02-11 14:28:13 +00:00
Nick Craig-Wood
8318020387 Implement mount2 with go-fuse
This passes the tests and works efficiently with the non sequential vfs ReadAt fix.
2020-02-11 14:28:13 +00:00
Nick Craig-Wood
c38d7be373 vendor: add github.com/hanwen/go-fuse/v2@master for mount2 2020-02-11 14:28:13 +00:00
Nick Craig-Wood
dc31212c3d Add Tim Gallant to contributors 2020-02-11 14:28:13 +00:00
Lars Lehtonen
ac60b36e77 backend/premiumizeme: prune unused functions 2020-02-11 12:17:35 +00:00
Tim Gallant
1d73f071f6 fs: improve log output when no changes are made - fixes #3454
- changes a few log messages to debug level
- adds a log message for when 0 bytes are transferred
2020-02-11 12:16:15 +00:00
Nick Craig-Wood
5c869d5bd3 cmd: make stats be printed on non-zero exit code
This also rationalises the exit sequence so that dumping
goroutines/open files happens regardless of exit error state.

See: https://forum.rclone.org/t/transfer-stats-on-non-0-exit/14211
2020-02-10 16:40:43 +00:00
Nick Craig-Wood
a54210a2e4 docs: restore missing mount --daemon docs
This was done as part of ebfeec9fb4 which unfortunately patched
the auto generated files.
2020-02-10 15:29:39 +00:00
Nick Craig-Wood
040d226028 docs: restore missing QingStor doc fix
This was done as part of 64fce8438b which unfortunately patched
the auto generated files.
2020-02-10 15:29:39 +00:00
Nick Craig-Wood
8b664c3ec5 docs: restore lost spelling fixes
These came from 3d424c6e08 which unfortunately added the
docs to the auto generated files.
2020-02-10 15:29:39 +00:00
Nick Craig-Wood
102a38bb95 docs: restore lost VFS poll interval docs
These came from 3d475dc0ee which unfortunately added the
docs to the auto generated files.
2020-02-10 15:29:39 +00:00
Nick Craig-Wood
7a54e13110 docs: restore lost VFS case insensitive docs
These came from 1c4e33d4ad which unfortunately
added the docs to the auto generated files.
2020-02-10 15:29:39 +00:00
Nick Craig-Wood
feee92c790 docs: restore lost mount share docs
These came from 162fdfe455 which unfortunately added the docs to
the auto generated files.
2020-02-10 15:29:39 +00:00
Nick Craig-Wood
de93852512 docs: restore lost auth proxy logs
These came from f2a789ea98 which unfortunately added the docs to
the auto generated files.
2020-02-10 15:29:39 +00:00
Nick Craig-Wood
dfb710eab7 gendocs: add autogenerated header to all docs 2020-02-10 15:29:39 +00:00
Nick Craig-Wood
25cfeb2a64 webdav: Fix X-OC-Mtime header for Transip compatibility - fixes #3126 2020-02-10 11:57:12 +00:00
Nick Craig-Wood
90377f5e65 s3: Specify that Minio supports URL encoding in listings
Thanks to @harshavardhana for pointing this out

See #3934 for background
2020-02-09 12:03:20 +00:00
Lars Lehtonen
f1d9bd5eab lib/oauthutil: replace deprecated oauth2.NoContext 2020-02-07 17:49:29 +00:00
Lars Lehtonen
4ee3c21a9d cmd/serve/ftp: replace deprecated os.SEEK_SET with io.SeekStart 2020-02-06 10:58:34 +00:00
Lars Lehtonen
fe6f4135b4 fs/rc: fix dropped error 2020-02-04 11:31:06 +00:00
Nick Craig-Wood
3dfa63b85c onedrive: fix occasional 416 errors on multipart uploads
Before this change, when uploading multipart files, onedrive would
sometimes return an unexpected 416 error and rclone would abort the
transfer.

This is usually after a 500 error which caused rclone to do a retry.

This change checks the upload position on a 416 error and works how
much of the current chunk to skip, then retries (or skips) the current
chunk as appropriate.

If the position is before the current chunk or after the current chunk
then rclone will abort the transfer.

See: https://forum.rclone.org/t/fragment-overlap-error-with-encrypted-onedrive/14001

Fixes #3131
2020-02-01 21:15:07 +00:00
Gary Kim
ff2343475a docs: Update README.md shields for changed CI
Signed-off-by: Gary Kim <gary@garykim.dev>
2020-02-01 20:17:40 +00:00
Nick Craig-Wood
bffd7f0f14 docs: note how to use GitHub's online editor to edit rclone's docs 2020-02-01 13:44:03 +00:00
Nick Craig-Wood
7c55fafe33 Add Durval Menezes to contributors 2020-02-01 13:41:47 +00:00
Nick Craig-Wood
2e7fe06beb Add Dave Koston to contributors 2020-02-01 13:41:47 +00:00
Durval Menezes
8ff91ff31b docs: Update the "Making your own client_id" section for drive
...so it accurately describes the new "Enhanced Security" Google process to get your own Client ID and Client Secret to use with rclone.
2020-02-01 13:41:18 +00:00
Nick Craig-Wood
4d1c616e97 Start v1.51.0-DEV development 2020-02-01 12:32:21 +00:00
Nick Craig-Wood
43daecd89b Version v1.51.0 2020-02-01 10:40:01 +00:00
Dave Koston
9f99c20232 s3: Add StackPath Object Storage Support 2020-01-31 16:05:44 +00:00
Nick Craig-Wood
97ed8db75d drive: hide dangerous config from the configurator
This hides:

- "use_created_date"
- "use_shared_date"
- "size_as_quota"

from the configurator (rclone config) as they interfere with normal
operations and shouldn't be set in a backend config.

They can still be put in the config file by hand and will still work
as variables, etc.

This adds some more docs to "size_as_quota" also.

Fixes #3912
2020-01-31 10:09:33 +00:00
Nick Craig-Wood
f80d98553a dbhashsum: stop it returning UNSUPPORTED on dropbox 2020-01-29 19:49:42 +00:00
Nick Craig-Wood
b3e7a9d01c Add Benjapol Worakan to contributors 2020-01-29 13:38:21 +00:00
Benjapol Worakan
01fc063128 docs: fix spacing error under -P in docs.md 2020-01-29 13:38:03 +00:00
Gary Kim
e71edd5577 cmd: always print elapsed time to tenth place seconds in progress
Before this change, the elapsed time shown with the --progress flag
would not print ".0s", so the width of the elapsed time varied.

This change keeps the line width a bit more consistent by always
printing to a fixed-point.

This does change the displayed value when the elapsed time
is less than 1s, in which case the value used to be shown
in ms or smaller units.

Signed-off-by: Gary Kim <gary@garykim.dev>
2020-01-29 12:28:01 +00:00
Nick Craig-Wood
27a34dd183 Add Motonori IWAMURO to contributors 2020-01-29 12:16:37 +00:00
Motonori IWAMURO
7662f15939 onedrive: add support "Retry-After" header 2020-01-29 12:16:18 +00:00
Nick Craig-Wood
bfd9f32188 lsjson: add --no-mimetype flag, speed up lsf
Before this change we unconditionally fetched the MimeType. On some
backends like s3 and swift this takes an extra transaction, which meant
that `lsf` on those backends was needlessly slow.

This adds an internal option so `lsf` can declare whether it wants
MimeTypes or not depending on whether the user asked for them and an
external flag `--no-mimetype` for `lsjson`.

See: https://forum.rclone.org/t/reliably-setup-incremental-updates/14006/8
2020-01-26 16:38:00 +00:00
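For instance (the remote path is hypothetical):

```
# Skip the MimeType lookup and avoid the extra per-object transaction on s3/swift
rclone lsjson --no-mimetype remote:path
```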
Nick Craig-Wood
9c9cdf1712 fs: don't run tests for --max-duration on remote backends
This is a timing dependent test and to make it long enough so that it
would work with the remotes would make it too long for local tests.

The code paths are identical for local vs non-local so just run on
local.

This fixes the integration tests.
2020-01-26 09:23:03 +00:00
Nick Craig-Wood
0e5537cd25 Add unbelauscht to contributors 2020-01-26 09:23:03 +00:00
unbelauscht
151d0a274e swift: Update OVH API endpoint - Fixes #3890 2020-01-24 17:43:00 +00:00
Nick Craig-Wood
dc77ec4ba1 Add boosh to contributors 2020-01-24 13:29:36 +00:00
boosh
0d7573dd81 fs: Add --max-duration flag to control the maximum duration of a transfer session
This gives you more control over how long rclone will run for, making
it easier to script backups, e.g. via cron. Once the `--max-duration`
time limit is reached, no new transfers will be initiated, but those
already in-flight will be allowed to complete.

Fixes #985
2020-01-24 13:28:56 +00:00
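A hedged cron-style sketch (the paths and remote name are hypothetical):

```
# Cap a nightly backup at two hours; transfers already in flight when the
# limit is reached are allowed to complete
rclone sync --max-duration 2h /data remote:backup
```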
Nick Craig-Wood
e4d2d228bd webdav: add Referer header to fix problems with WAFs - fixes #3868 2020-01-23 15:56:17 +00:00
Nick Craig-Wood
ede36b001b Add Damon Permezel to contributors 2020-01-23 15:40:23 +00:00
Nick Craig-Wood
3afb2a4798 config: use SpaceSepList for argument to --password-command
This is to enable arguments with spaces in.
2020-01-23 15:39:15 +00:00
Nick Craig-Wood
62dbdcdbcc config: use the environment variable which goes with --password-command 2020-01-23 15:39:15 +00:00
Damon Permezel
06df133159 config: add --password-command to allow dynamic config password - fixes #3694 2020-01-23 15:39:15 +00:00
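A minimal sketch, assuming a password manager command such as `pass` (the command and remote name are hypothetical):

```
# Fetch the config decryption password from an external command;
# arguments with spaces work because the value is parsed as a space-separated list
rclone lsd --password-command "pass show rclone/config" remote:
```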
Xiaoxing Ye
0ab2693da6 doc: add desc about gzip and http dump
Fix #3872
2020-01-23 12:42:44 +00:00
Nick Craig-Wood
4b1cb1be43 Add jtagcat to contributors 2020-01-23 12:38:56 +00:00
Nick Craig-Wood
9d96680329 Add thestigma to contributors 2020-01-23 12:38:56 +00:00
jtagcat
d694bb30e5 install.sh: create ~/.config/rclone directory
This allows people with their config already made copy their config to the new server(s), without having to create the directories.
2020-01-23 12:37:46 +00:00
buengese
9c858c3228 crypt: correctly handle trailing dot 2020-01-22 01:40:04 +01:00
thestigma
7125cb10f5 doc: Add note to filtering.md about --files-from and rclone lsf 2020-01-20 17:37:24 +00:00
Nick Craig-Wood
ba421fd069 Add landall to contributors 2020-01-20 17:30:27 +00:00
landall
77e55b8265 hashsum: Add flag --base64 flag - fixes #3663
This flag can be used to output QuickXorHash in the same format as MS
Graph API.
2020-01-20 17:29:58 +00:00
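A hedged example (the remote path is hypothetical and the exact hash name spelling is assumed):

```
# Print QuickXorHash values base64 encoded, matching the MS Graph API format
rclone hashsum QuickXorHash --base64 remote:path
```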
Nick Craig-Wood
18d26e2ddb Revert "vendor: update x/crypto/ssh - to fix Windows password length issues fixes #3798"
This turned out to introduce a regression, not being able to press Enter.

See: #3888 and https://github.com/golang/go/issues/36609

This reverts commit 251cfc100e.
2020-01-20 12:46:52 +00:00
Nick Craig-Wood
f338a2d907 Add Benjamin Richter to contributors 2020-01-20 12:46:46 +00:00
Benjamin Richter
77fa8194f2 onedrive: add Sites.Read.All permission - Fixes #1770 2020-01-20 12:30:19 +00:00
Xiaoxing Ye
ccaca04a5d rcd: move webgui apart; option to disable browser
Fix #3601, #3785
2020-01-20 12:27:55 +00:00
Nick Craig-Wood
84191ac6dc vfs: fix incorrect modtime for mv into mount with --vfs-cache-modes write
When a file has its modtime set while it is open we delay setting the
modtime until the file is closed.

The file is then uploaded in Flush. In Release we check the cached
file has been uploaded by comparing modtimes and/or hashes and upload
it again if it has changed.

Before this change we forgot to change the time on the cached file
when we updated the time on the object, so this meant that Release
reset the time to the wrong time and uploaded the file again on
remotes which don't support hashes (eg crypt).

The fix was to set the modtime of the cached file at the same time we
set the modtime of the remote object. This means that the files check
as identical in Release so it doesn't try to upload the file.

This means that we avoid a double upload and the modtime is correct.

See: https://forum.rclone.org/t/modification-time-with-vfs-cache/13906/8
2020-01-19 12:52:48 +00:00
Nick Craig-Wood
7cf8ea354c dedupe: add missing modes to help string 2020-01-19 11:09:45 +00:00
Nick Craig-Wood
24ef00a258 build: implement a framework for starting test servers during tests
Test servers are implemented by docker containers and run real servers
for rclone to test against.
2020-01-18 16:47:37 +00:00
Nick Craig-Wood
00d30ce0d7 opendrive: implement --opendrive-chunk-size #3707 2020-01-18 11:56:01 +00:00
Nick Craig-Wood
db39adeb3e sftp: open files for update write only to fix AWS SFTP interop - fixes #3776 2020-01-18 11:46:56 +00:00
Nick Craig-Wood
ef7ac088c0 operations: make NewOverrideObjectInfo public and factor uses 2020-01-18 11:41:33 +00:00
Nick Craig-Wood
08a3957880 cache: fix fatal error: concurrent map writes - fixes #2378 2020-01-18 11:27:00 +00:00
Nick Craig-Wood
4499b08afc drive: log an ERROR if an incomplete search is returned 2020-01-18 11:22:26 +00:00
Nick Craig-Wood
422ad38e5b copyurl: add --stdout flag to write to stdout 2020-01-18 11:15:51 +00:00
Nick Craig-Wood
0b7f959433 cmount: when setting dates discard out of range dates
It appears that sometimes Windows/WinFSP/cgofuse sends dates which are
the epoch to rclone.  These dates appear as 1601-01-01 00:00:00 plus
or minus the timezone.

These dates aren't being sent from rclone.

This patch filters dates out before 1601-01-02 so rclone does not
attempt to set them.

See: https://forum.rclone.org/t/bug-corruption-of-modtime-via-vfs-layer/12204
See: https://forum.rclone.org/t/io-error-googleapi-error-403-insufficient-permission-insufficientpermissions/11372
See: https://github.com/billziss-gh/cgofuse/issues/35
2020-01-18 11:13:35 +00:00
Nick Craig-Wood
4b9da601be dropbox: treat insufficient_space errors as non retriable errors
Before this change rclone would keep trying to upload files after
dropbox had signalled it was full.

This change makes the relevant error a non-retriable error.

See: https://forum.rclone.org/t/why-does-a-file-transfer-continue-when-there-is-no-available-storage/13677
2020-01-18 11:10:18 +00:00
Nick Craig-Wood
c789436580 The memory backend
This is a bucket based remote
2020-01-18 10:41:08 +00:00
Nick Craig-Wood
277d94feac fshttp: add --expect-continue-timeout default 1s - fixes #3835
Before this change the expect/continue timeout was set to
--conntimeout, which was 60s by default and too long to wait.

This was noticed when using s3 with a proxy which apparently didn't
support expect / continue properly.

Set --expect-continue-timeout 0 to disable expect/continue.
2020-01-18 09:49:22 +00:00
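For example (the remote name and paths are hypothetical):

```
# Disable expect/continue entirely for a proxy that mishandles it
rclone copy --expect-continue-timeout 0 /data s3remote:bucket
```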
Nick Craig-Wood
6757244918 drive: use multipart resumable uploads for streaming and uploads in mount
Before this change we used non multipart uploads for files of unknown
size (streaming and uploads in mount).  This is slower and less
reliable and is not recommended by Google for files smaller than 5MB.

After this change we use multipart resumable uploads for all files of
unknown length.  This will use an extra transaction so is less
efficient for files under the chunk size, however the natural
buffering in the operations.Rcat call specified by
`--streaming-upload-cutoff` will overcome this.

See: https://forum.rclone.org/t/upload-behaviour-and-speed-when-using-vfs-cache/9920/
2020-01-17 22:03:10 +00:00
Nick Craig-Wood
36157d8ae5 vendor: update t3rm1n4l/go-mega - fixes mega: couldn't login: crypto/aes: invalid key size 0
Fixes #3740
2020-01-17 21:42:32 +00:00
Nick Craig-Wood
251cfc100e vendor: update x/crypto/ssh - to fix Windows password length issues fixes #3798 2020-01-17 21:33:47 +00:00
Nick Craig-Wood
9fb10064ee vendor: run go mod tidy 2020-01-17 21:19:46 +00:00
Nick Craig-Wood
bedeaf23af sugarsync: new backend - fixes #622 2020-01-17 17:39:34 +00:00
Nick Craig-Wood
14e93bfd8a rest: call the Signer with the mutex unlocked
This enables the signer to adjust rest parameters and call rest again
if necessary.
2020-01-17 15:00:23 +00:00
Nick Craig-Wood
65071599a2 rest: don't canonicalise headers starting with *
This leaves a way of adding headers which shouldn't be canonicalised.
2020-01-17 15:00:23 +00:00
Nick Craig-Wood
5403e1c79a lib/encoder: remove noencode tag and update CONTRIBUTING 2020-01-17 15:00:01 +00:00
Nick Craig-Wood
5697caf20b Add Felix Hungenberg to contributors 2020-01-17 15:00:01 +00:00
Felix Hungenberg
68056f08ab docs: Use correct link for sftp wikipedia article 2020-01-17 13:20:42 +00:00
Nick Craig-Wood
81002747c5 dedupe: implement keep smallest too
This is to help deduping google docs and their exported versions if
they accidentally get uploaded to the source again.

See: https://forum.rclone.org/t/my-stupidity-or-a-bug/13861
2020-01-17 13:08:37 +00:00
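A rough sketch (the remote path is hypothetical):

```
# Keep only the smallest file from each group of duplicates
rclone dedupe --dedupe-mode smallest remote:docs
```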
Nick Craig-Wood
1bd9f522e0 build: compress the test builds
This is to use less space and to fix the caddy/rclone interaction
using too much bandwidth.

See: https://caddy.community/t/how-to-stop-caddy-sniffing-mime-types-and-return-a-default-mime-type/6819
2020-01-17 11:47:40 +00:00
Thomas Eales
3a1b41ac22 docs: fixed small typo
is -> it
2020-01-16 15:24:36 +00:00
Nick Craig-Wood
375d25f158 sync: implement --order-by flag to order transfers - fixes #1205 2020-01-16 15:24:36 +00:00
Nick Craig-Wood
0e57335396 docs: document new backend encoder parameter 2020-01-16 14:40:36 +00:00
Nick Craig-Wood
bafe7d5a73 backends: move encoding definitions from fs/encodings 2020-01-16 14:40:36 +00:00
Nick Craig-Wood
c555dc71c2 lib/encoder: move definitions here and remove uint casts 2020-01-16 14:40:36 +00:00
Nick Craig-Wood
3c620d521d backend: adjust backends to have encoding parameter
Fixes #3761
Fixes #3836
Fixes #3841
2020-01-16 14:40:36 +00:00
Nick Craig-Wood
0a5c83ece1 lib/encoder: add string rendering and parsing for encodings 2020-01-16 14:40:36 +00:00
Nick Craig-Wood
1ba5e99152 graphics: add more differently sized logos 2020-01-15 16:25:20 +00:00
Nick Craig-Wood
95c83b37fb vfs: only run TestRWCacheRename on remotes which can rename
This fixes the 1fichier integration tests.
2020-01-15 16:25:04 +00:00
Nick Craig-Wood
89634795b0 Add Paul Tinsley to contributors 2020-01-15 16:25:04 +00:00
Nick Craig-Wood
b88dec51e5 proxy: replace use of bcrypt with sha256
Unfortunately bcrypt only hashes the first 72 bytes of a given input
which meant that using it on ssh keys which are longer than 72 bytes
was incorrect.

This swaps over to using sha256 which should be adequate for the
purpose of protecting in memory passwords where the unencrypted
password is likely in memory too.
2020-01-15 16:23:57 +00:00
Paul Tinsley
f2a789ea98 serve sftp: Add support for public key with auth proxy - fixes #3572 2020-01-15 16:23:57 +00:00
Nick Craig-Wood
63128834da vfs: fix open file renaming on drive when using --vfs-cache-mode writes
Before this change, when uploading files from the VFS cache which were
pending a rename, rclone would use the new path of the object when
specifying the destination remote.  This didn't cause a problem with
most backends as the subsequent rename did nothing, however with the
drive backend, since it updates objects, the incorrect Remote was
embedded in the object.  This caused the rename to apparently succeed
but the object be at the wrong location.

The fix for this was to make sure we upload to the path stored in the
object if available.

This problem was spotted by the new rename tests for the VFS layer.
2020-01-13 17:37:54 +00:00
Nick Craig-Wood
5f822f2660 sftp: fix "failed to parse private key file: ssh: not an encrypted key" error
This error started happening after updating golang/x/crypto which was
done as a side effect of:

3801b8109 vendor: update termbox-go to fix ncdu command on FreeBSD

This turned out to be a deliberate policy of making
ssh.ParsePrivateKeyWithPassphrase fail if the passphrase was empty.

See: https://go-review.googlesource.com/c/crypto/+/207599

This fix calls ssh.ParsePrivateKey if the passphrase is empty and
ssh.ParsePrivateKeyWithPassphrase otherwise which fixes the problem.
2020-01-13 11:05:16 +00:00
Nick Craig-Wood
b81601baff test_all: ignore TestIntegration/FsMkdir/FsPutFiles/FsPutStream/0 on wasabi
This has been reported to Wasabi and they've confirmed as a known
issue that multipart uploads can't be 0 sized even though that is
incompatible with AWS S3.
2020-01-13 09:47:11 +00:00
Nick Craig-Wood
58064bdd2b drive: add --drive-stop-on-upload-limit flag to stop syncs when upload limit reached
If the --drive-stop-on-upload-limit flag is in effect this checks the
error string from Google Drive to see if it is the error you get when
you've breached your 750GB a day limit.

If so then it turns this error into a Fatal error which should stop
the sync.

Fixes #3857
2020-01-12 15:47:31 +00:00
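A hedged example (the paths and remote name are hypothetical):

```
# Turn the daily 750GB upload limit error into a fatal error that stops the sync
rclone sync --drive-stop-on-upload-limit /data gdrive:backup
```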
Nick Craig-Wood
ba01d5e8ab Add Thomas Eales to contributors 2020-01-12 14:23:57 +00:00
Nick Craig-Wood
e510d460c2 Add Kuang-che Wu to contributors 2020-01-12 14:23:57 +00:00
Thomas Eales
42de601fa6 crypt: reorder the filename encryption options
This brings the default `standard` to the top of the list to replace `off`
2020-01-12 14:23:35 +00:00
Kuang-che Wu
3801b8109e vendor: update termbox-go to fix ncdu command on FreeBSD
see 58d4fcbce2
2020-01-12 14:20:12 +00:00
Nick Craig-Wood
e0d41da3e3 vendor: add uncommitted files from previous change 2020-01-11 17:56:14 +00:00
Nick Craig-Wood
92662baceb vendor: update github.com/t3rm1n4l/go-mega to fix mega "illegal base64 data at input byte 22"
Thanks to Ajaja for figuring this out.

See: https://forum.rclone.org/t/problem-to-login-with-mega/12276
2020-01-11 16:47:06 +00:00
Nick Craig-Wood
87c844bce1 onedrive: clarify docs around making your own client ID
See: https://forum.rclone.org/t/onedrive-token-configuration-failure/13634
2020-01-11 16:30:13 +00:00
Nick Craig-Wood
ae340cf7d9 log: factor flags into logflags package - fixes #3792 2020-01-09 13:25:37 +00:00
Nick Craig-Wood
11f501bd44 operations: move interface assertion to tests to remove pflag dependency #3792 2020-01-09 13:25:37 +00:00
Nick Craig-Wood
a4bc4daf30 mounttest: fix unreliable tests on Windows CI
The failure is this, which is not reproducible locally, only on the CI
servers.

    --- FAIL: TestMount/CacheMode=minimal/TestWriteFileOverwrite (1.01s)
        fs.go:351:
            Error Trace:    fs.go:351
                            write.go:65
            Error:          Received unexpected error:
                            open E:testwrite: The request could not be performed because of an I/O device error.
            Test:           TestMount/CacheMode=minimal/TestWriteFileOverwrite

The corresponding ERROR from the log is this:

    ERROR : IO error: truncate C:\Users\runneradmin\AppData\Local\rclone\vfs\local\C\Users\RUNNER~1\AppData\Local\Temp\rclone298719627\testwrite: Access is denied.

Instead of using ioutil.WriteFile this fix uses an equivalent based on
rclone's lib/file which doesn't set the exclusive flag on
Windows. This allows files to be deleted that are open.  It also
deletes existing files if an error is received and retries.
2020-01-09 11:11:49 +00:00
Nick Craig-Wood
51dca8c8d4 bin: update windows test paths for new setup 2020-01-09 10:55:18 +00:00
Nick Craig-Wood
6b3021209a Add Ole Schütt to contributors 2020-01-09 10:36:44 +00:00
Ole Schütt
f263828edc operations: write debug message when hashes could not be checked 2020-01-09 10:35:31 +00:00
Maciej Zimnoch
b7019a91c2 fs/operations: Clear accounting before low level retry
Statistics of transfers which were interrupted are not cleared before
the retry iteration. These transfers completed with over 100 percent.

This change clears transfer accounting before next retry iteration is
done in order to keep numbers in track.

Fixes #3861
2020-01-09 10:32:49 +00:00
Alex Chen
27c3481ea4 build: fix CI for forks and related docs (#3847) 2020-01-09 01:27:44 +08:00
Nick Craig-Wood
706da80d88 mount: don't build on go1.10 as bazil/fuse no longer supports it 2020-01-08 08:44:02 +00:00
Nick Craig-Wood
b6e86b2c7f s3: fix missing x-amz-meta-md5chksum headers for multipart uploads
This reverts "s3: fix DisableChecksum condition" which introduced the
problem.

This reverts commit c05bb63f96.

The code was correct as it stands - the comment was incorrect and this
commit updates it.

See: https://forum.rclone.org/t/s3-upload-md5-check-sum/13706
2020-01-07 19:39:39 +00:00
Nick Craig-Wood
4453fa4ba6 drive: fix --fast-list when using appDataFolder
In listings, if the ID `appDataFolder` is used to list a directory, the
parents of the items returned have the actual ID instead of the alias
`appDataFolder`.  This confused the ListR routine into ignoring all
these items.

This change makes the listing routine accept all parent IDs returned
if there was only one ID in the query.  This fixes the `appDataFolder`
problem. This means we are relying on Google Drive to only return the
items we asked for which is probably OK.

Fixes #3851
2020-01-05 19:57:13 +00:00
Nick Craig-Wood
540fd3f173 local: fix update of hidden files on Windows - fixes #3839 2020-01-05 19:52:22 +00:00
Nick Craig-Wood
1af4bb0c84 Add Tennix to contributors 2020-01-05 19:50:00 +00:00
Tennix
15d19131bd s3: use aws web identity role provider 2020-01-05 19:49:31 +00:00
Nick Craig-Wood
9d993e584b s3: force path style bucket access to off for AWS deprecation
AWS are deprecating path style bucket access so rclone should stop
using it by default for this provider.

This change shouldn't break any workflows as all AWS endpoints support
virtual hosted style lookups of buckets.  It may even improve
performance.

See: https://aws.amazon.com/blogs/aws/amazon-s3-path-deprecation-plan-the-rest-of-the-story/
2020-01-05 17:53:45 +00:00
Nick Craig-Wood
21b17b14a9 vendor: update bazil.org/fuse to fix FreeBSD 12.1 - fixes #3697 2020-01-05 16:35:30 +00:00
Nick Craig-Wood
1b89b38a4c vfs: skip rename tests on remotes which can't rename 2020-01-05 12:34:47 +00:00
Nick Craig-Wood
7242c7ce95 s3: fix multipart upload uploading 0 length files
This regression was introduced by the recent re-write of the s3
multipart upload code.
2020-01-05 12:32:55 +00:00
Nick Craig-Wood
ad2bb86d8c fstests: add test for 0 sized streaming upload 2020-01-05 12:32:55 +00:00
Michał Matczuk
eb10ac346f fs/accounting: Added StatsInfo locking in statsGroups sum function (#3844)
Without the fix we can have a race, example:

```
Write at 0x00c000432039 by goroutine 187:
  github.com/rclone/rclone/fs/accounting.(*StatsInfo).Error()
      fs/accounting/stats.go:495 +0x3f1
  github.com/rclone/rclone/fs/accounting.(*StatsInfo).Error-fm()
      fs/accounting/stats.go:477 +0x55
  github.com/rclone/rclone/fs/walk.listRwalk.func1()
      fs/walk/walk.go:162 +0xd2
  github.com/rclone/rclone/fs/walk.walk.func2()
      fs/walk/walk.go:402 +0x30f

Previous read at 0x00c000432039 by goroutine 184:
  github.com/rclone/rclone/fs/accounting.(*statsGroups).sum()
      fs/accounting/stats_groups.go:351 +0xcae
  github.com/rclone/rclone/fs/accounting.rcTransferredStats()
      fs/accounting/stats_groups.go:132 +0x1f4
```

Fixes #3844
2020-01-04 16:45:24 +00:00
Nick Craig-Wood
7e6fac8b1e s3: re-implement multipart upload to fix memory issues
There have been quite a few reports of problems with the multipart
uploader using too much memory and not retrying possible errors.

Before this change the multipart uploader used the s3manager
abstraction in the AWS SDK.  There are numerous bug reports of this
using up too much memory.

This change re-implements a much simplified version of the s3manager
code specialized for rclone's purposes.

This should use much less memory and retry chunks properly.

See: https://forum.rclone.org/t/memory-usage-s3-alike-to-glacier-without-big-directories/13563
See: https://forum.rclone.org/t/copy-from-local-to-s3-has-high-memory-usage/13405
See: https://forum.rclone.org/t/big-file-upload-to-s3-fails/13575
2020-01-03 22:19:28 +00:00
Nick Craig-Wood
2e0774f3cf Add Thomas Kriechbaumer to contributors 2020-01-03 22:18:23 +00:00
Aleksandar Jankovic
b9fb313f71 fs/accounting: add option to delete stats
Removed PruneAllTransfers because it had no use. startedTransfers are
set to nil in ResetCounters.
2020-01-03 17:44:05 +00:00
Aleksandar Jankovic
0e64df4b4c fs/accounting: consistency cleanup 2020-01-03 17:44:05 +00:00
buengese
69ac04fec9 docs: add GetSky to list of supported providers 2020-01-02 15:37:33 +01:00
buengese
8a2d1dbe24 jottacloud: add support whitelabel versions 2020-01-02 15:37:33 +01:00
Thomas Kriechbaumer
584e705c0c s3: introduce list_chunk option for bucket listing
The S3 ListObject API returns paginated bucket listings, with
"MaxKeys" items for each GET call.

The default value is 1000 entries, but for buckets with millions of
objects it might make sense to request more elements per request, if
the backend supports it. This commit adds a "list_chunk" option for
the user to specify a lower or higher value.

This commit does not add safe guards around this value - if a user
decides to request a too large list, it might result in connection
timeouts (on the server or client).

In AWS S3, there is a fixed limit of 1000, some other services might
have one too.  In Ceph, this can be configured in RadosGW.
2020-01-02 12:15:01 +00:00
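A sketch of how this might be used, assuming the option is exposed on the command line as --s3-list-chunk (the remote and bucket names are hypothetical):

```
# Ask for larger listing pages on a backend that supports more than 1000 keys per request
rclone lsf --s3-list-chunk 2000 s3remote:big-bucket
```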
Nick Craig-Wood
32a3ba9e3f Add Outvi V to contributors 2020-01-02 11:52:43 +00:00
Outvi V
db1c7f9ca8 s3: Add new region Asia Patific (Hong Kong) 2020-01-02 11:10:48 +00:00
Nick Craig-Wood
207474abab sync: add --no-check-dest flag - fixes #3616 2019-12-29 16:47:57 +00:00
Nick Craig-Wood
f754d897e5 Add Wei He to contributors 2019-12-28 13:29:08 +00:00
Wei He
4daecd3158 docs: fix in-page anchor navigation positioning 2019-12-22 23:33:12 +00:00
Cnly
59c75ba442 accounting: fix error count shown as checks - fixes #3814 2019-12-23 03:03:19 +08:00
Nick Craig-Wood
0ecb8bc2f9 s3: fix url decoding of NextMarker - fixes #3799
Before this patch we were failing to URL decode the NextMarker when
url encoding was used for the listing.

The result of this was duplicated listing entries for directories
with >1000 entries where the NextMarker was a file containing a space.
2019-12-12 13:33:30 +00:00
Nick Craig-Wood
1ab4985046 vfs: when renaming files in the cache, rename the cache item in memory too 2019-12-12 13:31:10 +00:00
Nick Craig-Wood
6e683b4359 vfs: fix rename of open files when using the VFS cache
Before this change, renaming an open file when using the VFS cache was
delayed until the file was closed.  This meant that the file was not
readable after a rename even though it was in the cache.

After this change we rename the local cache file and the in memory
cache, delaying only the rename of the file in object storage.

See: https://forum.rclone.org/t/xen-orchestra-ebadf-bad-file-descriptor-write/13104
2019-12-12 13:31:10 +00:00
Nick Craig-Wood
241921c786 vfs: don't cache the path in RW file objects to fix renaming 2019-12-12 13:31:10 +00:00
buengese
a186284b23 asyncreader: fix EOF error 2019-12-10 12:12:29 +00:00
Ivan Andreev
41ba1bba2b chunker: reduce length of temporary suffix 2019-12-09 16:56:32 +00:00
Nick Craig-Wood
50bb9b7bdd check: fix --one-way recursing more directories than it needs to
Before this change rclone traversed all directories in the destination.

After this change rclone doesn't traverse directories in the
destination that don't exist in the source if the `--one-way` flag is
set.

See: https://forum.rclone.org/t/check-with-one-way-flag-should-not-traverses-all-destination-directories/13263
2019-12-07 13:26:55 +00:00
Nick Craig-Wood
4537d9b5cf operations: make reopen code error on NoLowLevelRetry errors - fixes #3777 2019-12-06 10:54:03 +00:00
Nick Craig-Wood
684dbe0e9d local: make source file being updated errors be NoLowLevelRetry errors #3777 2019-12-06 10:54:03 +00:00
Nick Craig-Wood
572c1079a5 fserrors: Make a new NoLowLevelRetry error and don't retry them #3777 2019-12-06 10:54:03 +00:00
Nick Craig-Wood
cb97239a60 build: pin actions/checkout to v1 to fix build failure 2019-12-04 13:48:03 +00:00
Nick Craig-Wood
e48145f959 Add David Cole to contributors 2019-12-04 12:14:30 +00:00
Nick Craig-Wood
2150cf7362 Add email for Aleksandar Janković 2019-12-04 12:14:21 +00:00
David Cole
707e51eac7 docs: correct typo in gui docs 2019-12-04 12:08:52 +00:00
Nick Craig-Wood
0d10640aaa s3: add --s3-copy-cutoff for size to switch to multipart copy
Before this change we used the same (relatively low limits) for server
side copy as we did for multipart uploads.  It doesn't make sense to
use the same limits since no data is being downloaded or uploaded for
a server side copy.

This change introduces a new parameter --s3-copy-cutoff to control
when the switch from single to multipart server size copy happens and
defaults it to the maximum 5GB.

This makes server side copies much more efficient.

It also fixes the erroneous error when trying to set the modification
time of a file bigger than 5GB.

See #3778
2019-12-03 10:37:55 +00:00
Nick Craig-Wood
f4746f5064 s3: fix multipart copy - fixes #3778
Before this change multipart copies were giving the error

    Range specified is not valid for source object of size

This was due to an off by one error in the range source introduced in
7b1274e29a "s3: support for multipart copy"
2019-12-03 10:37:55 +00:00
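For illustration only (this is not rclone's code), a sketch of building the inclusive byte ranges used by a multipart server side copy; the classic off by one is forgetting that the end of the range is inclusive, so each part ends at start+partSize-1 and the last part must stop at size-1.

```go
package main

import "fmt"

// copyRanges splits an object of the given size into inclusive byte ranges
// of at most partSize bytes each, in the "bytes=first-last" form used for
// copy source ranges.
func copyRanges(size, partSize int64) []string {
	var ranges []string
	for start := int64(0); start < size; start += partSize {
		end := start + partSize - 1
		if end > size-1 {
			end = size - 1 // don't run past the end of the source object
		}
		ranges = append(ranges, fmt.Sprintf("bytes=%d-%d", start, end))
	}
	return ranges
}

func main() {
	fmt.Println(copyRanges(10, 4)) // [bytes=0-3 bytes=4-7 bytes=8-9]
}
```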
Aleksandar Janković
c05bb63f96 s3: fix DisableChecksum condition 2019-12-02 15:15:59 +00:00
Danil Semelenov
e2773b3b4e Fix completion with an encrypted config
Closes #3767.
2019-11-29 14:48:12 +00:00
Nick Craig-Wood
d3b0bed091 drive: make sure invalid auth for teamdrives always reports an error
For some reason Google doesn't return an error if you use a service
account with the wrong permissions to list a team drive.  This gives
the user the false impression that the drive is empty.

This change:
- calls teamdrives get on rclone about
- calls teamdrives get when a listing of the root returns no entries

These will both detect a team drive which has the incorrect auth and
work around the issue.

Fixes: #3763
See: https://forum.rclone.org/t/rclone-missing-error-code-when-sas-have-no-permission/13086
See: https://forum.rclone.org/t/need-need-bug-verification-rclone-about-doesnt-work-on-teamdrives-empty-output/13105
2019-11-28 10:51:17 +00:00
Nick Craig-Wood
33c80bbb96 jottacloud: add URL to generate Login Token to config wizard 2019-11-28 10:03:48 +00:00
Nick Craig-Wood
705e4694ed webdav: fix case of "Bearer" in Authorization: header to agree with RFC
Before this change rclone used "Authorization: BEARER token".  However
according to the RFC this should be "Bearer"

https://tools.ietf.org/html/rfc6750#section-2.1

This changes it to "Authorization: Bearer token"

Fixes #3751 and interop with Salesforce Webdav server
2019-11-27 12:04:31 +00:00
Nick Craig-Wood
4fbc90d115 webdav: make nextcloud only upload SHA1 checksums
When using nextcloud, before this change we only uploaded one of SHA1
or MD5 checksum in the OC-Checksum header with preference to SHA1 if
both were set.

This made the MD5 checksums read as an empty string, which made syncing
with checksums less useful than it should be, as all the MD5
checksums were blank.

This change makes it so that we only upload the SHA1 to nextcloud.

The behaviour of owncloud is unchanged as owncloud uses the checksum
as an upload integrity check only and calculates its own checksums.

See: https://forum.rclone.org/t/how-to-specify-hash-method-to-checksum/13055
2019-11-27 11:58:55 +00:00
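A minimal sketch of the idea (not rclone's actual webdav code; the URL is a placeholder and the exact "SHA1:<hex>" header format is an assumption for illustration): compute the SHA1 of the upload and send only that in the OC-Checksum header.

```go
package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"net/http"
	"strings"
)

func main() {
	body := "file contents"
	req, _ := http.NewRequest("PUT", "https://nextcloud.example/remote.php/dav/file", strings.NewReader(body))
	sum := sha1.Sum([]byte(body))
	// Send only the SHA1, never the MD5, so Nextcloud stores a usable checksum.
	req.Header.Set("OC-Checksum", "SHA1:"+hex.EncodeToString(sum[:]))
	fmt.Println(req.Header.Get("OC-Checksum"))
}
```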
Nick Craig-Wood
ed39adc65b Add Fernando to contributors 2019-11-27 11:40:44 +00:00
Fernando
162fdfe455 mount: document remotes as network shares on Windows
Provided instructions for mounting remotes as network shares/network drives in a Windows environment
2019-11-27 11:40:24 +00:00
buengese
8f33c932f2 jottacloud: update docs for new auth method 2019-11-26 13:49:49 +00:00
buengese
4195bd7880 jottacloud: use new auth method used by official client 2019-11-26 13:49:49 +00:00
Marco Molteni
d72f3e31c0 docs/install: explain how to work around macOS Gatekeeper requiring notarization
Fix #3689
2019-11-26 12:33:30 +00:00
Garry McNulty
11f44cff50 drive: add --drive-use-shared-date to use date file was shared instead of modified date - fixes #3624 2019-11-26 12:19:44 +00:00
SezalAgrawal
c3751e9a50 operations: fix dedupe continuing on errors like insufficientFilePermisson - fixes #3470
* Fix dedupe on merge continuing on errors like insufficientFilePermisson
* Sorted the directories to remove recursion logic
2019-11-26 10:58:52 +00:00
Nick Craig-Wood
420ae905b5 vfs: make sure existing files opened for write show correct size
Before this change if an existing file was opened for write without
truncate its size would show as 0 rather than the full size of the
file.
2019-11-25 11:31:44 +00:00
Nick Craig-Wood
a7d65bd519 sftp: add --sftp-skip-links to skip symlinks and non regular files - fixes #3716
This also corrects the symlink detection logic to only check symlink
files.  Previously it was checking all directories too, which made it
do more stat calls than necessary.
2019-11-24 16:10:53 +00:00
Nick Craig-Wood
1db31d7149 swift: fix parsing of X-Object-Manifest
Before this change we forgot to URL decode the X-Object-Manifest in a dynamic large object.

This problem was introduced by 2fe8285f89 "swift: reserve segments of a
dynamic large object when deleting objects in a container that has
versioning enabled."
2019-11-21 13:25:02 +00:00
Nick Craig-Wood
4641bd5116 Add anuar45 to contributors 2019-11-21 11:16:04 +00:00
anuar45
7e602dbf39 stats: show deletes in stats and hide zero stats
This shows deletes in the stats.  It also doesn't show zero stats
in order not to make the stats block too long.
2019-11-21 11:15:47 +00:00
Nick Craig-Wood
e14d968f8d Start v1.50.2-DEV development 2019-11-19 16:51:32 +00:00
Nick Craig-Wood
e0eeeaafcd accounting: don't show entries in both transferring and checking
See: https://forum.rclone.org/t/showing-progress-checking/12958
2019-11-19 13:22:33 +00:00
Nick Craig-Wood
d46f8d0ae5 accounting: fix memory leak on retries operations
Before this change if an operation was retried on operations.Copy and
the operation was large enough to use an async buffer then an async
buffer was leaked on the retry.  This leaked memory, a file handle and
a goroutine.

After this change if Account.WithBuffer is called and there is already
a buffer, then a new one won't be allocated.
2019-11-19 12:11:59 +00:00
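The shape of the fix, reduced to a sketch with invented names (the real code lives in rclone's accounting package): make attaching a buffer idempotent so a retried copy reuses the buffer it already has.

```go
package sketch

// account is a stand-in for rclone's accounting wrapper. Illustrative only.
type account struct {
	buffer []byte
}

// withBuffer attaches an async buffer, but becomes a no-op if one is already
// attached - so a retry does not leak another buffer, file handle and goroutine.
func (acc *account) withBuffer(size int) *account {
	if acc.buffer != nil {
		return acc
	}
	acc.buffer = make([]byte, size)
	return acc
}
```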
Nick Craig-Wood
1e6278556c Add Maciej Zimnoch to contributors 2019-11-18 16:28:19 +00:00
Nick Craig-Wood
303f4ee152 Add Ankur Gupta to contributors 2019-11-18 16:28:19 +00:00
Nguyễn Hữu Luân
2fe8285f89 swift: reserve segments of a dynamic large object when deleting objects in a container that has versioning enabled.
Also add code to handle moving an object when it is contained in a container that has versioning enabled with "X-History-Location".
2019-11-18 16:26:10 +00:00
Maciej Zimnoch
f5443ac939 accounting: clear finished transfer in stats-reset
In order to reduce memory usage `stats-reset` also
clears finished transfers.

Fixes #3734
2019-11-18 14:25:32 +00:00
Maciej Zimnoch
7cf056b2c2 accounting: allow MaxCompletedTransfers to be configurable
Rclone library users might be interested in changing the default value,
or even disabling it. With the current version that is impossible, which
leads to races when the number of uploaded objects exceeds the default limit.

Fixes #3732
2019-11-18 14:25:32 +00:00
Ankur Gupta
75a6c49f87 Fix error counter - fixes #3650
For a few commands, rclone counts an error multiple times. This was fixed by
creating a new error type which keeps a flag to remember if the error has
already been counted or not. The CountError function now wraps the original
error with the above new error type and returns it.
2019-11-18 14:13:02 +00:00
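A sketch of the idea with invented names (not the real fserrors code): wrap the error together with a flag so that however many times it bubbles up, it only increments the error counter once.

```go
package sketch

// countedError marks an error as already having been added to the stats.
type countedError struct {
	error
	counted bool
}

var errorCount int // stand-in for the stats error counter

// countError counts err exactly once, wrapping it so later calls see the flag.
func countError(err error) error {
	if err == nil {
		return nil
	}
	if ce, ok := err.(*countedError); ok && ce.counted {
		return ce // already counted - don't count it again
	}
	errorCount++
	return &countedError{error: err, counted: true}
}
```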
Nick Craig-Wood
19229b1215 drive: fix --drive-root-folder-id with team/shared drives
Before this change rclone used the team_drive ID as the root if set
even if the root_folder_id was set too.

This change uses the root_folder_id in preference over the team_drive
which restores the functionality.

This problem was introduced by ba7c2ac443

Fixes #3742
2019-11-16 18:38:21 +00:00
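The precedence being restored, as a tiny sketch (illustrative names only): an explicitly configured root_folder_id wins over the team_drive ID, which in turn wins over the default root alias.

```go
package sketch

// driveRootID picks the folder to treat as the root of the remote.
func driveRootID(rootFolderID, teamDriveID string) string {
	if rootFolderID != "" {
		return rootFolderID // explicit setting always wins
	}
	if teamDriveID != "" {
		return teamDriveID
	}
	return "root"
}
```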
Nick Craig-Wood
b5bb4c2a21 vfs: fix tests not to upload a 0 length file
Some remotes can't upload 0 length files, so this fixes the
TestCacheRename test so that it writes something to the file.
2019-11-15 09:26:40 +00:00
Nick Craig-Wood
479c803fd9 vendor: update all dependencies 2019-11-14 21:51:34 +00:00
Nick Craig-Wood
3dcf1e61cf cache: follow move of upstream library github.com/coreos/bbolt github.com/etcd-io/bbolt 2019-11-14 21:51:34 +00:00
Nick Craig-Wood
3da1cbfc81 Add Marco Molteni to contributors 2019-11-14 21:51:34 +00:00
Marco Molteni
0c9a8cf776 doc: add Scaleway to the S3 table of contents
Hello, documentation for Scaleway was already there, but the TOC was missing it.
2019-11-14 21:49:43 +00:00
Nick Craig-Wood
f3871377c3 Add Sebastian Brandt to contributors 2019-11-14 12:54:42 +00:00
Nick Craig-Wood
cc9a7dc073 Add Barry Muldrey to contributors 2019-11-14 12:54:42 +00:00
Nick Craig-Wood
b61dd809ee Add new email for Anagh Kumar Baranwal 2019-11-14 12:54:38 +00:00
Sebastian Brandt
f158a398f3 sftp: Retry Creation of Connection - fixes #3656
Removes the existing rate limiter because it is implicitly included in
the pacer.
2019-11-14 12:50:01 +00:00
jaKa
acefa5c40d koofr: use rclone HTTP client. 2019-11-14 11:36:44 +00:00
Barry Muldrey
2784c3234b fs/config/configflags: fix --compare-dest and --copy-dest help strings
from rsync manual:

--compare-dest=DIR
    This option instructs rsync to use DIR on the destination machine as an
    additional hierarchy to compare destination files against doing transfers
    (if the files are missing in the destination directory). If a file is found
    in DIR that is identical to the sender's file, the file will NOT be
    transferred to the destination directory. This is useful for creating
    a sparse backup of just files that have changed from an earlier backup.

--copy-dest=DIR
    This option behaves like --compare-dest, but rsync will also copy unchanged
     files found in DIR to the destination directory using a local copy.
      This is useful for doing transfers to a new destination while leaving
       existing files intact, and then doing a flash-cutover when all files
        have been successfully transferred.
2019-11-12 13:37:58 +00:00
Nick Craig-Wood
c21a4fee58 mount,cmount: make sure we call unmount when exiting 2019-11-11 22:08:52 +00:00
Nick Craig-Wood
358f5a8084 vfs: fix edge cases when reading ModTime from file
This fixes the unreliable test TestMount/CacheMode=full/TestFileModTime
2019-11-11 16:20:28 +00:00
Nick Craig-Wood
9115752679 proxy: reduce the internal bcrypt strength to fix race tests
Before this change the race tests were taking too long.  The bcrypt
function went from about 20ms to 1s under the race detector and this
is called for every transaction on webdav.

This change reduces the bcrypt strength so it takes about 1ms without the
race detector, so the race tests pass, and it still has adequate security
for in memory only storage.
2019-11-11 16:20:28 +00:00
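For context on why this helps, a small standalone example using golang.org/x/crypto/bcrypt (not the rclone proxy code itself): bcrypt's cost parameter is logarithmic, so dropping from the default cost to the minimum brings a hash from tens of milliseconds (around a second under the race detector) down to roughly a millisecond, which is still fine for credentials held only in memory.

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// bcrypt.MinCost (4) instead of bcrypt.DefaultCost (10): each extra cost
	// level doubles the work, so this is roughly 64x cheaper per hash.
	hash, err := bcrypt.GenerateFromPassword([]byte("secret"), bcrypt.MinCost)
	if err != nil {
		panic(err)
	}
	fmt.Println(bcrypt.CompareHashAndPassword(hash, []byte("secret"))) // <nil>
}
```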
Nick Craig-Wood
51efb349ac vfs: revise locking in file and dir to fix race conditions 2019-11-11 16:20:27 +00:00
Nick Craig-Wood
e0d9314059 mounttest: fix occasionally failing test TestRenameOpenHandle 2019-11-11 16:20:27 +00:00
Nick Craig-Wood
21c6babdbb mount: enable async reads for a 20% speedup
Now that the vfs can cope with out of order reads we can enable the
async read feature for an increase in throughput on the local disk of
about 20%.
2019-11-11 16:20:27 +00:00
Nick Craig-Wood
5beeac7959 vfs: make ReadAt for non cached files work better with non-sequential reads
This makes ReadAt for non cached files wait a short time (up to 5ms)
if it gets an out of order read (which would normally cause a seek and
which takes a long time) to see if the gap will be filled with an in
order read.

This makes mount2 based on go-fuse work more efficiently and enables
async reading in normal mount.

A similar change was done for WriteAt in af030f74f5
2019-11-11 16:20:27 +00:00
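A very reduced sketch of the idea (invented names, nothing like the real VFS code): when a ReadAt arrives out of order, poll briefly to see whether the in-order read turns up and fills the gap, and only fall back to a seek if it doesn't.

```go
package sketch

import (
	"sync"
	"time"
)

type reader struct {
	mu     sync.Mutex
	offset int64 // next expected in-order offset
}

// waitInOrder waits up to maxWait (rclone uses about 5ms) for the stream to
// reach off; it reports whether the read can now proceed without a seek.
func (r *reader) waitInOrder(off int64, maxWait time.Duration) bool {
	deadline := time.Now().Add(maxWait)
	for {
		r.mu.Lock()
		ok := r.offset == off
		r.mu.Unlock()
		if ok {
			return true // gap was filled by an in order read
		}
		if time.Now().After(deadline) {
			return false // give up and seek
		}
		time.Sleep(time.Millisecond)
	}
}
```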
Nick Craig-Wood
be5392f448 vfs: only calculate one hash for reads
This speeds up mounting on the local backend enormously.
2019-11-11 16:20:27 +00:00
Nick Craig-Wood
c00dcb7e67 chunkedreader: disable hash calculation for first segment
This will produce a slight speedup for small files.
2019-11-11 16:20:27 +00:00
Nick Craig-Wood
6150ae89d6 vfs: add a newly created file straight into the directory 2019-11-11 15:20:09 +00:00
Nick Craig-Wood
1e423d21e1 drive: fix listing of the root directory with drive.files scope
We attempt to find the ID of the root folder by doing a GET on the
folder ID "root". With scope "drive.files" this fails with a 404
message.

After this change if we get the 404 message, we just carry on using
"root" as the root folder ID and we cache that for future lookups.

This means that changenotify messages will not work correctly in the
root folder but otherwise has minor consequences.

See: https://forum.rclone.org/t/fresh-raspberry-pi-build-google-drive-404-error-failed-to-ls-googleapi-error-404-file-not-found/12791
2019-11-11 09:07:34 +00:00
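The fallback logic in miniature (a hypothetical helper, not the drive backend's actual code): try to resolve the real ID of "root", and if that lookup 404s because of the drive.files scope, keep using the literal "root" alias.

```go
package sketch

import "errors"

// errNotFound stands in for a googleapi 404 "File not found" error.
var errNotFound = errors.New("file not found (404)")

// rootFolderID resolves the root folder ID via lookup, falling back to the
// "root" alias when the scope doesn't allow the lookup. Callers cache the result.
func rootFolderID(lookup func(id string) (string, error)) (string, error) {
	id, err := lookup("root")
	if errors.Is(err, errNotFound) {
		return "root", nil // keep using the alias - listing still works
	}
	if err != nil {
		return "", err
	}
	return id, nil
}
```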
Brett Dutro
53d55ae760 Add test for cache renaming functionality 2019-11-10 11:58:46 +00:00
Anagh Kumar Baranwal
5928704e1b On rename, rename in cache too if the file exists
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2019-11-10 11:58:46 +00:00
buengese
5ddfa9f7f6 config: SetValueAndSave ignore error if config section does not exist yet 2019-11-09 16:44:08 +00:00
Nick Craig-Wood
9b5308144f s3: Reduce memory usage streaming files by reducing max stream upload size
Before this change rclone would allow the user to stream (eg with
rclone mount, rclone rcat or uploading google photos or docs) 5TB
files.  This meant that rclone allocated 4 * 525 MB buffers per
transfer which is way too much memory by default.

This change makes rclone use the configured chunk size for streamed
uploads.  This is 5MB by default which means that rclone can stream
upload files up to 48GB by default staying below the 10,000 chunks
limit.

This can be increased with --s3-chunk-size if necessary.

If rclone detects that a file is being streamed to s3 it will make a
single NOTICE level log stating the limitation.

This fixes the enormous memory usage.

Fixes #3568
See: https://forum.rclone.org/t/how-much-memory-does-rclone-need/12743
2019-11-09 15:55:19 +00:00
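The arithmetic behind the 48GB figure, as a runnable snippet: S3 multipart uploads allow at most 10,000 parts, so the largest file that can be streamed without knowing its size in advance is the chunk size times 10,000.

```go
package main

import "fmt"

func main() {
	const maxParts = 10000              // S3 multipart upload part limit
	chunkSize := int64(5 * 1024 * 1024) // default --s3-chunk-size (5 MiB)
	maxStream := chunkSize * maxParts
	fmt.Printf("max streamed upload ~ %.1f GiB\n", float64(maxStream)/(1<<30))
	// prints: max streamed upload ~ 48.8 GiB
}
```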
Aleksandar Jankovic
4b20afa94a backend/s3: fix ExpiryWindow value
ExpiryWindow accepts a duration but it was set to the value 3.
This changes it to 3 * time.Minute since the default is 5 minutes.
2019-11-05 13:55:55 +00:00
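The root cause in one line: ExpiryWindow in the AWS SDK credential providers is a time.Duration, so a bare 3 means 3 nanoseconds rather than 3 minutes. A sketch of the difference (illustrative, not the actual backend code):

```go
package sketch

import "time"

var (
	wrong = time.Duration(3) // what was set: 3 nanoseconds - effectively no window
	right = 3 * time.Minute  // the fix: refresh credentials 3 minutes before expiry
)
```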
Nick Craig-Wood
049ff1f269 config: check a remote exists when creating a new one 2019-11-05 12:39:33 +00:00
Nick Craig-Wood
3f7af64316 config: give config questions default values - fixes #3672 2019-11-05 11:53:44 +00:00
Nick Craig-Wood
0eaf5475ef Start v1.50.1-DEV development 2019-11-02 15:26:01 +00:00
Nick Craig-Wood
7bf056316f local: fix listings of . on Windows - fixes #3676 2019-10-30 16:00:18 +00:00
Xiaoxing Ye
520ddbcceb config: do not open browser on headless if google fs
On google fs (drive, google photos, and google cloud storage), if
headless is selected, do not open browser.

This also supplies a new option "auth-no-open-browser" for authorize
if the user does not want it.

This should fix #3323.
2019-10-30 14:12:42 +00:00
Nick Craig-Wood
1ce1ea34aa hash: fix hash names for DropboxHash and CRC-32
These were unintentionally renamed as part of 1dc8bcd48c

Fixes #3679
2019-10-30 12:20:10 +00:00
Nick Craig-Wood
e6378daadf fshttp: don't print token bucket errors on context cancelled
These happen as a natural part of exceeding --max-transfer and we
don't need to worry the user with them.
2019-10-30 12:20:10 +00:00
Nick Craig-Wood
7ff95c6250 Add Xiaoxing Ye to contributors 2019-10-30 12:20:10 +00:00
Xiaoxing Ye
6d58d9a86f vendor: change goftp/server url
Closing #3674
2019-10-29 17:41:56 +00:00
Chaitanya
e0356f5aae rcd: Adding group parameter to stats 2019-10-29 16:39:37 +00:00
Xiaoxing Ye
191cfb79d1 onedrive: no trailing slash reading metadata...
No trailing slash when reading metadata of an item given item ID.

This should fix #3664.
2019-10-29 13:33:11 +00:00
Nick Craig-Wood
e81eca4055 fshttp: fix error reporting on tpslimit token bucket errors 2019-10-28 22:11:38 +00:00
Nick Craig-Wood
ee3215ac76 build: make replacement of new rclone binary atomic on build
This avoids the "text file busy" message when trying to replace the
binary of a running rclone.
2019-10-28 22:11:38 +00:00
Nick Craig-Wood
199ac61bde rc: add methods to turn on blocking and mutex profiling 2019-10-28 22:11:38 +00:00
Nick Craig-Wood
a40cc1167d Add zero-24 to contributors 2019-10-28 16:49:33 +00:00
zero-24
c57ea8d867 docs: add instructions to create your own dropbox app ID 2019-10-28 16:49:16 +00:00
Nick Craig-Wood
1868c77e16 rc: fix formatting of docs 2019-10-27 10:43:40 +00:00
Brett Dutro
378a3f4133 mount: replace use of WriteAt with Write for cache mode >= writes and O_APPEND
os.File.WriteAt returns an error if a file was opened with O_APPEND.
This replaces it with os.File.Write if the file was opened with
O_APPEND.
2019-10-26 17:27:52 +01:00
Nick Craig-Wood
daff5a824e Start v1.50.0-DEV development 2019-10-26 12:42:06 +01:00
2745 changed files with 163887 additions and 773323 deletions

4
.github/FUNDING.yml vendored Normal file

@@ -0,0 +1,4 @@
github: [ncw]
patreon: njcw
liberapay: ncw
custom: ["https://rclone.org/donate/"]


@@ -5,19 +5,31 @@ about: Report a problem with rclone
<!--
Welcome :-) We understand you are having a problem with rclone; we want to help you with that!
We understand you are having a problem with rclone; we want to help you with that!
If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:
**STOP and READ**
**YOUR POST WILL BE REMOVED IF IT IS LOW QUALITY**:
Please show the effort you've put in to solving the problem and please be specific.
People are volunteering their time to help! Low effort posts are not likely to get good answers!
If you think you might have found a bug, try to replicate it with the latest beta (or stable).
The update instructions are available at https://rclone.org/commands/rclone_selfupdate/
If you can still replicate it or just got a question then please use the rclone forum:
https://forum.rclone.org/
instead of filing an issue for a quick response.
for a quick response instead of filing an issue on this repo.
If you think you might have found a bug, please can you try to replicate it with the latest beta?
If nothing else helps, then please fill in the info below which helps us help you.
https://beta.rclone.org/
If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)
**DO NOT REDACT** any information except passwords/keys/personal info.
You should use 3 backticks to begin and end your paste to make it readable.
Make sure to include a log obtained with '-vv'.
You can also use '-vv --log-file bug.log' and a service such as https://pastebin.com or https://gist.github.com/
Thank you
@@ -25,6 +37,11 @@ The Rclone Developers
-->
#### The associated forum post URL from `https://forum.rclone.org`
#### What is the problem you are having with rclone?
@@ -33,18 +50,18 @@ The Rclone Developers
#### Which OS you are using and how many bits (eg Windows 7, 64 bit)
#### Which OS you are using and how many bits (e.g. Windows 7, 64 bit)
#### Which cloud storage system are you using? (eg Google Drive)
#### Which cloud storage system are you using? (e.g. Google Drive)
#### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
#### The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
#### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
#### A log from the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)


@@ -7,12 +7,16 @@ about: Suggest a new feature or enhancement for rclone
Welcome :-)
So you've got an idea to improve rclone? We love that! You'll be glad to hear we've incorporated hundreds of ideas from contributors already.
So you've got an idea to improve rclone? We love that!
You'll be glad to hear we've incorporated hundreds of ideas from contributors already.
Here is a checklist of things to do:
Probably the latest beta (or stable) release has your feature, so try to update your rclone.
The update instructions are available at https://rclone.org/commands/rclone_selfupdate/
1. Please search the old issues first for your idea and +1 or comment on an existing issue if possible.
2. Discuss on the forum first: https://forum.rclone.org/
If it still isn't there, here is a checklist of things to do:
1. Search the old issues for your idea and +1 or comment on an existing issue if possible.
2. Discuss on the forum: https://forum.rclone.org/
3. Make a feature request issue (this is the right place!).
4. Be prepared to get involved making the feature :-)
@@ -23,6 +27,10 @@ The Rclone Developers
-->
#### The associated forum post URL from `https://forum.rclone.org`
#### What is your current rclone version (output from `rclone version`)?

5
.github/ISSUE_TEMPLATE/config.yml vendored Normal file

@@ -0,0 +1,5 @@
blank_issues_enabled: false
contact_links:
- name: Rclone Forum Community Support
url: https://forum.rclone.org/
about: Please ask and answer questions here.


@@ -19,82 +19,79 @@ jobs:
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'modules_race', 'go1.10', 'go1.11', 'go1.12']
job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.13', 'go1.14', 'go1.15']
include:
- job_name: linux
os: ubuntu-latest
go: '1.13.x'
modules: 'off'
go: '1.16.x'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
quicktest: true
racequicktest: true
deploy: true
- job_name: mac
- job_name: mac_amd64
os: macOS-latest
go: '1.13.x'
modules: 'off'
gotags: '' # cmount doesn't work on osx travis for some reason
go: '1.16.x'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
racequicktest: true
deploy: true
- job_name: mac_arm64
os: macOS-latest
go: '1.16.x'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -macos-sdk macosx11.1 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows_amd64
os: windows-latest
go: '1.13.x'
modules: 'off'
go: '1.16.x'
gotags: cmount
build_flags: '-include "^windows/amd64" -cgo'
build_args: '-buildmode exe'
quicktest: true
racequicktest: true
deploy: true
- job_name: windows_386
os: windows-latest
go: '1.13.x'
modules: 'off'
go: '1.16.x'
gotags: cmount
goarch: '386'
cgo: '1'
build_flags: '-include "^windows/386" -cgo'
build_args: '-buildmode exe'
quicktest: true
deploy: true
- job_name: other_os
os: ubuntu-latest
go: '1.13.x'
modules: 'off'
build_flags: '-exclude "^(windows/|darwin/amd64|linux/)"'
go: '1.16.x'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true
- job_name: modules_race
- job_name: go1.13
os: ubuntu-latest
go: '1.13.x'
modules: 'on'
quicktest: true
- job_name: go1.14
os: ubuntu-latest
go: '1.14.x'
quicktest: true
racequicktest: true
- job_name: go1.10
- job_name: go1.15
os: ubuntu-latest
go: '1.10.x'
modules: 'off'
quicktest: true
- job_name: go1.11
os: ubuntu-latest
go: '1.11.x'
modules: 'off'
quicktest: true
- job_name: go1.12
os: ubuntu-latest
go: '1.12.x'
modules: 'off'
go: '1.15.x'
quicktest: true
racequicktest: true
name: ${{ matrix.job_name }}
@@ -102,25 +99,24 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@master
uses: actions/checkout@v2
with:
path: ./src/github.com/${{ github.repository }}
fetch-depth: 0
- name: Install Go
uses: actions/setup-go@v1
uses: actions/setup-go@v2
with:
stable: 'false'
go-version: ${{ matrix.go }}
- name: Set environment variables
shell: bash
run: |
echo '::set-env name=GOPATH::${{ runner.workspace }}'
echo '::add-path::${{ runner.workspace }}/bin'
echo '::set-env name=GO111MODULE::${{ matrix.modules }}'
echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi
echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
echo 'BUILD_ARGS=${{ matrix.build_args }}' >> $GITHUB_ENV
if [[ "${{ matrix.goarch }}" != "" ]]; then echo 'GOARCH=${{ matrix.goarch }}' >> $GITHUB_ENV ; fi
if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi
- name: Install Libraries on Linux
shell: bash
@@ -135,7 +131,7 @@ jobs:
shell: bash
run: |
brew update
brew cask install osxfuse
brew install --cask macfuse
if: matrix.os == 'macOS-latest'
- name: Install Libraries on Windows
@@ -143,10 +139,10 @@ jobs:
run: |
$ProgressPreference = 'SilentlyContinue'
choco install -y winfsp zip
Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
echo "CPATH=C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
if ($env:GOARCH -eq "386") {
choco install -y mingw --forcex86 --force
Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
echo "C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
}
# Copy mingw32-make.exe to make.exe so the same command line
# can be used on Windows as on macOS and Linux
@@ -166,10 +162,22 @@ jobs:
printf "\n\nSystem environment:\n\n"
env
- name: Run tests
- name: Go module cache
uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Build rclone
shell: bash
run: |
make
- name: Run tests
shell: bash
run: |
make quicktest
if: matrix.quicktest
@@ -196,55 +204,107 @@ jobs:
- name: Deploy built binaries
shell: bash
run: |
if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep ; fi
make travis_beta
if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi
if [[ "${{ matrix.os }}" == "windows-latest" ]]; then make release_dep_windows ; fi
make ci_beta
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# working-directory: '$(modulePath)'
if: matrix.deploy && github.head_ref == ''
# Deploy binaries if enabled in config && not a PR && not a fork
if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
xgo:
timeout-minutes: 60
name: "xgo cross compile"
runs-on: ubuntu-latest
android:
timeout-minutes: 30
name: "android-all"
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
steps:
# Upgrade together with NDK version
- name: Set up Go 1.14
uses: actions/setup-go@v1
with:
go-version: 1.14
- name: Checkout
uses: actions/checkout@master
with:
path: ./src/github.com/${{ github.repository }}
# Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
- name: Force NDK version
run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;21.4.7075529" | grep -v = || true
- name: Set environment variables
shell: bash
run: |
echo '::set-env name=GOPATH::${{ runner.workspace }}'
echo '::add-path::${{ runner.workspace }}/bin'
- name: Go module cache
uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Cross-compile rclone
run: |
docker pull billziss/xgo-cgofuse
go get -v github.com/karalabe/xgo
xgo \
-image=billziss/xgo-cgofuse \
-targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
-tags cmount \
-dest build \
.
xgo \
-image=billziss/xgo-cgofuse \
-targets=android/*,ios/* \
-dest build \
.
- name: Set global environment variables
shell: bash
run: |
echo "VERSION=$(make version)" >> $GITHUB_ENV
- name: Build rclone
run: |
docker pull golang
docker run --rm -v "$PWD":/usr/src/rclone -w /usr/src/rclone golang go build -mod=vendor -v
- name: build native rclone
run: |
make
- name: Upload artifacts
run: |
make circleci_upload
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
if: github.head_ref == ''
- name: arm-v7a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm' >> $GITHUB_ENV
echo 'GOARM=7' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm-v7a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-armv7a .
- name: arm64-v8a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm64-v8a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-armv8a .
- name: x86 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=386' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x86 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-x86 .
- name: x64 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=amd64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x64 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-x64 .
- name: Upload artifacts
run: |
make ci_upload
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# Upload artifacts if not a PR && not a fork
if: github.head_ref == '' && github.repository == 'rclone/rclone'


@@ -0,0 +1,25 @@
name: Docker beta build
on:
push:
branches:
- master
jobs:
build:
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Build and publish image
uses: ilteoood/docker_buildx@1.1.0
with:
tag: beta
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}


@@ -0,0 +1,33 @@
name: Docker release build
on:
release:
types: [published]
jobs:
build:
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Get actual patch version
id: actual_patch_version
run: echo ::set-output name=ACTUAL_PATCH_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g')
- name: Get actual minor version
id: actual_minor_version
run: echo ::set-output name=ACTUAL_MINOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1,2)
- name: Get actual major version
id: actual_major_version
run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
- name: Build and publish image
uses: ilteoood/docker_buildx@1.1.0
with:
tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}

5
.gitignore vendored

@@ -1,10 +1,13 @@
*~
_junk/
rclone
rclone.exe
build
docs/public
rclone.iml
.idea
.history
*.test
*.log
*.log
*.iml
fuzz-build.zip


@@ -12,10 +12,10 @@ When filing an issue, please include the following information if
possible as well as a description of the problem. Make sure you test
with the [latest beta of rclone](https://beta.rclone.org/):
* Rclone version (eg output from `rclone -V`)
* Which OS you are using and how many bits (eg Windows 7, 64 bit)
* The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
* A log of the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
* Rclone version (e.g. output from `rclone -V`)
* Which OS you are using and how many bits (e.g. Windows 7, 64 bit)
* The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
* A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
* if the log contains secrets then edit the file with a text editor first to obscure them
## Submitting a pull request ##
@@ -33,10 +33,11 @@ page](https://github.com/rclone/rclone).
Now in your terminal
go get -u github.com/rclone/rclone
cd $GOPATH/src/github.com/rclone/rclone
git clone https://github.com/rclone/rclone.git
cd rclone
git remote rename origin upstream
git remote add origin git@github.com:YOURUSER/rclone.git
go build
Make a branch to add your new feature
@@ -48,7 +49,7 @@ When ready - run the unit tests for the code you changed
go test -v
Note that you may need to make a test remote, eg `TestSwift` for some
Note that you may need to make a test remote, e.g. `TestSwift` for some
of the unit tests.
Note the top level Makefile targets
@@ -72,23 +73,26 @@ Make sure you
When you are done with that
git push origin my-new-feature
git push -u origin my-new-feature
Go to the GitHub website and click [Create pull
request](https://help.github.com/articles/creating-a-pull-request/).
You patch will get reviewed and you might get asked to fix some stuff.
If so, then make the changes in the same branch, squash the commits,
rebase it to master then push it to GitHub with `--force`.
If so, then make the changes in the same branch, squash the commits (make multiple commits one commit) by running:
```
git log # See how many commits you want to squash
git reset --soft HEAD~2 # This squashes the 2 latest commits together.
git status # Check what will happen, if you made a mistake resetting, you can run git reset 'HEAD@{1}' to undo.
git commit # Add a new commit message.
git push --force # Push the squashed commit to your GitHub repo.
# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also recommends wizardzines.com
```
## Enabling CI for your fork ##
## CI for your fork ##
The CI config files for rclone have taken care of forks of the project, so you can enable CI for your fork repo easily.
rclone currently uses [Travis CI](https://travis-ci.org/), [AppVeyor](https://ci.appveyor.com/), and
[Circle CI](https://circleci.com/) to build the project. To enable them for your fork, simply go into their
websites, find your fork of rclone, and enable building there.
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
## Testing ##
@@ -96,7 +100,7 @@ rclone's tests are run from the go testing framework, so at the top
level you can run this to run all the tests.
go test -v ./...
rclone contains a mixture of unit tests and integration tests.
Because it is difficult (and in some respects pointless) to test cloud
storage systems by mocking all their interfaces, rclone unit tests can
@@ -112,8 +116,8 @@ are skipped if `TestDrive:` isn't defined.
cd backend/drive
go test -v
You can then run the integration tests which tests all of rclone's
operations. Normally these get run against the local filing system,
You can then run the integration tests which test all of rclone's
operations. Normally these get run against the local file system,
but they can be run against any of the remotes.
cd fs/sync
@@ -124,7 +128,7 @@ but they can be run against any of the remotes.
go test -v -remote TestDrive:
If you want to use the integration test framework to run these tests
all together with an HTML report and test retries then from the
altogether with an HTML report and test retries then from the
project root:
go install github.com/rclone/rclone/fstest/test_all
@@ -152,6 +156,7 @@ with modules beneath.
* ...commands
* docs - the documentation and website
* content - adjust these docs only - everything else is autogenerated
* command - these are auto generated - edit the corresponding .go file
* fs - main rclone definitions - minimal amount of code
* accounting - bandwidth limiting and statistics
* asyncreader - an io.Reader which reads ahead
@@ -161,12 +166,12 @@ with modules beneath.
* fserrors - rclone specific error handling
* fshttp - http handling for rclone
* fspath - path handling for rclone
* hash - defines rclones hash types and functions
* hash - defines rclone's hash types and functions
* list - list a remote
* log - logging facilities
* march - iterates directories in lock step
* object - in memory Fs objects
* operations - primitives for sync, eg Copy, Move
* operations - primitives for sync, e.g. Copy, Move
* sync - sync directories
* walk - walk a directory
* fstest - provides integration test framework
@@ -174,7 +179,7 @@ with modules beneath.
* mockdir - mocks an fs.Directory
* mockobject - mocks an fs.Object
* test_all - Runs integration tests for everything
* graphics - the images used in the website etc
* graphics - the images used in the website, etc.
* lib - libraries used by the backend
* atexit - register functions to run when rclone exits
* dircache - directory ID to name caching
@@ -182,7 +187,6 @@ with modules beneath.
* pacer - retries with backoff and paces operations
* readers - a selection of useful io.Readers
* rest - a thin abstraction over net/http for REST
* vendor - 3rd party code managed by `go mod`
* vfs - Virtual FileSystem layer for implementing rclone mount and similar
## Writing Documentation ##
@@ -199,14 +203,17 @@ for the flag help, the remainder is shown to the user in `rclone
config` and is added to the docs with `make backenddocs`.
The only documentation you need to edit are the `docs/content/*.md`
files. The MANUAL.*, rclone.1, web site etc are all auto generated
files. The `MANUAL.*`, `rclone.1`, web site, etc. are all auto generated
from those during the release process. See the `make doc` and `make
website` targets in the Makefile if you are interested in how. You
don't need to run these when adding a feature.
Documentation for rclone sub commands is with their code, eg
Documentation for rclone sub commands is with their code, e.g.
`cmd/ls/ls.go`.
Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
for small changes in the docs which makes it very easy.
## Making a release ##
There are separate instructions for making a release in the RELEASE.md
@@ -259,43 +266,27 @@ rclone uses the [go
modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
support in go1.11 and later to manage its dependencies.
**NB** you must be using go1.11 or above to add a dependency to
rclone. Rclone will still build with older versions of go, but we use
the `go mod` command for dependencies which is only in go1.11 and
above.
rclone can be built with modules outside of the GOPATH, but for
backwards compatibility with older go versions, rclone also maintains
a `vendor` directory with all the external code rclone needs for
building.
The `vendor` directory is entirely managed by the `go mod` tool, do
not add things manually.
rclone can be built with modules outside of the `GOPATH`.
To add a dependency `github.com/ncw/new_dependency` see the
instructions below. These will fetch the dependency, add it to
`go.mod` and `go.sum` and vendor it for older go versions.
instructions below. These will fetch the dependency and add it to
`go.mod` and `go.sum`.
GO111MODULE=on go get github.com/ncw/new_dependency
GO111MODULE=on go mod vendor
You can add constraints on that package when doing `go get` (see the
go docs linked above), but don't unless you really need to.
Please check in the changes generated by `go mod` including the
`vendor` directory and `go.mod` and `go.sum` in a single commit
separate from any other code changes with the title "vendor: add
github.com/ncw/new_dependency". Remember to `git add` any new files
in `vendor`.
Please check in the changes generated by `go mod` including `go.mod`
and `go.sum` in the same commit as your other changes.
## Updating a dependency ##
If you need to update a dependency then run
GO111MODULE=on go get -u github.com/pkg/errors
GO111MODULE=on go mod vendor
Check in in a single commit as above.
Check in a single commit as above.
## Updating all the dependencies ##
@@ -341,11 +332,10 @@ Getting going
* Add your remote to the imports in `backend/all/all.go`
* HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
* Try to implement as many optional methods as possible as it makes the remote more usable.
* Use fs/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
* `go install -tags noencode`
* Use lib/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
* `rclone purge -v TestRemote:rclone-info`
* `rclone info -vv --write-json remote.json TestRemote:rclone-info`
* `go run cmd/info/internal/build_csv/main.go -o remote.csv remote.json`
* `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
* `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
* open `remote.csv` in a spreadsheet and examine
Unit tests
@@ -359,7 +349,7 @@ Integration tests
* Add your backend to `fstest/test_all/config.yaml`
* Once you've done that then you can use the integration test framework from the project root:
* go install ./...
* test_all -backend remote
* test_all -backends remote
Or if you want to run the integration tests manually:
@@ -375,7 +365,7 @@ See the [testing](#testing) section for more information on integration tests.
Add your fs to the docs - you'll need to pick an icon for it from
[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
alphabetical order of full name of remote (eg `drive` is ordered as
alphabetical order of full name of remote (e.g. `drive` is ordered as
`Google Drive`) but with the local file system last.
* `README.md` - main GitHub page
@@ -384,7 +374,7 @@ alphabetical order of full name of remote (eg `drive` is ordered as
* update them with `make backenddocs` - revert any changes in other backends
* `docs/content/overview.md` - overview docs
* `docs/content/docs.md` - list of remotes in config section
* `docs/content/about.md` - front page of rclone.org
* `docs/content/_index.md` - front page of rclone.org
* `docs/layouts/chrome/navbar.html` - add it to the website navigation
* `bin/make_manual.py` - add the page to the `docs` constant
@@ -411,7 +401,7 @@ Usage
- If this variable doesn't exist, plugin support is disabled.
- Plugins must be compiled against the exact version of rclone to work.
(The rclone used during building the plugin must be the same as the source of rclone)
Building
To turn your existing additions into a Go plugin, move them to an external repository


@@ -3,19 +3,21 @@ FROM golang AS builder
COPY . /go/src/github.com/rclone/rclone/
WORKDIR /go/src/github.com/rclone/rclone/
RUN make quicktest
RUN \
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \
CGO_ENABLED=0 \
make
RUN ./rclone version
# Begin final image
FROM alpine:latest
RUN apk --no-cache add ca-certificates fuse
RUN apk --no-cache add ca-certificates fuse tzdata && \
echo "user_allow_other" >> /etc/fuse.conf
COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
RUN addgroup -g 1009 rclone && adduser -u 1009 -Ds /bin/sh -G rclone rclone
ENTRYPOINT [ "rclone" ]
WORKDIR /data


@@ -2,17 +2,20 @@
Current active maintainers of rclone are:
| Name | GitHub ID | Specific Responsibilities |
| :--------------- | :---------- | :-------------------------- |
| Nick Craig-Wood | @ncw | overall project health |
| Stefan Breunig | @breunigs | |
| Ishuah Kariuki | @ishuah | |
| Remus Bunduc | @remusb | cache backend |
| Fabian Möller | @B4dM4n | |
| Alex Chen | @Cnly | onedrive backend |
| Sandeep Ummadi | @sandeepkru | azureblob backend |
| Sebastian Bünger | @buengese | jottacloud & yandex backends |
| Ivan Andreev | @ivandeex | chunker & mailru backends |
| Name | GitHub ID | Specific Responsibilities |
| :--------------- | :---------------- | :-------------------------- |
| Nick Craig-Wood | @ncw | overall project health |
| Stefan Breunig | @breunigs | |
| Ishuah Kariuki | @ishuah | |
| Remus Bunduc | @remusb | cache backend |
| Fabian Möller | @B4dM4n | |
| Alex Chen | @Cnly | onedrive backend |
| Sandeep Ummadi | @sandeepkru | azureblob backend |
| Sebastian Bünger | @buengese | jottacloud, yandex & compress backends |
| Ivan Andreev | @ivandeex | chunker & mailru backends |
| Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend |
| Caleb Case | @calebcase | tardigrade backend |
**This is a work in progress Draft**
@@ -30,11 +33,11 @@ Rclone uses the labels like this:
* `duplicate` - normally close these and ask the user to subscribe to the original
* `enhancement: new remote` - a new rclone backend
* `enhancement` - a new feature
* `FUSE` - do do with `rclone mount` command
* `FUSE` - to do with `rclone mount` command
* `good first issue` - mark these if you find a small self contained issue - these get shown to new visitors to the project
* `help` wanted - mark these if you find a self contained issue - these get shown to new visitors to the project
* `IMPORTANT` - note to maintainers not to forget to fix this for the release
* `maintenance` - internal enhancement, code re-organisation etc
* `maintenance` - internal enhancement, code re-organisation, etc.
* `Needs Go 1.XX` - waiting for that version of Go to be released
* `question` - not a `bug` or `enhancement` - direct to the forum for next time
* `Remote: XXX` - which rclone backend this affects
@@ -42,7 +45,7 @@ Rclone uses the labels like this:
If it turns out to be a bug or an enhancement it should be tagged as such, with the appropriate other tags. Don't forget the "good first issue" tag to give new contributors something easy to do to get going.
When a ticket is tagged it should be added to a milestone, either the next release, the one after, Soon or Help Wanted. Bugs can be added to the "Known Bugs" milestone if they aren't planned to be fixed or need to wait for something (eg the next go release).
When a ticket is tagged it should be added to a milestone, either the next release, the one after, Soon or Help Wanted. Bugs can be added to the "Known Bugs" milestone if they aren't planned to be fixed or need to wait for something (e.g. the next go release).
The milestones have these meanings:

26401
MANUAL.html generated

File diff suppressed because one or more lines are too long

15729
MANUAL.md generated

File diff suppressed because it is too large

29064
MANUAL.txt generated

File diff suppressed because it is too large

144
Makefile

@@ -1,33 +1,35 @@
SHELL = bash
# Branch we are working on
BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(BUILD_SOURCEBRANCHNAME),$(lastword $(subst /, ,$(GITHUB_REF))),$(shell git rev-parse --abbrev-ref HEAD))
BRANCH := $(or $(BUILD_SOURCEBRANCHNAME),$(lastword $(subst /, ,$(GITHUB_REF))),$(shell git rev-parse --abbrev-ref HEAD))
# Tag of the current commit, if any. If this is not "" then we are building a release
RELEASE_TAG := $(shell git tag -l --points-at HEAD)
# Version of last release (may not be on this branch)
VERSION := $(shell cat VERSION)
# Last tag on this branch
LAST_TAG := $(shell git describe --tags --abbrev=0)
# Next version
NEXT_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2+1,0}')
NEXT_PATCH_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2,$$3+1}')
# If we are working on a release, override branch to master
ifdef RELEASE_TAG
BRANCH := master
LAST_TAG := $(shell git describe --abbrev=0 --tags $(VERSION)^)
endif
TAG_BRANCH := -$(BRANCH)
BRANCH_PATH := branch/
TAG_BRANCH := .$(BRANCH)
BRANCH_PATH := branch/$(BRANCH)/
# If building HEAD or master then unset TAG_BRANCH and BRANCH_PATH
ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
TAG_BRANCH :=
BRANCH_PATH :=
endif
# Make version suffix -DDD-gCCCCCCCC (D=commits since last relase, C=Commit) or blank
VERSION_SUFFIX := $(shell git describe --abbrev=8 --tags | perl -lpe 's/^v\d+\.\d+\.\d+//; s/^-(\d+)/"-".sprintf("%03d",$$1)/e;')
# TAG is current version + number of commits since last release + branch
# Make version suffix -beta.NNNN.CCCCCCCC (N=Commit number, C=Commit)
VERSION_SUFFIX := -beta.$(shell git rev-list --count HEAD).$(shell git show --no-patch --no-notes --pretty='%h' HEAD)
# TAG is current version + commit number + commit + branch
TAG := $(VERSION)$(VERSION_SUFFIX)$(TAG_BRANCH)
NEXT_VERSION := $(shell echo $(VERSION) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
ifndef RELEASE_TAG
TAG := $(TAG)-beta
ifdef RELEASE_TAG
TAG := $(RELEASE_TAG)
endif
GO_VERSION := $(shell go version)
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
ifdef BETA_SUBDIR
BETA_SUBDIR := /$(BETA_SUBDIR)
endif
@@ -44,22 +46,26 @@ endif
.PHONY: rclone test_all vars version
rclone:
go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS)
mkdir -p `go env GOPATH`/bin/
cp -av rclone`go env GOEXE` `go env GOPATH`/bin/
cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
test_all:
go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all
vars:
@echo SHELL="'$(SHELL)'"
@echo BRANCH="'$(BRANCH)'"
@echo TAG="'$(TAG)'"
@echo VERSION="'$(VERSION)'"
@echo NEXT_VERSION="'$(NEXT_VERSION)'"
@echo GO_VERSION="'$(GO_VERSION)'"
@echo BETA_URL="'$(BETA_URL)'"
btest:
@echo "[$(TAG)]($(BETA_URL)) on branch [$(BRANCH)](https://github.com/rclone/rclone/tree/$(BRANCH)) (uploaded in 15-30 mins)" | xclip -r -sel clip
@echo "Copied markdown of beta release to clip board"
version:
@echo '$(TAG)'
@@ -70,10 +76,10 @@ test: rclone test_all
# Quick test
quicktest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) $(GO_FILES)
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) ./...
racequicktest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race $(GO_FILES)
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...
# Do source code quality checks
check: rclone
@@ -85,35 +91,42 @@ check: rclone
build_dep:
go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
# Get the release dependencies
release_dep:
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
go run bin/get-github-release.go -extract github-release aktau/github-release 'linux-amd64-github-release.tar.bz2'
# Get the release dependencies we only install on linux
release_dep_linux:
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64\.tar\.gz'
# Get the release dependencies we only install on Windows
release_dep_windows:
GO111MODULE=off GOOS="" GOARCH="" go get github.com/josephspurrier/goversioninfo/cmd/goversioninfo
# Update dependencies
showupdates:
@echo "*** Direct dependencies that could be updated ***"
@GO111MODULE=on go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null
# Update direct and indirect dependencies and test dependencies
update:
GO111MODULE=on go get -u ./...
GO111MODULE=on go get -u -t ./...
-#GO111MODULE=on go get -d $(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
GO111MODULE=on go mod tidy
GO111MODULE=on go mod vendor
# Tidy the module dependencies
tidy:
GO111MODULE=on go mod tidy
GO111MODULE=on go mod vendor
doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
rclone.1: MANUAL.md
pandoc -s --from markdown --to man MANUAL.md -o rclone.1
pandoc -s --from markdown-smart --to man MANUAL.md -o rclone.1
MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs
MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs rcdocs
./bin/make_manual.py
MANUAL.html: MANUAL.md
pandoc -s --from markdown --to html MANUAL.md -o MANUAL.html
pandoc -s --from markdown-smart --to html MANUAL.md -o MANUAL.html
MANUAL.txt: MANUAL.md
pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt
pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt
commanddocs: rclone
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/
@@ -135,14 +148,27 @@ clean:
rm -f rclone fs/operations/operations.test fs/sync/sync.test fs/test_all.log test.log
website:
rm -rf docs/public
cd docs && hugo
@if grep -R "raw HTML omitted" docs/public ; then echo "ERROR: found unescaped HTML - fix the markdown source" ; fi
upload_website: website
rclone -v sync docs/public memstore:www-rclone-org
upload_test_website: website
rclone -P sync docs/public test-rclone-org:
validate_website: website
find docs/public -type f -name "*.html" | xargs tidy --mute-id yes -errors --gnu-emacs yes --drop-empty-elements no --warn-proprietary-attributes no --mute MISMATCHED_ATTRIBUTE_WARN
tarball:
git archive -9 --format=tar.gz --prefix=rclone-$(TAG)/ -o build/rclone-$(TAG).tar.gz $(TAG)
vendorball:
go mod vendor
tar -zcf build/rclone-$(TAG)-vendor.tar.gz vendor
rm -rf vendor
sign_upload:
cd build && md5sum rclone-v* | gpg --clearsign > MD5SUMS
cd build && sha1sum rclone-v* | gpg --clearsign > SHA1SUMS
@@ -161,10 +187,10 @@ upload_github:
./bin/upload-github $(TAG)
cross: doc
go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)
go run bin/cross-compile.go -release current $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
beta:
go run bin/cross-compile.go $(BUILDTAGS) $(TAG)
go run bin/cross-compile.go $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
@echo Beta release ready at https://pub.rclone.org/$(TAG)/
@@ -172,61 +198,61 @@ log_since_last_release:
git log $(LAST_TAG)..
compile_all:
go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG)
go run bin/cross-compile.go -compile-only $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
appveyor_upload:
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif
@echo Beta release ready at $(BETA_URL)
circleci_upload:
ci_upload:
sudo chown -R $$USER build
find build -type l -delete
gzip -r9v build
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
ifndef BRANCH_PATH
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
endif
@echo Beta release ready at $(BETA_URL)/testbuilds
travis_beta:
ifeq (linux,$(filter linux,$(subst Linux,linux,$(TRAVIS_OS_NAME) $(AGENT_OS))))
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar.gz'
endif
ci_beta:
git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
endif
@echo Beta release ready at $(BETA_URL)
# Fetch the binary builds from travis and appveyor
# Fetch the binary builds from GitHub actions
fetch_binaries:
rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/
serve: website
cd docs && hugo server -v -w
cd docs && hugo server -v -w --disableFastRender
tag: doc
@echo "Old tag is $(VERSION)"
@echo "New tag is $(NEXT_VERSION)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)\"\n" | gofmt > fs/version.go
echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_VERSION)" > VERSION
git tag -s -m "Version $(NEXT_VERSION)" $(NEXT_VERSION)
bin/make_changelog.py $(LAST_TAG) $(NEXT_VERSION) > docs/content/changelog.md.new
tag: retag doc
bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new
mv docs/content/changelog.md.new docs/content/changelog.md
@echo "Edit the new changelog in docs/content/changelog.md"
@echo "Then commit all the changes"
@echo git commit -m \"Version $(NEXT_VERSION)\" -a -v
@echo "And finally run make retag before make cross etc"
@echo git commit -m \"Version $(VERSION)\" -a -v
@echo "And finally run make retag before make cross, etc."
retag:
@echo "Version is $(VERSION)"
git tag -f -s -m "Version $(VERSION)" $(VERSION)
startdev:
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(VERSION)-DEV\"\n" | gofmt > fs/version.go
git commit -m "Start $(VERSION)-DEV development" fs/version.go
@echo "Version is $(VERSION)"
@echo "Next version is $(NEXT_VERSION)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)-DEV\"\n" | gofmt > fs/version.go
echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_VERSION)" > VERSION
git commit -m "Start $(NEXT_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
startstable:
@echo "Version is $(VERSION)"
@echo "Next stable version is $(NEXT_PATCH_VERSION)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_PATCH_VERSION)-DEV\"\n" | gofmt > fs/version.go
echo -n "$(NEXT_PATCH_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_PATCH_VERSION)" > VERSION
git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
winzip:
zip -9 rclone-$(TAG).zip rclone.exe


@@ -8,10 +8,7 @@
[Installation](https://rclone.org/install/) |
[Forum](https://forum.rclone.org/)
[![Build Status](https://travis-ci.org/rclone/rclone.svg?branch=master)](https://travis-ci.org/rclone/rclone)
[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/rclone/rclone?branch=master&passingText=windows%20-%20ok&svg=true)](https://ci.appveyor.com/project/rclone/rclone)
[![Build Status](https://dev.azure.com/rclone/rclone/_apis/build/status/rclone.rclone?branchName=master)](https://dev.azure.com/rclone/rclone/_build/latest?definitionId=2&branchName=master)
[![CircleCI](https://circleci.com/gh/rclone/rclone/tree/master.svg?style=svg)](https://circleci.com/gh/rclone/rclone/tree/master)
[![Build Status](https://github.com/rclone/rclone/workflows/build/badge.svg)](https://github.com/rclone/rclone/actions?query=workflow%3Abuild)
[![Go Report Card](https://goreportcard.com/badge/github.com/rclone/rclone)](https://goreportcard.com/report/github.com/rclone/rclone)
[![GoDoc](https://godoc.org/github.com/rclone/rclone?status.svg)](https://godoc.org/github.com/rclone/rclone)
[![Docker Pulls](https://img.shields.io/docker/pulls/rclone/rclone)](https://hub.docker.com/r/rclone/rclone)
@@ -33,10 +30,13 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* GetSky [:page_facing_up:](https://rclone.org/jottacloud/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
* HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
@@ -45,6 +45,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
* Memory [:page_facing_up:](https://rclone.org/memory/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
@@ -60,10 +61,16 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
* Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
* The local filesystem [:page_facing_up:](https://rclone.org/local/)
Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
@@ -78,6 +85,7 @@ Please see [the full list of all storage providers and their features](https://r
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
* Can sync to and from network, e.g. two different cloud accounts
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
* Optional transparent compression ([Compress](https://rclone.org/compress/))
* Optional encryption ([Crypt](https://rclone.org/crypt/))
* Optional cache ([Cache](https://rclone.org/cache/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))


@@ -4,35 +4,37 @@ This file describes how to make the various kinds of releases
## Extra required software for making a release
* [github-release](https://github.com/aktau/github-release) for uploading packages
* [gh the github cli](https://github.com/cli/cli) for uploading packages
* pandoc for making the html and man pages
## Making a release
* git checkout master # see below for stable branch
* git pull
* git status - make sure everything is checked in
* Check travis & appveyor builds are green
* make check
* Check GitHub actions build for master is Green
* make test # see integration test server or run locally
* make tag
* edit docs/content/changelog.md
* edit docs/content/changelog.md # make sure to remove duplicate logs from point releases
* make tidy
* make doc
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX.0"
* make retag
* git push --tags origin master
* # Wait for the appveyor and travis builds to complete then...
* git push --follow-tags origin
* # Wait for the GitHub builds to complete then...
* make fetch_binaries
* make tarball
* make vendorball
* make sign_upload
* make check_sign
* make upload
* make upload_website
* make upload_github
* make startdev
* # announce with forum post, twitter post, G+ post
* make startdev # make startstable for stable branch
* # announce with forum post, twitter post, patreon post
Early in the next release cycle update the vendored dependencies
Early in the next release cycle update the dependencies
* Review any pinned packages in go.mod and remove if possible
* make update
@@ -40,71 +42,63 @@ Early in the next release cycle update the vendored dependencies
* git add new files
* git commit -a -v
If `make update` fails with errors like this:
```
# github.com/cpuguy83/go-md2man/md2man
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:11:16: undefined: blackfriday.EXTENSION_NO_INTRA_EMPHASIS
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:12:16: undefined: blackfriday.EXTENSION_TABLES
```
Can be fixed with
* GO111MODULE=on go get -u github.com/russross/blackfriday@v1.5.2
* GO111MODULE=on go mod tidy
* GO111MODULE=on go mod vendor
## Making a point release
If rclone needs a point release due to some horrendous bug:
Set vars
* BASE_TAG=v1.XX # e.g. v1.52
* NEW_TAG=${BASE_TAG}.Y # e.g. v1.52.1
* echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1
First make the release branch. If this is a second point release then
this will be done already.
* BASE_TAG=v1.XX # eg v1.49
* NEW_TAG=${BASE_TAG}.Y # eg v1.49.1
* echo $BASE_TAG $NEW_TAG # v1.49 v1.49.1
* git branch ${BASE_TAG} ${BASE_TAG}-fixes
* git branch ${BASE_TAG} ${BASE_TAG}-stable
* git co ${BASE_TAG}-stable
* make startstable
Now
* git co ${BASE_TAG}-fixes
* git co ${BASE_TAG}-stable
* git cherry-pick any fixes
* Test (see above)
* make NEXT_VERSION=${NEW_TAG} tag
* edit docs/content/changelog.md
* make TAG=${NEW_TAG} doc
* git commit -a -v -m "Version ${NEW_TAG}"
* git tag -d ${NEW_TAG}
* git tag -s -m "Version ${NEW_TAG}" ${NEW_TAG}
* git push --tags -u origin ${BASE_TAG}-fixes
* Wait for builds to complete
* make BRANCH_PATH= TAG=${NEW_TAG} fetch_binaries
* make TAG=${NEW_TAG} tarball
* make TAG=${NEW_TAG} sign_upload
* make TAG=${NEW_TAG} check_sign
* make TAG=${NEW_TAG} upload
* make TAG=${NEW_TAG} upload_website
* make TAG=${NEW_TAG} upload_github
* NB this overwrites the current beta so we need to do this
* Do the steps as above
* make startstable
* git co master
* make LAST_TAG=${NEW_TAG} startdev
* # cherry pick the changes to the changelog and VERSION
* git checkout ${BASE_TAG}-fixes VERSION docs/content/changelog.md
* git commit --amend
* `#` cherry pick the changes to the changelog - check the diff to make sure it is correct
* git checkout ${BASE_TAG}-stable docs/content/changelog.md
* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
* git push
* Announce!
## Making a manual build of docker
The rclone docker image should autobuild on docker hub. If it doesn't
The rclone docker image should autobuild via GitHub actions. If it doesn't
or needs to be updated then rebuild like this.
See: https://github.com/ilteoood/docker_buildx/issues/19
See: https://github.com/ilteoood/docker_buildx/blob/master/scripts/install_buildx.sh
```
docker build -t rclone/rclone:1.49.1 -t rclone/rclone:1.49 -t rclone/rclone:1 -t rclone/rclone:latest .
docker push rclone/rclone:1.49.1
docker push rclone/rclone:1.49
git co v1.54.1
docker pull golang
export DOCKER_CLI_EXPERIMENTAL=enabled
docker buildx create --name actions_builder --use
docker run --rm --privileged docker/binfmt:820fdd95a9972a5308930a2bdfb8573dd4447ad3
docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
SUPPORTED_PLATFORMS=$(docker buildx inspect --bootstrap | grep 'Platforms:*.*' | cut -d : -f2,3)
echo "Supported platforms: $SUPPORTED_PLATFORMS"
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
docker buildx stop actions_builder
```
### Old build for linux/amd64 only
```
docker pull golang
docker build --rm --ulimit memlock=67108864 -t rclone/rclone:1.52.0 -t rclone/rclone:1.52 -t rclone/rclone:1 -t rclone/rclone:latest .
docker push rclone/rclone:1.52.0
docker push rclone/rclone:1.52
docker push rclone/rclone:1
docker push rclone/rclone:latest
```


@@ -1 +1 @@
v1.50.0
v1.56.0


@@ -1,10 +1,12 @@
package alias
import (
"context"
"errors"
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
@@ -33,7 +35,7 @@ type Options struct {
// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -46,9 +48,5 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if strings.HasPrefix(opt.Remote, name+":") {
return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
}
fsInfo, configName, fsPath, config, err := fs.ConfigFs(opt.Remote)
if err != nil {
return nil, err
}
return fsInfo.NewFs(configName, fspath.JoinRootPath(fsPath, root), config)
return cache.Get(ctx, fspath.JoinRootPath(opt.Remote, root))
}


@@ -11,6 +11,7 @@ import (
_ "github.com/rclone/rclone/backend/local" // pull in test backend
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/stretchr/testify/require"
)
@@ -19,7 +20,7 @@ var (
)
func prepare(t *testing.T, root string) {
config.LoadConfig()
configfile.LoadConfig(context.Background())
// Configure the remote
config.FileSet(remoteName, "type", "alias")
@@ -54,21 +55,22 @@ func TestNewFS(t *testing.T) {
{"four/under four.txt", 9, false},
}},
{"four", "..", "", true, []testEntry{
{"four", -1, true},
{"one%.txt", 6, false},
{"three", -1, true},
{"two.html", 7, false},
{"five", -1, true},
{"under four.txt", 9, false},
}},
{"four", "../three", "", true, []testEntry{
{"", "../../three", "", true, []testEntry{
{"underthree.txt", 9, false},
}},
{"four", "../../five", "", true, []testEntry{
{"underfive.txt", 6, false},
}},
} {
what := fmt.Sprintf("test %d remoteRoot=%q, fsRoot=%q, fsList=%q", testi, test.remoteRoot, test.fsRoot, test.fsList)
remoteRoot, err := filepath.Abs(filepath.FromSlash(path.Join("test/files", test.remoteRoot)))
require.NoError(t, err, what)
prepare(t, remoteRoot)
f, err := fs.NewFs(fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
require.NoError(t, err, what)
gotEntries, err := f.List(context.Background(), test.fsList)
require.NoError(t, err, what)
@@ -90,7 +92,7 @@ func TestNewFS(t *testing.T) {
func TestNewFSNoRemote(t *testing.T) {
prepare(t, "")
f, err := fs.NewFs(fmt.Sprintf("%s:", remoteName))
f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:", remoteName))
require.Error(t, err)
require.Nil(t, f)
@@ -98,7 +100,7 @@ func TestNewFSNoRemote(t *testing.T) {
func TestNewFSInvalidRemote(t *testing.T) {
prepare(t, "not_existing_test_remote:")
f, err := fs.NewFs(fmt.Sprintf("%s:", remoteName))
f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:", remoteName))
require.Error(t, err)
require.Nil(t, f)


@@ -9,13 +9,16 @@ import (
_ "github.com/rclone/rclone/backend/box"
_ "github.com/rclone/rclone/backend/cache"
_ "github.com/rclone/rclone/backend/chunker"
_ "github.com/rclone/rclone/backend/compress"
_ "github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/drive"
_ "github.com/rclone/rclone/backend/dropbox"
_ "github.com/rclone/rclone/backend/fichier"
_ "github.com/rclone/rclone/backend/filefabric"
_ "github.com/rclone/rclone/backend/ftp"
_ "github.com/rclone/rclone/backend/googlecloudstorage"
_ "github.com/rclone/rclone/backend/googlephotos"
_ "github.com/rclone/rclone/backend/hdfs"
_ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/hubic"
_ "github.com/rclone/rclone/backend/jottacloud"
@@ -23,6 +26,7 @@ import (
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/mailru"
_ "github.com/rclone/rclone/backend/mega"
_ "github.com/rclone/rclone/backend/memory"
_ "github.com/rclone/rclone/backend/onedrive"
_ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/pcloud"
@@ -30,10 +34,14 @@ import (
_ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor"
_ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/seafile"
_ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/tardigrade"
_ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/webdav"
_ "github.com/rclone/rclone/backend/yandex"
_ "github.com/rclone/rclone/backend/zoho"
)


@@ -28,18 +28,17 @@ import (
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"golang.org/x/oauth2"
)
const (
enc = encodings.AmazonCloudDrive
folderKind = "FOLDER"
fileKind = "FILE"
statusAvailable = "AVAILABLE"
@@ -71,29 +70,13 @@ func init() {
Prefix: "acd",
Description: "Amazon Drive",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
err := oauthutil.Config("amazon cloud drive", name, m, acdConfig)
Config: func(ctx context.Context, name string, m configmap.Mapper) {
err := oauthutil.Config(ctx, "amazon cloud drive", name, m, acdConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Amazon Application Client ID.",
Required: true,
}, {
Name: config.ConfigClientSecret,
Help: "Amazon Application Client Secret.",
Required: true,
}, {
Name: config.ConfigAuthURL,
Help: "Auth server URL.\nLeave blank to use Amazon's.",
Advanced: true,
}, {
Name: config.ConfigTokenURL,
Help: "Token server url.\nleave blank to use Amazon's.",
Advanced: true,
}, {
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "checkpoint",
Help: "Checkpoint for internal polling (debug).",
Hide: fs.OptionHideBoth,
@@ -137,15 +120,23 @@ which downloads the file through a temporary URL directly from the
underlying S3 storage.`,
Default: defaultTempLinkThreshold,
Advanced: true,
}},
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
Default: (encoder.Base |
encoder.EncodeInvalidUtf8),
}}...),
})
}
// Options defines the configuration for this backend
type Options struct {
Checkpoint string `config:"checkpoint"`
UploadWaitPerGB fs.Duration `config:"upload_wait_per_gb"`
TempLinkThreshold fs.SizeSuffix `config:"templink_threshold"`
Checkpoint string `config:"checkpoint"`
UploadWaitPerGB fs.Duration `config:"upload_wait_per_gb"`
TempLinkThreshold fs.SizeSuffix `config:"templink_threshold"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote acd server
@@ -153,6 +144,7 @@ type Fs struct {
name string // name of this remote
features *fs.Features // optional features
opt Options // options for this Fs
ci *fs.ConfigInfo // global config
c *acd.Client // the connection to the acd server
noAuthClient *http.Client // unauthenticated http client
root string // the path we are working on
@@ -162,7 +154,7 @@ type Fs struct {
tokenRenewer *oauthutil.Renew // renew the token on expiry
}
// Object describes a acd object
// Object describes an acd object
//
// Will definitely have info but maybe not meta
type Object struct {
@@ -213,7 +205,10 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
if resp != nil {
if resp.StatusCode == 401 {
f.tokenRenewer.Invalidate()
@@ -222,7 +217,7 @@ func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
}
// Work around receiving this error sporadically on authentication
//
// HTTP code 403: "403 Forbidden", reponse body: {"message":"Authorization header requires 'Credential' parameter. Authorization header requires 'Signature' parameter. Authorization header requires 'SignedHeaders' parameter. Authorization header requires existence of either a 'X-Amz-Date' or a 'Date' header. Authorization=Bearer"}
// HTTP code 403: "403 Forbidden", response body: {"message":"Authorization header requires 'Credential' parameter. Authorization header requires 'Signature' parameter. Authorization header requires 'SignedHeaders' parameter. Authorization header requires existence of either a 'X-Amz-Date' or a 'Date' header. Authorization=Bearer"}
if resp.StatusCode == 403 && strings.Contains(err.Error(), "Authorization header requires") {
fs.Debugf(f, "403 \"Authorization header requires...\" error received - retry")
return true, err
@@ -248,8 +243,7 @@ func filterRequest(req *http.Request) {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -257,7 +251,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}
root = parsePath(root)
baseClient := fshttp.NewClient(fs.Config)
baseClient := fshttp.NewClient(ctx)
if do, ok := baseClient.Transport.(interface {
SetRequestFilter(f func(req *http.Request))
}); ok {
@@ -265,29 +259,31 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
} else {
fs.Debugf(name+":", "Couldn't add request filter - large file downloads will fail")
}
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, acdConfig, baseClient)
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, acdConfig, baseClient)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Amazon Drive")
}
c := acd.NewClient(oAuthClient)
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
ci: ci,
c: c,
pacer: fs.NewPacer(pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
noAuthClient: fshttp.NewClient(fs.Config),
pacer: fs.NewPacer(ctx, pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
noAuthClient: fshttp.NewClient(ctx),
}
f.features = (&fs.Features{
CaseInsensitive: true,
ReadMimeType: true,
CanHaveEmptyDirectories: true,
}).Fill(f)
}).Fill(ctx, f)
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.getRootInfo()
_, err := f.getRootInfo(ctx)
return err
})
@@ -295,14 +291,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
_, resp, err = f.c.Account.GetEndpoints()
return f.shouldRetry(resp, err)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get endpoints")
}
// Get rootID
rootInfo, err := f.getRootInfo()
rootInfo, err := f.getRootInfo(ctx)
if err != nil || rootInfo.Id == nil {
return nil, errors.Wrap(err, "failed to get root")
}
@@ -344,11 +340,11 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
// getRootInfo gets the root folder info
func (f *Fs) getRootInfo() (rootInfo *acd.Folder, err error) {
func (f *Fs) getRootInfo(ctx context.Context) (rootInfo *acd.Folder, err error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
rootInfo, resp, err = f.c.Nodes.GetRoot()
return f.shouldRetry(resp, err)
return f.shouldRetry(ctx, resp, err)
})
return rootInfo, err
}
@@ -386,8 +382,8 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
var resp *http.Response
var subFolder *acd.Folder
err = f.pacer.Call(func() (bool, error) {
subFolder, resp, err = folder.GetFolder(enc.FromStandardName(leaf))
return f.shouldRetry(resp, err)
subFolder, resp, err = folder.GetFolder(f.opt.Enc.FromStandardName(leaf))
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
if err == acd.ErrorNodeNotFound {
@@ -413,8 +409,8 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
var resp *http.Response
var info *acd.Folder
err = f.pacer.Call(func() (bool, error) {
info, resp, err = folder.CreateFolder(enc.FromStandardName(leaf))
return f.shouldRetry(resp, err)
info, resp, err = folder.CreateFolder(f.opt.Enc.FromStandardName(leaf))
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
//fmt.Printf("...Error %v\n", err)
@@ -435,7 +431,7 @@ type listAllFn func(*acd.Node) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(ctx context.Context, dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
query := "parents:" + dirID
if directoriesOnly {
query += " AND kind:" + folderKind
@@ -456,7 +452,7 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
var resp *http.Response
err = f.pacer.CallNoRetry(func() (bool, error) {
nodes, resp, err = f.c.Nodes.GetNodes(&opts)
return f.shouldRetry(resp, err)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return false, err
@@ -481,7 +477,7 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
if !hasValidParent {
continue
}
*node.Name = enc.ToStandardName(*node.Name)
*node.Name = f.opt.Enc.ToStandardName(*node.Name)
// Store the nodes up in case we have to retry the listing
out = append(out, node)
}
@@ -507,19 +503,15 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
}
maxTries := fs.Config.LowLevelRetries
maxTries := f.ci.LowLevelRetries
var iErr error
for tries := 1; tries <= maxTries; tries++ {
entries = nil
_, err = f.listAll(directoryID, "", false, false, func(node *acd.Node) bool {
_, err = f.listAll(ctx, directoryID, "", false, false, func(node *acd.Node) bool {
remote := path.Join(dir, *node.Name)
switch *node.Kind {
case folderKind:
@@ -536,7 +528,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
entries = append(entries, o)
default:
// ignore ASSET etc
// ignore ASSET, etc.
}
return false
})
@@ -658,7 +650,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
return nil, err
}
// If not create it
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
if err != nil {
return nil, err
}
@@ -671,14 +663,14 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
err = f.pacer.CallNoRetry(func() (bool, error) {
start := time.Now()
f.tokenRenewer.Start()
info, resp, err = folder.Put(in, enc.FromStandardName(leaf))
info, resp, err = folder.Put(in, f.opt.Enc.FromStandardName(leaf))
f.tokenRenewer.Stop()
var ok bool
ok, info, err = f.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
if ok {
return false, nil
}
return f.shouldRetry(resp, err)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
@@ -689,17 +681,11 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
err := f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true)
}
_, err := f.dirCache.FindDir(ctx, dir, true)
return err
}
// Move src to this remote using server side move operations.
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -717,10 +703,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// create the destination directory if necessary
err := f.dirCache.FindRoot(ctx, true)
if err != nil {
return nil, err
}
srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
if err != nil {
return nil, err
@@ -729,7 +711,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if err != nil {
return nil, err
}
err = f.moveNode(srcObj.remote, dstLeaf, dstDirectoryID, srcObj.info, srcLeaf, srcDirectoryID, false)
err = f.moveNode(ctx, srcObj.remote, dstLeaf, dstDirectoryID, srcObj.info, srcLeaf, srcDirectoryID, false)
if err != nil {
return nil, err
}
@@ -740,7 +722,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
dstObj fs.Object
srcErr, dstErr error
)
for i := 1; i <= fs.Config.LowLevelRetries; i++ {
for i := 1; i <= f.ci.LowLevelRetries; i++ {
_, srcErr = srcObj.fs.NewObject(ctx, srcObj.remote) // try reading the object
if srcErr != nil && srcErr != fs.ErrorObjectNotFound {
// exit if error on source
@@ -755,7 +737,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// finished if src not found and dst found
break
}
fs.Debugf(src, "Wait for directory listing to update after move %d/%d", i, fs.Config.LowLevelRetries)
fs.Debugf(src, "Wait for directory listing to update after move %d/%d", i, f.ci.LowLevelRetries)
time.Sleep(1 * time.Second)
}
return dstObj, dstErr
@@ -768,7 +750,7 @@ func (f *Fs) DirCacheFlush() {
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -790,54 +772,24 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return errors.New("can't move root directory")
}
// find the root src directory
err = srcFs.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
// find the root dst directory
if dstRemote != "" {
err = f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
} else {
if f.dirCache.FoundRoot() {
return fs.ErrorDirExists
}
}
// Find ID of dst parent, creating subdirs if necessary
findPath := dstRemote
if dstRemote == "" {
findPath = f.root
}
dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, findPath, true)
dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, dstRemote, true)
if err != nil {
return err
}
// Check destination does not exist
if dstRemote != "" {
_, err = f.dirCache.FindDir(ctx, dstRemote, false)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
return err
} else {
return fs.ErrorDirExists
}
_, err = f.dirCache.FindDir(ctx, dstRemote, false)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
return err
} else {
return fs.ErrorDirExists
}
// Find ID of src parent
findPath = srcRemote
var srcDirectoryID string
if srcRemote == "" {
srcDirectoryID, err = srcFs.dirCache.RootParentID()
} else {
_, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, findPath, false)
}
_, srcDirectoryID, err := srcFs.dirCache.FindPath(ctx, srcRemote, false)
if err != nil {
return err
}
@@ -854,7 +806,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
var jsonStr string
err = srcFs.pacer.Call(func() (bool, error) {
jsonStr, err = srcInfo.GetMetadata()
return srcFs.shouldRetry(nil, err)
return srcFs.shouldRetry(ctx, nil, err)
})
if err != nil {
fs.Debugf(src, "DirMove error: error reading src metadata: %v", err)
@@ -866,7 +818,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return err
}
err = f.moveNode(srcPath, dstLeaf, dstDirectoryID, srcInfo, srcLeaf, srcDirectoryID, true)
err = f.moveNode(ctx, srcPath, dstLeaf, dstDirectoryID, srcInfo, srcLeaf, srcDirectoryID, true)
if err != nil {
return err
}
@@ -883,10 +835,6 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return errors.New("can't purge root directory")
}
dc := f.dirCache
err := dc.FindRoot(ctx, false)
if err != nil {
return err
}
rootID, err := dc.FindDir(ctx, dir, false)
if err != nil {
return err
@@ -895,7 +843,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
if check {
// check directory is empty
empty := true
_, err = f.listAll(rootID, "", false, false, func(node *acd.Node) bool {
_, err = f.listAll(ctx, rootID, "", false, false, func(node *acd.Node) bool {
switch *node.Kind {
case folderKind:
empty = false
@@ -920,7 +868,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = node.Trash()
return f.shouldRetry(resp, err)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return err
@@ -950,7 +898,7 @@ func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -978,8 +926,8 @@ func (f *Fs) Hashes() hash.Set {
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
}
// ------------------------------------------------------------
@@ -1030,7 +978,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
if o.info != nil {
return nil
}
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, o.remote, false)
leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, o.remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return fs.ErrorObjectNotFound
@@ -1041,8 +989,8 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
var resp *http.Response
var info *acd.File
err = o.fs.pacer.Call(func() (bool, error) {
info, resp, err = folder.GetFile(enc.FromStandardName(leaf))
return o.fs.shouldRetry(resp, err)
info, resp, err = folder.GetFile(o.fs.opt.Enc.FromStandardName(leaf))
return o.fs.shouldRetry(ctx, resp, err)
})
if err != nil {
if err == acd.ErrorNodeNotFound {
@@ -1099,7 +1047,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
} else {
in, resp, err = file.OpenTempURLHeaders(o.fs.noAuthClient, headers)
}
return o.fs.shouldRetry(resp, err)
return o.fs.shouldRetry(ctx, resp, err)
})
return in, err
}
@@ -1122,7 +1070,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if ok {
return false, nil
}
return o.fs.shouldRetry(resp, err)
return o.fs.shouldRetry(ctx, resp, err)
})
if err != nil {
return err
@@ -1132,70 +1080,70 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// Remove a node
func (f *Fs) removeNode(info *acd.Node) error {
func (f *Fs) removeNode(ctx context.Context, info *acd.Node) error {
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = info.Trash()
return f.shouldRetry(resp, err)
return f.shouldRetry(ctx, resp, err)
})
return err
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return o.fs.removeNode(o.info)
return o.fs.removeNode(ctx, o.info)
}
// Restore a node
func (f *Fs) restoreNode(info *acd.Node) (newInfo *acd.Node, err error) {
func (f *Fs) restoreNode(ctx context.Context, info *acd.Node) (newInfo *acd.Node, err error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
newInfo, resp, err = info.Restore()
return f.shouldRetry(resp, err)
return f.shouldRetry(ctx, resp, err)
})
return newInfo, err
}
// Changes name of given node
func (f *Fs) renameNode(info *acd.Node, newName string) (newInfo *acd.Node, err error) {
func (f *Fs) renameNode(ctx context.Context, info *acd.Node, newName string) (newInfo *acd.Node, err error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
newInfo, resp, err = info.Rename(enc.FromStandardName(newName))
return f.shouldRetry(resp, err)
newInfo, resp, err = info.Rename(f.opt.Enc.FromStandardName(newName))
return f.shouldRetry(ctx, resp, err)
})
return newInfo, err
}
// Replaces one parent with another, effectively moving the file. Leaves other
// parents untouched. ReplaceParent cannot be used when the file is trashed.
func (f *Fs) replaceParent(info *acd.Node, oldParentID string, newParentID string) error {
func (f *Fs) replaceParent(ctx context.Context, info *acd.Node, oldParentID string, newParentID string) error {
return f.pacer.Call(func() (bool, error) {
resp, err := info.ReplaceParent(oldParentID, newParentID)
return f.shouldRetry(resp, err)
return f.shouldRetry(ctx, resp, err)
})
}
// Adds one additional parent to object.
func (f *Fs) addParent(info *acd.Node, newParentID string) error {
func (f *Fs) addParent(ctx context.Context, info *acd.Node, newParentID string) error {
return f.pacer.Call(func() (bool, error) {
resp, err := info.AddParent(newParentID)
return f.shouldRetry(resp, err)
return f.shouldRetry(ctx, resp, err)
})
}
// Remove given parent from object, leaving the other possible
// parents untouched. Object can end up having no parents.
func (f *Fs) removeParent(info *acd.Node, parentID string) error {
func (f *Fs) removeParent(ctx context.Context, info *acd.Node, parentID string) error {
return f.pacer.Call(func() (bool, error) {
resp, err := info.RemoveParent(parentID)
return f.shouldRetry(resp, err)
return f.shouldRetry(ctx, resp, err)
})
}
// moveNode moves the node given from the srcLeaf,srcDirectoryID to
// the dstLeaf,dstDirectoryID
func (f *Fs) moveNode(name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, srcLeaf, srcDirectoryID string, useDirErrorMsgs bool) (err error) {
func (f *Fs) moveNode(ctx context.Context, name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, srcLeaf, srcDirectoryID string, useDirErrorMsgs bool) (err error) {
// fs.Debugf(name, "moveNode dst(%q,%s) <- src(%q,%s)", dstLeaf, dstDirectoryID, srcLeaf, srcDirectoryID)
cantMove := fs.ErrorCantMove
if useDirErrorMsgs {
@@ -1209,7 +1157,7 @@ func (f *Fs) moveNode(name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, s
if srcLeaf != dstLeaf {
// fs.Debugf(name, "renaming")
_, err = f.renameNode(srcInfo, dstLeaf)
_, err = f.renameNode(ctx, srcInfo, dstLeaf)
if err != nil {
fs.Debugf(name, "Move: quick path rename failed: %v", err)
goto OnConflict
@@ -1217,7 +1165,7 @@ func (f *Fs) moveNode(name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, s
}
if srcDirectoryID != dstDirectoryID {
// fs.Debugf(name, "trying parent replace: %s -> %s", oldParentID, newParentID)
err = f.replaceParent(srcInfo, srcDirectoryID, dstDirectoryID)
err = f.replaceParent(ctx, srcInfo, srcDirectoryID, dstDirectoryID)
if err != nil {
fs.Debugf(name, "Move: quick path parent replace failed: %v", err)
return err
@@ -1230,13 +1178,13 @@ OnConflict:
fs.Debugf(name, "Could not directly rename file, presumably because there was a file with the same name already. Instead, the file will now be trashed where such operations do not cause errors. It will be restored to the correct parent after. If any of the subsequent calls fails, the rename/move will be in an invalid state.")
// fs.Debugf(name, "Trashing file")
err = f.removeNode(srcInfo)
err = f.removeNode(ctx, srcInfo)
if err != nil {
fs.Debugf(name, "Move: remove node failed: %v", err)
return err
}
// fs.Debugf(name, "Renaming file")
_, err = f.renameNode(srcInfo, dstLeaf)
_, err = f.renameNode(ctx, srcInfo, dstLeaf)
if err != nil {
fs.Debugf(name, "Move: rename node failed: %v", err)
return err
@@ -1244,19 +1192,19 @@ OnConflict:
// note: replacing parent is forbidden by API, modifying them individually is
// okay though
// fs.Debugf(name, "Adding target parent")
err = f.addParent(srcInfo, dstDirectoryID)
err = f.addParent(ctx, srcInfo, dstDirectoryID)
if err != nil {
fs.Debugf(name, "Move: addParent failed: %v", err)
return err
}
// fs.Debugf(name, "removing original parent")
err = f.removeParent(srcInfo, srcDirectoryID)
err = f.removeParent(ctx, srcInfo, srcDirectoryID)
if err != nil {
fs.Debugf(name, "Move: removeParent failed: %v", err)
return err
}
// fs.Debugf(name, "Restoring")
_, err = f.restoreNode(srcInfo)
_, err = f.restoreNode(ctx, srcInfo)
if err != nil {
fs.Debugf(name, "Move: restoreNode node failed: %v", err)
return err
@@ -1357,7 +1305,7 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoin
if len(node.Parents) > 0 {
if path, ok := f.dirCache.GetInv(node.Parents[0]); ok {
// and append the drive file name to compute the full file name
name := enc.ToStandardName(*node.Name)
name := f.opt.Enc.ToStandardName(*node.Name)
if len(path) > 0 {
path = path + "/" + name
} else {

File diff suppressed because it is too large.


@@ -1,4 +1,4 @@
// +build !plan9,!solaris
// +build !plan9,!solaris,!js,go1.14
package azureblob
@@ -16,3 +16,20 @@ func (f *Fs) InternalTest(t *testing.T) {
enabled = f.Features().GetTier
assert.True(t, enabled)
}
func TestIncrement(t *testing.T) {
for _, test := range []struct {
in []byte
want []byte
}{
{[]byte{0, 0, 0, 0}, []byte{1, 0, 0, 0}},
{[]byte{0xFE, 0, 0, 0}, []byte{0xFF, 0, 0, 0}},
{[]byte{0xFF, 0, 0, 0}, []byte{0, 1, 0, 0}},
{[]byte{0, 1, 0, 0}, []byte{1, 1, 0, 0}},
{[]byte{0xFF, 0xFF, 0xFF, 0xFE}, []byte{0, 0, 0, 0xFF}},
{[]byte{0xFF, 0xFF, 0xFF, 0xFF}, []byte{0, 0, 0, 0}},
} {
increment(test.in)
assert.Equal(t, test.want, test.in)
}
}
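
The table above exercises an `increment` helper defined elsewhere in the azureblob backend and not shown in this diff. As a rough sketch only, a little-endian increment with carry that would satisfy these cases could look like this (the function body here is an illustration, not the code from the commit):

```
// increment treats xs as a little-endian counter and adds one to it,
// carrying into the next byte on overflow. A slice of all 0xFF bytes
// wraps around to all zeroes, as the final test case above expects.
func increment(xs []byte) {
	for i, digit := range xs {
		newDigit := digit + 1
		xs[i] = newDigit
		if newDigit >= digit {
			// no overflow, so nothing to carry
			break
		}
	}
}
```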


@@ -1,14 +1,16 @@
// Test AzureBlob filesystem interface
// +build !plan9,!solaris
// +build !plan9,!solaris,!js,go1.14
package azureblob
import (
"context"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
)
// TestIntegration runs integration tests against the remote
@@ -27,11 +29,36 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
)
// TestServicePrincipalFileSuccess checks that, given a proper JSON file, we can create a token.
func TestServicePrincipalFileSuccess(t *testing.T) {
ctx := context.TODO()
credentials := `
{
"appId": "my application (client) ID",
"password": "my secret",
"tenant": "my active directory tenant ID"
}
`
tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
if assert.NoError(t, err) {
assert.NotNil(t, tokenRefresher)
}
}
// TestServicePrincipalFileFailure checks that, given a JSON file with a missing secret, it returns an error.
func TestServicePrincipalFileFailure(t *testing.T) {
ctx := context.TODO()
credentials := `
{
"appId": "my application (client) ID",
"tenant": "my active directory tenant ID"
}
`
_, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
assert.Error(t, err)
assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
}


@@ -1,6 +1,6 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build plan9 solaris
// +build plan9 solaris js !go1.14
package azureblob

backend/azureblob/imds.go Normal file

@@ -0,0 +1,137 @@
// +build !plan9,!solaris,!js,go1.14
package azureblob
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fshttp"
)
const (
azureResource = "https://storage.azure.com"
imdsAPIVersion = "2018-02-01"
msiEndpointDefault = "http://169.254.169.254/metadata/identity/oauth2/token"
)
// This custom type is used to add the port the test server has bound to
// to the request context.
type testPortKey string
type msiIdentifierType int
const (
msiClientID msiIdentifierType = iota
msiObjectID
msiResourceID
)
type userMSI struct {
Type msiIdentifierType
Value string
}
type httpError struct {
Response *http.Response
}
func (e httpError) Error() string {
return fmt.Sprintf("HTTP error %v (%v)", e.Response.StatusCode, e.Response.Status)
}
// GetMSIToken attempts to obtain an MSI token from the Azure Instance
// Metadata Service.
func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
// Attempt to get an MSI token; silently continue if unsuccessful.
// This code has been lovingly stolen from azcopy's OAuthTokenManager.
result := adal.Token{}
req, err := http.NewRequestWithContext(ctx, "GET", msiEndpointDefault, nil)
if err != nil {
fs.Debugf(nil, "Failed to create request: %v", err)
return result, err
}
params := req.URL.Query()
params.Set("resource", azureResource)
params.Set("api-version", imdsAPIVersion)
// Specify user-assigned identity if requested.
if identity != nil {
switch identity.Type {
case msiClientID:
params.Set("client_id", identity.Value)
case msiObjectID:
params.Set("object_id", identity.Value)
case msiResourceID:
params.Set("mi_res_id", identity.Value)
default:
// If this happens, the calling function and this one don't agree on
// what valid ID types exist.
return result, fmt.Errorf("unknown MSI identity type specified")
}
}
req.URL.RawQuery = params.Encode()
// The Metadata header is required by all calls to IMDS.
req.Header.Set("Metadata", "true")
// If this function is run in a test, query the test server instead of IMDS.
testPort, isTest := ctx.Value(testPortKey("testPort")).(int)
if isTest {
req.URL.Host = fmt.Sprintf("localhost:%d", testPort)
req.Host = req.URL.Host
}
// Send request
httpClient := fshttp.NewClient(ctx)
resp, err := httpClient.Do(req)
if err != nil {
return result, errors.Wrap(err, "MSI is not enabled on this VM")
}
defer func() { // resp and Body should not be nil
_, err = io.Copy(ioutil.Discard, resp.Body)
if err != nil {
fs.Debugf(nil, "Unable to drain IMDS response: %v", err)
}
err = resp.Body.Close()
if err != nil {
fs.Debugf(nil, "Unable to close IMDS response: %v", err)
}
}()
// Check if the status code indicates success
// The request returns 200 currently, add 201 and 202 as well for possible extension.
switch resp.StatusCode {
case 200, 201, 202:
break
default:
body, _ := ioutil.ReadAll(resp.Body)
fs.Errorf(nil, "Couldn't obtain OAuth token from IMDS; server returned status code %d and body: %v", resp.StatusCode, string(body))
return result, httpError{Response: resp}
}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return result, errors.Wrap(err, "Couldn't read IMDS response")
}
// Remove BOM, if any. azcopy does this so I'm following along.
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
// This would be a good place to persist the token if a large number of rclone
// invocations are being made in a short amount of time. If the token is
// persisted, the azureblob code will need to check for expiry before every
// storage API call.
err = json.Unmarshal(b, &result)
if err != nil {
return result, errors.Wrap(err, "Couldn't unmarshal IMDS response")
}
return result, nil
}
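
As a usage note, a caller inside this package might drive GetMSIToken along the following lines. This is only a sketch: the helper name and the client ID value are illustrative and do not appear in the diff.

```
// exampleFetchMSIToken is a hypothetical in-package caller showing how a
// user-assigned identity could be selected by client ID (value made up).
func exampleFetchMSIToken(ctx context.Context) error {
	identity := &userMSI{Type: msiClientID, Value: "00000000-0000-0000-0000-000000000000"}
	token, err := GetMSIToken(ctx, identity)
	if err != nil {
		return err
	}
	fs.Debugf(nil, "got MSI token, expires on %v", token.ExpiresOn)
	return nil
}
```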


@@ -0,0 +1,117 @@
// +build !plan9,!solaris,!js,go1.14
package azureblob
import (
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func handler(t *testing.T, actual *map[string]string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
require.NoError(t, err)
parameters := r.URL.Query()
(*actual)["path"] = r.URL.Path
(*actual)["Metadata"] = r.Header.Get("Metadata")
(*actual)["method"] = r.Method
for paramName := range parameters {
(*actual)[paramName] = parameters.Get(paramName)
}
// Make response.
response := adal.Token{}
responseBytes, err := json.Marshal(response)
require.NoError(t, err)
_, err = w.Write(responseBytes)
require.NoError(t, err)
}
}
func TestManagedIdentity(t *testing.T) {
// test user-assigned identity specifiers to use
testMSIClientID := "d859b29f-5c9c-42f8-a327-ec1bc6408d79"
testMSIObjectID := "9ffeb650-3ca0-4278-962b-5a38d520591a"
testMSIResourceID := "/subscriptions/fe714c49-b8a4-4d49-9388-96a20daa318f/resourceGroups/somerg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/someidentity"
tests := []struct {
identity *userMSI
identityParameterName string
expectedAbsent []string
}{
{&userMSI{msiClientID, testMSIClientID}, "client_id", []string{"object_id", "mi_res_id"}},
{&userMSI{msiObjectID, testMSIObjectID}, "object_id", []string{"client_id", "mi_res_id"}},
{&userMSI{msiResourceID, testMSIResourceID}, "mi_res_id", []string{"object_id", "client_id"}},
{nil, "(default)", []string{"object_id", "client_id", "mi_res_id"}},
}
alwaysExpected := map[string]string{
"path": "/metadata/identity/oauth2/token",
"resource": "https://storage.azure.com",
"Metadata": "true",
"api-version": "2018-02-01",
"method": "GET",
}
for _, test := range tests {
actual := make(map[string]string, 10)
testServer := httptest.NewServer(handler(t, &actual))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, test.identity)
require.NoError(t, err)
// Validate expected query parameters present
expected := make(map[string]string)
for k, v := range alwaysExpected {
expected[k] = v
}
if test.identity != nil {
expected[test.identityParameterName] = test.identity.Value
}
for key := range expected {
value, exists := actual[key]
if assert.Truef(t, exists, "test of %s: query parameter %s was not passed",
test.identityParameterName, key) {
assert.Equalf(t, expected[key], value,
"test of %s: parameter %s has incorrect value", test.identityParameterName, key)
}
}
// Validate unexpected query parameters absent
for _, key := range test.expectedAbsent {
_, exists := actual[key]
assert.Falsef(t, exists, "query parameter %s was unexpectedly passed", key)
}
}
}
func errorHandler(resultCode int) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Test error generated", resultCode)
}
}
func TestIMDSErrors(t *testing.T) {
errorCodes := []int{404, 429, 500}
for _, code := range errorCodes {
testServer := httptest.NewServer(errorHandler(code))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, nil)
require.Error(t, err)
httpErr, ok := err.(httpError)
require.Truef(t, ok, "HTTP error %d did not result in an httpError object", code)
assert.Equalf(t, httpErr.Response.StatusCode, code, "desired error %d but didn't get it", code)
}
}


@@ -337,3 +337,11 @@ type CopyFileRequest struct {
Info map[string]string `json:"fileInfo,omitempty"` // This field stores the metadata that will be stored with the file. (REPLACE only)
DestBucketID string `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used
}
// CopyPartRequest is the request for b2_copy_part - the response is UploadPartResponse
type CopyPartRequest struct {
SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
LargeFileID string `json:"largeFileId"` // The ID of the large file the part will belong to, as returned by b2_start_large_file.
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
}
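
For reference, a small sketch of how this request type serializes; the IDs and byte range below are illustrative placeholders, not values taken from the diff.

```
package main

import (
	"encoding/json"
	"fmt"

	"github.com/rclone/rclone/backend/b2/api"
)

func main() {
	// Field values are illustrative placeholders only; real IDs come from
	// b2_start_large_file and the source object listing.
	req := api.CopyPartRequest{
		SourceID:    "exampleSourceFileID",
		LargeFileID: "exampleLargeFileID",
		PartNumber:  1,
		Range:       "bytes=0-99999999",
	}
	body, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println(string(body))
	// Output resembles:
	// {
	//   "sourceFileId": "exampleSourceFileID",
	//   "largeFileId": "exampleLargeFileID",
	//   "partNumber": 1,
	//   "range": "bytes=0-99999999"
	// }
}
```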


@@ -23,20 +23,20 @@ import (
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/rest"
)
const enc = encodings.B2
const (
defaultEndpoint = "https://api.backblazeb2.com"
headerPrefix = "x-bz-info-" // lower case as that is what the server returns
@@ -44,8 +44,10 @@ const (
timeHeader = headerPrefix + timeKey
sha1Key = "large_file_sha1"
sha1Header = "X-Bz-Content-Sha1"
sha1InfoHeader = headerPrefix + sha1Key
testModeHeader = "X-Bz-Test-Mode"
idHeader = "X-Bz-File-Id"
nameHeader = "X-Bz-File-Name"
timestampHeader = "X-Bz-Upload-Timestamp"
retryAfterHeader = "Retry-After"
minSleep = 10 * time.Millisecond
maxSleep = 5 * time.Minute
@@ -55,6 +57,9 @@ const (
minChunkSize = 5 * fs.MebiByte
defaultChunkSize = 96 * fs.MebiByte
defaultUploadCutoff = 200 * fs.MebiByte
largeFileCopyCutoff = 4 * fs.GibiByte // 5E9 is the max
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
memoryPoolUseMmap = false
)
// Globals
@@ -114,6 +119,16 @@ Files above this size will be uploaded in chunks of "--b2-chunk-size".
This value should be set no larger than 4.657GiB (== 5GB).`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "copy_cutoff",
Help: `Cutoff for switching to multipart copy
Any files larger than this that need to be server-side copied will be
copied in chunks of this size.
The minimum is 0 and the maximum is 4.6GB.`,
Default: largeFileCopyCutoff,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Upload chunk size. Must fit in memory.
@@ -125,8 +140,13 @@ minimum size.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files`,
Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files
Normally rclone will calculate the SHA1 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
for data integrity checking but can cause long delays for large files
to start uploading.`,
Default: false,
Advanced: true,
}, {
@@ -135,7 +155,9 @@ minimum size.`,
This is usually set to a Cloudflare CDN URL as Backblaze offers
free egress for data downloaded through the Cloudflare network.
This is probably only useful for a public bucket.
Rclone works with private buckets by sending an "Authorization" header.
If the custom endpoint rewrites the requests for authentication,
e.g., in Cloudflare Workers, this header needs to be handled properly.
Leave blank if you want to use the endpoint provided by Backblaze.`,
Advanced: true,
}, {
@@ -146,23 +168,49 @@ The duration before the download authorization token will expire.
The minimum value is 1 second. The maximum value is one week.`,
Default: fs.Duration(7 * 24 * time.Hour),
Advanced: true,
}, {
Name: "memory_pool_flush_time",
Default: memoryPoolFlushTime,
Advanced: true,
Help: `How often internal memory buffer pools will be flushed.
Uploads which require additional buffers (e.g. multipart) will use the memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.`,
}, {
Name: "memory_pool_use_mmap",
Default: memoryPoolUseMmap,
Advanced: true,
Help: `Whether to use mmap buffers in internal memory pool.`,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// See: https://www.backblaze.com/b2/docs/files.html
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
// FIXME: allow /, but not leading, trailing or double
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8),
}},
})
}
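The new copy_cutoff option registered above drives the choice between a single b2_copy_file call and a chunked copy made of b2_copy_part calls, with the cutoff doubling as the part size (f.copy further down passes opt.CopyCutoff to newLargeUpload). A minimal standalone sketch of the byte ranges such a chunked copy would request - copyRanges is a hypothetical helper for illustration, not rclone code:

package main

import "fmt"

// copyRanges splits a copy of `size` bytes into b2_copy_part style ranges of
// at most `cutoff` bytes each (the last part may be shorter).
func copyRanges(size, cutoff int64) []string {
	var ranges []string
	for offset := int64(0); offset < size; offset += cutoff {
		end := offset + cutoff - 1
		if end >= size {
			end = size - 1
		}
		ranges = append(ranges, fmt.Sprintf("bytes=%d-%d", offset, end))
	}
	return ranges
}

func main() {
	// A 10 GiB object with the default 4 GiB cutoff needs three parts.
	for _, r := range copyRanges(10<<30, 4<<30) {
		fmt.Println(r)
	}
}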
// Options defines the configuration for this backend
type Options struct {
Account string `config:"account"`
Key string `config:"key"`
Endpoint string `config:"endpoint"`
TestMode string `config:"test_mode"`
Versions bool `config:"versions"`
HardDelete bool `config:"hard_delete"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DisableCheckSum bool `config:"disable_checksum"`
DownloadURL string `config:"download_url"`
DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"`
Account string `config:"account"`
Key string `config:"key"`
Endpoint string `config:"endpoint"`
TestMode string `config:"test_mode"`
Versions bool `config:"versions"`
HardDelete bool `config:"hard_delete"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DisableCheckSum bool `config:"disable_checksum"`
DownloadURL string `config:"download_url"`
DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"`
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote b2 server
@@ -170,6 +218,7 @@ type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed config options
ci *fs.ConfigInfo // global config
features *fs.Features // optional features
srv *rest.Client // the connection to the b2 server
rootBucket string // bucket part of root (if any)
@@ -184,7 +233,8 @@ type Fs struct {
uploads map[string][]*api.GetUploadURLResponse // Upload URLs by bucketID
authMu sync.Mutex // lock for authorizing the account
pacer *fs.Pacer // To pace and retry the API calls
bufferTokens chan []byte // control concurrency of multipart uploads
uploadToken *pacer.TokenDispenser // control concurrency
pool *pool.Pool // memory pool
}
// Object describes a b2 object
@@ -245,7 +295,7 @@ func (o *Object) split() (bucket, bucketPath string) {
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
401, // Unauthorized (eg "Token has expired")
401, // Unauthorized (e.g. "Token has expired")
408, // Request Timeout
429, // Rate exceeded.
500, // Get occasional 500 Internal Server Error
@@ -255,7 +305,10 @@ var retryErrorCodes = []int{
// shouldRetryNoAuth returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetryNoReauth(resp *http.Response, err error) (bool, error) {
func (f *Fs) shouldRetryNoReauth(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
// For 429 or 503 errors look at the Retry-After: header and
// set the retry appropriately, starting with a minimum of 1
// second if it isn't set.
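A standalone sketch of the Retry-After handling the comment above describes - this is not rclone's helper, just the shape of the logic, and it assumes the header carries a plain number of seconds (the header may also be an HTTP date, which this sketch ignores):

import (
	"net/http"
	"strconv"
	"time"
)

// retryDelay derives a pause from a 429/503 response, with a 1 second floor.
func retryDelay(resp *http.Response) time.Duration {
	delay := time.Second
	if resp == nil || (resp.StatusCode != 429 && resp.StatusCode != 503) {
		return delay
	}
	if s := resp.Header.Get("Retry-After"); s != "" {
		if secs, err := strconv.Atoi(s); err == nil && secs > 1 {
			delay = time.Duration(secs) * time.Second
		}
	}
	return delay
}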
@@ -286,7 +339,7 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
}
return true, err
}
return f.shouldRetryNoReauth(resp, err)
return f.shouldRetryNoReauth(ctx, resp, err)
}
// errorHandler parses a non 2xx error response into an error
@@ -320,7 +373,6 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
f.fillBufferTokens() // reset the buffer tokens
}
return
}
@@ -347,14 +399,17 @@ func (f *Fs) setRoot(root string) {
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if opt.UploadCutoff < opt.ChunkSize {
opt.UploadCutoff = opt.ChunkSize
fs.Infof(nil, "b2: raising upload cutoff to chunk size: %v", opt.UploadCutoff)
}
err = checkUploadCutoff(opt, opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "b2: upload cutoff")
@@ -372,15 +427,24 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if opt.Endpoint == "" {
opt.Endpoint = defaultEndpoint
}
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
opt: *opt,
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
ci: ci,
srv: rest.NewClient(fshttp.NewClient(ctx)).SetErrorHandler(errorHandler),
cache: bucket.NewCache(),
_bucketID: make(map[string]string, 1),
_bucketType: make(map[string]string, 1),
uploads: make(map[string][]*api.GetUploadURLResponse),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
ci.Transfers,
opt.MemoryPoolUseMmap,
),
}
f.setRoot(root)
f.features = (&fs.Features{
@@ -388,21 +452,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
}).Fill(f)
}).Fill(ctx, f)
// Set the test flag if required
if opt.TestMode != "" {
testMode := strings.TrimSpace(opt.TestMode)
f.srv.SetHeader(testModeHeader, testMode)
fs.Debugf(f, "Setting test header \"%s: %s\"", testModeHeader, testMode)
}
f.fillBufferTokens()
err = f.authorizeAccount(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to authorize account")
}
// If this is a key limited to a single bucket, it must exist already
if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
allowedBucket := enc.ToStandardName(f.info.Allowed.BucketName)
allowedBucket := f.opt.Enc.ToStandardName(f.info.Allowed.BucketName)
if allowedBucket == "" {
return nil, errors.New("bucket that application key is restricted to no longer exists")
}
@@ -419,12 +482,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.setRoot(newRoot)
_, err := f.NewObject(ctx, leaf)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
f.setRoot(oldRoot)
return f, nil
}
return nil, err
// File doesn't exist so return old f
f.setRoot(oldRoot)
return f, nil
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
@@ -447,7 +507,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &f.info)
return f.shouldRetryNoReauth(resp, err)
return f.shouldRetryNoReauth(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to authenticate")
@@ -518,32 +578,25 @@ func (f *Fs) clearUploadURL(bucketID string) {
f.uploadMu.Unlock()
}
// Fill up (or reset) the buffer tokens
func (f *Fs) fillBufferTokens() {
f.bufferTokens = make(chan []byte, fs.Config.Transfers)
for i := 0; i < fs.Config.Transfers; i++ {
f.bufferTokens <- nil
// getBuf gets a buffer of f.opt.ChunkSize and an upload token
//
// If noBuf is set then it just gets an upload token
func (f *Fs) getBuf(noBuf bool) (buf []byte) {
f.uploadToken.Get()
if !noBuf {
buf = f.pool.Get()
}
}
// getUploadBlock gets a block from the pool of size chunkSize
func (f *Fs) getUploadBlock() []byte {
buf := <-f.bufferTokens
if buf == nil {
buf = make([]byte, f.opt.ChunkSize)
}
// fs.Debugf(f, "Getting upload block %p", buf)
return buf
}
// putUploadBlock returns a block to the pool of size chunkSize
func (f *Fs) putUploadBlock(buf []byte) {
buf = buf[:cap(buf)]
if len(buf) != int(f.opt.ChunkSize) {
panic("bad blocksize returned to pool")
// putBuf returns a buffer to the memory pool and an upload token
//
// If noBuf is set then it just returns the upload token
func (f *Fs) putBuf(buf []byte, noBuf bool) {
if !noBuf {
f.pool.Put(buf)
}
// fs.Debugf(f, "Returning upload block %p", buf)
f.bufferTokens <- buf
f.uploadToken.Put()
}
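The getBuf/putBuf pair above combines two things: a token from pacer.TokenDispenser to bound how many chunks are in flight, and a reusable chunk buffer from lib/pool. A stripped-down standalone sketch of that shape using only the standard library - bufPool is hypothetical, and rclone's real pool additionally flushes idle buffers and can use mmap:

// bufPool bounds concurrency with a token channel and recycles chunk buffers.
type bufPool struct {
	tokens chan struct{} // limits buffers in flight
	free   chan []byte   // recycled buffers
	size   int
}

func newBufPool(concurrency, chunkSize int) *bufPool {
	return &bufPool{
		tokens: make(chan struct{}, concurrency),
		free:   make(chan []byte, concurrency),
		size:   chunkSize,
	}
}

// get blocks until a concurrency token is free, then hands out a buffer,
// reusing an old one if available.
func (p *bufPool) get() []byte {
	p.tokens <- struct{}{}
	select {
	case buf := <-p.free:
		return buf
	default:
		return make([]byte, p.size)
	}
}

// put returns the buffer for reuse and releases the token.
func (p *bufPool) put(buf []byte) {
	select {
	case p.free <- buf[:cap(buf)]:
	default: // pool full - let the GC have it
	}
	<-p.tokens
}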
// Return an Object from a path
@@ -623,11 +676,11 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
var request = api.ListFileNamesRequest{
BucketID: bucketID,
MaxFileCount: chunkSize,
Prefix: enc.FromStandardPath(directory),
Prefix: f.opt.Enc.FromStandardPath(directory),
Delimiter: delimiter,
}
if directory != "" {
request.StartFileName = enc.FromStandardPath(directory)
request.StartFileName = f.opt.Enc.FromStandardPath(directory)
}
opts := rest.Opts{
Method: "POST",
@@ -647,7 +700,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
for i := range response.Files {
file := &response.Files[i]
file.Name = enc.ToStandardPath(file.Name)
file.Name = f.opt.Enc.ToStandardPath(file.Name)
// Finish if file name no longer has prefix
if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
return nil
@@ -658,8 +711,8 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
remote := file.Name[len(prefix):]
// Check for directory
isDirectory := strings.HasSuffix(remote, "/")
if isDirectory {
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
if isDirectory && len(remote) > 1 {
remote = remote[:len(remote)-1]
}
if addBucket {
@@ -848,7 +901,7 @@ func (f *Fs) listBucketsToFn(ctx context.Context, fn listBucketFn) error {
f._bucketType = make(map[string]string, 1)
for i := range response.Buckets {
bucket := &response.Buckets[i]
bucket.Name = enc.ToStandardName(bucket.Name)
bucket.Name = f.opt.Enc.ToStandardName(bucket.Name)
f.cache.MarkOK(bucket.Name)
f._bucketID[bucket.Name] = bucket.ID
f._bucketType[bucket.Name] = bucket.Type
@@ -970,7 +1023,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
}
var request = api.CreateBucketRequest{
AccountID: f.info.AccountID,
Name: enc.FromStandardName(bucket),
Name: f.opt.Enc.FromStandardName(bucket),
Type: "allPrivate",
}
var response api.Bucket
@@ -1054,7 +1107,7 @@ func (f *Fs) hide(ctx context.Context, bucket, bucketPath string) error {
}
var request = api.HideFileRequest{
BucketID: bucketID,
Name: enc.FromStandardPath(bucketPath),
Name: f.opt.Enc.FromStandardPath(bucketPath),
}
var response api.File
err = f.pacer.Call(func() (bool, error) {
@@ -1082,7 +1135,7 @@ func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
}
var request = api.DeleteFileRequest{
ID: ID,
Name: enc.FromStandardPath(Name),
Name: f.opt.Enc.FromStandardPath(Name),
}
var response api.File
err := f.pacer.Call(func() (bool, error) {
@@ -1100,7 +1153,8 @@ func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
// if oldOnly is true then it deletes only non current files.
//
// Implemented here so we can make sure we delete old versions.
func (f *Fs) purge(ctx context.Context, bucket, directory string, oldOnly bool) error {
func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
bucket, directory := f.split(dir)
if bucket == "" {
return errors.New("can't purge from root")
}
@@ -1124,10 +1178,10 @@ func (f *Fs) purge(ctx context.Context, bucket, directory string, oldOnly bool)
}
// Delete files in parallel, up to ci.Transfers at a time
toBeDeleted := make(chan *api.File, fs.Config.Transfers)
toBeDeleted := make(chan *api.File, f.ci.Transfers)
var wg sync.WaitGroup
wg.Add(fs.Config.Transfers)
for i := 0; i < fs.Config.Transfers; i++ {
wg.Add(f.ci.Transfers)
for i := 0; i < f.ci.Transfers; i++ {
go func() {
defer wg.Done()
for object := range toBeDeleted {
@@ -1139,7 +1193,7 @@ func (f *Fs) purge(ctx context.Context, bucket, directory string, oldOnly bool)
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
err = f.deleteByID(ctx, object.ID, object.Name)
checkErr(err)
tr.Done(err)
tr.Done(ctx, err)
}
}()
}
@@ -1167,7 +1221,7 @@ func (f *Fs) purge(ctx context.Context, bucket, directory string, oldOnly bool)
toBeDeleted <- object
}
last = remote
tr.Done(nil)
tr.Done(ctx, nil)
}
return nil
}))
@@ -1175,22 +1229,79 @@ func (f *Fs) purge(ctx context.Context, bucket, directory string, oldOnly bool)
wg.Wait()
if !oldOnly {
checkErr(f.Rmdir(ctx, ""))
checkErr(f.Rmdir(ctx, dir))
}
return errReturn
}
// Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge(ctx context.Context) error {
return f.purge(ctx, f.rootBucket, f.rootDirectory, false)
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purge(ctx, dir, false)
}
// CleanUp deletes all the hidden files.
func (f *Fs) CleanUp(ctx context.Context) error {
return f.purge(ctx, f.rootBucket, f.rootDirectory, true)
return f.purge(ctx, "", true)
}
// Copy src to this remote using server side copy operations.
// copy does a server-side copy from dstObj <- srcObj
//
// If newInfo is nil then the metadata will be copied otherwise it
// will be replaced with newInfo
func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *api.File) (err error) {
if srcObj.size >= int64(f.opt.CopyCutoff) {
if newInfo == nil {
newInfo, err = srcObj.getMetaData(ctx)
if err != nil {
return err
}
}
up, err := f.newLargeUpload(ctx, dstObj, nil, srcObj, f.opt.CopyCutoff, true, newInfo)
if err != nil {
return err
}
return up.Upload(ctx)
}
dstBucket, dstPath := dstObj.split()
err = f.makeBucket(ctx, dstBucket)
if err != nil {
return err
}
destBucketID, err := f.getBucketID(ctx, dstBucket)
if err != nil {
return err
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_copy_file",
}
var request = api.CopyFileRequest{
SourceID: srcObj.id,
Name: f.opt.Enc.FromStandardPath(dstPath),
DestBucketID: destBucketID,
}
if newInfo == nil {
request.MetadataDirective = "COPY"
} else {
request.MetadataDirective = "REPLACE"
request.ContentType = newInfo.ContentType
request.Info = newInfo.Info
}
var response api.FileInfo
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return err
}
return dstObj.decodeMetaDataFileInfo(&response)
}
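For reference, the two ways the rest of this file ends up calling the helper above - Copy passes nil so the metadata travels with the file, while SetModTime copies an object onto itself with replacement info so only the metadata changes (both calls appear later in this diff):

// server-side copy to a new name, metadata carried over (MetadataDirective=COPY)
err := f.copy(ctx, dstObj, srcObj, nil)

// copy an object onto itself with new info, metadata replaced (MetadataDirective=REPLACE)
err = o.fs.copy(ctx, o, o, info)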
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -1200,47 +1311,21 @@ func (f *Fs) CleanUp(ctx context.Context) error {
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstBucket, dstPath := f.split(remote)
err := f.makeBucket(ctx, dstBucket)
if err != nil {
return nil, err
}
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
destBucketID, err := f.getBucketID(ctx, dstBucket)
if err != nil {
return nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_copy_file",
}
var request = api.CopyFileRequest{
SourceID: srcObj.id,
Name: enc.FromStandardPath(dstPath),
MetadataDirective: "COPY",
DestBucketID: destBucketID,
}
var response api.FileInfo
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
}
o := &Object{
// Temporary Object under construction
dstObj := &Object{
fs: f,
remote: remote,
}
err = o.decodeMetaDataFileInfo(&response)
err := f.copy(ctx, dstObj, srcObj, nil)
if err != nil {
return nil, err
}
return o, nil
return dstObj, nil
}
// Hashes returns the supported hash sets.
@@ -1268,7 +1353,7 @@ func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string
}
var request = api.GetDownloadAuthorizationRequest{
BucketID: bucketID,
FileNamePrefix: enc.FromStandardPath(path.Join(f.root, remote)),
FileNamePrefix: f.opt.Enc.FromStandardPath(path.Join(f.root, remote)),
ValidDurationInSeconds: validDurationInSeconds,
}
var response api.GetDownloadAuthorizationResponse
@@ -1283,7 +1368,7 @@ func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string
}
// PublicLink returns a link for downloading without account
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
bucket, bucketPath := f.split(remote)
var RootURL string
if f.opt.DownloadURL == "" {
@@ -1360,6 +1445,21 @@ func (o *Object) Size() int64 {
return o.size
}
// Clean the SHA1
//
// Make sure it is lower case
//
// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
// Some tools (e.g. Cyberduck) use this
func cleanSHA1(sha1 string) (out string) {
out = strings.ToLower(sha1)
const unverified = "unverified:"
if strings.HasPrefix(out, unverified) {
out = out[len(unverified):]
}
return out
}
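A quick worked example of the behaviour (the input value is illustrative - it is the SHA1 of the empty string with the prefix some tools add):

fmt.Println(cleanSHA1("UNVERIFIED:DA39A3EE5E6B4B0D3255BFEF95601890AFD80709"))
// prints: da39a3ee5e6b4b0d3255bfef95601890afd80709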
// decodeMetaDataRaw sets the metadata from the data passed in
//
// Sets
@@ -1375,12 +1475,7 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
if o.sha1 == "" || o.sha1 == "none" {
o.sha1 = Info[sha1Key]
}
// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
// Some tools (eg Cyberduck) use this
const unverified = "unverified:"
if strings.HasPrefix(o.sha1, unverified) {
o.sha1 = o.sha1[len(unverified):]
}
o.sha1 = cleanSHA1(o.sha1)
o.size = Size
// Use the UploadTimestamp if can't get file info
o.modTime = time.Time(UploadTimestamp)
@@ -1409,8 +1504,11 @@ func (o *Object) decodeMetaDataFileInfo(info *api.FileInfo) (err error) {
return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
}
// getMetaData gets the metadata from the object unconditionally
func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
// getMetaDataListing gets the metadata from the object unconditionally from the listing
//
// Note that listing is a class C transaction which costs more than
// the class B transaction used in getMetaData
func (o *Object) getMetaDataListing(ctx context.Context) (info *api.File, err error) {
bucket, bucketPath := o.split()
maxSearched := 1
var timestamp api.Timestamp
@@ -1443,6 +1541,19 @@ func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
return info, nil
}
// getMetaData gets the metadata from the object unconditionally
func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
// If using versions and the name has a version suffix, we need to list the directory to find the correct version
if o.fs.opt.Versions {
timestamp, _ := api.RemoveVersion(o.remote)
if !timestamp.IsZero() {
return o.getMetaDataListing(ctx)
}
}
_, info, err = o.getOrHead(ctx, "HEAD", nil)
return info, err
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// Sets
@@ -1501,28 +1612,10 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if err != nil {
return err
}
_, bucketPath := o.split()
info.Info[timeKey] = timeString(modTime)
opts := rest.Opts{
Method: "POST",
Path: "/b2_copy_file",
}
var request = api.CopyFileRequest{
SourceID: o.id,
Name: enc.FromStandardPath(bucketPath), // copy to same name
MetadataDirective: "REPLACE",
ContentType: info.ContentType,
Info: info.Info,
}
var response api.FileInfo
err = o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, &request, &response)
return o.fs.shouldRetry(ctx, resp, err)
})
if err != nil {
return err
}
return o.decodeMetaDataFileInfo(&response)
// Copy to the same name, overwriting the metadata only
return o.fs.copy(ctx, o, o, info)
}
// Storable returns if this object is storable
@@ -1590,12 +1683,11 @@ func (file *openFile) Close() (err error) {
// Check it satisfies the interfaces
var _ io.ReadCloser = &openFile{}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
fs.FixRangeOption(options, o.size)
func (o *Object) getOrHead(ctx context.Context, method string, options []fs.OpenOption) (resp *http.Response, info *api.File, err error) {
opts := rest.Opts{
Method: "GET",
Options: options,
Method: method,
Options: options,
NoResponse: method == "HEAD",
}
// Use downloadUrl from backblaze if downloadUrl is not set
@@ -1606,43 +1698,81 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
opts.RootURL = o.fs.opt.DownloadURL
}
// Download by id if set otherwise by name
if o.id != "" {
// Download by id if set and not using DownloadURL otherwise by name
if o.id != "" && o.fs.opt.DownloadURL == "" {
opts.Path += "/b2api/v1/b2_download_file_by_id?fileId=" + urlEncode(o.id)
} else {
bucket, bucketPath := o.split()
opts.Path += "/file/" + urlEncode(enc.FromStandardName(bucket)) + "/" + urlEncode(enc.FromStandardPath(bucketPath))
opts.Path += "/file/" + urlEncode(o.fs.opt.Enc.FromStandardName(bucket)) + "/" + urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath))
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return o.fs.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to open for download")
// 404 for files, 400 for directories
if resp != nil && (resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusBadRequest) {
return nil, nil, fs.ErrorObjectNotFound
}
return nil, nil, errors.Wrapf(err, "failed to %s for download", method)
}
// Parse the time out of the headers if possible
err = o.parseTimeString(resp.Header.Get(timeHeader))
// NB the response body may still be open here - don't return err != nil without closing it
// Convert the Headers into an api.File
var uploadTimestamp api.Timestamp
err = uploadTimestamp.UnmarshalJSON([]byte(resp.Header.Get(timestampHeader)))
if err != nil {
fs.Debugf(o, "Bad "+timestampHeader+" header: %v", err)
}
var Info = make(map[string]string)
for k, vs := range resp.Header {
k = strings.ToLower(k)
for _, v := range vs {
if strings.HasPrefix(k, headerPrefix) {
Info[k[len(headerPrefix):]] = v
}
}
}
info = &api.File{
ID: resp.Header.Get(idHeader),
Name: resp.Header.Get(nameHeader),
Action: "upload",
Size: resp.ContentLength,
UploadTimestamp: uploadTimestamp,
SHA1: resp.Header.Get(sha1Header),
ContentType: resp.Header.Get("Content-Type"),
Info: Info,
}
// When reading files from B2 via Cloudflare using --b2-download-url,
// Cloudflare strips the Content-Length header (presumably so it can
// inject stuff), so use the old length read from the listing.
if info.Size < 0 {
info.Size = o.size
}
return resp, info, nil
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
fs.FixRangeOption(options, o.size)
resp, info, err := o.getOrHead(ctx, "GET", options)
if err != nil {
return nil, err
}
// Don't check length or hash or metadata on partial content
if resp.StatusCode == http.StatusPartialContent {
return resp.Body, nil
}
err = o.decodeMetaData(info)
if err != nil {
_ = resp.Body.Close()
return nil, err
}
// Read sha1 from header if it isn't set
if o.sha1 == "" {
o.sha1 = resp.Header.Get(sha1Header)
fs.Debugf(o, "Reading sha1 from header - %q", o.sha1)
// If the sha1 header is "none" (as it is for big files), then we need
// to read it from the metadata
if o.sha1 == "none" {
o.sha1 = resp.Header.Get(sha1InfoHeader)
fs.Debugf(o, "Reading sha1 from info - %q", o.sha1)
}
}
// Don't check length or hash on partial content
if resp.StatusCode == http.StatusPartialContent {
return resp.Body, nil
}
return newOpenFile(o, resp), nil
}
@@ -1697,7 +1827,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
if size == -1 {
// Check if the file is large enough for a chunked upload (needs to be at least two chunks)
buf := o.fs.getUploadBlock()
buf := o.fs.getBuf(false)
n, err := io.ReadFull(in, buf)
if err == nil {
bufReader := bufio.NewReader(in)
@@ -1707,22 +1838,24 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err == nil {
fs.Debugf(o, "File is big enough for chunked streaming")
up, err := o.fs.newLargeUpload(ctx, o, in, src)
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
if err != nil {
o.fs.putUploadBlock(buf)
o.fs.putBuf(buf, false)
return err
}
// NB Stream returns the buffer and token
return up.Stream(ctx, buf)
} else if err == io.EOF || err == io.ErrUnexpectedEOF {
fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
defer o.fs.putUploadBlock(buf)
defer o.fs.putBuf(buf, false)
size = int64(n)
in = bytes.NewReader(buf[:n])
} else {
o.fs.putBuf(buf, false)
return err
}
} else if size > int64(o.fs.opt.UploadCutoff) {
up, err := o.fs.newLargeUpload(ctx, o, in, src)
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
if err != nil {
return err
}
@@ -1806,9 +1939,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Method: "POST",
RootURL: upload.UploadURL,
Body: in,
Options: options,
ExtraHeaders: map[string]string{
"Authorization": upload.AuthorizationToken,
"X-Bz-File-Name": urlEncode(enc.FromStandardPath(bucketPath)),
"X-Bz-File-Name": urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath)),
"Content-Type": fs.MimeType(ctx, src),
sha1Header: calculatedSha1,
timeHeader: timeString(modTime),


@@ -20,7 +20,9 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/sync/errgroup"
)
type hashAppendingReader struct {
@@ -68,20 +70,26 @@ func newHashAppendingReader(in io.Reader, h gohash.Hash) *hashAppendingReader {
// largeUpload is used to control the upload of large files which need chunking
type largeUpload struct {
f *Fs // parent Fs
o *Object // object being uploaded
in io.Reader // read the data from here
wrap accounting.WrapFn // account parts being transferred
id string // ID of the file being uploaded
size int64 // total size
parts int64 // calculated number of parts, if known
sha1s []string // slice of SHA1s for each part
uploadMu sync.Mutex // lock for upload variable
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
f *Fs // parent Fs
o *Object // object being uploaded
doCopy bool // doing copy rather than upload
what string // text name of operation for logs
in io.Reader // read the data from here
wrap accounting.WrapFn // account parts being transferred
id string // ID of the file being uploaded
size int64 // total size
parts int64 // calculated number of parts, if known
sha1s []string // slice of SHA1s for each part
uploadMu sync.Mutex // lock for upload variable
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
chunkSize int64 // chunk size to use
src *Object // if copying, object we are reading from
}
// newLargeUpload starts an upload of object o from in with metadata in src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
//
// If newInfo is set then metadata from that will be used instead of reading it from src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, chunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
remote := o.remote
size := src.Size()
parts := int64(0)
@@ -89,8 +97,8 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
} else {
parts = size / int64(o.fs.opt.ChunkSize)
if size%int64(o.fs.opt.ChunkSize) != 0 {
parts = size / int64(chunkSize)
if size%int64(chunkSize) != 0 {
parts++
}
if parts > maxParts {
@@ -99,7 +107,6 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
sha1SliceSize = parts
}
modTime := src.ModTime(ctx)
opts := rest.Opts{
Method: "POST",
Path: "/b2_start_large_file",
@@ -110,18 +117,24 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
return nil, err
}
var request = api.StartLargeFileRequest{
BucketID: bucketID,
Name: enc.FromStandardPath(bucketPath),
ContentType: fs.MimeType(ctx, src),
Info: map[string]string{
timeKey: timeString(modTime),
},
BucketID: bucketID,
Name: f.opt.Enc.FromStandardPath(bucketPath),
}
// Set the SHA1 if known
if !o.fs.opt.DisableCheckSum {
if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
request.Info[sha1Key] = calculatedSha1
if newInfo == nil {
modTime := src.ModTime(ctx)
request.ContentType = fs.MimeType(ctx, src)
request.Info = map[string]string{
timeKey: timeString(modTime),
}
// Set the SHA1 if known
if !o.fs.opt.DisableCheckSum || doCopy {
if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
request.Info[sha1Key] = calculatedSha1
}
}
} else {
request.ContentType = newInfo.ContentType
request.Info = newInfo.Info
}
var response api.StartLargeFileResponse
err = f.pacer.Call(func() (bool, error) {
@@ -131,18 +144,24 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
if err != nil {
return nil, err
}
up = &largeUpload{
f: f,
o: o,
doCopy: doCopy,
what: "upload",
id: response.ID,
size: size,
parts: parts,
sha1s: make([]string, sha1SliceSize),
chunkSize: int64(chunkSize),
}
// unwrap the accounting from the input, we use wrap to put it
// back on after the buffering
in, wrap := accounting.UnWrap(in)
up = &largeUpload{
f: f,
o: o,
in: in,
wrap: wrap,
id: response.ID,
size: size,
parts: parts,
sha1s: make([]string, sha1SliceSize),
if doCopy {
up.what = "copy"
up.src = src.(*Object)
} else {
up.in, up.wrap = accounting.UnWrap(in)
}
return up, nil
}
@@ -184,13 +203,6 @@ func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
up.uploadMu.Unlock()
}
// clearUploadURL clears the current UploadURL and the AuthorizationToken
func (up *largeUpload) clearUploadURL() {
up.uploadMu.Lock()
up.uploads = nil
up.uploadMu.Unlock()
}
// Transfer a chunk
func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
err := up.f.pacer.Call(func() (bool, error) {
@@ -263,9 +275,41 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
return err
}
// Copy a chunk
func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64) error {
err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize)
opts := rest.Opts{
Method: "POST",
Path: "/b2_copy_part",
}
offset := (part - 1) * up.chunkSize // where we are in the source file
var request = api.CopyPartRequest{
SourceID: up.src.id,
LargeFileID: up.id,
PartNumber: part,
Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
}
var response api.UploadPartResponse
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
retry, err := up.f.shouldRetry(ctx, resp, err)
if err != nil {
fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err)
}
up.sha1s[part-1] = response.SHA1
return retry, err
})
if err != nil {
fs.Debugf(up.o, "Error copying chunk %d: %v", part, err)
} else {
fs.Debugf(up.o, "Done copying chunk %d", part)
}
return err
}
// finish closes off the large upload
func (up *largeUpload) finish(ctx context.Context) error {
fs.Debugf(up.o, "Finishing large file upload with %d parts", up.parts)
fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts)
opts := rest.Opts{
Method: "POST",
Path: "/b2_finish_large_file",
@@ -287,6 +331,7 @@ func (up *largeUpload) finish(ctx context.Context) error {
// cancel aborts the large upload
func (up *largeUpload) cancel(ctx context.Context) error {
fs.Debugf(up.o, "Cancelling large file %s", up.what)
opts := rest.Opts{
Method: "POST",
Path: "/b2_cancel_large_file",
@@ -299,139 +344,139 @@ func (up *largeUpload) cancel(ctx context.Context) error {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
return up.f.shouldRetry(ctx, resp, err)
})
return err
}
func (up *largeUpload) managedTransferChunk(ctx context.Context, wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
wg.Add(1)
go func(part int64, buf []byte) {
defer wg.Done()
defer up.f.putUploadBlock(buf)
err := up.transferChunk(ctx, part, buf)
if err != nil {
select {
case errs <- err:
default:
}
}
}(part, buf)
}
func (up *largeUpload) finishOrCancelOnError(ctx context.Context, err error, errs chan error) error {
if err == nil {
select {
case err = <-errs:
default:
}
}
if err != nil {
fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err)
cancelErr := up.cancel(ctx)
if cancelErr != nil {
fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr)
}
return err
fs.Errorf(up.o, "Failed to cancel large file %s: %v", up.what, err)
}
return up.finish(ctx)
return err
}
// Stream uploads the chunks from the input, starting with a required initial
// chunk. Assumes the file size is unknown and will upload until the input
// reaches EOF.
//
// Note that initialUploadBlock must be returned to f.putBuf()
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
errs := make(chan error, 1)
hasMoreParts := true
var wg sync.WaitGroup
// Transfer initial chunk
var (
g, gCtx = errgroup.WithContext(ctx)
hasMoreParts = true
)
up.size = int64(len(initialUploadBlock))
up.managedTransferChunk(ctx, &wg, errs, 1, initialUploadBlock)
g.Go(func() error {
for part := int64(1); hasMoreParts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
var buf []byte
if part == 1 {
buf = initialUploadBlock
} else {
buf = up.f.getBuf(false)
}
outer:
for part := int64(2); hasMoreParts; part++ {
// Check any errors
select {
case err = <-errs:
break outer
default:
// Fail fast: if any errgroup-managed function has returned an error,
// gCtx is cancelled and there is no point in uploading the remaining parts.
if gCtx.Err() != nil {
up.f.putBuf(buf, false)
return nil
}
// Read the chunk
var n int
if part == 1 {
n = len(buf)
} else {
n, err = io.ReadFull(up.in, buf)
if err == io.ErrUnexpectedEOF {
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
buf = buf[:n]
hasMoreParts = false
} else if err == io.EOF {
fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
up.f.putBuf(buf, false)
return nil
} else if err != nil {
// other kinds of errors indicate failure
up.f.putBuf(buf, false)
return err
}
}
// Keep stats up to date
up.parts = part
up.size += int64(n)
if part > maxParts {
up.f.putBuf(buf, false)
return errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
}
part := part // for the closure
g.Go(func() (err error) {
defer up.f.putBuf(buf, false)
return up.transferChunk(gCtx, part, buf)
})
}
// Get a block of memory
buf := up.f.getUploadBlock()
// Read the chunk
var n int
n, err = io.ReadFull(up.in, buf)
if err == io.ErrUnexpectedEOF {
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
buf = buf[:n]
hasMoreParts = false
err = nil
} else if err == io.EOF {
fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
up.f.putUploadBlock(buf)
err = nil
break outer
} else if err != nil {
// other kinds of errors indicate failure
up.f.putUploadBlock(buf)
break outer
}
// Keep stats up to date
up.parts = part
up.size += int64(n)
if part > maxParts {
err = errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
break outer
}
// Transfer the chunk
up.managedTransferChunk(ctx, &wg, errs, part, buf)
return nil
})
err = g.Wait()
if err != nil {
return err
}
wg.Wait()
up.sha1s = up.sha1s[:up.parts]
return up.finishOrCancelOnError(ctx, err, errs)
return up.finish(ctx)
}
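The restructured Stream (and Upload below) replaces the old errs channel and WaitGroup bookkeeping with errgroup. A stripped-down standalone sketch of that fail-fast shape - uploadAll and its upload callback are hypothetical, not rclone code:

import (
	"context"

	"golang.org/x/sync/errgroup"
)

// uploadAll transfers parts concurrently; once any part fails, gCtx is
// cancelled and the producer stops queueing further parts.
func uploadAll(ctx context.Context, parts [][]byte, upload func(ctx context.Context, part int, body []byte) error) error {
	g, gCtx := errgroup.WithContext(ctx)
	for i, body := range parts {
		if gCtx.Err() != nil { // fail fast - a previous part has already errored
			break
		}
		i, body := i, body // capture loop variables for the closures
		g.Go(func() error {
			return upload(gCtx, i+1, body) // parts are numbered from 1
		})
	}
	return g.Wait() // first non-nil error, if any
}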
// Upload uploads the chunks from the input
func (up *largeUpload) Upload(ctx context.Context) error {
fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
remaining := up.size
errs := make(chan error, 1)
var wg sync.WaitGroup
var err error
outer:
for part := int64(1); part <= up.parts; part++ {
// Check any errors
select {
case err = <-errs:
break outer
default:
func (up *largeUpload) Upload(ctx context.Context) (err error) {
defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
var (
g, gCtx = errgroup.WithContext(ctx)
remaining = up.size
)
g.Go(func() error {
for part := int64(1); part <= up.parts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
buf := up.f.getBuf(up.doCopy)
// Fail fast: if any errgroup-managed function has returned an error,
// gCtx is cancelled and there is no point in uploading the remaining parts.
if gCtx.Err() != nil {
up.f.putBuf(buf, up.doCopy)
return nil
}
reqSize := remaining
if reqSize >= up.chunkSize {
reqSize = up.chunkSize
}
if !up.doCopy {
// Read the chunk
buf = buf[:reqSize]
_, err = io.ReadFull(up.in, buf)
if err != nil {
up.f.putBuf(buf, up.doCopy)
return err
}
}
part := part // for the closure
g.Go(func() (err error) {
defer up.f.putBuf(buf, up.doCopy)
if !up.doCopy {
err = up.transferChunk(gCtx, part, buf)
} else {
err = up.copyChunk(gCtx, part, reqSize)
}
return err
})
remaining -= reqSize
}
reqSize := remaining
if reqSize >= int64(up.f.opt.ChunkSize) {
reqSize = int64(up.f.opt.ChunkSize)
}
// Get a block of memory
buf := up.f.getUploadBlock()[:reqSize]
// Read the chunk
_, err = io.ReadFull(up.in, buf)
if err != nil {
up.f.putUploadBlock(buf)
break outer
}
// Transfer the chunk
up.managedTransferChunk(ctx, &wg, errs, part, buf)
remaining -= reqSize
return nil
})
err = g.Wait()
if err != nil {
return err
}
wg.Wait()
return up.finishOrCancelOnError(ctx, err, errs)
return up.finish(ctx)
}
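Both Stream and Upload now open with the same guard. As I read lib/atexit, the deferred OnError call fires the cleanup only when the named return err is non-nil, so a failed (or interrupted) transfer cancels the unfinished large file rather than leaving billable parts behind. A sketch of the pattern in isolation - transferWithCleanup is a hypothetical name:

func (up *largeUpload) transferWithCleanup(ctx context.Context) (err error) {
	// If we return an error, cancel the started-but-unfinished large file.
	defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
	// ... transfer and finish the parts; any error returned here triggers the cancel ...
	return nil
}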


@@ -222,3 +222,23 @@ type AppAuth struct {
PrivateKey string `json:"privateKey"`
Passphrase string `json:"passphrase"`
}
// User is returned from /users/me
type User struct {
Type string `json:"type"`
ID string `json:"id"`
Name string `json:"name"`
Login string `json:"login"`
CreatedAt time.Time `json:"created_at"`
ModifiedAt time.Time `json:"modified_at"`
Language string `json:"language"`
Timezone string `json:"timezone"`
SpaceAmount int64 `json:"space_amount"`
SpaceUsed int64 `json:"space_used"`
MaxUploadSize int64 `json:"max_upload_size"`
Status string `json:"status"`
JobTitle string `json:"job_title"`
Phone string `json:"phone"`
Address string `json:"address"`
AvatarURL string `json:"avatar_url"`
}
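The new User type backs the About implementation added to box.go below. A minimal hedged sketch of decoding a /users/me style payload and deriving the free-space figure - the JSON literal is made up for illustration, not a captured API response:

import (
	"encoding/json"
	"fmt"
)

func exampleAbout() error {
	payload := []byte(`{"space_amount": 10737418240, "space_used": 1073741824}`)
	var user User
	if err := json.Unmarshal(payload, &user); err != nil {
		return err
	}
	free := user.SpaceAmount - user.SpaceUsed
	fmt.Printf("used=%d total=%d free=%d\n", user.SpaceUsed, user.SpaceAmount, free)
	return nil
}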


@@ -25,6 +25,8 @@ import (
"strings"
"time"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/jwtutil"
"github.com/youmark/pkcs8"
@@ -36,7 +38,6 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
@@ -48,15 +49,12 @@ import (
"golang.org/x/oauth2/jws"
)
const enc = encodings.Box
const (
rcloneClientID = "d0374ba6pgmaguie02ge15sv1mllndho"
rcloneEncryptedClientSecret = "sYbJYm99WB8jzeaLPU0OPDMJKIkZvD2qOn3SyEMfiJr03RdtDt3xcZEIudRhbIDL"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
rootID = "0" // ID of root folder is always this
decayConstant = 2 // bigger for slower decay, exponential
rootURL = "https://api.box.com/2.0"
uploadURL = "https://upload.box.com/api/2.0"
listChunks = 1000 // chunk size to read directory listings
@@ -86,46 +84,36 @@ func init() {
Name: "box",
Description: "Box",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
Config: func(ctx context.Context, name string, m configmap.Mapper) {
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
boxAccessToken, boxAccessTokenOk := m.Get("access_token")
var err error
// If using box config.json, use JWT auth
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
boxConfig, err := getBoxConfig(jsonFile)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
privateKey, err := getDecryptedPrivateKey(boxConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
claims, err := getClaims(boxConfig, boxSubType)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig)
client := fshttp.NewClient(fs.Config)
err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
if err != nil {
log.Fatalf("Failed to configure token with jwt authentication: %v", err)
}
} else {
err = oauthutil.Config("box", name, m, oauthConfig)
// Else, if not using an access token, use oauth2
} else if boxAccessToken == "" || !boxAccessTokenOk {
err = oauthutil.Config(ctx, "box", name, m, oauthConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token with oauth authentication: %v", err)
}
}
},
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Box App Client Id.\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Box App Client Secret\nLeave blank normally.",
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "0",
Advanced: true,
}, {
Name: "box_config_file",
Help: "Box App config.json location\nLeave blank normally.",
Help: "Box App config.json location\nLeave blank normally." + env.ShellExpandHelp,
}, {
Name: "access_token",
Help: "Box App Primary Access Token\nLeave blank normally.",
}, {
Name: "box_sub_type",
Default: "user",
@@ -146,10 +134,46 @@ func init() {
Help: "Max number of times to try committing a multipart file.",
Default: 100,
Advanced: true,
}},
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// From https://developer.box.com/docs/error-codes#section-400-bad-request :
// > Box only supports file or folder names that are 255 characters or less.
// > File names containing non-printable ascii, "/" or "\", names with leading
// > or trailing spaces, and the special names “.” and “..” are also unsupported.
//
// Testing revealed names with leading spaces work fine.
// Also encode invalid UTF-8 bytes as json doesn't handle them properly.
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8),
}}...),
})
}
func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
jsonFile = env.ShellExpand(jsonFile)
boxConfig, err := getBoxConfig(jsonFile)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
privateKey, err := getDecryptedPrivateKey(boxConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
claims, err := getClaims(boxConfig, boxSubType)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig)
client := fshttp.NewClient(ctx)
err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
return err
}
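One practical effect of the env.ShellExpand call above: the box_config_file value may now contain a home-relative or environment-variable path. A hedged example (the path is illustrative):

// "~/box-config.json" expands to e.g. "/home/user/box-config.json";
// variables such as "${RCLONE_CONFIG_DIR}" are expanded from the environment.
jsonFile := env.ShellExpand("~/box-config.json")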
func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
file, err := ioutil.ReadFile(configFile)
if err != nil {
@@ -172,7 +196,6 @@ func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimS
Iss: boxConfig.BoxAppSettings.ClientID,
Sub: boxConfig.EnterpriseID,
Aud: tokenURL,
Iat: time.Now().Unix(),
Exp: time.Now().Add(time.Second * 45).Unix(),
PrivateClaims: map[string]interface{}{
"box_sub_type": boxSubType,
@@ -220,8 +243,11 @@ func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err
// Options defines the configuration for this backend
type Options struct {
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CommitRetries int `config:"commit_retries"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CommitRetries int `config:"commit_retries"`
Enc encoder.MultiEncoder `config:"encoding"`
RootFolderID string `config:"root_folder_id"`
AccessToken string `config:"access_token"`
}
// Fs represents a remote box
@@ -273,7 +299,7 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// parsePath parses an box 'url'
// parsePath parses a box 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
@@ -291,10 +317,13 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
authRetry := false
if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
if resp != nil && resp.StatusCode == 401 && strings.Contains(resp.Header.Get("Www-Authenticate"), "expired_token") {
authRetry = true
fs.Debugf(nil, "Should retry: %v", err)
}
@@ -304,7 +333,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, path, false)
leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, fs.ErrorObjectNotFound
@@ -313,7 +342,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
}
found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
if item.Name == leaf {
if strings.EqualFold(item.Name, leaf) {
info = item
return true
}
@@ -346,8 +375,7 @@ func errorHandler(resp *http.Response) error {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -360,32 +388,60 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
root = parsePath(root)
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Box")
client := fshttp.NewClient(ctx)
var ts *oauthutil.TokenSource
// If not using an accessToken, create an oauth client and tokensource
if opt.AccessToken == "" {
client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Box")
}
}
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
srv: rest.NewClient(client).SetRoot(rootURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
}
f.features = (&fs.Features{
CaseInsensitive: true,
CanHaveEmptyDirectories: true,
}).Fill(f)
}).Fill(ctx, f)
f.srv.SetErrorHandler(errorHandler)
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath(ctx, "")
return err
})
// If using an accessToken, set the Authorization header
if f.opt.AccessToken != "" {
f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken)
}
// Get rootID
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
if ts != nil {
// If using box config.json and JWT, renewing should just refresh the token and
// should do so whether there are uploads pending or not.
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
err := refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
return err
})
f.tokenRenewer.Start()
} else {
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath(ctx, "")
return err
})
}
}
// Get rootFolderID
rootID := f.opt.RootFolderID
f.dirCache = dircache.New(root, rootID, f)
// Find the current root
@@ -410,7 +466,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
return nil, err
}
f.features.Fill(&tempF)
f.features.Fill(ctx, &tempF)
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182
@@ -461,7 +517,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
if item.Name == leaf {
if strings.EqualFold(item.Name, leaf) {
pathIDOut = item.ID
return true
}
@@ -488,14 +544,14 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
Parameters: fieldsValue(),
}
mkdir := api.CreateFolder{
Name: enc.FromStandardName(leaf),
Name: f.opt.Enc.FromStandardName(leaf),
Parent: api.Parent{
ID: pathID,
},
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
//fmt.Printf("...Error %v\n", err)
@@ -532,7 +588,7 @@ OUTER:
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return found, errors.Wrap(err, "couldn't list files")
@@ -554,7 +610,7 @@ OUTER:
if item.ItemStatus != api.ItemStatusActive {
continue
}
item.Name = enc.ToStandardName(item.Name)
item.Name = f.opt.Enc.ToStandardName(item.Name)
if fn(item) {
found = true
break OUTER
@@ -578,10 +634,6 @@ OUTER:
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
@@ -622,7 +674,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
// Create the directory for the object if it doesn't exist
leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true)
if err != nil {
return
}
@@ -678,13 +730,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
err := f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true)
}
_, err := f.dirCache.FindDir(ctx, dir, true)
return err
}
@@ -697,7 +743,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) error {
}
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
}
@@ -709,10 +755,6 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return errors.New("can't purge root directory")
}
dc := f.dirCache
err := dc.FindRoot(ctx, false)
if err != nil {
return err
}
rootID, err := dc.FindDir(ctx, dir, false)
if err != nil {
return err
@@ -728,7 +770,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "rmdir failed")
@@ -752,7 +794,7 @@ func (f *Fs) Precision() time.Duration {
return time.Second
}
// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -791,7 +833,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
Parameters: fieldsValue(),
}
copyFile := api.CopyFile{
Name: enc.FromStandardName(leaf),
Name: f.opt.Enc.FromStandardName(leaf),
Parent: api.Parent{
ID: directoryID,
},
@@ -800,7 +842,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var info *api.Item
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &copyFile, &info)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
@@ -817,8 +859,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
}
// move a file or folder
@@ -830,7 +872,7 @@ func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (
Parameters: fieldsValue(),
}
move := api.UpdateFileMove{
Name: enc.FromStandardName(leaf),
Name: f.opt.Enc.FromStandardName(leaf),
Parent: api.Parent{
ID: directoryID,
},
@@ -838,7 +880,7 @@ func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
@@ -846,7 +888,31 @@ func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (
return info, nil
}
// Move src to this remote using server side move operations.
// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/users/me",
}
var user api.User
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &user)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to read user info")
}
// FIXME max upload size would be useful to use in Update
usage = &fs.Usage{
Used: fs.NewUsageValue(user.SpaceUsed), // bytes in use
Total: fs.NewUsageValue(user.SpaceAmount), // bytes total
Free: fs.NewUsageValue(user.SpaceAmount - user.SpaceUsed), // bytes free
}
return usage, nil
}
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -882,7 +948,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -895,64 +961,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcPath := path.Join(srcFs.root, srcRemote)
dstPath := path.Join(f.root, dstRemote)
// Refuse to move to or from the root
if srcPath == "" || dstPath == "" {
fs.Debugf(src, "DirMove error: Can't move root")
return errors.New("can't move root directory")
}
// find the root src directory
err := srcFs.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
// find the root dst directory
if dstRemote != "" {
err = f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
} else {
if f.dirCache.FoundRoot() {
return fs.ErrorDirExists
}
}
// Find ID of dst parent, creating subdirs if necessary
var leaf, directoryID string
findPath := dstRemote
if dstRemote == "" {
findPath = f.root
}
leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true)
if err != nil {
return err
}
// Check destination does not exist
if dstRemote != "" {
_, err = f.dirCache.FindDir(ctx, dstRemote, false)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
return err
} else {
return fs.ErrorDirExists
}
}
// Find ID of src
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
if err != nil {
return err
}
// Do the move
_, err = f.move(ctx, "/folders/", srcID, leaf, directoryID)
_, err = f.move(ctx, "/folders/", srcID, dstLeaf, dstDirectoryID)
if err != nil {
return err
}
@@ -961,7 +977,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
id, err := f.dirCache.FindDir(ctx, remote, false)
var opts rest.Opts
if err == nil {
@@ -995,11 +1011,71 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &shareLink, &info)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
return info.SharedLink.URL, err
}
// deletePermanently permanently deletes a trashed file
func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
opts := rest.Opts{
Method: "DELETE",
NoResponse: true,
}
if itemType == api.ItemTypeFile {
opts.Path = "/files/" + id + "/trash"
} else {
opts.Path = "/folders/" + id + "/trash"
}
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
})
}
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) (err error) {
opts := rest.Opts{
Method: "GET",
Path: "/folders/trash/items",
Parameters: url.Values{
"fields": []string{"type", "id"},
},
}
opts.Parameters.Set("limit", strconv.Itoa(listChunks))
offset := 0
for {
opts.Parameters.Set("offset", strconv.Itoa(offset))
var result api.FolderItems
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't list trash")
}
for i := range result.Entries {
item := &result.Entries[i]
if item.Type == api.ItemTypeFolder || item.Type == api.ItemTypeFile {
err := f.deletePermanently(ctx, item.Type, item.ID)
if err != nil {
return errors.Wrap(err, "failed to delete file")
}
} else {
fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
continue
}
}
offset += result.Limit
if offset >= result.TotalCount {
break
}
}
return
}
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
@@ -1109,7 +1185,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
var info *api.Item
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
return info, err
}
@@ -1142,7 +1218,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
@@ -1153,9 +1229,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// upload does a single non-multipart upload
//
// This is recommended for less than 50 MB of content
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time, options ...fs.OpenOption) (err error) {
upload := api.UploadFile{
Name: enc.FromStandardName(leaf),
Name: o.fs.opt.Enc.FromStandardName(leaf),
ContentModifiedAt: api.Time(modTime),
ContentCreatedAt: api.Time(modTime),
Parent: api.Parent{
@@ -1172,6 +1248,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
MultipartContentName: "contents",
MultipartFileName: upload.Name,
RootURL: uploadURL,
Options: options,
}
// If object has an ID then it is existing so create a new version
if o.id != "" {
@@ -1181,7 +1258,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &upload, &result)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return err
@@ -1198,24 +1275,26 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
o.fs.tokenRenewer.Start()
defer o.fs.tokenRenewer.Stop()
if o.fs.tokenRenewer != nil {
o.fs.tokenRenewer.Start()
defer o.fs.tokenRenewer.Stop()
}
size := src.Size()
modTime := src.ModTime(ctx)
remote := o.Remote()
// Create the directory for the object if it doesn't exist
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, remote, true)
leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, remote, true)
if err != nil {
return err
}
// Upload with simple or multipart
if size <= int64(o.fs.opt.UploadCutoff) {
err = o.upload(ctx, in, leaf, directoryID, modTime)
err = o.upload(ctx, in, leaf, directoryID, modTime, options...)
} else {
err = o.uploadMultipart(ctx, in, leaf, directoryID, size, modTime)
err = o.uploadMultipart(ctx, in, leaf, directoryID, size, modTime, options...)
}
return err
}
@@ -1236,10 +1315,12 @@ var (
_ fs.Purger = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)
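For readers following the interface list above: rclone normally reaches these optional features through the Features() struct rather than by asserting the concrete type. The following is an illustrative sketch only (the helper name is invented, not rclone code) of how the About and CleanUp features implemented in this file are typically discovered and called on any fs.Fs:

package example

import (
	"context"
	"log"

	"github.com/rclone/rclone/fs"
)

// reportQuotaAndEmptyTrash is a hypothetical helper showing how the optional
// About and CleanUp features are looked up and invoked on a backend such as box.
func reportQuotaAndEmptyTrash(ctx context.Context, f fs.Fs) {
	if doAbout := f.Features().About; doAbout != nil {
		usage, err := doAbout(ctx)
		if err != nil {
			log.Printf("about failed: %v", err)
		} else if usage.Used != nil && usage.Total != nil {
			log.Printf("used %d of %d bytes", *usage.Used, *usage.Total)
		}
	}
	if doCleanUp := f.Features().CleanUp; doCleanUp != nil {
		if err := doCleanUp(ctx); err != nil {
			log.Printf("cleanup failed: %v", err)
		}
	}
}

On the command line these are the features that `rclone about remote:` and `rclone cleanup remote:` call into once a backend advertises them.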



@@ -1,4 +1,4 @@
// multpart upload for box
// multipart upload for box
package box
@@ -19,6 +19,7 @@ import (
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/rest"
)
@@ -38,12 +39,12 @@ func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID stri
} else {
opts.Path = "/files/upload_sessions"
request.FolderID = directoryID
request.FileName = enc.FromStandardName(leaf)
request.FileName = o.fs.opt.Enc.FromStandardName(leaf)
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, &response)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
return
}
@@ -54,7 +55,7 @@ func sha1Digest(digest []byte) string {
}
// uploadPart uploads a part in an upload session
func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn, options ...fs.OpenOption) (response *api.UploadPartResponse, err error) {
chunkSize := int64(len(chunk))
sha1sum := sha1.Sum(chunk)
opts := rest.Opts{
@@ -64,6 +65,7 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
ContentType: "application/octet-stream",
ContentLength: &chunkSize,
ContentRange: fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, totalSize),
Options: options,
ExtraHeaders: map[string]string{
"Digest": sha1Digest(sha1sum[:]),
},
@@ -72,7 +74,7 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
err = o.fs.pacer.Call(func() (bool, error) {
opts.Body = wrap(bytes.NewReader(chunk))
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &response)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
@@ -107,10 +109,10 @@ outer:
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
if err != nil {
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
}
body, err = rest.ReadBody(resp)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
delay := defaultDelay
var why string
@@ -165,13 +167,13 @@ func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error)
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
return err
}
// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, directoryID string, size int64, modTime time.Time, options ...fs.OpenOption) (err error) {
// Create upload session
session, err := o.createUploadSession(ctx, leaf, directoryID, size)
if err != nil {
@@ -181,15 +183,13 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", session.TotalParts, fs.SizeSuffix(chunkSize))
// Cancel the session if something went wrong
defer func() {
if err != nil {
fs.Debugf(o, "Cancelling multipart upload: %v", err)
cancelErr := o.abortUpload(ctx, session.ID)
if cancelErr != nil {
fs.Logf(o, "Failed to cancel multipart upload: %v", err)
}
defer atexit.OnError(&err, func() {
fs.Debugf(o, "Cancelling multipart upload: %v", err)
cancelErr := o.abortUpload(ctx, session.ID)
if cancelErr != nil {
fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
}
}()
})()
// unwrap the accounting from the input, we use wrap to put it
// back on after the buffering
@@ -236,7 +236,7 @@ outer:
defer wg.Done()
defer o.fs.uploadToken.Put()
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap)
partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap, options...)
if err != nil {
err = errors.Wrap(err, "multipart upload failed to upload part")
select {
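A note on the abort handling rewritten above: lib/atexit.OnError returns a function intended to be deferred. As I read it, the callback runs when the pointed-to error is non-nil at return time, and it is also registered to fire if rclone is interrupted, so a failed or killed multipart upload still gets its session aborted. A minimal sketch of the pattern (hypothetical function, not rclone code):

package example

import (
	"errors"

	"github.com/rclone/rclone/lib/atexit"
)

// uploadWithAbort sketches the defer pattern used in uploadMultipart above:
// abort() runs if the function returns a non-nil error or the process exits early.
func uploadWithAbort(abort func()) (err error) {
	defer atexit.OnError(&err, func() {
		abort() // e.g. cancel the upload session on the remote
	})()
	// ... upload the parts here ...
	return errors.New("simulated failure so the abort path runs")
}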

backend/cache/cache.go

@@ -1,4 +1,4 @@
// +build !plan9
// +build !plan9,!js
package cache
@@ -65,9 +65,10 @@ func init() {
Name: "cache",
Description: "Cache a remote",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to cache.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Help: "Remote to cache.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Required: true,
}, {
Name: "plex_url",
@@ -86,7 +87,7 @@ func init() {
Advanced: true,
}, {
Name: "plex_insecure",
Help: "Skip all certificate verifications when connecting to the Plex server",
Help: "Skip all certificate verification when connecting to the Plex server",
Advanced: true,
}, {
Name: "chunk_size",
@@ -108,7 +109,7 @@ will need to be cleared or unexpected EOF errors will occur.`,
}},
}, {
Name: "info_age",
Help: `How long to cache file structure information (directory listings, file size, times etc).
Help: `How long to cache file structure information (directory listings, file size, times, etc.).
If all write operations are done through the cache then you can safely make
this value very large as the cache store will also be updated in real time.`,
Default: DefCacheInfoAge,
@@ -338,8 +339,8 @@ func parseRootPath(path string) (string, error) {
return strings.Trim(path, "/"), nil
}
// NewFs constructs a Fs from the path, container:path
func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -360,15 +361,10 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
}
wInfo, wName, wPath, wConfig, err := fs.ConfigFs(opt.Remote)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", opt.Remote)
}
remotePath := fspath.JoinRootPath(wPath, rootPath)
wrappedFs, wrapErr := wInfo.NewFs(wName, remotePath, wConfig)
remotePath := fspath.JoinRootPath(opt.Remote, rootPath)
wrappedFs, wrapErr := cache.Get(ctx, remotePath)
if wrapErr != nil && wrapErr != fs.ErrorIsFile {
return nil, errors.Wrapf(wrapErr, "failed to make remote %s:%s to wrap", wName, remotePath)
return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
}
var fsErr error
fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
@@ -389,6 +385,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
cleanupChan: make(chan bool, 1),
notifiedRemotes: make(map[string]bool),
}
cache.PinUntilFinalized(f.Fs, f)
f.rateLimiter = rate.NewLimiter(rate.Limit(float64(opt.Rps)), opt.TotalWorkers)
f.plexConnector = &plexConnector{}
@@ -482,7 +479,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
}
f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
f.tempFs, err = cache.Get(f.opt.TempWritePath)
f.tempFs, err = cache.Get(ctx, f.opt.TempWritePath)
if err != nil {
return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
}
@@ -509,19 +506,16 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
pollInterval := make(chan time.Duration, 1)
pollInterval <- time.Duration(f.opt.ChunkCleanInterval)
doChangeNotify(context.Background(), f.receiveChangeNotify, pollInterval)
doChangeNotify(ctx, f.receiveChangeNotify, pollInterval)
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
DuplicateFiles: false, // storage doesn't permit this
}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
// override only those features that use a temp fs and it doesn't support them
//f.features.ChangeNotify = f.ChangeNotify
if f.opt.TempWritePath != "" {
if f.tempFs.Features().Copy == nil {
f.features.Copy = nil
}
if f.tempFs.Features().Move == nil {
f.features.Move = nil
}
@@ -587,7 +581,7 @@ Some valid examples are:
"0:10" -> the first ten chunks
Any parameter with a key that starts with "file" can be used to
specify files to fetch, eg
specify files to fetch, e.g.
rclone rc cache/fetch chunks=0 file=hello file2=home/goodbye
@@ -1242,7 +1236,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
fs.Debugf(f, "move dir '%s'/'%s' -> '%s'/'%s'", src.Root(), srcRemote, f.Root(), dstRemote)
@@ -1523,7 +1517,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
return f.put(ctx, in, src, options, do)
}
// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
fs.Debugf(f, "copy obj '%s' -> '%s'", src, remote)
@@ -1532,6 +1526,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Errorf(src, "source remote (%v) doesn't support Copy", src.Fs())
return nil, fs.ErrorCantCopy
}
if f.opt.TempWritePath != "" && src.Fs() == f.tempFs {
return nil, fs.ErrorCantCopy
}
// the source must be a cached object or we abort
srcObj, ok := src.(*Object)
if !ok {
@@ -1597,7 +1594,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return co, nil
}
// Move src to this remote using server side move operations.
// Move src to this remote using server-side move operations.
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
fs.Debugf(f, "moving obj '%s' -> %s", src, remote)
@@ -1701,17 +1698,20 @@ func (f *Fs) Hashes() hash.Set {
return f.Fs.Hashes()
}
// Purge all files in the root and the root directory
func (f *Fs) Purge(ctx context.Context) error {
fs.Infof(f, "purging cache")
f.cache.Purge()
// Purge all files in the directory
func (f *Fs) Purge(ctx context.Context, dir string) error {
if dir == "" {
// FIXME this isn't quite right as it should purge the dir prefix
fs.Infof(f, "purging cache")
f.cache.Purge()
}
do := f.Fs.Features().Purge
if do == nil {
return nil
return fs.ErrorCantPurge
}
err := do(ctx)
err := do(ctx, dir)
if err != nil {
return err
}
@@ -1828,6 +1828,19 @@ func (f *Fs) isRootInPath(p string) bool {
return strings.HasPrefix(p, f.Root()+"/")
}
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
do := f.Fs.Features().MergeDirs
if do == nil {
return errors.New("MergeDirs not supported")
}
for _, dir := range dirs {
_ = f.cache.RemoveDir(dir.Remote())
}
return do(ctx, dirs)
}
// DirCacheFlush flushes the dir cache
func (f *Fs) DirCacheFlush() {
_ = f.cache.RemoveDir("")
@@ -1882,6 +1895,41 @@ func (f *Fs) Disconnect(ctx context.Context) error {
return do(ctx)
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
do := f.Fs.Features().Shutdown
if do == nil {
return nil
}
return do(ctx)
}
var commandHelp = []fs.CommandHelp{
{
Name: "stats",
Short: "Print stats on the cache backend in JSON format.",
},
}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
switch name {
case "stats":
return f.Stats()
default:
return nil, fs.ErrorCommandNotFound
}
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@@ -1899,4 +1947,7 @@ var (
_ fs.Abouter = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
_ fs.Commander = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
)
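The Commander implementation above is what `rclone backend stats remote:` ends up calling for a cache remote. As an illustrative sketch only (the helper name is invented, not rclone code), the same "stats" command can also be reached programmatically through the Features() struct:

package example

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs"
)

// printCacheStats is a hypothetical helper showing how the "stats" backend
// command registered above can be invoked on an fs.Fs that supports Command.
func printCacheStats(ctx context.Context, f fs.Fs) error {
	doCommand := f.Features().Command
	if doCommand == nil {
		return fmt.Errorf("%v does not support backend commands", f)
	}
	out, err := doCommand(ctx, "stats", nil, nil)
	if err != nil {
		return err
	}
	fmt.Printf("cache stats: %v\n", out) // a JSON-encodable stats map
	return nil
}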


@@ -1,4 +1,5 @@
// +build !plan9
// +build !plan9,!js
// +build !race
package cache_test
@@ -15,9 +16,7 @@ import (
"os"
"path"
"path/filepath"
"runtime"
"runtime/debug"
"strconv"
"strings"
"testing"
"time"
@@ -31,12 +30,10 @@ import (
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/testy"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -52,9 +49,7 @@ const (
var (
remoteName string
mountDir string
uploadDir string
useMount bool
runInstance *run
errNotSupported = errors.New("not supported")
decryptedToEncryptedRemotes = map[string]string{
@@ -90,9 +85,7 @@ var (
func init() {
goflag.StringVar(&remoteName, "remote-internal", "TestInternalCache", "Remote to test with, defaults to local filesystem")
goflag.StringVar(&mountDir, "mount-dir-internal", "", "")
goflag.StringVar(&uploadDir, "upload-dir-internal", "", "")
goflag.BoolVar(&useMount, "cache-use-mount", false, "Test only with mount")
}
// TestMain drives the tests
@@ -100,7 +93,7 @@ func TestMain(m *testing.M) {
goflag.Parse()
var rc int
log.Printf("Running with the following params: \n remote: %v, \n mount: %v", remoteName, useMount)
log.Printf("Running with the following params: \n remote: %v", remoteName)
runInstance = newRun()
rc = m.Run()
os.Exit(rc)
@@ -273,32 +266,8 @@ func TestInternalObjNotFound(t *testing.T) {
require.Nil(t, obj)
}
func TestInternalRemoteWrittenFileFoundInMount(t *testing.T) {
if !runInstance.useMount {
t.Skip("test needs mount mode")
}
id := fmt.Sprintf("tirwffim%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
var testData []byte
if runInstance.rootIsCrypt {
testData, err = base64.StdEncoding.DecodeString(cryptedTextBase64)
require.NoError(t, err)
} else {
testData = []byte("test content")
}
runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test"), testData)
data, err := runInstance.readDataFromRemote(t, rootFs, "test", 0, int64(len([]byte("test content"))), false)
require.NoError(t, err)
require.Equal(t, "test content", string(data))
}
func TestInternalCachedWrittenContentMatches(t *testing.T) {
testy.SkipUnreliable(t)
id := fmt.Sprintf("ticwcm%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
@@ -342,6 +311,7 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
}
func TestInternalCachedUpdatedContentMatches(t *testing.T) {
testy.SkipUnreliable(t)
id := fmt.Sprintf("ticucm%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
@@ -691,79 +661,6 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
}
func TestInternalChangeSeenAfterRc(t *testing.T) {
cacheExpire := rc.Calls.Get("cache/expire")
assert.NotNil(t, cacheExpire)
id := fmt.Sprintf("ticsarc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
if !runInstance.useMount {
t.Skipf("needs mount")
}
if !runInstance.wrappedIsExternal {
t.Skipf("needs drive")
}
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
chunkSize := cfs.ChunkSize()
// create some rand test data
testData := randStringBytes(int(chunkSize*4 + chunkSize/2))
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
// update in the wrapped fs
o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err)
wrappedTime := time.Now().Add(-1 * time.Hour)
err = o.SetModTime(context.Background(), wrappedTime)
require.NoError(t, err)
// get a new instance from the cache
co, err := rootFs.NewObject(context.Background(), "data.bin")
require.NoError(t, err)
require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String())
// Call the rc function
m, err := cacheExpire.Fn(context.Background(), rc.Params{"remote": "data.bin"})
require.NoError(t, err)
require.Contains(t, m, "status")
require.Contains(t, m, "message")
require.Equal(t, "ok", m["status"])
require.Contains(t, m["message"], "cached file cleared")
// get a new instance from the cache
co, err = rootFs.NewObject(context.Background(), "data.bin")
require.NoError(t, err)
require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
_, err = runInstance.list(t, rootFs, "")
require.NoError(t, err)
// create some rand test data
testData2 := randStringBytes(int(chunkSize))
runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test2"), testData2)
// list should have 1 item only
li1, err := runInstance.list(t, rootFs, "")
require.NoError(t, err)
require.Len(t, li1, 1)
// Call the rc function
m, err = cacheExpire.Fn(context.Background(), rc.Params{"remote": "/"})
require.NoError(t, err)
require.Contains(t, m, "status")
require.Contains(t, m, "message")
require.Equal(t, "ok", m["status"])
require.Contains(t, m["message"], "cached directory cleared")
// list should have 2 items now
li2, err := runInstance.list(t, rootFs, "")
require.NoError(t, err)
require.Len(t, li2, 2)
}
func TestInternalCacheWrites(t *testing.T) {
id := "ticw"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
@@ -911,15 +808,9 @@ func TestInternalBug2117(t *testing.T) {
type run struct {
okDiff time.Duration
runDefaultCfgMap configmap.Simple
mntDir string
tmpUploadDir string
useMount bool
isMounted bool
rootIsCrypt bool
wrappedIsExternal bool
unmountFn func() error
unmountRes chan error
vfs *vfs.VFS
tempFiles []*os.File
dbPath string
chunkPath string
@@ -929,9 +820,7 @@ type run struct {
func newRun() *run {
var err error
r := &run{
okDiff: time.Second * 9, // really big diff here but the build machines seem to be slow. need a different way for this
useMount: useMount,
isMounted: false,
okDiff: time.Second * 9, // really big diff here but the build machines seem to be slow. need a different way for this
}
// Read in all the defaults for all the options
@@ -944,32 +833,6 @@ func newRun() *run {
r.runDefaultCfgMap.Set(option.Name, fmt.Sprint(option.Default))
}
if mountDir == "" {
if runtime.GOOS != "windows" {
r.mntDir, err = ioutil.TempDir("", "rclonecache-mount")
if err != nil {
log.Fatalf("Failed to create mount dir: %v", err)
return nil
}
} else {
// Find a free drive letter
drive := ""
for letter := 'E'; letter <= 'Z'; letter++ {
drive = string(letter) + ":"
_, err := os.Stat(drive + "\\")
if os.IsNotExist(err) {
goto found
}
}
log.Print("Couldn't find free drive letter for test")
found:
r.mntDir = drive
}
} else {
r.mntDir = mountDir
}
log.Printf("Mount Dir: %v", r.mntDir)
if uploadDir == "" {
r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
if err != nil {
@@ -1009,6 +872,15 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
return nil, nil
}
// Config to pass to NewFs
m := configmap.Simple{}
for k, v := range r.runDefaultCfgMap {
m.Set(k, v)
}
for k, v := range flags {
m.Set(k, v)
}
// if the remote doesn't exist, create a new one with a local one for it
// identify which is the cache remote (it can be wrapped by a crypt too)
rootIsCrypt := false
@@ -1017,10 +889,10 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
localRemote := remote + "-local"
config.FileSet(localRemote, "type", "local")
config.FileSet(localRemote, "nounc", "true")
config.FileSet(remote, "type", "cache")
config.FileSet(remote, "remote", localRemote+":/var/tmp/"+localRemote)
m.Set("type", "cache")
m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
} else {
remoteType := config.FileGet(remote, "type", "")
remoteType := config.FileGet(remote, "type")
if remoteType == "" {
t.Skipf("skipped due to invalid remote type for %v", remote)
return nil, nil
@@ -1028,17 +900,17 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
if remoteType != "cache" {
if remoteType == "crypt" {
rootIsCrypt = true
config.FileSet(remote, "password", cryptPassword1)
config.FileSet(remote, "password2", cryptPassword2)
m.Set("password", cryptPassword1)
m.Set("password2", cryptPassword2)
}
remoteRemote := config.FileGet(remote, "remote", "")
remoteRemote := config.FileGet(remote, "remote")
if remoteRemote == "" {
t.Skipf("skipped due to invalid remote wrapper for %v", remote)
return nil, nil
}
remoteRemoteParts := strings.Split(remoteRemote, ":")
remoteWrapping := remoteRemoteParts[0]
remoteType := config.FileGet(remoteWrapping, "type", "")
remoteType := config.FileGet(remoteWrapping, "type")
if remoteType != "cache" {
t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
return nil, nil
@@ -1053,22 +925,15 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
require.NoError(t, err)
fs.Config.LowLevelRetries = 1
m := configmap.Simple{}
for k, v := range r.runDefaultCfgMap {
m.Set(k, v)
}
for k, v := range flags {
m.Set(k, v)
}
ci := fs.GetConfig(context.Background())
ci.LowLevelRetries = 1
// Instantiate root
if purge {
boltDb.PurgeTempUploads()
_ = os.RemoveAll(path.Join(runInstance.tmpUploadDir, id))
}
f, err := cache.NewFs(remote, id, m)
f, err := cache.NewFs(context.Background(), remote, id, m)
require.NoError(t, err)
cfs, err := r.getCacheFs(f)
require.NoError(t, err)
@@ -1082,33 +947,21 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
}
if purge {
_ = f.Features().Purge(context.Background())
_ = f.Features().Purge(context.Background(), "")
require.NoError(t, err)
}
err = f.Mkdir(context.Background(), "")
require.NoError(t, err)
if r.useMount && !r.isMounted {
r.mountFs(t, f)
}
return f, boltDb
}
func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
if r.useMount && r.isMounted {
r.unmountFs(t, f)
}
err := f.Features().Purge(context.Background())
err := f.Features().Purge(context.Background(), "")
require.NoError(t, err)
cfs, err := r.getCacheFs(f)
require.NoError(t, err)
cfs.StopBackgroundRunners()
if r.useMount && runtime.GOOS != "windows" {
err = os.RemoveAll(r.mntDir)
require.NoError(t, err)
}
err = os.RemoveAll(r.tmpUploadDir)
require.NoError(t, err)
@@ -1139,23 +992,6 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
return f
}
func (r *run) writeRemoteRandomBytes(t *testing.T, f fs.Fs, p string, size int64) string {
remote := path.Join(p, strconv.Itoa(rand.Int())+".bin")
// create some rand test data
testData := randStringBytes(int(size))
r.writeRemoteBytes(t, f, remote, testData)
return remote
}
func (r *run) writeObjectRandomBytes(t *testing.T, f fs.Fs, p string, size int64) fs.Object {
remote := path.Join(p, strconv.Itoa(rand.Int())+".bin")
// create some rand test data
testData := randStringBytes(int(size))
return r.writeObjectBytes(t, f, remote, testData)
}
func (r *run) writeRemoteString(t *testing.T, f fs.Fs, remote, content string) {
r.writeRemoteBytes(t, f, remote, []byte(content))
}
@@ -1165,37 +1001,11 @@ func (r *run) writeObjectString(t *testing.T, f fs.Fs, remote, content string) f
}
func (r *run) writeRemoteBytes(t *testing.T, f fs.Fs, remote string, data []byte) {
var err error
if r.useMount {
err = r.retryBlock(func() error {
return ioutil.WriteFile(path.Join(r.mntDir, remote), data, 0600)
}, 3, time.Second*3)
require.NoError(t, err)
r.vfs.WaitForWriters(10 * time.Second)
} else {
r.writeObjectBytes(t, f, remote, data)
}
r.writeObjectBytes(t, f, remote, data)
}
func (r *run) writeRemoteReader(t *testing.T, f fs.Fs, remote string, in io.ReadCloser) {
defer func() {
_ = in.Close()
}()
if r.useMount {
out, err := os.Create(path.Join(r.mntDir, remote))
require.NoError(t, err)
defer func() {
_ = out.Close()
}()
_, err = io.Copy(out, in)
require.NoError(t, err)
r.vfs.WaitForWriters(10 * time.Second)
} else {
r.writeObjectReader(t, f, remote, in)
}
r.writeObjectReader(t, f, remote, in)
}
func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {
@@ -1212,10 +1022,6 @@ func (r *run) writeObjectReader(t *testing.T, f fs.Fs, remote string, in io.Read
objInfo := object.NewStaticObjectInfo(remote, modTime, -1, true, nil, f)
obj, err := f.Put(context.Background(), in, objInfo)
require.NoError(t, err)
if r.useMount {
r.vfs.WaitForWriters(10 * time.Second)
}
return obj
}
@@ -1223,26 +1029,16 @@ func (r *run) updateObjectRemote(t *testing.T, f fs.Fs, remote string, data1 []b
var err error
var obj fs.Object
if r.useMount {
err = ioutil.WriteFile(path.Join(r.mntDir, remote), data1, 0600)
require.NoError(t, err)
r.vfs.WaitForWriters(10 * time.Second)
err = ioutil.WriteFile(path.Join(r.mntDir, remote), data2, 0600)
require.NoError(t, err)
r.vfs.WaitForWriters(10 * time.Second)
obj, err = f.NewObject(context.Background(), remote)
} else {
in1 := bytes.NewReader(data1)
in2 := bytes.NewReader(data2)
objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
in1 := bytes.NewReader(data1)
in2 := bytes.NewReader(data2)
objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
obj, err = f.Put(context.Background(), in1, objInfo1)
require.NoError(t, err)
obj, err = f.NewObject(context.Background(), remote)
require.NoError(t, err)
err = obj.Update(context.Background(), in2, objInfo2)
}
_, err = f.Put(context.Background(), in1, objInfo1)
require.NoError(t, err)
obj, err = f.NewObject(context.Background(), remote)
require.NoError(t, err)
err = obj.Update(context.Background(), in2, objInfo2)
require.NoError(t, err)
return obj
@@ -1252,30 +1048,12 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
size := end - offset
checkSample := make([]byte, size)
if r.useMount {
f, err := os.Open(path.Join(r.mntDir, remote))
defer func() {
_ = f.Close()
}()
if err != nil {
return checkSample, err
}
_, _ = f.Seek(offset, io.SeekStart)
totalRead, err := io.ReadFull(f, checkSample)
checkSample = checkSample[:totalRead]
if err == io.EOF || err == io.ErrUnexpectedEOF {
err = nil
}
if err != nil {
return checkSample, err
}
} else {
co, err := f.NewObject(context.Background(), remote)
if err != nil {
return checkSample, err
}
checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
co, err := f.NewObject(context.Background(), remote)
if err != nil {
return checkSample, err
}
checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
if !noLengthCheck && size != int64(len(checkSample)) {
return checkSample, errors.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
}
@@ -1298,28 +1076,19 @@ func (r *run) readDataFromObj(t *testing.T, o fs.Object, offset, end int64, noLe
}
func (r *run) mkdir(t *testing.T, f fs.Fs, remote string) {
var err error
if r.useMount {
err = os.Mkdir(path.Join(r.mntDir, remote), 0700)
} else {
err = f.Mkdir(context.Background(), remote)
}
err := f.Mkdir(context.Background(), remote)
require.NoError(t, err)
}
func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
var err error
if r.useMount {
err = os.Remove(path.Join(r.mntDir, remote))
var obj fs.Object
obj, err = f.NewObject(context.Background(), remote)
if err != nil {
err = f.Rmdir(context.Background(), remote)
} else {
var obj fs.Object
obj, err = f.NewObject(context.Background(), remote)
if err != nil {
err = f.Rmdir(context.Background(), remote)
} else {
err = obj.Remove(context.Background())
}
err = obj.Remove(context.Background())
}
return err
@@ -1328,42 +1097,14 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error) {
var err error
var l []interface{}
if r.useMount {
var list []os.FileInfo
list, err = ioutil.ReadDir(path.Join(r.mntDir, remote))
for _, ll := range list {
l = append(l, ll)
}
} else {
var list fs.DirEntries
list, err = f.List(context.Background(), remote)
for _, ll := range list {
l = append(l, ll)
}
var list fs.DirEntries
list, err = f.List(context.Background(), remote)
for _, ll := range list {
l = append(l, ll)
}
return l, err
}
func (r *run) listPath(t *testing.T, f fs.Fs, remote string) []string {
var err error
var l []string
if r.useMount {
var list []os.FileInfo
list, err = ioutil.ReadDir(path.Join(r.mntDir, remote))
for _, ll := range list {
l = append(l, ll.Name())
}
} else {
var list fs.DirEntries
list, err = f.List(context.Background(), remote)
for _, ll := range list {
l = append(l, ll.Remote())
}
}
require.NoError(t, err)
return l
}
func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
in, err := os.Open(src)
if err != nil {
@@ -1388,13 +1129,7 @@ func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
var err error
if runInstance.useMount {
err = os.Rename(path.Join(runInstance.mntDir, src), path.Join(runInstance.mntDir, dst))
if err != nil {
return err
}
r.vfs.WaitForWriters(10 * time.Second)
} else if rootFs.Features().DirMove != nil {
if rootFs.Features().DirMove != nil {
err = rootFs.Features().DirMove(context.Background(), rootFs, src, dst)
if err != nil {
return err
@@ -1410,13 +1145,7 @@ func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
var err error
if runInstance.useMount {
err = os.Rename(path.Join(runInstance.mntDir, src), path.Join(runInstance.mntDir, dst))
if err != nil {
return err
}
r.vfs.WaitForWriters(10 * time.Second)
} else if rootFs.Features().Move != nil {
if rootFs.Features().Move != nil {
obj1, err := rootFs.NewObject(context.Background(), src)
if err != nil {
return err
@@ -1436,13 +1165,7 @@ func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
var err error
if r.useMount {
err = r.copyFile(t, rootFs, path.Join(r.mntDir, src), path.Join(r.mntDir, dst))
if err != nil {
return err
}
r.vfs.WaitForWriters(10 * time.Second)
} else if rootFs.Features().Copy != nil {
if rootFs.Features().Copy != nil {
obj, err := rootFs.NewObject(context.Background(), src)
if err != nil {
return err
@@ -1462,13 +1185,6 @@ func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error) {
var err error
if r.useMount {
fi, err := os.Stat(path.Join(runInstance.mntDir, src))
if err != nil {
return time.Time{}, err
}
return fi.ModTime(), nil
}
obj1, err := rootFs.NewObject(context.Background(), src)
if err != nil {
return time.Time{}, err
@@ -1479,13 +1195,6 @@ func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error)
func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
var err error
if r.useMount {
fi, err := os.Stat(path.Join(runInstance.mntDir, src))
if err != nil {
return int64(0), err
}
return fi.Size(), nil
}
obj1, err := rootFs.NewObject(context.Background(), src)
if err != nil {
return int64(0), err
@@ -1496,28 +1205,15 @@ func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) error {
var err error
if r.useMount {
var f *os.File
f, err = os.OpenFile(path.Join(runInstance.mntDir, src), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return err
}
defer func() {
_ = f.Close()
r.vfs.WaitForWriters(10 * time.Second)
}()
_, err = f.WriteString(data + append)
} else {
var obj1 fs.Object
obj1, err = rootFs.NewObject(context.Background(), src)
if err != nil {
return err
}
data1 := []byte(data + append)
r := bytes.NewReader(data1)
objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
err = obj1.Update(context.Background(), r, objInfo1)
var obj1 fs.Object
obj1, err = rootFs.NewObject(context.Background(), src)
if err != nil {
return err
}
data1 := []byte(data + append)
reader := bytes.NewReader(data1)
objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
err = obj1.Update(context.Background(), reader, objInfo1)
return err
}


@@ -1,78 +0,0 @@
// +build !plan9,!windows
package cache_test
import (
"os"
"testing"
"time"
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
"github.com/rclone/rclone/cmd/mount"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/require"
)
func (r *run) mountFs(t *testing.T, f fs.Fs) {
device := f.Name() + ":" + f.Root()
var options = []fuse.MountOption{
fuse.MaxReadahead(uint32(mountlib.MaxReadAhead)),
fuse.Subtype("rclone"),
fuse.FSName(device), fuse.VolumeName(device),
fuse.NoAppleDouble(),
fuse.NoAppleXattr(),
//fuse.AllowOther(),
}
err := os.MkdirAll(r.mntDir, os.ModePerm)
require.NoError(t, err)
c, err := fuse.Mount(r.mntDir, options...)
require.NoError(t, err)
filesys := mount.NewFS(f)
server := fusefs.New(c, nil)
// Serve the mount point in the background returning error to errChan
r.unmountRes = make(chan error, 1)
go func() {
err := server.Serve(filesys)
closeErr := c.Close()
if err == nil {
err = closeErr
}
r.unmountRes <- err
}()
// check if the mount process has an error to report
<-c.Ready
require.NoError(t, c.MountError)
r.unmountFn = func() error {
// Shutdown the VFS
filesys.VFS.Shutdown()
return fuse.Unmount(r.mntDir)
}
r.vfs = filesys.VFS
r.isMounted = true
}
func (r *run) unmountFs(t *testing.T, f fs.Fs) {
var err error
for i := 0; i < 4; i++ {
err = r.unmountFn()
if err != nil {
//log.Printf("signal to umount failed - retrying: %v", err)
time.Sleep(3 * time.Second)
continue
}
break
}
require.NoError(t, err)
err = <-r.unmountRes
require.NoError(t, err)
err = r.vfs.CleanUp()
require.NoError(t, err)
r.isMounted = false
}


@@ -1,124 +0,0 @@
// +build windows
package cache_test
import (
"fmt"
"os"
"testing"
"time"
"github.com/billziss-gh/cgofuse/fuse"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd/cmount"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/require"
)
// waitFor runs fn() until it returns true or the timeout expires
func waitFor(fn func() bool) (ok bool) {
const totalWait = 10 * time.Second
const individualWait = 10 * time.Millisecond
for i := 0; i < int(totalWait/individualWait); i++ {
ok = fn()
if ok {
return ok
}
time.Sleep(individualWait)
}
return false
}
func (r *run) mountFs(t *testing.T, f fs.Fs) {
// FIXME implement cmount
t.Skip("windows not supported yet")
device := f.Name() + ":" + f.Root()
options := []string{
"-o", "fsname=" + device,
"-o", "subtype=rclone",
"-o", fmt.Sprintf("max_readahead=%d", mountlib.MaxReadAhead),
"-o", "uid=-1",
"-o", "gid=-1",
"-o", "allow_other",
// This causes FUSE to supply O_TRUNC with the Open
// call which is more efficient for cmount. However
// it does not work with cgofuse on Windows with
// WinFSP so cmount must work with or without it.
"-o", "atomic_o_trunc",
"--FileSystemName=rclone",
}
fsys := cmount.NewFS(f)
host := fuse.NewFileSystemHost(fsys)
// Serve the mount point in the background returning error to errChan
r.unmountRes = make(chan error, 1)
go func() {
var err error
ok := host.Mount(r.mntDir, options)
if !ok {
err = errors.New("mount failed")
}
r.unmountRes <- err
}()
// unmount
r.unmountFn = func() error {
// Shutdown the VFS
fsys.VFS.Shutdown()
if host.Unmount() {
if !waitFor(func() bool {
_, err := os.Stat(r.mntDir)
return err != nil
}) {
t.Fatalf("mountpoint %q didn't disappear after unmount - continuing anyway", r.mntDir)
}
return nil
}
return errors.New("host unmount failed")
}
// Wait for the filesystem to become ready, checking the file
// system didn't blow up before starting
select {
case err := <-r.unmountRes:
require.NoError(t, err)
case <-time.After(time.Second * 3):
}
// Wait for the mount point to be available on Windows
// On Windows the Init signal comes slightly before the mount is ready
if !waitFor(func() bool {
_, err := os.Stat(r.mntDir)
return err == nil
}) {
t.Errorf("mountpoint %q didn't became available on mount", r.mntDir)
}
r.vfs = fsys.VFS
r.isMounted = true
}
func (r *run) unmountFs(t *testing.T, f fs.Fs) {
// FIXME implement cmount
t.Skip("windows not supported yet")
var err error
for i := 0; i < 4; i++ {
err = r.unmountFn()
if err != nil {
//log.Printf("signal to umount failed - retrying: %v", err)
time.Sleep(3 * time.Second)
continue
}
break
}
require.NoError(t, err)
err = <-r.unmountRes
require.NoError(t, err)
err = r.vfs.CleanUp()
require.NoError(t, err)
r.isMounted = false
}


@@ -1,6 +1,7 @@
// Test Cache filesystem interface
// +build !plan9
// +build !plan9,!js
// +build !race
package cache_test
@@ -17,7 +18,7 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "MergeDirs", "OpenWriterAt"},
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
})


@@ -1,6 +1,6 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build plan9
// +build plan9 js
package cache


@@ -1,4 +1,5 @@
// +build !plan9
// +build !plan9,!js
// +build !race
package cache_test


@@ -1,4 +1,4 @@
// +build !plan9
// +build !plan9,!js
package cache
@@ -101,15 +101,6 @@ func (d *Directory) abs() string {
return cleanPath(path.Join(d.Dir, d.Name))
}
// parentRemote returns the absolute path parent remote
func (d *Directory) parentRemote() string {
absPath := d.abs()
if absPath == "" {
return ""
}
return cleanPath(path.Dir(absPath))
}
// ModTime returns the cached ModTime
func (d *Directory) ModTime(ctx context.Context) time.Time {
return time.Unix(0, d.CacheModTime)


@@ -1,4 +1,4 @@
// +build !plan9
// +build !plan9,!js
package cache


@@ -1,4 +1,4 @@
// +build !plan9
// +build !plan9,!js
package cache
@@ -24,15 +24,16 @@ const (
type Object struct {
fs.Object `json:"-"`
ParentFs fs.Fs `json:"-"` // parent fs
CacheFs *Fs `json:"-"` // cache fs
Name string `json:"name"` // name of the directory
Dir string `json:"dir"` // abs path of the object
CacheModTime int64 `json:"modTime"` // modification or creation time - IsZero for unknown
CacheSize int64 `json:"size"` // size of directory and contents or -1 if unknown
CacheStorable bool `json:"storable"` // says whether this object can be stored
CacheType string `json:"cacheType"`
CacheTs time.Time `json:"cacheTs"`
ParentFs fs.Fs `json:"-"` // parent fs
CacheFs *Fs `json:"-"` // cache fs
Name string `json:"name"` // name of the directory
Dir string `json:"dir"` // abs path of the object
CacheModTime int64 `json:"modTime"` // modification or creation time - IsZero for unknown
CacheSize int64 `json:"size"` // size of directory and contents or -1 if unknown
CacheStorable bool `json:"storable"` // says whether this object can be stored
CacheType string `json:"cacheType"`
CacheTs time.Time `json:"cacheTs"`
cacheHashesMu sync.Mutex
CacheHashes map[hash.Type]string // all supported hashes cached
refreshMutex sync.Mutex
@@ -103,7 +104,9 @@ func (o *Object) updateData(ctx context.Context, source fs.Object) {
o.CacheSize = source.Size()
o.CacheStorable = source.Storable()
o.CacheTs = time.Now()
o.cacheHashesMu.Lock()
o.CacheHashes = make(map[hash.Type]string)
o.cacheHashesMu.Unlock()
}
// Fs returns its FS info
@@ -268,7 +271,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
o.CacheModTime = src.ModTime(ctx).UnixNano()
o.CacheSize = src.Size()
o.cacheHashesMu.Lock()
o.CacheHashes = make(map[hash.Type]string)
o.cacheHashesMu.Unlock()
o.CacheTs = time.Now()
o.persist()
@@ -309,11 +314,12 @@ func (o *Object) Remove(ctx context.Context) error {
// since it might or might not be called, this is lazy loaded
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
_ = o.refresh(ctx)
o.cacheHashesMu.Lock()
if o.CacheHashes == nil {
o.CacheHashes = make(map[hash.Type]string)
}
cachedHash, found := o.CacheHashes[ht]
o.cacheHashesMu.Unlock()
if found {
return cachedHash, nil
}
@@ -324,7 +330,9 @@ func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
if err != nil {
return "", err
}
o.cacheHashesMu.Lock()
o.CacheHashes[ht] = liveHash
o.cacheHashesMu.Unlock()
o.persist()
fs.Debugf(o, "object hash cached: %v", liveHash)


@@ -1,4 +1,4 @@
// +build !plan9
// +build !plan9,!js
package cache


@@ -1,4 +1,4 @@
// +build !plan9
// +build !plan9,!js
package cache


@@ -1,4 +1,4 @@
// +build !plan9
// +build !plan9,!js
package cache
@@ -16,10 +16,10 @@ import (
"sync"
"time"
bolt "github.com/coreos/bbolt"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk"
bolt "go.etcd.io/bbolt"
)
// Constants
@@ -767,31 +767,6 @@ func (b *Persistent) iterateBuckets(buk *bolt.Bucket, bucketFn func(name string)
return err
}
func (b *Persistent) dumpRoot() string {
var itBuckets func(buk *bolt.Bucket) map[string]interface{}
itBuckets = func(buk *bolt.Bucket) map[string]interface{} {
m := make(map[string]interface{})
c := buk.Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() {
if v == nil {
buk2 := buk.Bucket(k)
m[string(k)] = itBuckets(buk2)
} else {
m[string(k)] = "-"
}
}
return m
}
var mm map[string]interface{}
_ = b.db.View(func(tx *bolt.Tx) error {
mm = itBuckets(tx.Bucket([]byte(RootBucket)))
return nil
})
raw, _ := json.MarshalIndent(mm, "", " ")
return string(raw)
}
// addPendingUpload adds a new file to the pending queue of uploads
func (b *Persistent) addPendingUpload(destPath string, started bool) error {
return b.db.Update(func(tx *bolt.Tx) error {
@@ -1005,15 +980,6 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
})
}
// SetPendingUploadToStarted is a way to mark an entry as started (even if it's not already)
// TO BE USED IN TESTING ONLY
func (b *Persistent) SetPendingUploadToStarted(remote string) error {
return b.updatePendingUpload(remote, func(item *tempUploadInfo) error {
item.Started = true
return nil
})
}
// ReconcileTempUploads will recursively look for all the files in the temp directory and add them to the queue
func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) error {
return b.db.Update(func(tx *bolt.Tx) error {
@@ -1061,19 +1027,6 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) erro
})
}
// PurgeTempUploads will remove all the pending uploads from the queue
// TO BE USED IN TESTING ONLY
func (b *Persistent) PurgeTempUploads() {
b.tempQueueMux.Lock()
defer b.tempQueueMux.Unlock()
_ = b.db.Update(func(tx *bolt.Tx) error {
_ = tx.DeleteBucket([]byte(tempBucket))
_, _ = tx.CreateBucketIfNotExists([]byte(tempBucket))
return nil
})
}
// Close should be called when the program ends gracefully
func (b *Persistent) Close() {
b.cleanupMux.Lock()

backend/cache/utils_test.go (new file)

@@ -0,0 +1,23 @@
package cache
import bolt "go.etcd.io/bbolt"
// PurgeTempUploads will remove all the pending uploads from the queue
func (b *Persistent) PurgeTempUploads() {
b.tempQueueMux.Lock()
defer b.tempQueueMux.Unlock()
_ = b.db.Update(func(tx *bolt.Tx) error {
_ = tx.DeleteBucket([]byte(tempBucket))
_, _ = tx.CreateBucketIfNotExists([]byte(tempBucket))
return nil
})
}
// SetPendingUploadToStarted is a way to mark an entry as started (even if it's not already)
func (b *Persistent) SetPendingUploadToStarted(remote string) error {
return b.updatePendingUpload(remote, func(item *tempUploadInfo) error {
item.Started = true
return nil
})
}

File diff suppressed because it is too large


@@ -13,6 +13,7 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
@@ -64,35 +65,40 @@ func testChunkNameFormat(t *testing.T, f *Fs) {
assert.Error(t, err)
}
assertMakeName := func(wantChunkName, mainName string, chunkNo int, ctrlType string, xactNo int64) {
gotChunkName := f.makeChunkName(mainName, chunkNo, ctrlType, xactNo)
assert.Equal(t, wantChunkName, gotChunkName)
assertMakeName := func(wantChunkName, mainName string, chunkNo int, ctrlType, xactID string) {
gotChunkName := ""
assert.NotPanics(t, func() {
gotChunkName = f.makeChunkName(mainName, chunkNo, ctrlType, xactID)
}, "makeChunkName(%q,%d,%q,%q) must not panic", mainName, chunkNo, ctrlType, xactID)
if gotChunkName != "" {
assert.Equal(t, wantChunkName, gotChunkName)
}
}
assertMakeNamePanics := func(mainName string, chunkNo int, ctrlType string, xactNo int64) {
assertMakeNamePanics := func(mainName string, chunkNo int, ctrlType, xactID string) {
assert.Panics(t, func() {
_ = f.makeChunkName(mainName, chunkNo, ctrlType, xactNo)
}, "makeChunkName(%q,%d,%q,%d) should panic", mainName, chunkNo, ctrlType, xactNo)
_ = f.makeChunkName(mainName, chunkNo, ctrlType, xactID)
}, "makeChunkName(%q,%d,%q,%q) should panic", mainName, chunkNo, ctrlType, xactID)
}
assertParseName := func(fileName, wantMainName string, wantChunkNo int, wantCtrlType string, wantXactNo int64) {
gotMainName, gotChunkNo, gotCtrlType, gotXactNo := f.parseChunkName(fileName)
assertParseName := func(fileName, wantMainName string, wantChunkNo int, wantCtrlType, wantXactID string) {
gotMainName, gotChunkNo, gotCtrlType, gotXactID := f.parseChunkName(fileName)
assert.Equal(t, wantMainName, gotMainName)
assert.Equal(t, wantChunkNo, gotChunkNo)
assert.Equal(t, wantCtrlType, gotCtrlType)
assert.Equal(t, wantXactNo, gotXactNo)
assert.Equal(t, wantXactID, gotXactID)
}
const newFormatSupported = false // support for patterns not starting with base name (*)
// valid formats
assertFormat(`*.rclone_chunk.###`, `%s.rclone_chunk.%03d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]{3,})|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
assertFormat(`*.rclone_chunk.#`, `%s.rclone_chunk.%d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]+)|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
assertFormat(`*_chunk_#####`, `%s_chunk_%05d`, `%s_chunk__%s`, `^(.+?)_chunk_(?:([0-9]{5,})|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
assertFormat(`*-chunk-#`, `%s-chunk-%d`, `%s-chunk-_%s`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
assertFormat(`*-chunk-#-%^$()[]{}.+-!?:\`, `%s-chunk-%d-%%^$()[]{}.+-!?:\`, `%s-chunk-_%s-%%^$()[]{}.+-!?:\`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z]{3,9}))-%\^\$\(\)\[\]\{\}\.\+-!\?:\\(?:\.\.tmp_([0-9]{10,19}))?$`)
assertFormat(`*.rclone_chunk.###`, `%s.rclone_chunk.%03d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]{3,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*.rclone_chunk.#`, `%s.rclone_chunk.%d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*_chunk_#####`, `%s_chunk_%05d`, `%s_chunk__%s`, `^(.+?)_chunk_(?:([0-9]{5,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*-chunk-#`, `%s-chunk-%d`, `%s-chunk-_%s`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*-chunk-#-%^$()[]{}.+-!?:\`, `%s-chunk-%d-%%^$()[]{}.+-!?:\`, `%s-chunk-_%s-%%^$()[]{}.+-!?:\`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))-%\^\$\(\)\[\]\{\}\.\+-!\?:\\(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
if newFormatSupported {
assertFormat(`_*-chunk-##,`, `_%s-chunk-%02d,`, `_%s-chunk-_%s,`, `^_(.+?)-chunk-(?:([0-9]{2,})|_([a-z]{3,9})),(?:\.\.tmp_([0-9]{10,19}))?$`)
assertFormat(`_*-chunk-##,`, `_%s-chunk-%02d,`, `_%s-chunk-_%s,`, `^_(.+?)-chunk-(?:([0-9]{2,})|_([a-z][a-z0-9]{2,6})),(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
}
// invalid formats
@@ -111,142 +117,223 @@ func testChunkNameFormat(t *testing.T, f *Fs) {
// quick tests
if newFormatSupported {
assertFormat(`part_*_#`, `part_%s_%d`, `part_%s__%s`, `^part_(.+?)_(?:([0-9]+)|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
assertFormat(`part_*_#`, `part_%s_%d`, `part_%s__%s`, `^part_(.+?)_(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9][0-9a-z]{3,8})\.\.tmp_([0-9]{10,13}))?$`)
f.opt.StartFrom = 1
assertMakeName(`part_fish_1`, "fish", 0, "", -1)
assertParseName(`part_fish_43`, "fish", 42, "", -1)
assertMakeName(`part_fish_3..tmp_0000000004`, "fish", 2, "", 4)
assertParseName(`part_fish_4..tmp_0000000005`, "fish", 3, "", 5)
assertMakeName(`part_fish__locks`, "fish", -2, "locks", -3)
assertParseName(`part_fish__locks`, "fish", -1, "locks", -1)
assertMakeName(`part_fish__blockinfo..tmp_1234567890123456789`, "fish", -3, "blockinfo", 1234567890123456789)
assertParseName(`part_fish__blockinfo..tmp_1234567890123456789`, "fish", -1, "blockinfo", 1234567890123456789)
assertMakeName(`part_fish_1`, "fish", 0, "", "")
assertParseName(`part_fish_43`, "fish", 42, "", "")
assertMakeName(`part_fish__locks`, "fish", -2, "locks", "")
assertParseName(`part_fish__locks`, "fish", -1, "locks", "")
assertMakeName(`part_fish__x2y`, "fish", -2, "x2y", "")
assertParseName(`part_fish__x2y`, "fish", -1, "x2y", "")
assertMakeName(`part_fish_3_0004`, "fish", 2, "", "4")
assertParseName(`part_fish_4_0005`, "fish", 3, "", "0005")
assertMakeName(`part_fish__blkinfo_jj5fvo3wr`, "fish", -3, "blkinfo", "jj5fvo3wr")
assertParseName(`part_fish__blkinfo_zz9fvo3wr`, "fish", -1, "blkinfo", "zz9fvo3wr")
// old-style temporary suffix (parse only)
assertParseName(`part_fish_4..tmp_0000000011`, "fish", 3, "", "000b")
assertParseName(`part_fish__blkinfo_jj5fvo3wr`, "fish", -1, "blkinfo", "jj5fvo3wr")
}
// prepare format for long tests
assertFormat(`*.chunk.###`, `%s.chunk.%03d`, `%s.chunk._%s`, `^(.+?)\.chunk\.(?:([0-9]{3,})|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
assertFormat(`*.chunk.###`, `%s.chunk.%03d`, `%s.chunk._%s`, `^(.+?)\.chunk\.(?:([0-9]{3,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
f.opt.StartFrom = 2
// valid data chunks
assertMakeName(`fish.chunk.003`, "fish", 1, "", -1)
assertMakeName(`fish.chunk.011..tmp_0000054321`, "fish", 9, "", 54321)
assertMakeName(`fish.chunk.011..tmp_1234567890`, "fish", 9, "", 1234567890)
assertMakeName(`fish.chunk.1916..tmp_123456789012345`, "fish", 1914, "", 123456789012345)
assertMakeName(`fish.chunk.003`, "fish", 1, "", "")
assertParseName(`fish.chunk.003`, "fish", 1, "", "")
assertMakeName(`fish.chunk.021`, "fish", 19, "", "")
assertParseName(`fish.chunk.021`, "fish", 19, "", "")
assertParseName(`fish.chunk.003`, "fish", 1, "", -1)
assertParseName(`fish.chunk.004..tmp_0000000021`, "fish", 2, "", 21)
assertParseName(`fish.chunk.021`, "fish", 19, "", -1)
assertParseName(`fish.chunk.323..tmp_1234567890123456789`, "fish", 321, "", 1234567890123456789)
// valid temporary data chunks
assertMakeName(`fish.chunk.011_4321`, "fish", 9, "", "4321")
assertParseName(`fish.chunk.011_4321`, "fish", 9, "", "4321")
assertMakeName(`fish.chunk.011_00bc`, "fish", 9, "", "00bc")
assertParseName(`fish.chunk.011_00bc`, "fish", 9, "", "00bc")
assertMakeName(`fish.chunk.1916_5jjfvo3wr`, "fish", 1914, "", "5jjfvo3wr")
assertParseName(`fish.chunk.1916_5jjfvo3wr`, "fish", 1914, "", "5jjfvo3wr")
assertMakeName(`fish.chunk.1917_zz9fvo3wr`, "fish", 1915, "", "zz9fvo3wr")
assertParseName(`fish.chunk.1917_zz9fvo3wr`, "fish", 1915, "", "zz9fvo3wr")
// valid temporary data chunks (old temporary suffix, only parse)
assertParseName(`fish.chunk.004..tmp_0000000047`, "fish", 2, "", "001b")
assertParseName(`fish.chunk.323..tmp_9994567890123`, "fish", 321, "", "3jjfvo3wr")
// parsing invalid data chunk names
assertParseName(`fish.chunk.3`, "", -1, "", -1)
assertParseName(`fish.chunk.001`, "", -1, "", -1)
assertParseName(`fish.chunk.21`, "", -1, "", -1)
assertParseName(`fish.chunk.-21`, "", -1, "", -1)
assertParseName(`fish.chunk.3`, "", -1, "", "")
assertParseName(`fish.chunk.001`, "", -1, "", "")
assertParseName(`fish.chunk.21`, "", -1, "", "")
assertParseName(`fish.chunk.-21`, "", -1, "", "")
assertParseName(`fish.chunk.004.tmp_0000000021`, "", -1, "", -1)
assertParseName(`fish.chunk.003..tmp_123456789`, "", -1, "", -1)
assertParseName(`fish.chunk.003..tmp_012345678901234567890123456789`, "", -1, "", -1)
assertParseName(`fish.chunk.003..tmp_-1`, "", -1, "", -1)
assertParseName(`fish.chunk.004abcd`, "", -1, "", "") // missing underscore delimiter
assertParseName(`fish.chunk.004__1234`, "", -1, "", "") // extra underscore delimiter
assertParseName(`fish.chunk.004_123`, "", -1, "", "") // too short temporary suffix
assertParseName(`fish.chunk.004_1234567890`, "", -1, "", "") // too long temporary suffix
assertParseName(`fish.chunk.004_-1234`, "", -1, "", "") // temporary suffix must be positive
assertParseName(`fish.chunk.004_123E`, "", -1, "", "") // uppercase not allowed
assertParseName(`fish.chunk.004_12.3`, "", -1, "", "") // punctuation not allowed
// parsing invalid data chunk names (old temporary suffix)
assertParseName(`fish.chunk.004.tmp_0000000021`, "", -1, "", "")
assertParseName(`fish.chunk.003..tmp_123456789`, "", -1, "", "")
assertParseName(`fish.chunk.003..tmp_012345678901234567890123456789`, "", -1, "", "")
assertParseName(`fish.chunk.323..tmp_12345678901234`, "", -1, "", "")
assertParseName(`fish.chunk.003..tmp_-1`, "", -1, "", "")
// valid control chunks
assertMakeName(`fish.chunk._info`, "fish", -1, "info", -1)
assertMakeName(`fish.chunk._locks`, "fish", -2, "locks", -1)
assertMakeName(`fish.chunk._blockinfo`, "fish", -3, "blockinfo", -1)
assertMakeName(`fish.chunk._info`, "fish", -1, "info", "")
assertMakeName(`fish.chunk._locks`, "fish", -2, "locks", "")
assertMakeName(`fish.chunk._blkinfo`, "fish", -3, "blkinfo", "")
assertMakeName(`fish.chunk._x2y`, "fish", -4, "x2y", "")
assertParseName(`fish.chunk._info`, "fish", -1, "info", -1)
assertParseName(`fish.chunk._locks`, "fish", -1, "locks", -1)
assertParseName(`fish.chunk._blockinfo`, "fish", -1, "blockinfo", -1)
assertParseName(`fish.chunk._info`, "fish", -1, "info", "")
assertParseName(`fish.chunk._locks`, "fish", -1, "locks", "")
assertParseName(`fish.chunk._blkinfo`, "fish", -1, "blkinfo", "")
assertParseName(`fish.chunk._x2y`, "fish", -1, "x2y", "")
// valid temporary control chunks
assertMakeName(`fish.chunk._info..tmp_0000000021`, "fish", -1, "info", 21)
assertMakeName(`fish.chunk._locks..tmp_0000054321`, "fish", -2, "locks", 54321)
assertMakeName(`fish.chunk._uploads..tmp_0000000000`, "fish", -3, "uploads", 0)
assertMakeName(`fish.chunk._blockinfo..tmp_1234567890123456789`, "fish", -4, "blockinfo", 1234567890123456789)
assertMakeName(`fish.chunk._info_0001`, "fish", -1, "info", "1")
assertMakeName(`fish.chunk._locks_4321`, "fish", -2, "locks", "4321")
assertMakeName(`fish.chunk._uploads_abcd`, "fish", -3, "uploads", "abcd")
assertMakeName(`fish.chunk._blkinfo_xyzabcdef`, "fish", -4, "blkinfo", "xyzabcdef")
assertMakeName(`fish.chunk._x2y_1aaa`, "fish", -5, "x2y", "1aaa")
assertParseName(`fish.chunk._info..tmp_0000000021`, "fish", -1, "info", 21)
assertParseName(`fish.chunk._locks..tmp_0000054321`, "fish", -1, "locks", 54321)
assertParseName(`fish.chunk._uploads..tmp_0000000000`, "fish", -1, "uploads", 0)
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789`, "fish", -1, "blockinfo", 1234567890123456789)
assertParseName(`fish.chunk._info_0001`, "fish", -1, "info", "0001")
assertParseName(`fish.chunk._locks_4321`, "fish", -1, "locks", "4321")
assertParseName(`fish.chunk._uploads_9abc`, "fish", -1, "uploads", "9abc")
assertParseName(`fish.chunk._blkinfo_xyzabcdef`, "fish", -1, "blkinfo", "xyzabcdef")
assertParseName(`fish.chunk._x2y_1aaa`, "fish", -1, "x2y", "1aaa")
// valid temporary control chunks (old temporary suffix, parse only)
assertParseName(`fish.chunk._info..tmp_0000000047`, "fish", -1, "info", "001b")
assertParseName(`fish.chunk._locks..tmp_0000054321`, "fish", -1, "locks", "15wx")
assertParseName(`fish.chunk._uploads..tmp_0000000000`, "fish", -1, "uploads", "0000")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123`, "fish", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._x2y..tmp_0000000000`, "fish", -1, "x2y", "0000")
// parsing invalid control chunk names
assertParseName(`fish.chunk.info`, "", -1, "", -1)
assertParseName(`fish.chunk.locks`, "", -1, "", -1)
assertParseName(`fish.chunk.uploads`, "", -1, "", -1)
assertParseName(`fish.chunk.blockinfo`, "", -1, "", -1)
assertParseName(`fish.chunk.metadata`, "", -1, "", "") // must be prepended by underscore
assertParseName(`fish.chunk.info`, "", -1, "", "")
assertParseName(`fish.chunk.locks`, "", -1, "", "")
assertParseName(`fish.chunk.uploads`, "", -1, "", "")
assertParseName(`fish.chunk._os`, "", -1, "", -1)
assertParseName(`fish.chunk._futuredata`, "", -1, "", -1)
assertParseName(`fish.chunk._me_ta`, "", -1, "", -1)
assertParseName(`fish.chunk._in-fo`, "", -1, "", -1)
assertParseName(`fish.chunk._.bin`, "", -1, "", -1)
assertParseName(`fish.chunk._os`, "", -1, "", "") // too short
assertParseName(`fish.chunk._metadata`, "", -1, "", "") // too long
assertParseName(`fish.chunk._blockinfo`, "", -1, "", "") // way too long
assertParseName(`fish.chunk._4me`, "", -1, "", "") // cannot start with digit
assertParseName(`fish.chunk._567`, "", -1, "", "") // cannot be all digits
assertParseName(`fish.chunk._me_ta`, "", -1, "", "") // punctuation not allowed
assertParseName(`fish.chunk._in-fo`, "", -1, "", "")
assertParseName(`fish.chunk._.bin`, "", -1, "", "")
assertParseName(`fish.chunk._.2xy`, "", -1, "", "")
assertParseName(`fish.chunk._locks..tmp_123456789`, "", -1, "", -1)
assertParseName(`fish.chunk._meta..tmp_-1`, "", -1, "", -1)
assertParseName(`fish.chunk._blockinfo..tmp_012345678901234567890123456789`, "", -1, "", -1)
// parsing invalid temporary control chunks
assertParseName(`fish.chunk._blkinfo1234`, "", -1, "", "") // missing underscore delimiter
assertParseName(`fish.chunk._info__1234`, "", -1, "", "") // extra underscore delimiter
assertParseName(`fish.chunk._info_123`, "", -1, "", "") // too short temporary suffix
assertParseName(`fish.chunk._info_1234567890`, "", -1, "", "") // too long temporary suffix
assertParseName(`fish.chunk._info_-1234`, "", -1, "", "") // temporary suffix must be positive
assertParseName(`fish.chunk._info_123E`, "", -1, "", "") // uppercase not allowed
assertParseName(`fish.chunk._info_12.3`, "", -1, "", "") // punctuation not allowed
assertParseName(`fish.chunk._locks..tmp_123456789`, "", -1, "", "")
assertParseName(`fish.chunk._meta..tmp_-1`, "", -1, "", "")
assertParseName(`fish.chunk._blockinfo..tmp_012345678901234567890123456789`, "", -1, "", "")
// short control chunk names: 3 letters ok, 1-2 letters not allowed
assertMakeName(`fish.chunk._ext`, "fish", -1, "ext", -1)
assertMakeName(`fish.chunk._ext..tmp_0000000021`, "fish", -1, "ext", 21)
assertParseName(`fish.chunk._int`, "fish", -1, "int", -1)
assertParseName(`fish.chunk._int..tmp_0000000021`, "fish", -1, "int", 21)
assertMakeNamePanics("fish", -1, "in", -1)
assertMakeNamePanics("fish", -1, "up", 4)
assertMakeNamePanics("fish", -1, "x", -1)
assertMakeNamePanics("fish", -1, "c", 4)
assertMakeName(`fish.chunk._ext`, "fish", -1, "ext", "")
assertParseName(`fish.chunk._int`, "fish", -1, "int", "")
assertMakeNamePanics("fish", -1, "in", "")
assertMakeNamePanics("fish", -1, "up", "4")
assertMakeNamePanics("fish", -1, "x", "")
assertMakeNamePanics("fish", -1, "c", "1z")
assertMakeName(`fish.chunk._ext_0000`, "fish", -1, "ext", "0")
assertMakeName(`fish.chunk._ext_0026`, "fish", -1, "ext", "26")
assertMakeName(`fish.chunk._int_0abc`, "fish", -1, "int", "abc")
assertMakeName(`fish.chunk._int_9xyz`, "fish", -1, "int", "9xyz")
assertMakeName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
assertMakeName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
assertParseName(`fish.chunk._ext_0000`, "fish", -1, "ext", "0000")
assertParseName(`fish.chunk._ext_0026`, "fish", -1, "ext", "0026")
assertParseName(`fish.chunk._int_0abc`, "fish", -1, "int", "0abc")
assertParseName(`fish.chunk._int_9xyz`, "fish", -1, "int", "9xyz")
assertParseName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
assertParseName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
// base file name can sometimes look like a valid chunk name
assertParseName(`fish.chunk.003.chunk.004`, "fish.chunk.003", 2, "", -1)
assertParseName(`fish.chunk.003.chunk.005..tmp_0000000021`, "fish.chunk.003", 3, "", 21)
assertParseName(`fish.chunk.003.chunk._info`, "fish.chunk.003", -1, "info", -1)
assertParseName(`fish.chunk.003.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk.003", -1, "blockinfo", 1234567890123456789)
assertParseName(`fish.chunk.003.chunk._Meta`, "", -1, "", -1)
assertParseName(`fish.chunk.003.chunk._x..tmp_0000054321`, "", -1, "", -1)
assertParseName(`fish.chunk.003.chunk.004`, "fish.chunk.003", 2, "", "")
assertParseName(`fish.chunk.003.chunk._info`, "fish.chunk.003", -1, "info", "")
assertParseName(`fish.chunk.003.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.004`, "fish.chunk.004..tmp_0000000021", 2, "", -1)
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.005..tmp_0000000021`, "fish.chunk.004..tmp_0000000021", 3, "", 21)
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._info`, "fish.chunk.004..tmp_0000000021", -1, "info", -1)
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk.004..tmp_0000000021", -1, "blockinfo", 1234567890123456789)
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._Meta`, "", -1, "", -1)
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._x..tmp_0000054321`, "", -1, "", -1)
assertParseName(`fish.chunk._info.chunk.004`, "fish.chunk._info", 2, "", "")
assertParseName(`fish.chunk._info.chunk._info`, "fish.chunk._info", -1, "info", "")
assertParseName(`fish.chunk._info.chunk._info.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk._info.chunk.004`, "fish.chunk._info", 2, "", -1)
assertParseName(`fish.chunk._info.chunk.005..tmp_0000000021`, "fish.chunk._info", 3, "", 21)
assertParseName(`fish.chunk._info.chunk._info`, "fish.chunk._info", -1, "info", -1)
assertParseName(`fish.chunk._info.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk._info", -1, "blockinfo", 1234567890123456789)
assertParseName(`fish.chunk._info.chunk._info.chunk._Meta`, "", -1, "", -1)
assertParseName(`fish.chunk._info.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", -1)
// base file name looking like a valid chunk name (old temporary suffix)
assertParseName(`fish.chunk.003.chunk.005..tmp_0000000022`, "fish.chunk.003", 3, "", "000m")
assertParseName(`fish.chunk.003.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._info.chunk.005..tmp_0000000023`, "fish.chunk._info", 3, "", "000n")
assertParseName(`fish.chunk._info.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk.004`, "fish.chunk._blockinfo..tmp_1234567890123456789", 2, "", -1)
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk.005..tmp_0000000021`, "fish.chunk._blockinfo..tmp_1234567890123456789", 3, "", 21)
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._info`, "fish.chunk._blockinfo..tmp_1234567890123456789", -1, "info", -1)
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk._blockinfo..tmp_1234567890123456789", -1, "blockinfo", 1234567890123456789)
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._info.chunk._Meta`, "", -1, "", -1)
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", -1)
assertParseName(`fish.chunk.003.chunk._blkinfo..tmp_9994567890123`, "fish.chunk.003", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._info.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._info", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.004`, "fish.chunk.004..tmp_0000000021", 2, "", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.005..tmp_0000000025`, "fish.chunk.004..tmp_0000000021", 3, "", "000p")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._info`, "fish.chunk.004..tmp_0000000021", -1, "info", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._blkinfo..tmp_9994567890123`, "fish.chunk.004..tmp_0000000021", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk.004`, "fish.chunk._blkinfo..tmp_9994567890123", 2, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk.005..tmp_0000000026`, "fish.chunk._blkinfo..tmp_9994567890123", 3, "", "000q")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info`, "fish.chunk._blkinfo..tmp_9994567890123", -1, "info", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._blkinfo..tmp_9994567890123", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk.004`, "fish.chunk._blkinfo..tmp_1234567890123456789", 2, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk.005..tmp_0000000022`, "fish.chunk._blkinfo..tmp_1234567890123456789", 3, "", "000m")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info`, "fish.chunk._blkinfo..tmp_1234567890123456789", -1, "info", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._blkinfo..tmp_1234567890123456789", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")
// attempts to make invalid chunk names
assertMakeNamePanics("fish", -1, "", -1) // neither data nor control
assertMakeNamePanics("fish", 0, "info", -1) // both data and control
assertMakeNamePanics("fish", -1, "futuredata", -1) // control type too long
assertMakeNamePanics("fish", -1, "123", -1) // digits not allowed
assertMakeNamePanics("fish", -1, "Meta", -1) // only lower case letters allowed
assertMakeNamePanics("fish", -1, "in-fo", -1) // punctuation not allowed
assertMakeNamePanics("fish", -1, "_info", -1)
assertMakeNamePanics("fish", -1, "info_", -1)
assertMakeNamePanics("fish", -2, ".bind", -3)
assertMakeNamePanics("fish", -2, "bind.", -3)
assertMakeNamePanics("fish", -1, "", "") // neither data nor control
assertMakeNamePanics("fish", 0, "info", "") // both data and control
assertMakeNamePanics("fish", -1, "metadata", "") // control type too long
assertMakeNamePanics("fish", -1, "blockinfo", "") // control type way too long
assertMakeNamePanics("fish", -1, "2xy", "") // first digit not allowed
assertMakeNamePanics("fish", -1, "123", "") // all digits not allowed
assertMakeNamePanics("fish", -1, "Meta", "") // only lower case letters allowed
assertMakeNamePanics("fish", -1, "in-fo", "") // punctuation not allowed
assertMakeNamePanics("fish", -1, "_info", "")
assertMakeNamePanics("fish", -1, "info_", "")
assertMakeNamePanics("fish", -2, ".bind", "")
assertMakeNamePanics("fish", -2, "bind.", "")
assertMakeNamePanics("fish", -1, "", 1) // neither data nor control
assertMakeNamePanics("fish", 0, "info", 12) // both data and control
assertMakeNamePanics("fish", -1, "futuredata", 45) // control type too long
assertMakeNamePanics("fish", -1, "123", 123) // digits not allowed
assertMakeNamePanics("fish", -1, "Meta", 456) // only lower case letters allowed
assertMakeNamePanics("fish", -1, "in-fo", 321) // punctuation not allowed
assertMakeNamePanics("fish", -1, "_info", 15678)
assertMakeNamePanics("fish", -1, "info_", 999)
assertMakeNamePanics("fish", -2, ".bind", 0)
assertMakeNamePanics("fish", -2, "bind.", 0)
assertMakeNamePanics("fish", -1, "", "1") // neither data nor control
assertMakeNamePanics("fish", 0, "info", "23") // both data and control
assertMakeNamePanics("fish", -1, "metadata", "45") // control type too long
assertMakeNamePanics("fish", -1, "blockinfo", "7") // control type way too long
assertMakeNamePanics("fish", -1, "2xy", "abc") // first digit not allowed
assertMakeNamePanics("fish", -1, "123", "def") // all digits not allowed
assertMakeNamePanics("fish", -1, "Meta", "mnk") // only lower case letters allowed
assertMakeNamePanics("fish", -1, "in-fo", "xyz") // punctuation not allowed
assertMakeNamePanics("fish", -1, "_info", "5678")
assertMakeNamePanics("fish", -1, "info_", "999")
assertMakeNamePanics("fish", -2, ".bind", "0")
assertMakeNamePanics("fish", -2, "bind.", "0")
assertMakeNamePanics("fish", 0, "", "1234567890") // temporary suffix too long
assertMakeNamePanics("fish", 0, "", "123F4") // uppercase not allowed
assertMakeNamePanics("fish", 0, "", "123.") // punctuation not allowed
assertMakeNamePanics("fish", 0, "", "_123")
}
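Aside (not part of the patch): the assertions above exercise the new chunk naming scheme, where the transaction marker is now a short lower-case alphanumeric string appended as "_xxxx" instead of the old numeric "..tmp_NNNNNNNNNN" suffix. A minimal standalone sketch of how such names are composed, assuming the "*.chunk.###" format with start_from = 2 used in the test; the real makeChunkName lives in the chunker backend and also left-pads transaction IDs shorter than 4 characters (e.g. "4" becomes "0004"), which this sketch omits:

package main

import "fmt"

// chunkName mimics the name layout checked by assertMakeName above:
// data chunks get a 3-digit (or longer) number, control chunks get a
// short lower-case type prefixed with "_", and temporary chunks of an
// active transaction carry a "_<xactID>" suffix.
func chunkName(mainName string, chunkNo int, ctrlType, xactID string) string {
	var name string
	if ctrlType != "" {
		name = fmt.Sprintf("%s.chunk._%s", mainName, ctrlType)
	} else {
		name = fmt.Sprintf("%s.chunk.%03d", mainName, chunkNo+2) // start_from = 2
	}
	if xactID != "" {
		name += "_" + xactID
	}
	return name
}

func main() {
	fmt.Println(chunkName("fish", 1, "", ""))      // fish.chunk.003
	fmt.Println(chunkName("fish", 9, "", "4321"))  // fish.chunk.011_4321
	fmt.Println(chunkName("fish", -1, "info", "")) // fish.chunk._info
}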
func testSmallFileInternals(t *testing.T, f *Fs) {
@@ -381,9 +468,15 @@ func testPreventCorruption(t *testing.T, f *Fs) {
return obj
}
billyObj := newFile("billy")
billyTxn := billyObj.(*Object).xactID
if f.useNoRename {
require.True(t, billyTxn != "")
} else {
require.True(t, billyTxn == "")
}
billyChunkName := func(chunkNo int) string {
return f.makeChunkName(billyObj.Remote(), chunkNo, "", -1)
return f.makeChunkName(billyObj.Remote(), chunkNo, "", billyTxn)
}
err := f.Mkdir(ctx, billyChunkName(1))
@@ -400,11 +493,13 @@ func testPreventCorruption(t *testing.T, f *Fs) {
// accessing chunks in strict mode is prohibited
f.opt.FailHard = true
billyChunk4Name := billyChunkName(4)
billyChunk4, err := f.NewObject(ctx, billyChunk4Name)
_, err = f.base.NewObject(ctx, billyChunk4Name)
require.NoError(t, err)
_, err = f.NewObject(ctx, billyChunk4Name)
assertOverlapError(err)
f.opt.FailHard = false
billyChunk4, err = f.NewObject(ctx, billyChunk4Name)
billyChunk4, err := f.NewObject(ctx, billyChunk4Name)
assert.NoError(t, err)
require.NotNil(t, billyChunk4)
@@ -433,7 +528,8 @@ func testPreventCorruption(t *testing.T, f *Fs) {
// recreate billy in case it was anyhow corrupted
willyObj := newFile("willy")
willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", -1)
willyTxn := willyObj.(*Object).xactID
willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", willyTxn)
f.opt.FailHard = false
willyChunk, err := f.NewObject(ctx, willyChunkName)
f.opt.FailHard = true
@@ -474,17 +570,20 @@ func testChunkNumberOverflow(t *testing.T, f *Fs) {
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
contents := random.String(100)
newFile := func(f fs.Fs, name string) (fs.Object, string) {
filename := path.Join(dir, name)
newFile := func(f fs.Fs, name string) (obj fs.Object, filename string, txnID string) {
filename = path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
_, obj = fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
return obj, filename
if chunkObj, isChunkObj := obj.(*Object); isChunkObj {
txnID = chunkObj.xactID
}
return
}
f.opt.FailHard = false
file, fileName := newFile(f, "wreaker")
wreak, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", -1))
file, fileName, fileTxn := newFile(f, "wreaker")
wreak, _, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", fileTxn))
f.opt.FailHard = false
fstest.CheckListingWithRoot(t, f, dir, nil, nil, f.Precision())
@@ -532,7 +631,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
filename := path.Join(dir, name)
require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct")
part := putFile(f.base, f.makeChunkName(filename, 0, "", -1), "oops", "", true)
part := putFile(f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
_ = putFile(f, filename, contents, "upload "+description, false)
obj, err := f.NewObject(ctx, filename)
@@ -563,7 +662,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
}
}
metaData, err := marshalSimpleJSON(ctx, 3, 1, "", "")
metaData, err := marshalSimpleJSON(ctx, 3, 1, "", "", "")
require.NoError(t, err)
todaysMeta := string(metaData)
runSubtest(todaysMeta, "today")
@@ -577,6 +676,174 @@ func testMetadataInput(t *testing.T, f *Fs) {
runSubtest(futureMeta, "future")
}
// Test that chunker refuses to modify objects with future/unknown metadata
func testFutureProof(t *testing.T, f *Fs) {
if f.opt.MetaFormat == "none" {
t.Skip("this test requires metadata support")
}
saveOpt := f.opt
ctx := context.Background()
f.opt.FailHard = true
const dir = "future"
const file = dir + "/test"
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
}()
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
putPart := func(name string, part int, data, msg string) {
if part > 0 {
name = f.makeChunkName(name, part-1, "", "")
}
item := fstest.Item{Path: name, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true)
assert.NotNil(t, obj, msg)
}
// simulate chunked object from future
meta := `{"ver":999,"nchunks":3,"size":9,"garbage":"litter","sha1":"0707f2970043f9f7c22029482db27733deaec029"}`
putPart(file, 0, meta, "metaobject")
putPart(file, 1, "abc", "chunk1")
putPart(file, 2, "def", "chunk2")
putPart(file, 3, "ghi", "chunk3")
// List should succeed
ls, err := f.List(ctx, dir)
assert.NoError(t, err)
assert.Equal(t, 1, len(ls))
assert.Equal(t, int64(9), ls[0].Size())
// NewObject should succeed
obj, err := f.NewObject(ctx, file)
assert.NoError(t, err)
assert.Equal(t, file, obj.Remote())
assert.Equal(t, int64(9), obj.Size())
// Hash must fail
_, err = obj.Hash(ctx, hash.SHA1)
assert.Equal(t, ErrMetaUnknown, err)
// Move must fail
mobj, err := operations.Move(ctx, f, nil, file+"2", obj)
assert.Nil(t, mobj)
assert.Error(t, err)
if err != nil {
assert.Contains(t, err.Error(), "please upgrade rclone")
}
// Put must fail
oi := object.NewStaticObjectInfo(file, modTime, 3, true, nil, nil)
buf := bytes.NewBufferString("abc")
_, err = f.Put(ctx, buf, oi)
assert.Error(t, err)
// Rcat must fail
in := ioutil.NopCloser(bytes.NewBufferString("abc"))
robj, err := operations.Rcat(ctx, f, file, in, modTime)
assert.Nil(t, robj)
assert.NotNil(t, err)
if err != nil {
assert.Contains(t, err.Error(), "please upgrade rclone")
}
}
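Aside (not part of the patch): the fixture above stores "ver":999 so that a current chunker must recognise the metadata as coming from a newer rclone and refuse to rewrite the object. A hypothetical sketch of that kind of version gate (the struct, function names and the current-version constant are illustrative only, not the chunker implementation):

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

type simpleMeta struct {
	Ver     int   `json:"ver"`
	NChunks int   `json:"nchunks"`
	Size    int64 `json:"size"`
}

const maxSupportedVer = 2 // assumed current metadata version, for illustration only

// checkMeta parses the metadata object and rejects versions from the future.
// Unknown fields such as "garbage" or "sha1" are simply ignored by encoding/json,
// which is why listing still works in the test while modification must fail.
func checkMeta(data []byte) error {
	var m simpleMeta
	if err := json.Unmarshal(data, &m); err != nil {
		return err
	}
	if m.Ver > maxSupportedVer {
		return errors.New("unsupported metadata version, please upgrade rclone")
	}
	return nil
}

func main() {
	err := checkMeta([]byte(`{"ver":999,"nchunks":3,"size":9}`))
	fmt.Println(err) // unsupported metadata version, please upgrade rclone
}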
// The newer method of doing transactions without renaming should still be able to correctly process chunks that were created with renaming.
// If you attempt the inverse, however, the data chunks will be ignored, causing commands to perform incorrectly.
func testBackwardsCompatibility(t *testing.T, f *Fs) {
if !f.useMeta {
t.Skip("Can't do norename transactions without metadata")
}
const dir = "backcomp"
ctx := context.Background()
saveOpt := f.opt
saveUseNoRename := f.useNoRename
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
f.useNoRename = saveUseNoRename
}()
f.opt.ChunkSize = fs.SizeSuffix(10)
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
contents := random.String(250)
newFile := func(f fs.Fs, name string) (fs.Object, string) {
filename := path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
return obj, filename
}
f.opt.FailHard = false
f.useNoRename = false
file, fileName := newFile(f, "renamefile")
f.opt.FailHard = false
item := fstest.NewItem(fileName, contents, modTime)
var items []fstest.Item
items = append(items, item)
f.useNoRename = true
fstest.CheckListingWithRoot(t, f, dir, items, nil, f.Precision())
_, err := f.NewObject(ctx, fileName)
assert.NoError(t, err)
f.opt.FailHard = true
_, err = f.List(ctx, dir)
assert.NoError(t, err)
f.opt.FailHard = false
_ = file.Remove(ctx)
}
func testChunkerServerSideMove(t *testing.T, f *Fs) {
if !f.useMeta {
t.Skip("Can't test norename transactions without metadata")
}
ctx := context.Background()
const dir = "servermovetest"
subRemote := fmt.Sprintf("%s:%s/%s", f.Name(), f.Root(), dir)
subFs1, err := fs.NewFs(ctx, subRemote+"/subdir1")
assert.NoError(t, err)
fs1, isChunkerFs := subFs1.(*Fs)
assert.True(t, isChunkerFs)
fs1.useNoRename = false
fs1.opt.ChunkSize = fs.SizeSuffix(3)
subFs2, err := fs.NewFs(ctx, subRemote+"/subdir2")
assert.NoError(t, err)
fs2, isChunkerFs := subFs2.(*Fs)
assert.True(t, isChunkerFs)
fs2.useNoRename = true
fs2.opt.ChunkSize = fs.SizeSuffix(3)
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
item := fstest.Item{Path: "movefile", ModTime: modTime}
contents := "abcdef"
_, file := fstests.PutTestContents(ctx, t, fs1, &item, contents, true)
dstOverwritten, _ := fs2.NewObject(ctx, "movefile")
dstFile, err := operations.Move(ctx, fs2, dstOverwritten, "movefile", file)
assert.NoError(t, err)
assert.Equal(t, int64(len(contents)), dstFile.Size())
r, err := dstFile.Open(ctx)
assert.NoError(t, err)
assert.NotNil(t, r)
data, err := ioutil.ReadAll(r)
assert.NoError(t, err)
assert.Equal(t, contents, string(data))
_ = r.Close()
_ = operations.Purge(ctx, f.base, dir)
}
// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
t.Run("PutLarge", func(t *testing.T) {
@@ -600,6 +867,15 @@ func (f *Fs) InternalTest(t *testing.T) {
t.Run("MetadataInput", func(t *testing.T) {
testMetadataInput(t, f)
})
t.Run("FutureProof", func(t *testing.T) {
testFutureProof(t, f)
})
t.Run("BackwardsCompatibility", func(t *testing.T) {
testBackwardsCompatibility(t, f)
})
t.Run("ChunkerServerSideMove", func(t *testing.T) {
testChunkerServerSideMove(t, f)
})
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -15,10 +15,10 @@ import (
// Command line flags
var (
// Invalid characters are not supported by some remotes, eg. Mailru.
// Invalid characters are not supported by some remotes, e.g. Mailru.
// We enable testing with invalid characters when -remote is not set, so
// chunker overlays a local directory, but invalid characters are disabled
// by default when -remote is set, eg. when test_all runs backend tests.
// by default when -remote is set, e.g. when test_all runs backend tests.
// You can still test with invalid characters using the below flag.
UseBadChars = flag.Bool("bad-chars", false, "Set to test bad characters in file names when -remote is set")
)

backend/compress/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
test

backend/compress/compress.go Normal file

File diff suppressed because it is too large

View File

@@ -0,0 +1,65 @@
// Test Compress filesystem interface
package compress
import (
"os"
"path/filepath"
"testing"
_ "github.com/rclone/rclone/backend/drive"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/swift"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
opt := fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
UnimplementableObjectMethods: []string{}}
fstests.Run(t, &opt)
}
// TestRemoteGzip tests GZIP compression
func TestRemoteGzip(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip")
name := "TestCompressGzip"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
UnimplementableObjectMethods: []string{
"GetTier",
"SetTier",
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "compress"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "compression_mode", Value: "gzip"},
},
})
}
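Aside (not part of the patch): the ExtraConfig items above are the programmatic equivalent of defining the remote in rclone.conf. Assuming a Linux temp directory, the generated remote corresponds roughly to this stanza:

[TestCompressGzip]
type = compress
remote = /tmp/rclone-compress-test-gzip
compression_mode = gzip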

View File

@@ -71,30 +71,6 @@ type ReadSeekCloser interface {
// OpenRangeSeek opens the file handle at the offset with the limit given
type OpenRangeSeek func(ctx context.Context, offset, limit int64) (io.ReadCloser, error)
// Cipher is used to swap out the encryption implementations
type Cipher interface {
// EncryptFileName encrypts a file path
EncryptFileName(string) string
// DecryptFileName decrypts a file path, returns error if decrypt was invalid
DecryptFileName(string) (string, error)
// EncryptDirName encrypts a directory path
EncryptDirName(string) string
// DecryptDirName decrypts a directory path, returns error if decrypt was invalid
DecryptDirName(string) (string, error)
// EncryptData
EncryptData(io.Reader) (io.Reader, error)
// DecryptData
DecryptData(io.ReadCloser) (io.ReadCloser, error)
// DecryptDataSeek decrypt at a given position
DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
// EncryptedSize calculates the size of the data when encrypted
EncryptedSize(int64) int64
// DecryptedSize calculates the size of the data when decrypted
DecryptedSize(int64) (int64, error)
// NameEncryptionMode returns the used mode for name handling
NameEncryptionMode() NameEncryptionMode
}
// NameEncryptionMode is the type of file name encryption in use
type NameEncryptionMode int
@@ -136,7 +112,8 @@ func (mode NameEncryptionMode) String() (out string) {
return out
}
type cipher struct {
// Cipher defines an encoding and decoding cipher for the crypt backend
type Cipher struct {
dataKey [32]byte // Key for secretbox
nameKey [32]byte // 16,24 or 32 bytes
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
@@ -148,8 +125,8 @@ type cipher struct {
}
// newCipher initialises the cipher. If salt is "" then it uses a built in salt val
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*cipher, error) {
c := &cipher{
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*Cipher, error) {
c := &Cipher{
mode: mode,
cryptoRand: rand.Reader,
dirNameEncrypt: dirNameEncrypt,
@@ -170,9 +147,9 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
// If salt is "" we use a fixed salt just to make attackers lives
// slighty harder than using no salt.
//
// Note that empty passsword makes all 0x00 keys which is used in the
// Note that empty password makes all 0x00 keys which is used in the
// tests.
func (c *cipher) Key(password, salt string) (err error) {
func (c *Cipher) Key(password, salt string) (err error) {
const keySize = len(c.dataKey) + len(c.nameKey) + len(c.nameTweak)
var saltBytes = defaultSalt
if salt != "" {
@@ -196,12 +173,12 @@ func (c *cipher) Key(password, salt string) (err error) {
}
// getBlock gets a block from the pool of size blockSize
func (c *cipher) getBlock() []byte {
func (c *Cipher) getBlock() []byte {
return c.buffers.Get().([]byte)
}
// putBlock returns a block to the pool of size blockSize
func (c *cipher) putBlock(buf []byte) {
func (c *Cipher) putBlock(buf []byte) {
if len(buf) != blockSize {
panic("bad blocksize returned to pool")
}
@@ -240,13 +217,13 @@ func decodeFileName(in string) ([]byte, error) {
// 2003 paper "A Parallelizable Enciphering Mode" by Halevi and
// Rogaway.
//
// This makes for determinstic encryption which is what we want - the
// This makes for deterministic encryption which is what we want - the
// same filename must encrypt to the same thing.
//
// This means that
// * filenames with the same name will encrypt the same
// * filenames which start the same won't have a common prefix
func (c *cipher) encryptSegment(plaintext string) string {
func (c *Cipher) encryptSegment(plaintext string) string {
if plaintext == "" {
return ""
}
@@ -256,7 +233,7 @@ func (c *cipher) encryptSegment(plaintext string) string {
}
// decryptSegment decrypts a path segment
func (c *cipher) decryptSegment(ciphertext string) (string, error) {
func (c *Cipher) decryptSegment(ciphertext string) (string, error) {
if ciphertext == "" {
return "", nil
}
@@ -283,7 +260,7 @@ func (c *cipher) decryptSegment(ciphertext string) (string, error) {
}
// Simple obfuscation routines
func (c *cipher) obfuscateSegment(plaintext string) string {
func (c *Cipher) obfuscateSegment(plaintext string) string {
if plaintext == "" {
return ""
}
@@ -370,7 +347,7 @@ func (c *cipher) obfuscateSegment(plaintext string) string {
return result.String()
}
func (c *cipher) deobfuscateSegment(ciphertext string) (string, error) {
func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
if ciphertext == "" {
return "", nil
}
@@ -457,7 +434,7 @@ func (c *cipher) deobfuscateSegment(ciphertext string) (string, error) {
}
// encryptFileName encrypts a file path
func (c *cipher) encryptFileName(in string) string {
func (c *Cipher) encryptFileName(in string) string {
segments := strings.Split(in, "/")
for i := range segments {
// Skip directory name encryption if the user chose to
@@ -475,7 +452,7 @@ func (c *cipher) encryptFileName(in string) string {
}
// EncryptFileName encrypts a file path
func (c *cipher) EncryptFileName(in string) string {
func (c *Cipher) EncryptFileName(in string) string {
if c.mode == NameEncryptionOff {
return in + encryptedSuffix
}
@@ -483,7 +460,7 @@ func (c *cipher) EncryptFileName(in string) string {
}
// EncryptDirName encrypts a directory path
func (c *cipher) EncryptDirName(in string) string {
func (c *Cipher) EncryptDirName(in string) string {
if c.mode == NameEncryptionOff || !c.dirNameEncrypt {
return in
}
@@ -491,7 +468,7 @@ func (c *cipher) EncryptDirName(in string) string {
}
// decryptFileName decrypts a file path
func (c *cipher) decryptFileName(in string) (string, error) {
func (c *Cipher) decryptFileName(in string) (string, error) {
segments := strings.Split(in, "/")
for i := range segments {
var err error
@@ -514,7 +491,7 @@ func (c *cipher) decryptFileName(in string) (string, error) {
}
// DecryptFileName decrypts a file path
func (c *cipher) DecryptFileName(in string) (string, error) {
func (c *Cipher) DecryptFileName(in string) (string, error) {
if c.mode == NameEncryptionOff {
remainingLength := len(in) - len(encryptedSuffix)
if remainingLength > 0 && strings.HasSuffix(in, encryptedSuffix) {
@@ -526,14 +503,15 @@ func (c *cipher) DecryptFileName(in string) (string, error) {
}
// DecryptDirName decrypts a directory path
func (c *cipher) DecryptDirName(in string) (string, error) {
func (c *Cipher) DecryptDirName(in string) (string, error) {
if c.mode == NameEncryptionOff || !c.dirNameEncrypt {
return in, nil
}
return c.decryptFileName(in)
}
func (c *cipher) NameEncryptionMode() NameEncryptionMode {
// NameEncryptionMode returns the encryption mode in use for names
func (c *Cipher) NameEncryptionMode() NameEncryptionMode {
return c.mode
}
@@ -581,7 +559,7 @@ func (n *nonce) increment() {
n.carry(0)
}
// add an uint64 to the nonce
// add a uint64 to the nonce
func (n *nonce) add(x uint64) {
carry := uint16(0)
for i := 0; i < 8; i++ {
@@ -601,7 +579,7 @@ func (n *nonce) add(x uint64) {
type encrypter struct {
mu sync.Mutex
in io.Reader
c *cipher
c *Cipher
nonce nonce
buf []byte
readBuf []byte
@@ -611,7 +589,7 @@ type encrypter struct {
}
// newEncrypter creates a new file handle encrypting on the fly
func (c *cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
fh := &encrypter{
in: in,
c: c,
@@ -655,11 +633,8 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
}
// possibly err != nil here, but we will process the
// data and the next call to ReadFull will return 0, err
// Write nonce to start of block
copy(fh.buf, fh.nonce[:])
// Encrypt the block using the nonce
block := fh.buf
secretbox.Seal(block[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
fh.bufIndex = 0
fh.bufSize = blockHeaderSize + n
fh.nonce.increment()
@@ -683,13 +658,19 @@ func (fh *encrypter) finish(err error) (int, error) {
}
// Encrypt data encrypts the data stream
func (c *cipher) EncryptData(in io.Reader) (io.Reader, error) {
func (c *Cipher) encryptData(in io.Reader) (io.Reader, *encrypter, error) {
in, wrap := accounting.UnWrap(in) // unwrap the accounting off the Reader
out, err := c.newEncrypter(in, nil)
if err != nil {
return nil, err
return nil, nil, err
}
return wrap(out), nil // and wrap the accounting back on
return wrap(out), out, nil // and wrap the accounting back on
}
// EncryptData encrypts the data stream
func (c *Cipher) EncryptData(in io.Reader) (io.Reader, error) {
out, _, err := c.encryptData(in)
return out, err
}
// decrypter decrypts an io.ReaderCloser on the fly
@@ -698,7 +679,7 @@ type decrypter struct {
rc io.ReadCloser
nonce nonce
initialNonce nonce
c *cipher
c *Cipher
buf []byte
readBuf []byte
bufIndex int
@@ -709,7 +690,7 @@ type decrypter struct {
}
// newDecrypter creates a new file handle decrypting on the fly
func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
fh := &decrypter{
rc: rc,
c: c,
@@ -737,7 +718,7 @@ func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
}
// newDecrypterSeek creates a new file handle decrypting on the fly
func (c *cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
var rc io.ReadCloser
doRangeSeek := false
setLimit := false
@@ -798,8 +779,7 @@ func (fh *decrypter) fillBuffer() (err error) {
return ErrorEncryptedFileBadHeader
}
// Decrypt the block using the nonce
block := fh.buf
_, ok := secretbox.Open(block[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
_, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
if !ok {
if err != nil {
return err // return pending error as it is likely more accurate
@@ -1012,7 +992,7 @@ func (fh *decrypter) finishAndClose(err error) error {
}
// DecryptData decrypts the data stream
func (c *cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
func (c *Cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
out, err := c.newDecrypter(rc)
if err != nil {
return nil, err
@@ -1025,7 +1005,7 @@ func (c *cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
// The open function must return a ReadCloser opened to the offset supplied
//
// You must use this form of DecryptData if you might want to Seek the file handle
func (c *cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
out, err := c.newDecrypterSeek(ctx, open, offset, limit)
if err != nil {
return nil, err
@@ -1034,7 +1014,7 @@ func (c *cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset
}
// EncryptedSize calculates the size of the data when encrypted
func (c *cipher) EncryptedSize(size int64) int64 {
func (c *Cipher) EncryptedSize(size int64) int64 {
blocks, residue := size/blockDataSize, size%blockDataSize
encryptedSize := int64(fileHeaderSize) + blocks*(blockHeaderSize+blockDataSize)
if residue != 0 {
@@ -1044,7 +1024,7 @@ func (c *cipher) EncryptedSize(size int64) int64 {
}
// DecryptedSize calculates the size of the data when decrypted
func (c *cipher) DecryptedSize(size int64) (int64, error) {
func (c *Cipher) DecryptedSize(size int64) (int64, error) {
size -= int64(fileHeaderSize)
if size < 0 {
return 0, ErrorEncryptedFileTooShort
@@ -1063,7 +1043,6 @@ func (c *cipher) DecryptedSize(size int64) (int64, error) {
// check interfaces
var (
_ Cipher = (*cipher)(nil)
_ io.ReadCloser = (*decrypter)(nil)
_ io.Seeker = (*decrypter)(nil)
_ fs.RangeSeeker = (*decrypter)(nil)
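Aside (not part of the patch): EncryptedSize and DecryptedSize above only change receiver type from the unexported cipher to the exported Cipher, so the size arithmetic is unchanged. A standalone sketch of that arithmetic, assuming rclone crypt's usual constants of a 32-byte file header (8-byte magic plus 24-byte nonce), 16 bytes of secretbox overhead per block and 64 KiB of plaintext per block; for a 100-byte file this gives 32 + 16 + 100 = 148 encrypted bytes:

package main

import "fmt"

const (
	fileHeaderSize  = 32        // assumed: 8-byte magic + 24-byte nonce
	blockHeaderSize = 16        // assumed: secretbox overhead per block
	blockDataSize   = 64 * 1024 // assumed: plaintext bytes per block
)

// encryptedSize mirrors the formula in Cipher.EncryptedSize shown above.
func encryptedSize(size int64) int64 {
	blocks, residue := size/blockDataSize, size%blockDataSize
	out := int64(fileHeaderSize) + blocks*(blockHeaderSize+blockDataSize)
	if residue != 0 {
		out += blockHeaderSize + residue
	}
	return out
}

func main() {
	fmt.Println(encryptedSize(100))     // 148
	fmt.Println(encryptedSize(1 << 20)) // 1048864 for a 1 MiB file (16 full blocks)
}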

View File

@@ -12,6 +12,7 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -784,7 +785,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)
in := &errorReader{io.ErrUnexpectedEOF}
in := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
fh, err := c.newEncrypter(in, nil)
assert.NoError(t, err)
@@ -793,14 +794,6 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
assert.Equal(t, int64(32), n)
}
type errorReader struct {
err error
}
func (er errorReader) Read(p []byte) (n int, err error) {
return 0, er.err
}
type closeDetector struct {
io.Reader
closed int
@@ -838,7 +831,7 @@ func TestNewDecrypter(t *testing.T) {
assert.Equal(t, 1, cd.closed)
}
er := &errorReader{errors.New("potato")}
er := &readers.ErrorReader{Err: errors.New("potato")}
cd = newCloseDetector(er)
fh, err = c.newDecrypter(cd)
assert.Nil(t, fh)
@@ -864,7 +857,7 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)
in2 := &errorReader{io.ErrUnexpectedEOF}
in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
in1 := bytes.NewBuffer(file16)
in := ioutil.NopCloser(io.MultiReader(in1, in2))
@@ -936,7 +929,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
assert.Equal(t, 0, n)
}
// Now try decoding it with a open/seek
// Now try decoding it with an open/seek
for _, offset := range trials {
for _, limit := range limits {
if offset+limit > len(plaintext) {
@@ -1118,7 +1111,7 @@ func TestDecrypterRead(t *testing.T) {
// Test producing an error on the file on Read the underlying file
in1 := bytes.NewBuffer(file1)
in2 := &errorReader{errors.New("potato")}
in2 := &readers.ErrorReader{Err: errors.New("potato")}
in := io.MultiReader(in1, in2)
cd := newCloseDetector(in)
fh, err := c.newDecrypter(cd)

View File

@@ -5,12 +5,14 @@ import (
"context"
"fmt"
"io"
"path"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
@@ -25,9 +27,10 @@ func init() {
Name: "crypt",
Description: "Encrypt/Decrypt a remote",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Required: true,
}, {
Name: "filename_encryption",
@@ -35,19 +38,21 @@ func init() {
Default: "standard",
Examples: []fs.OptionExample{
{
Value: "off",
Help: "Don't encrypt the file names. Adds a \".bin\" extension only.",
}, {
Value: "standard",
Help: "Encrypt the filenames see the docs for the details.",
}, {
Value: "obfuscate",
Help: "Very simple filename obfuscation.",
}, {
Value: "off",
Help: "Don't encrypt the file names. Adds a \".bin\" extension only.",
},
},
}, {
Name: "directory_name_encryption",
Help: "Option to either encrypt directory names or leave them intact.",
Name: "directory_name_encryption",
Help: `Option to either encrypt directory names or leave them intact.
NB If filename_encryption is "off" then this option will do nothing.`,
Default: true,
Examples: []fs.OptionExample{
{
@@ -63,10 +68,25 @@ func init() {
Name: "password",
Help: "Password or pass phrase for encryption.",
IsPassword: true,
Required: true,
}, {
Name: "password2",
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
IsPassword: true,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Allow server-side operations (e.g. copy) to work across different crypt configs.
Normally this option is not what you want, but if you have two crypts
pointing to the same backend you can use it.
This can be used, for example, to change file name encryption type
without re-uploading all the data. Just make two crypt backends
pointing to two different directories with the single changed
parameter and use rclone move to move the files between the crypt
remotes.`,
Advanced: true,
}, {
Name: "show_mapping",
Help: `For all files listed show how the names encrypt.
@@ -81,12 +101,27 @@ names, or for debugging purposes.`,
Default: false,
Hide: fs.OptionHideConfigurator,
Advanced: true,
}, {
Name: "no_data_encryption",
Help: "Option to either encrypt file data or leave it unencrypted.",
Default: false,
Advanced: true,
Examples: []fs.OptionExample{
{
Value: "true",
Help: "Don't encrypt file data, leave it unencrypted.",
},
{
Value: "false",
Help: "Encrypt file data.",
},
},
}},
})
}
// newCipherForConfig constructs a Cipher for the given config name
func newCipherForConfig(opt *Options) (Cipher, error) {
func newCipherForConfig(opt *Options) (*Cipher, error) {
mode, err := NewNameEncryptionMode(opt.FilenameEncryption)
if err != nil {
return nil, err
@@ -113,7 +148,7 @@ func newCipherForConfig(opt *Options) (Cipher, error) {
}
// NewCipher constructs a Cipher for the given config
func NewCipher(m configmap.Mapper) (Cipher, error) {
func NewCipher(m configmap.Mapper) (*Cipher, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -124,7 +159,7 @@ func NewCipher(m configmap.Mapper) (Cipher, error) {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -139,20 +174,25 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
}
wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
// Make sure to remove trailing . referring to the current dir
if path.Base(rpath) == "." {
rpath = strings.TrimSuffix(rpath, ".")
}
// Look for a file first
remotePath := fspath.JoinRootPath(wPath, cipher.EncryptFileName(rpath))
wrappedFs, err := wInfo.NewFs(wName, remotePath, wConfig)
// if that didn't produce a file, look for a directory
if err != fs.ErrorIsFile {
remotePath = fspath.JoinRootPath(wPath, cipher.EncryptDirName(rpath))
wrappedFs, err = wInfo.NewFs(wName, remotePath, wConfig)
var wrappedFs fs.Fs
if rpath == "" {
wrappedFs, err = cache.Get(ctx, remote)
} else {
remotePath := fspath.JoinRootPath(remote, cipher.EncryptFileName(rpath))
wrappedFs, err = cache.Get(ctx, remotePath)
// if that didn't produce a file, look for a directory
if err != fs.ErrorIsFile {
remotePath = fspath.JoinRootPath(remote, cipher.EncryptDirName(rpath))
wrappedFs, err = cache.Get(ctx, remotePath)
}
}
if err != fs.ErrorIsFile && err != nil {
return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", wName, remotePath)
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remote)
}
f := &Fs{
Fs: wrappedFs,
@@ -161,6 +201,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
opt: *opt,
cipher: cipher,
}
cache.PinUntilFinalized(f.Fs, f)
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
f.features = (&fs.Features{
@@ -172,7 +213,8 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
CanHaveEmptyDirectories: true,
SetTier: true,
GetTier: true,
}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
return f, err
}
@@ -182,8 +224,10 @@ type Options struct {
Remote string `config:"remote"`
FilenameEncryption string `config:"filename_encryption"`
DirectoryNameEncryption bool `config:"directory_name_encryption"`
NoDataEncryption bool `config:"no_data_encryption"`
Password string `config:"password"`
Password2 string `config:"password2"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
ShowMapping bool `config:"show_mapping"`
}
@@ -195,7 +239,7 @@ type Fs struct {
root string
opt Options
features *fs.Features // optional features
cipher Cipher
cipher *Cipher
}
// Name of the remote (as passed into NewFs)
@@ -232,7 +276,7 @@ func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
*entries = append(*entries, f.newObject(obj))
}
// Encrypt an directory file name to entries.
// Encrypt a directory file name to entries.
func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) {
remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote)
@@ -318,8 +362,12 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
// put implements Put or PutStream
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
if f.opt.NoDataEncryption {
return put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
}
// Encrypt the data into wrappedIn
wrappedIn, err := f.cipher.EncryptData(in)
wrappedIn, encrypter, err := f.cipher.encryptData(in)
if err != nil {
return nil, err
}
@@ -343,7 +391,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
}
// Transfer the data
o, err := put(ctx, wrappedIn, f.newObjectInfo(src), options...)
o, err := put(ctx, wrappedIn, f.newObjectInfo(src, encrypter.nonce), options...)
if err != nil {
return nil, err
}
@@ -356,13 +404,16 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if err != nil {
return nil, errors.Wrap(err, "failed to read destination hash")
}
if srcHash != "" && dstHash != "" && srcHash != dstHash {
// remove object
err = o.Remove(ctx)
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
if srcHash != "" && dstHash != "" {
if srcHash != dstHash {
// remove object
err = o.Remove(ctx)
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
}
return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
fs.Debugf(src, "%v = %s OK", ht, srcHash)
}
}
@@ -402,21 +453,21 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.Fs.Rmdir(ctx, f.cipher.EncryptDirName(dir))
}
// Purge all files in the root and the root directory
// Purge all files in the directory specified
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context) error {
func (f *Fs) Purge(ctx context.Context, dir string) error {
do := f.Fs.Features().Purge
if do == nil {
return fs.ErrorCantPurge
}
return do(ctx)
return do(ctx, f.cipher.EncryptDirName(dir))
}
// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -441,7 +492,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return f.newObject(oResult), nil
}
// Move src to this remote using server side move operations.
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -467,7 +518,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -496,11 +547,11 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
if do == nil {
return nil, errors.New("can't PutUnchecked")
}
wrappedIn, err := f.cipher.EncryptData(in)
wrappedIn, encrypter, err := f.cipher.encryptData(in)
if err != nil {
return nil, err
}
o, err := do(ctx, wrappedIn, f.newObjectInfo(src))
o, err := do(ctx, wrappedIn, f.newObjectInfo(src, encrypter.nonce))
if err != nil {
return nil, err
}
@@ -553,18 +604,53 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
return f.cipher.DecryptFileName(encryptedFileName)
}
// computeHashWithNonce takes the nonce and encrypts the contents of
// src with it, and calculates the hash given by HashType on the fly
//
// Note that we break lots of encapsulation in this function.
func (f *Fs) computeHashWithNonce(ctx context.Context, nonce nonce, src fs.Object, hashType hash.Type) (hashStr string, err error) {
// Open the src for input
in, err := src.Open(ctx)
if err != nil {
return "", errors.Wrap(err, "failed to open src")
}
defer fs.CheckClose(in, &err)
// Now encrypt the src with the nonce
out, err := f.cipher.newEncrypter(in, &nonce)
if err != nil {
return "", errors.Wrap(err, "failed to make encrypter")
}
// pipe into hash
m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
if err != nil {
return "", errors.Wrap(err, "failed to make hasher")
}
_, err = io.Copy(m, out)
if err != nil {
return "", errors.Wrap(err, "failed to hash data")
}
return m.Sums()[hashType], nil
}
// ComputeHash takes the nonce from o, and encrypts the contents of
// src with it, and calculates the hash given by HashType on the fly
//
// Note that we break lots of encapsulation in this function.
func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
if f.opt.NoDataEncryption {
return src.Hash(ctx, hashType)
}
// Read the nonce - opening the file is sufficient to read the nonce in
// use a limited read so we only read the header
in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
if err != nil {
return "", errors.Wrap(err, "failed to open object to read nonce")
}
d, err := f.cipher.(*cipher).newDecrypter(in)
d, err := f.cipher.newDecrypter(in)
if err != nil {
_ = in.Close()
return "", errors.Wrap(err, "failed to open object to read nonce")
@@ -589,30 +675,7 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
return "", errors.Wrap(err, "failed to close nonce read")
}
// Open the src for input
in, err = src.Open(ctx)
if err != nil {
return "", errors.Wrap(err, "failed to open src")
}
defer fs.CheckClose(in, &err)
// Now encrypt the src with the nonce
out, err := f.cipher.(*cipher).newEncrypter(in, &nonce)
if err != nil {
return "", errors.Wrap(err, "failed to make encrypter")
}
// pipe into hash
m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
if err != nil {
return "", errors.Wrap(err, "failed to make hasher")
}
_, err = io.Copy(m, out)
if err != nil {
return "", errors.Wrap(err, "failed to hash data")
}
return m.Sums()[hashType], nil
return f.computeHashWithNonce(ctx, nonce, src, hashType)
}
// MergeDirs merges the contents of all the directories passed
@@ -639,7 +702,7 @@ func (f *Fs) DirCacheFlush() {
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
do := f.Fs.Features().PublicLink
if do == nil {
return "", errors.New("PublicLink not supported")
@@ -647,9 +710,9 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
o, err := f.NewObject(ctx, remote)
if err != nil {
// assume it is a directory
return do(ctx, f.cipher.EncryptDirName(remote))
return do(ctx, f.cipher.EncryptDirName(remote), expire, unlink)
}
return do(ctx, o.(*Object).Object.Remote())
return do(ctx, o.(*Object).Object.Remote(), expire, unlink)
}
// ChangeNotify calls the passed function with a path
@@ -684,6 +747,67 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
do(ctx, wrappedNotifyFunc, pollIntervalChan)
}
var commandHelp = []fs.CommandHelp{
{
Name: "encode",
Short: "Encode the given filename(s)",
Long: `This encodes the filenames given as arguments returning a list of
strings of the encoded results.
Usage Example:
rclone backend encode crypt: file1 [file2...]
rclone rc backend/command command=encode fs=crypt: file1 [file2...]
`,
},
{
Name: "decode",
Short: "Decode the given filename(s)",
Long: `This decodes the filenames given as arguments returning a list of
strings of the decoded results. It will return an error if any of the
inputs are invalid.
Usage Example:
rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
`,
},
}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "decode":
out := make([]string, 0, len(arg))
for _, encryptedFileName := range arg {
fileName, err := f.DecryptFileName(encryptedFileName)
if err != nil {
return out, errors.Wrap(err, fmt.Sprintf("Failed to decrypt : %s", encryptedFileName))
}
out = append(out, fileName)
}
return out, nil
case "encode":
out := make([]string, 0, len(arg))
for _, fileName := range arg {
encryptedFileName := f.EncryptFileName(fileName)
out = append(out, encryptedFileName)
}
return out, nil
default:
return nil, fs.ErrorCommandNotFound
}
}
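Since the crypt backend now registers as an fs.Commander (see the interface checks below), the encode and decode commands can also be driven directly from Go rather than via "rclone backend". A minimal sketch, assuming "secret:" is a crypt remote already defined in the rclone config and that "file1.txt" is only an illustrative name:

package main

import (
	"context"
	"fmt"
	"log"

	_ "github.com/rclone/rclone/backend/crypt"
	_ "github.com/rclone/rclone/backend/local"
	"github.com/rclone/rclone/fs"
)

func main() {
	ctx := context.Background()
	// Note: a real program would also need to load the rclone config file
	// first; how to do that depends on the rclone version, so it is omitted here.
	f, err := fs.NewFs(ctx, "secret:") // "secret:" is an assumed crypt remote
	if err != nil {
		log.Fatal(err)
	}
	// The crypt Fs satisfies fs.Commander, so backend commands can be called directly.
	cmdr, ok := f.(fs.Commander)
	if !ok {
		log.Fatal("remote does not support backend commands")
	}
	out, err := cmdr.Command(ctx, "encode", []string{"file1.txt"}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out) // a []string of encrypted file names
}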
// Object describes a wrapped Object being read from the Fs
//
// This decrypts the remote name and decrypts the data
@@ -725,9 +849,13 @@ func (o *Object) Remote() string {
// Size returns the size of the file
func (o *Object) Size() int64 {
size, err := o.f.cipher.DecryptedSize(o.Object.Size())
if err != nil {
fs.Debugf(o, "Bad size for decrypt: %v", err)
size := o.Object.Size()
if !o.f.opt.NoDataEncryption {
var err error
size, err = o.f.cipher.DecryptedSize(size)
if err != nil {
fs.Debugf(o, "Bad size for decrypt: %v", err)
}
}
return size
}
@@ -745,6 +873,10 @@ func (o *Object) UnWrap() fs.Object {
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
if o.f.opt.NoDataEncryption {
return o.Object.Open(ctx, options...)
}
var openOptions []fs.OpenOption
var offset, limit int64 = 0, -1
for _, option := range options {
@@ -820,18 +952,30 @@ func (f *Fs) Disconnect(ctx context.Context) error {
return do(ctx)
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
do := f.Fs.Features().Shutdown
if do == nil {
return nil
}
return do(ctx)
}
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source
//
// This encrypts the remote name and adjusts the size
type ObjectInfo struct {
fs.ObjectInfo
f *Fs
f *Fs
nonce nonce
}
func (f *Fs) newObjectInfo(src fs.ObjectInfo) *ObjectInfo {
func (f *Fs) newObjectInfo(src fs.ObjectInfo, nonce nonce) *ObjectInfo {
return &ObjectInfo{
ObjectInfo: src,
f: f,
nonce: nonce,
}
}
@@ -857,6 +1001,23 @@ func (o *ObjectInfo) Size() int64 {
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
var srcObj fs.Object
var ok bool
// Get the underlying object if there is one
if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
// Prefer direct interface assertion
} else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok {
// Otherwise it is likely an operations.OverrideRemote
srcObj = do.UnWrap()
} else {
return "", nil
}
// if this is wrapping a local object then we work out the hash
if srcObj.Fs().Features().IsLocal {
// Read the data and encrypt it to calculate the hash
fs.Debugf(o, "Computing %v hash of encrypted source", hash)
return o.f.computeHashWithNonce(ctx, o.nonce, srcObj, hash)
}
return "", nil
}
@@ -895,6 +1056,7 @@ var (
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Commander = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
@@ -908,6 +1070,7 @@ var (
_ fs.PublicLinker = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.ObjectInfo = (*ObjectInfo)(nil)
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)


@@ -0,0 +1,143 @@
package crypt
import (
"bytes"
"context"
"crypto/md5"
"fmt"
"io"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type testWrapper struct {
fs.ObjectInfo
}
// UnWrap returns the Object that this Object is wrapping or nil if it
// isn't wrapping anything
func (o testWrapper) UnWrap() fs.Object {
if o, ok := o.ObjectInfo.(fs.Object); ok {
return o
}
return nil
}
// Create a temporary local fs to upload things from
func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
localFs, err := fs.TemporaryLocalFs(context.Background())
require.NoError(t, err)
cleanup = func() {
require.NoError(t, localFs.Rmdir(context.Background(), ""))
}
return localFs, cleanup
}
// Upload a file to a remote
func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object, cleanup func()) {
inBuf := bytes.NewBufferString(contents)
t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC)
upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil)
obj, err := f.Put(context.Background(), inBuf, upSrc)
require.NoError(t, err)
cleanup = func() {
require.NoError(t, obj.Remove(context.Background()))
}
return obj, cleanup
}
// Test the ObjectInfo
func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
var (
contents = random.String(100)
path = "hash_test_object"
ctx = context.Background()
)
if wrap {
path = "_wrap"
}
localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
obj, cleanupObj := uploadFile(t, localFs, path, contents)
defer cleanupObj()
// encrypt the data
inBuf := bytes.NewBufferString(contents)
var outBuf bytes.Buffer
enc, err := f.cipher.newEncrypter(inBuf, nil)
require.NoError(t, err)
nonce := enc.nonce // read the nonce at the start
_, err = io.Copy(&outBuf, enc)
require.NoError(t, err)
var oi fs.ObjectInfo = obj
if wrap {
// wrap the object in an fs.ObjectUnwrapper if required
oi = testWrapper{oi}
}
// wrap the object in a crypt for upload using the nonce we
// saved from the encrypter
src := f.newObjectInfo(oi, nonce)
// Test ObjectInfo methods
assert.Equal(t, int64(outBuf.Len()), src.Size())
assert.Equal(t, f, src.Fs())
assert.NotEqual(t, path, src.Remote())
// Test ObjectInfo.Hash
wantHash := md5.Sum(outBuf.Bytes())
gotHash, err := src.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, fmt.Sprintf("%x", wantHash), gotHash)
}
func testComputeHash(t *testing.T, f *Fs) {
var (
contents = random.String(100)
path = "compute_hash_test"
ctx = context.Background()
hashType = f.Fs.Hashes().GetOne()
)
if hashType == hash.None {
t.Skipf("%v: does not support hashes", f.Fs)
}
localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
// Upload a file to localFs as a test object
localObj, cleanupLocalObj := uploadFile(t, localFs, path, contents)
defer cleanupLocalObj()
// Upload the same data to the remote Fs also
remoteObj, cleanupRemoteObj := uploadFile(t, f, path, contents)
defer cleanupRemoteObj()
// Calculate the expected Hash of the remote object
computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType)
require.NoError(t, err)
// Test computed hash matches remote object hash
remoteObjHash, err := remoteObj.(*Object).Object.Hash(ctx, hashType)
require.NoError(t, err)
assert.Equal(t, remoteObjHash, computedHash)
}
// InternalTest is called by fstests.Run to run extra tests
func (f *Fs) InternalTest(t *testing.T) {
t.Run("ObjectInfo", func(t *testing.T) { testObjectInfo(t, f, false) })
t.Run("ObjectInfoWrap", func(t *testing.T) { testObjectInfo(t, f, true) })
t.Run("ComputeHash", func(t *testing.T) { testComputeHash(t, f) })
}


@@ -91,3 +91,26 @@ func TestObfuscate(t *testing.T) {
UnimplementableObjectMethods: []string{"MimeType"},
})
}
// TestNoDataObfuscate runs integration tests against the remote
func TestNoDataObfuscate(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
name := "TestCrypt4"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*crypt.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "crypt"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
{Name: name, Key: "filename_encryption", Value: "obfuscate"},
{Name: name, Key: "no_data_encryption", Value: "true"},
},
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}

backend/drive/drive.go: 1885 changed lines, Normal file → Executable file (file diff suppressed because it is too large)


@@ -7,15 +7,21 @@ import (
"io"
"io/ioutil"
"mime"
"os"
"path"
"path/filepath"
"strings"
"testing"
"time"
"github.com/pkg/errors"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/api/drive/v3"
@@ -105,6 +111,7 @@ func TestInternalParseExtensions(t *testing.T) {
}
func TestInternalFindExportFormat(t *testing.T) {
ctx := context.Background()
item := &drive.File{
Name: "file",
MimeType: "application/vnd.google-apps.document",
@@ -122,7 +129,7 @@ func TestInternalFindExportFormat(t *testing.T) {
} {
f := new(Fs)
f.exportExtensions = test.extensions
gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(item)
gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(ctx, item)
assert.Equal(t, test.wantExtension, gotExtension)
if test.wantExtension != "" {
assert.Equal(t, item.Name+gotExtension, gotFilename)
@@ -190,7 +197,7 @@ func (f *Fs) InternalTestDocumentImport(t *testing.T) {
testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
require.NoError(t, err)
testFilesFs, err := fs.NewFs(testFilesPath)
testFilesFs, err := fs.NewFs(context.Background(), testFilesPath)
require.NoError(t, err)
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
@@ -204,7 +211,7 @@ func (f *Fs) InternalTestDocumentUpdate(t *testing.T) {
testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
require.NoError(t, err)
testFilesFs, err := fs.NewFs(testFilesPath)
testFilesFs, err := fs.NewFs(context.Background(), testFilesPath)
require.NoError(t, err)
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
@@ -268,6 +275,192 @@ func (f *Fs) InternalTestDocumentLink(t *testing.T) {
}
}
const (
// from fstest/fstests/fstests.go
existingDir = "hello? sausage"
existingFile = `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`
existingSubDir = "êé"
)
// TestIntegration/FsMkdir/FsPutFiles/Internal/Shortcuts
func (f *Fs) InternalTestShortcuts(t *testing.T) {
ctx := context.Background()
srcObj, err := f.NewObject(ctx, existingFile)
require.NoError(t, err)
srcHash, err := srcObj.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.NotEqual(t, "", srcHash)
t.Run("Errors", func(t *testing.T) {
_, err := f.makeShortcut(ctx, "", f, "")
assert.Error(t, err)
assert.Contains(t, err.Error(), "can't be root")
_, err = f.makeShortcut(ctx, "notfound", f, "dst")
assert.Error(t, err)
assert.Contains(t, err.Error(), "can't find source")
_, err = f.makeShortcut(ctx, existingFile, f, existingFile)
assert.Error(t, err)
assert.Contains(t, err.Error(), "not overwriting")
assert.Contains(t, err.Error(), "existing file")
_, err = f.makeShortcut(ctx, existingFile, f, existingDir)
assert.Error(t, err)
assert.Contains(t, err.Error(), "not overwriting")
assert.Contains(t, err.Error(), "existing directory")
})
t.Run("File", func(t *testing.T) {
dstObj, err := f.makeShortcut(ctx, existingFile, f, "shortcut.txt")
require.NoError(t, err)
require.NotNil(t, dstObj)
assert.Equal(t, "shortcut.txt", dstObj.Remote())
dstHash, err := dstObj.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, srcHash, dstHash)
require.NoError(t, dstObj.Remove(ctx))
})
t.Run("Dir", func(t *testing.T) {
dstObj, err := f.makeShortcut(ctx, existingDir, f, "shortcutdir")
require.NoError(t, err)
require.Nil(t, dstObj)
entries, err := f.List(ctx, "shortcutdir")
require.NoError(t, err)
require.Equal(t, 1, len(entries))
require.Equal(t, "shortcutdir/"+existingSubDir, entries[0].Remote())
require.NoError(t, f.Rmdir(ctx, "shortcutdir"))
})
t.Run("Command", func(t *testing.T) {
_, err := f.Command(ctx, "shortcut", []string{"one"}, nil)
require.Error(t, err)
require.Contains(t, err.Error(), "need exactly 2 arguments")
_, err = f.Command(ctx, "shortcut", []string{"one", "two"}, map[string]string{
"target": "doesnotexistremote:",
})
require.Error(t, err)
require.Contains(t, err.Error(), "couldn't find target")
_, err = f.Command(ctx, "shortcut", []string{"one", "two"}, map[string]string{
"target": ".",
})
require.Error(t, err)
require.Contains(t, err.Error(), "target is not a drive backend")
dstObjI, err := f.Command(ctx, "shortcut", []string{existingFile, "shortcut2.txt"}, map[string]string{
"target": fs.ConfigString(f),
})
require.NoError(t, err)
dstObj := dstObjI.(*Object)
assert.Equal(t, "shortcut2.txt", dstObj.Remote())
dstHash, err := dstObj.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, srcHash, dstHash)
require.NoError(t, dstObj.Remove(ctx))
dstObjI, err = f.Command(ctx, "shortcut", []string{existingFile, "shortcut3.txt"}, nil)
require.NoError(t, err)
dstObj = dstObjI.(*Object)
assert.Equal(t, "shortcut3.txt", dstObj.Remote())
dstHash, err = dstObj.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, srcHash, dstHash)
require.NoError(t, dstObj.Remove(ctx))
})
}
// TestIntegration/FsMkdir/FsPutFiles/Internal/UnTrash
func (f *Fs) InternalTestUnTrash(t *testing.T) {
ctx := context.Background()
// Make some objects, one in a subdir
contents := random.String(100)
file1 := fstest.NewItem("trashDir/toBeTrashed", contents, time.Now())
_, obj1 := fstests.PutTestContents(ctx, t, f, &file1, contents, false)
file2 := fstest.NewItem("trashDir/subdir/toBeTrashed", contents, time.Now())
_, _ = fstests.PutTestContents(ctx, t, f, &file2, contents, false)
// Check objects
checkObjects := func() {
fstest.CheckListingWithRoot(t, f, "trashDir", []fstest.Item{
file1,
file2,
}, []string{
"trashDir/subdir",
}, f.Precision())
}
checkObjects()
// Make sure we are using the trash
require.Equal(t, true, f.opt.UseTrash)
// Remove the object and the dir
require.NoError(t, obj1.Remove(ctx))
require.NoError(t, f.Purge(ctx, "trashDir/subdir"))
// Check objects gone
fstest.CheckListingWithRoot(t, f, "trashDir", []fstest.Item{}, []string{}, f.Precision())
// Restore the object and directory
r, err := f.unTrashDir(ctx, "trashDir", true)
require.NoError(t, err)
assert.Equal(t, unTrashResult{Errors: 0, Untrashed: 2}, r)
// Check objects restored
checkObjects()
// Remove the test dir
require.NoError(t, f.Purge(ctx, "trashDir"))
}
// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyID
func (f *Fs) InternalTestCopyID(t *testing.T) {
ctx := context.Background()
obj, err := f.NewObject(ctx, existingFile)
require.NoError(t, err)
o := obj.(*Object)
dir, err := ioutil.TempDir("", "rclone-drive-copyid-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(dir)
}()
checkFile := func(name string) {
filePath := filepath.Join(dir, name)
fi, err := os.Stat(filePath)
require.NoError(t, err)
assert.Equal(t, int64(100), fi.Size())
err = os.Remove(filePath)
require.NoError(t, err)
}
t.Run("BadID", func(t *testing.T) {
err = f.copyID(ctx, "ID-NOT-FOUND", dir+"/")
require.Error(t, err)
assert.Contains(t, err.Error(), "couldn't find id")
})
t.Run("Directory", func(t *testing.T) {
rootID, err := f.dirCache.RootID(ctx, false)
require.NoError(t, err)
err = f.copyID(ctx, rootID, dir+"/")
require.Error(t, err)
assert.Contains(t, err.Error(), "can't copy directory")
})
t.Run("WithoutDestName", func(t *testing.T) {
err = f.copyID(ctx, o.id, dir+"/")
require.NoError(t, err)
checkFile(path.Base(existingFile))
})
t.Run("WithDestName", func(t *testing.T) {
err = f.copyID(ctx, o.id, dir+"/potato.txt")
require.NoError(t, err)
checkFile("potato.txt")
})
}
func (f *Fs) InternalTest(t *testing.T) {
// These tests all depend on each other so run them as nested tests
t.Run("DocumentImport", func(t *testing.T) {
@@ -282,6 +475,9 @@ func (f *Fs) InternalTest(t *testing.T) {
})
})
})
t.Run("Shortcuts", f.InternalTestShortcuts)
t.Run("UnTrash", f.InternalTestUnTrash)
t.Run("CopyID", f.InternalTestCopyID)
}
var _ fstests.InternalTester = (*Fs)(nil)


@@ -11,16 +11,15 @@
package drive
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"regexp"
"strconv"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/readers"
@@ -78,23 +77,24 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
return false, err
}
var req *http.Request
req, err = http.NewRequest(method, urls, body)
req, err = http.NewRequestWithContext(ctx, method, urls, body)
if err != nil {
return false, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
googleapi.Expand(req.URL, map[string]string{
"fileId": fileID,
})
req.Header.Set("Content-Type", "application/json; charset=UTF-8")
req.Header.Set("X-Upload-Content-Type", contentType)
req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
if size >= 0 {
req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
}
res, err = f.client.Do(req)
if err == nil {
defer googleapi.CloseBody(res)
err = googleapi.CheckResponse(res)
}
return shouldRetry(err)
return f.shouldRetry(ctx, err)
})
if err != nil {
return nil, err
@@ -113,52 +113,21 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
// Make an http.Request for the range passed in
func (rx *resumableUpload) makeRequest(ctx context.Context, start int64, body io.ReadSeeker, reqSize int64) *http.Request {
req, _ := http.NewRequest("POST", rx.URI, body)
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req, _ := http.NewRequestWithContext(ctx, "POST", rx.URI, body)
req.ContentLength = reqSize
totalSize := "*"
if rx.ContentLength >= 0 {
totalSize = strconv.FormatInt(rx.ContentLength, 10)
}
if reqSize != 0 {
req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, totalSize))
} else {
req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength))
req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", totalSize))
}
req.Header.Set("Content-Type", rx.MediaType)
return req
}
// rangeRE matches the transfer status response from the server. $1 is
// the last byte index uploaded.
var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)
// Query drive for the amount transferred so far
//
// If error is nil, then start should be valid
func (rx *resumableUpload) transferStatus(ctx context.Context) (start int64, err error) {
req := rx.makeRequest(ctx, 0, nil, 0)
res, err := rx.f.client.Do(req)
if err != nil {
return 0, err
}
defer googleapi.CloseBody(res)
if res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusOK {
return rx.ContentLength, nil
}
if res.StatusCode != statusResumeIncomplete {
err = googleapi.CheckResponse(res)
if err != nil {
return 0, err
}
return 0, errors.Errorf("unexpected http return code %v", res.StatusCode)
}
Range := res.Header.Get("Range")
if m := rangeRE.FindStringSubmatch(Range); len(m) == 2 {
start, err = strconv.ParseInt(m[1], 10, 64)
if err == nil {
return start, nil
}
}
return 0, errors.Errorf("unable to parse range %q", Range)
}
// Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
func (rx *resumableUpload) transferChunk(ctx context.Context, start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
_, _ = chunk.Seek(0, io.SeekStart)
@@ -200,18 +169,40 @@ func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
var StatusCode int
var err error
buf := make([]byte, int(rx.f.opt.ChunkSize))
for start < rx.ContentLength {
reqSize := rx.ContentLength - start
if reqSize >= int64(rx.f.opt.ChunkSize) {
reqSize = int64(rx.f.opt.ChunkSize)
for finished := false; !finished; {
var reqSize int64
var chunk io.ReadSeeker
if rx.ContentLength >= 0 {
// If size known use repeatable reader for smoother bwlimit
if start >= rx.ContentLength {
break
}
reqSize = rx.ContentLength - start
if reqSize >= int64(rx.f.opt.ChunkSize) {
reqSize = int64(rx.f.opt.ChunkSize)
}
chunk = readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
} else {
// If size unknown read into buffer
var n int
n, err = readers.ReadFill(rx.Media, buf)
if err == io.EOF {
// Send the last chunk with the correct ContentLength
// otherwise Google doesn't know we've finished
rx.ContentLength = start + int64(n)
finished = true
} else if err != nil {
return nil, err
}
reqSize = int64(n)
chunk = bytes.NewReader(buf[:reqSize])
}
chunk := readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
// Transfer the chunk
err = rx.f.pacer.Call(func() (bool, error) {
fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
StatusCode, err = rx.transferChunk(ctx, start, chunk, reqSize)
again, err := shouldRetry(err)
again, err := rx.f.shouldRetry(ctx, err)
if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
again = false
err = nil
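The unknown-size branch above reads the stream one buffer at a time and only learns the total ContentLength when EOF arrives. A minimal standalone sketch of that pattern, using io.ReadFull in place of rclone's readers.ReadFill (which behaves similarly); the HTTP transfer itself is left as a comment:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// uploadUnknownSize reads fixed-size chunks until EOF; only then is the
// total length known, mirroring the streamed-upload branch above.
func uploadUnknownSize(r io.Reader, chunkSize int) (total int64, err error) {
	buf := make([]byte, chunkSize)
	for finished := false; !finished; {
		n, err := io.ReadFull(r, buf)
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			finished = true // last (possibly short, possibly empty) chunk
		} else if err != nil {
			return total, err
		}
		chunk := bytes.NewReader(buf[:n])
		// A real uploader would send this chunk with a Content-Range header here.
		_ = chunk
		total += int64(n)
	}
	return total, nil
}

func main() {
	n, err := uploadUnknownSize(strings.NewReader(strings.Repeat("x", 2500)), 1024)
	fmt.Println(n, err) // 2500 <nil>
}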

backend/dropbox/dropbox.go: 843 changed lines, Normal file → Executable file (file diff suppressed because it is too large)


@@ -0,0 +1,44 @@
package dropbox
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestInternalCheckPathLength(t *testing.T) {
rep := func(n int, r rune) (out string) {
rs := make([]rune, n)
for i := range rs {
rs[i] = r
}
return string(rs)
}
for _, test := range []struct {
in string
ok bool
}{
{in: "", ok: true},
{in: rep(maxFileNameLength, 'a'), ok: true},
{in: rep(maxFileNameLength+1, 'a'), ok: false},
{in: rep(maxFileNameLength, '£'), ok: true},
{in: rep(maxFileNameLength+1, '£'), ok: false},
{in: rep(maxFileNameLength, '☺'), ok: true},
{in: rep(maxFileNameLength+1, '☺'), ok: false},
{in: rep(maxFileNameLength, '你'), ok: true},
{in: rep(maxFileNameLength+1, '你'), ok: false},
{in: "/ok/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength, 'a') + "/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength+1, 'a') + "/ok", ok: false},
{in: "/ok/" + rep(maxFileNameLength, '£') + "/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength+1, '£') + "/ok", ok: false},
{in: "/ok/" + rep(maxFileNameLength, '☺') + "/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength+1, '☺') + "/ok", ok: false},
{in: "/ok/" + rep(maxFileNameLength, '你') + "/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength+1, '你') + "/ok", ok: false},
} {
err := checkPathLength(test.in)
assert.Equal(t, test.ok, err == nil, test.in)
}
}
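For context, the helper under test lives in backend/dropbox/dropbox.go (whose diff is suppressed above), so only the test is visible here. A minimal standalone sketch of the per-path-component rune limit the table exercises, with maxFileNameLength assumed to be 255:

package main

import (
	"fmt"
	"strings"
	"unicode/utf8"
)

// Assumed value; the real constant is defined in backend/dropbox/dropbox.go.
const maxFileNameLength = 255

// checkPathLength sketch: every path component may be at most
// maxFileNameLength runes (not bytes), matching the test cases above.
func checkPathLength(name string) error {
	for _, component := range strings.Split(name, "/") {
		if n := utf8.RuneCountInString(component); n > maxFileNameLength {
			return fmt.Errorf("path component %q is too long (%d runes > %d)", component, n, maxFileNameLength)
		}
	}
	return nil
}

func main() {
	fmt.Println(checkPathLength("/ok/ok") == nil)                 // true
	fmt.Println(checkPathLength(strings.Repeat("☺", 256)) == nil) // false
}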


@@ -6,6 +6,7 @@ import (
"net/http"
"regexp"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
@@ -17,6 +18,7 @@ import (
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
403, // Forbidden (may happen when request limit is exceeded)
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
@@ -26,12 +28,64 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
// Detect this error which the integration tests provoke
// error HTTP error 403 (403 Forbidden) returned body: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
//
// https://1fichier.com/api.html
//
// file/ls.cgi is limited :
//
// Warning (can be changed in case of abuses) :
// List all files of the account is limited to 1 request per hour.
// List folders is limited to 5 000 results and 1 request per folder per 30s.
if err != nil && strings.Contains(err.Error(), "Flood detected") {
fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
time.Sleep(30 * time.Second)
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
var isAlphaNumeric = regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString
func (f *Fs) createObject(ctx context.Context, remote string) (o *Object, leaf string, directoryID string, err error) {
// Create the directory for the object if it doesn't exist
leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true)
if err != nil {
return
}
// Temporary Object under construction
o = &Object{
fs: f,
remote: remote,
}
return o, leaf, directoryID, nil
}
func (f *Fs) readFileInfo(ctx context.Context, url string) (*File, error) {
request := FileInfoRequest{
URL: url,
}
opts := rest.Opts{
Method: "POST",
Path: "/file/info.cgi",
}
var file File
err := f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, &file)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't read file info")
}
return &file, err
}
func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
request := DownloadRequest{
URL: url,
@@ -45,7 +99,7 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
var token GetTokenResponse
err := f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
@@ -73,7 +127,7 @@ func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntr
var sharedFiles SharedFolderResponse
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, &sharedFiles)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
@@ -102,14 +156,14 @@ func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesLi
filesList = &FilesList{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, filesList)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
}
for i := range filesList.Items {
item := &filesList.Items[i]
item.Filename = enc.ToStandardName(item.Filename)
item.Filename = f.opt.Enc.ToStandardName(item.Filename)
}
return filesList, nil
@@ -130,15 +184,15 @@ func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *Fol
foldersList = &FoldersList{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, foldersList)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list folders")
}
foldersList.Name = enc.ToStandardName(foldersList.Name)
foldersList.Name = f.opt.Enc.ToStandardName(foldersList.Name)
for i := range foldersList.SubFolders {
folder := &foldersList.SubFolders[i]
folder.Name = enc.ToStandardName(folder.Name)
folder.Name = f.opt.Enc.ToStandardName(folder.Name)
}
// fs.Debugf(f, "Got FoldersList for id `%s`", directoryID)
@@ -147,11 +201,6 @@ func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *Fol
}
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
@@ -213,7 +262,7 @@ func getRemote(dir, fileName string) string {
}
func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (response *MakeFolderResponse, err error) {
name := enc.FromStandardName(leaf)
name := f.opt.Enc.FromStandardName(leaf)
// fs.Debugf(f, "Creating folder `%s` in id `%s`", name, directoryID)
request := MakeFolderRequest{
@@ -229,7 +278,7 @@ func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (respons
response = &MakeFolderResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, response)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create folder")
@@ -256,7 +305,7 @@ func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (respo
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't remove folder")
@@ -285,7 +334,7 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
response = &GenericOKResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
@@ -297,6 +346,56 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
return response, nil
}
func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename string) (response *MoveFileResponse, err error) {
request := &MoveFileRequest{
URLs: []string{url},
FolderID: folderID,
Rename: rename,
}
opts := rest.Opts{
Method: "POST",
Path: "/file/mv.cgi",
}
response = &MoveFileResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't copy file")
}
return response, nil
}
func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename string) (response *CopyFileResponse, err error) {
request := &CopyFileRequest{
URLs: []string{url},
FolderID: folderID,
Rename: rename,
}
opts := rest.Opts{
Method: "POST",
Path: "/file/cp.cgi",
}
response = &CopyFileResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't copy file")
}
return response, nil
}
func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse, err error) {
// fs.Debugf(f, "Requesting Upload node")
@@ -309,7 +408,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
response = &GetUploadNodeResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "didnt got an upload node")
@@ -320,10 +419,10 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
return response, err
}
func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, folderID, uploadID, node string, options ...fs.OpenOption) (response *http.Response, err error) {
// fs.Debugf(f, "Uploading File `%s`", fileName)
fileName = enc.FromStandardName(fileName)
fileName = f.opt.Enc.FromStandardName(fileName)
if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
return nil, errors.New("Invalid UploadID")
@@ -338,6 +437,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
NoResponse: true,
Body: in,
ContentLength: &size,
Options: options,
MultipartContentName: "file[]",
MultipartFileName: fileName,
MultipartParams: map[string][]string{
@@ -351,7 +451,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
err = f.pacer.CallNoRetry(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, nil)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
@@ -385,7 +485,7 @@ func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (re
response = &EndFileUploadResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {


@@ -11,52 +11,75 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
const (
rootID = "0"
apiBaseURL = "https://api.1fichier.com/v1"
minSleep = 334 * time.Millisecond // 3 API calls per second is recommended
maxSleep = 5 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
rootID = "0"
apiBaseURL = "https://api.1fichier.com/v1"
minSleep = 400 * time.Millisecond // api is extremely rate limited now
maxSleep = 5 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
attackConstant = 0 // start with max sleep
)
const enc = encodings.Fichier
func init() {
fs.Register(&fs.RegInfo{
Name: "fichier",
Description: "1Fichier",
Config: func(name string, config configmap.Mapper) {
Config: func(ctx context.Context, name string, config configmap.Mapper) {
},
NewFs: NewFs,
Options: []fs.Option{
{
Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
Name: "api_key",
},
{
Help: "If you want to download a shared folder, add this parameter",
Name: "shared_folder",
Required: false,
Advanced: true,
},
},
Options: []fs.Option{{
Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
Name: "api_key",
}, {
Help: "If you want to download a shared folder, add this parameter",
Name: "shared_folder",
Required: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// Characters that need escaping
//
// '\\': '', // FULLWIDTH REVERSE SOLIDUS
// '<': '', // FULLWIDTH LESS-THAN SIGN
// '>': '', // FULLWIDTH GREATER-THAN SIGN
// '"': '', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
// '\'': '', // FULLWIDTH APOSTROPHE
// '$': '', // FULLWIDTH DOLLAR SIGN
// '`': '', // FULLWIDTH GRAVE ACCENT
//
// Leading space and trailing space
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeSingleQuote |
encoder.EncodeBackQuote |
encoder.EncodeDoubleQuote |
encoder.EncodeLtGt |
encoder.EncodeDollar |
encoder.EncodeLeftSpace |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8),
}},
})
}
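A small sketch of what the default encoding registered above does to a reserved character. It reuses the same flag combination; the exact fullwidth replacements come from lib/encoder, and the example file name is purely illustrative:

package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/encoder"
)

func main() {
	// Same flag combination as the fichier default above.
	enc := encoder.Display |
		encoder.EncodeBackSlash |
		encoder.EncodeSingleQuote |
		encoder.EncodeBackQuote |
		encoder.EncodeDoubleQuote |
		encoder.EncodeLtGt |
		encoder.EncodeDollar |
		encoder.EncodeLeftSpace |
		encoder.EncodeRightSpace |
		encoder.EncodeInvalidUtf8

	// Reserved characters are mapped to fullwidth equivalents on the way to
	// the remote and mapped back when listing.
	name := `price < $10.txt`
	encoded := enc.FromStandardName(name)
	fmt.Println(encoded)
	fmt.Println(enc.ToStandardName(encoded) == name) // true
}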
// Options defines the configuration for this backend
type Options struct {
APIKey string `config:"api_key"`
SharedFolder string `config:"shared_folder"`
APIKey string `config:"api_key"`
SharedFolder string `config:"shared_folder"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs is the interface a cloud storage system must provide
@@ -64,9 +87,9 @@ type Fs struct {
root string
name string
features *fs.Features
opt Options
dirCache *dircache.DirCache
baseClient *http.Client
options *Options
pacer *fs.Pacer
rest *rest.Client
}
@@ -144,7 +167,7 @@ func (f *Fs) Features() *fs.Features {
//
// On Windows avoid single character remote names as they can be mixed
// up with drive letters.
func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
func NewFs(ctx context.Context, name string, root string, config configmap.Mapper) (fs.Fs, error) {
opt := new(Options)
err := configstruct.Set(config, opt)
if err != nil {
@@ -162,26 +185,25 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
f := &Fs{
name: name,
root: root,
options: opt,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
opt: *opt,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant), pacer.AttackConstant(attackConstant))),
baseClient: &http.Client{},
}
f.features = (&fs.Features{
DuplicateFiles: true,
CanHaveEmptyDirectories: true,
}).Fill(f)
ReadMimeType: true,
}).Fill(ctx, f)
client := fshttp.NewClient(fs.Config)
client := fshttp.NewClient(ctx)
f.rest = rest.NewClient(client).SetRoot(apiBaseURL)
f.rest.SetHeader("Authorization", "Bearer "+f.options.APIKey)
f.rest.SetHeader("Authorization", "Bearer "+f.opt.APIKey)
f.dirCache = dircache.New(root, rootID, f)
ctx := context.Background()
// Find the current root
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
@@ -204,7 +226,7 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
}
return nil, err
}
f.features.Fill(&tempF)
f.features.Fill(ctx, &tempF)
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182
@@ -226,8 +248,8 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.options.SharedFolder != "" {
return f.listSharedFiles(ctx, f.options.SharedFolder)
if f.opt.SharedFolder != "" {
return f.listSharedFiles(ctx, f.opt.SharedFolder)
}
dirContent, err := f.listDir(ctx, dir)
@@ -241,7 +263,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, false)
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, fs.ErrorObjectNotFound
@@ -275,7 +297,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// Put in to the remote path with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
// return an error or upload it properly (rather than e.g. calling panic).
//
@@ -283,10 +305,10 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
exisitingObj, err := f.NewObject(ctx, src.Remote())
existingObj, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src, options...)
@@ -300,7 +322,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
if size > int64(100e9) {
if size > int64(300e9) {
return nil, errors.New("File too big, cant upload")
} else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
@@ -311,12 +333,12 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
return nil, err
}
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
if err != nil {
return nil, err
}
_, err = f.uploadFile(ctx, in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL)
_, err = f.uploadFile(ctx, in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL, options...)
if err != nil {
return nil, err
}
@@ -326,8 +348,10 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
return nil, err
}
if len(fileUploadResponse.Links) != 1 {
return nil, errors.New("unexpected amount of files")
if len(fileUploadResponse.Links) == 0 {
return nil, errors.New("upload response not found")
} else if len(fileUploadResponse.Links) > 1 {
fs.Debugf(remote, "Multiple upload responses found, using the first")
}
link := fileUploadResponse.Links[0]
@@ -341,7 +365,6 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
fs: f,
remote: remote,
file: File{
ACL: 0,
CDN: 0,
Checksum: link.Whirlpool,
ContentType: "",
@@ -366,13 +389,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
err := f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true)
}
_, err := f.dirCache.FindDir(ctx, dir, true)
return err
}
@@ -380,11 +397,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
err := f.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
@@ -405,9 +417,89 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return nil
}
// Move src to this remote using server-side move operations.
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
// Create temporary object
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
if err != nil {
return nil, err
}
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
}
if resp.Status != "OK" {
return nil, errors.New("couldn't move file")
}
file, err := f.readFileInfo(ctx, resp.URLs[0])
if err != nil {
return nil, errors.New("couldn't read file data")
}
dstObj.setMetaData(*file)
return dstObj, nil
}
// Copy src to this remote using server-side copy operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
// Create temporary object
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
if err != nil {
return nil, err
}
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
resp, err := f.copyFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
}
if resp.Status != "OK" {
return nil, errors.New("couldn't move file")
}
file, err := f.readFileInfo(ctx, resp.URLs[0].ToURL)
if err != nil {
return nil, errors.New("couldn't read file data")
}
dstObj.setMetaData(*file)
return dstObj, nil
}
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
o, err := f.NewObject(ctx, remote)
if err != nil {
return "", err
}
return o.(*Object).file.URL, nil
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ dircache.DirCacher = (*Fs)(nil)
)


@@ -4,13 +4,11 @@ package fichier
import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fs.Config.LogLevel = fs.LogLevelDebug
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFichier:",
})


@@ -72,6 +72,10 @@ func (o *Object) SetModTime(context.Context, time.Time) error {
//return errors.New("setting modtime is not supported for 1fichier remotes")
}
func (o *Object) setMetaData(file File) {
o.file = file
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
fs.FixRangeOption(options, o.file.Size)
@@ -90,7 +94,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.rest.Call(ctx, &opts)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
@@ -101,7 +105,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
// Update in to the object with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {


@@ -1,5 +1,10 @@
package fichier
// FileInfoRequest is the request structure of the corresponding request
type FileInfoRequest struct {
URL string `json:"url"`
}
// ListFolderRequest is the request structure of the corresponding request
type ListFolderRequest struct {
FolderID int `json:"folder_id"`
@@ -49,6 +54,39 @@ type MakeFolderResponse struct {
FolderID int `json:"folder_id"`
}
// MoveFileRequest is the request structure of the corresponding request
type MoveFileRequest struct {
URLs []string `json:"urls"`
FolderID int `json:"destination_folder_id"`
Rename string `json:"rename,omitempty"`
}
// MoveFileResponse is the response structure of the corresponding request
type MoveFileResponse struct {
Status string `json:"status"`
URLs []string `json:"urls"`
}
// CopyFileRequest is the request structure of the corresponding request
type CopyFileRequest struct {
URLs []string `json:"urls"`
FolderID int `json:"folder_id"`
Rename string `json:"rename,omitempty"`
}
// CopyFileResponse is the response structure of the corresponding request
type CopyFileResponse struct {
Status string `json:"status"`
Copied int `json:"copied"`
URLs []FileCopy `json:"urls"`
}
// FileCopy is used in the CopyFileResponse
type FileCopy struct {
FromURL string `json:"from_url"`
ToURL string `json:"to_url"`
}
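The move and copy helpers above post these structures as JSON to /file/mv.cgi and /file/cp.cgi. A sketch of the wire format for a move request, with illustrative values only:

package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors MoveFileRequest above to show the JSON body sent to /file/mv.cgi.
type MoveFileRequest struct {
	URLs     []string `json:"urls"`
	FolderID int      `json:"destination_folder_id"`
	Rename   string   `json:"rename,omitempty"`
}

func main() {
	req := MoveFileRequest{
		URLs:     []string{"https://1fichier.com/?exampleid"}, // made-up file URL
		FolderID: 42,                                          // made-up destination folder id
		Rename:   "newname.txt",
	}
	body, _ := json.Marshal(&req)
	fmt.Println(string(body))
	// {"urls":["https://1fichier.com/?exampleid"],"destination_folder_id":42,"rename":"newname.txt"}
}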
// GetUploadNodeResponse is the response structure of the corresponding request
type GetUploadNodeResponse struct {
ID string `json:"id"`
@@ -86,7 +124,6 @@ type EndFileUploadResponse struct {
// File is the structure in which 1Fichier returns a File
type File struct {
ACL int `json:"acl"`
CDN int `json:"cdn"`
Checksum string `json:"checksum"`
ContentType string `json:"content-type"`


@@ -0,0 +1,391 @@
// Package api has type definitions for filefabric
//
// Converted from the API responses with help from https://mholt.github.io/json-to-go/
package api
import (
"bytes"
"fmt"
"reflect"
"strings"
"time"
)
const (
// TimeFormat for parameters (UTC)
timeFormatParameters = `2006-01-02 15:04:05`
// "2020-08-11 10:10:04" for JSON parsing
timeFormatJSON = `"` + timeFormatParameters + `"`
)
// Time represents date and time information for the
// filefabric API
type Time time.Time
// MarshalJSON turns a Time into JSON (in UTC)
func (t *Time) MarshalJSON() (out []byte, err error) {
timeString := (*time.Time)(t).UTC().Format(timeFormatJSON)
return []byte(timeString), nil
}
var zeroTime = []byte(`"0000-00-00 00:00:00"`)
// UnmarshalJSON turns JSON into a Time (in UTC)
func (t *Time) UnmarshalJSON(data []byte) error {
// Set a Zero time.Time if we receive a zero time input
if bytes.Equal(data, zeroTime) {
*t = Time(time.Time{})
return nil
}
newT, err := time.Parse(timeFormatJSON, string(data))
if err != nil {
return err
}
*t = Time(newT)
return nil
}
// String turns a Time into a string in UTC suitable for the API
// parameters
func (t Time) String() string {
return time.Time(t).UTC().Format(timeFormatParameters)
}
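A short usage sketch of the round-trip behaviour of this Time type, assuming the new package is importable as github.com/rclone/rclone/backend/filefabric/api:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/rclone/rclone/backend/filefabric/api" // assumed import path for this new package
)

func main() {
	var t api.Time

	// Normal timestamps parse into UTC and format back without the quotes.
	if err := json.Unmarshal([]byte(`"2020-08-11 10:10:04"`), &t); err != nil {
		panic(err)
	}
	fmt.Println(t.String()) // 2020-08-11 10:10:04

	// The API's "0000-00-00 00:00:00" zero value decodes to a zero time.Time.
	if err := json.Unmarshal([]byte(`"0000-00-00 00:00:00"`), &t); err != nil {
		panic(err)
	}
	fmt.Println(time.Time(t).IsZero()) // true
}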
// Status is the status returned in all status responses
type Status struct {
Code string `json:"status"`
Message string `json:"statusmessage"`
TaskID string `json:"taskid"`
// Warning string `json:"warning"` // obsolete
}
// Status satisfies the error interface
func (e *Status) Error() string {
return fmt.Sprintf("%s (%s)", e.Message, e.Code)
}
// OK returns true if the status is all good
func (e *Status) OK() bool {
return e.Code == "ok"
}
// GetCode returns the status code if any
func (e *Status) GetCode() string {
return e.Code
}
// OKError defines an interface for items which can be OK or be an error
type OKError interface {
error
OK() bool
GetCode() string
}
// Check Status satisfies the OKError interface
var _ OKError = (*Status)(nil)
// EmptyResponse is a response which just returns the error condition
type EmptyResponse struct {
Status
}
// GetTokenByAuthTokenResponse is the response to getTokenByAuthToken
type GetTokenByAuthTokenResponse struct {
Status
Token string `json:"token"`
UserID string `json:"userid"`
AllowLoginRemember string `json:"allowloginremember"`
LastLogin Time `json:"lastlogin"`
AutoLoginCode string `json:"autologincode"`
}
// ApplianceInfo is the response to getApplianceInfo
type ApplianceInfo struct {
Status
Sitetitle string `json:"sitetitle"`
OauthLoginSupport string `json:"oauthloginsupport"`
IsAppliance string `json:"isappliance"`
SoftwareVersion string `json:"softwareversion"`
SoftwareVersionLabel string `json:"softwareversionlabel"`
}
// GetFolderContentsResponse is returned from getFolderContents
type GetFolderContentsResponse struct {
Status
Total int `json:"total,string"`
Items []Item `json:"filelist"`
Folder Item `json:"folder"`
From int `json:"from,string"`
//Count int `json:"count"`
Pid string `json:"pid"`
RefreshResult Status `json:"refreshresult"`
// Curfolder Item `json:"curfolder"` - sometimes returned as "ROOT"?
Parents []Item `json:"parents"`
CustomPermissions CustomPermissions `json:"custompermissions"`
}
// ItemType determines whether it is a file or a folder
type ItemType uint8
// Types of things in Item
const (
ItemTypeFile ItemType = 0
ItemTypeFolder ItemType = 1
)
// Item is a File or a Folder
type Item struct {
ID string `json:"fi_id"`
PID string `json:"fi_pid"`
// UID string `json:"fi_uid"`
Name string `json:"fi_name"`
// S3Name string `json:"fi_s3name"`
// Extension string `json:"fi_extension"`
// Description string `json:"fi_description"`
Type ItemType `json:"fi_type,string"`
// Created Time `json:"fi_created"`
Size int64 `json:"fi_size,string"`
ContentType string `json:"fi_contenttype"`
// Tags string `json:"fi_tags"`
// MainCode string `json:"fi_maincode"`
// Public int `json:"fi_public,string"`
// Provider string `json:"fi_provider"`
// ProviderFolder string `json:"fi_providerfolder"` // folder
// Encrypted int `json:"fi_encrypted,string"`
// StructType string `json:"fi_structtype"`
// Bname string `json:"fi_bname"` // folder
// OrgID string `json:"fi_orgid"`
// Favorite int `json:"fi_favorite,string"`
// IspartOf string `json:"fi_ispartof"` // folder
Modified Time `json:"fi_modified"`
// LastAccessed Time `json:"fi_lastaccessed"`
// Hits int64 `json:"fi_hits,string"`
// IP string `json:"fi_ip"` // folder
// BigDescription string `json:"fi_bigdescription"`
LocalTime Time `json:"fi_localtime"`
// OrgfolderID string `json:"fi_orgfolderid"`
// StorageIP string `json:"fi_storageip"` // folder
// RemoteTime Time `json:"fi_remotetime"`
// ProviderOptions string `json:"fi_provideroptions"`
// Access string `json:"fi_access"`
// Hidden string `json:"fi_hidden"` // folder
// VersionOf string `json:"fi_versionof"`
Trash bool `json:"trash"`
// Isbucket string `json:"isbucket"` // filelist
SubFolders int64 `json:"subfolders"` // folder
}
// ItemFields is a | separated list of fields in Item
var ItemFields = mustFields(Item{})
// fields returns the JSON fields in use by opt as a | separated
// string.
func fields(opt interface{}) (pipeTags string, err error) {
var tags []string
def := reflect.ValueOf(opt)
defType := def.Type()
for i := 0; i < def.NumField(); i++ {
field := defType.Field(i)
tag, ok := field.Tag.Lookup("json")
if !ok {
continue
}
if comma := strings.IndexRune(tag, ','); comma >= 0 {
tag = tag[:comma]
}
if tag == "" {
continue
}
tags = append(tags, tag)
}
return strings.Join(tags, "|"), nil
}
// mustFields returns the JSON fields in use by opt as a | separated
// string. It panics on failure.
func mustFields(opt interface{}) string {
tags, err := fields(opt)
if err != nil {
panic(err)
}
return tags
}
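// Illustrative sketch (for clarity only, not part of the backend): fields
// walks the struct with reflect, keeps the part of each json tag before the
// first comma and joins the tags with "|". For a struct such as
//
//	type example struct {
//		ID   string `json:"fi_id"`
//		Name string `json:"fi_name"`
//		Size int64  `json:"fi_size,string"`
//	}
//
// mustFields(example{}) returns "fi_id|fi_name|fi_size", which is how
// ItemFields above is derived from Item.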
// CustomPermissions is returned as part of GetFolderContentsResponse
type CustomPermissions struct {
Upload string `json:"upload"`
CreateSubFolder string `json:"createsubfolder"`
Rename string `json:"rename"`
Delete string `json:"delete"`
Move string `json:"move"`
ManagePermissions string `json:"managepermissions"`
ListOnly string `json:"listonly"`
VisibleInTrash string `json:"visibleintrash"`
}
// DoCreateNewFolderResponse is the response from doCreateNewFolder
type DoCreateNewFolderResponse struct {
Status
Item Item `json:"file"`
}
// DoInitUploadResponse is the response from doInitUpload
type DoInitUploadResponse struct {
Status
ProviderID string `json:"providerid"`
UploadCode string `json:"uploadcode"`
FileType string `json:"filetype"`
DirectUploadSupport string `json:"directuploadsupport"`
ResumeAllowed string `json:"resumeallowed"`
}
// UploaderResponse is returned from /cgi-bin/uploader/uploader1.cgi
//
// Sometimes the response is returned as XML and sometimes as JSON
type UploaderResponse struct {
FileSize int64 `xml:"filesize" json:"filesize,string"`
MD5 string `xml:"md5" json:"md5"`
Success string `xml:"success" json:"success"`
}
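// Illustrative decoding sketch (assumed caller behaviour, not shown in this
// file): because the uploader CGI may answer in either format, the struct
// carries both xml and json tags and a caller could choose the decoder from
// the first byte of the body.
//
//	var ur UploaderResponse
//	if len(body) > 0 && body[0] == '<' {
//		err = xml.Unmarshal(body, &ur)
//	} else {
//		err = json.Unmarshal(body, &ur)
//	}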
// UploadStatus is returned from getUploadStatus
type UploadStatus struct {
Status
UploadCode string `json:"uploadcode"`
Metafile string `json:"metafile"`
Percent int `json:"percent,string"`
Uploaded int64 `json:"uploaded,string"`
Size int64 `json:"size,string"`
Filename string `json:"filename"`
Nofile string `json:"nofile"`
Completed string `json:"completed"`
Completsuccess string `json:"completsuccess"`
Completerror string `json:"completerror"`
}
// DoCompleteUploadResponse is the response to doCompleteUpload
type DoCompleteUploadResponse struct {
Status
UploadedSize int64 `json:"uploadedsize,string"`
StorageIP string `json:"storageip"`
UploadedName string `json:"uploadedname"`
// Versioned []interface{} `json:"versioned"`
// VersionedID int `json:"versionedid"`
// Comment interface{} `json:"comment"`
File Item `json:"file"`
// UsSize string `json:"us_size"`
// PaSize string `json:"pa_size"`
// SpaceInfo SpaceInfo `json:"spaceinfo"`
}
// Providers is returned as part of UploadResponse
type Providers struct {
Max string `json:"max"`
Used string `json:"used"`
ID string `json:"id"`
Private string `json:"private"`
Limit string `json:"limit"`
Percent int `json:"percent"`
}
// Total is returned as part of UploadResponse
type Total struct {
Max string `json:"max"`
Used string `json:"used"`
ID string `json:"id"`
Priused string `json:"priused"`
Primax string `json:"primax"`
Limit string `json:"limit"`
Percent int `json:"percent"`
Pripercent int `json:"pripercent"`
}
// UploadResponse is returned as part of SpaceInfo
type UploadResponse struct {
Providers []Providers `json:"providers"`
Total Total `json:"total"`
}
// SpaceInfo is returned as part of DoCompleteUploadResponse
type SpaceInfo struct {
Response UploadResponse `json:"response"`
Status string `json:"status"`
}
// DeleteResponse is returned from doDeleteFile
type DeleteResponse struct {
Status
Deleted []string `json:"deleted"`
Errors []interface{} `json:"errors"`
ID string `json:"fi_id"`
BackgroundTask int `json:"backgroundtask"`
UsSize string `json:"us_size"`
PaSize string `json:"pa_size"`
//SpaceInfo SpaceInfo `json:"spaceinfo"`
}
// FileResponse is returned from doRenameFile
type FileResponse struct {
Status
Item Item `json:"file"`
Exists string `json:"exists"`
}
// MoveFilesResponse is returned from doMoveFiles
type MoveFilesResponse struct {
Status
Filesleft string `json:"filesleft"`
Addedtobackground string `json:"addedtobackground"`
Moved string `json:"moved"`
Item Item `json:"file"`
IDs []string `json:"fi_ids"`
Length int `json:"length"`
DirID string `json:"dir_id"`
MovedObjects []Item `json:"movedobjects"`
// FolderTasks []interface{} `json:"foldertasks"`
}
// TasksResponse is the response to getUserBackgroundTasks
type TasksResponse struct {
Status
Tasks []Task `json:"tasks"`
Total string `json:"total"`
}
// BtData is part of TasksResponse
type BtData struct {
Callback string `json:"callback"`
}
// Task describes a task returned in TasksResponse
type Task struct {
BtID string `json:"bt_id"`
UsID string `json:"us_id"`
BtType string `json:"bt_type"`
BtData BtData `json:"bt_data"`
BtStatustext string `json:"bt_statustext"`
BtStatusdata string `json:"bt_statusdata"`
BtMessage string `json:"bt_message"`
BtProcent string `json:"bt_procent"`
BtAdded string `json:"bt_added"`
BtStatus string `json:"bt_status"`
BtCompleted string `json:"bt_completed"`
BtTitle string `json:"bt_title"`
BtCredentials string `json:"bt_credentials"`
BtHidden string `json:"bt_hidden"`
BtAutoremove string `json:"bt_autoremove"`
BtDevsite string `json:"bt_devsite"`
BtPriority string `json:"bt_priority"`
BtReport string `json:"bt_report"`
BtSitemarker string `json:"bt_sitemarker"`
BtExecuteafter string `json:"bt_executeafter"`
BtCompletestatus string `json:"bt_completestatus"`
BtSubtype string `json:"bt_subtype"`
BtCanceled string `json:"bt_canceled"`
Callback string `json:"callback"`
CanBeCanceled bool `json:"canbecanceled"`
CanBeRestarted bool `json:"canberestarted"`
Type string `json:"type"`
Status string `json:"status"`
Settings string `json:"settings"`
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,17 @@
// Test filefabric filesystem interface
package filefabric_test
import (
"testing"
"github.com/rclone/rclone/backend/filefabric"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFileFabric:",
NilObject: (*filefabric.Object)(nil),
})
}
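// The test can be run against a remote configured as "TestFileFabric:"
// (assumed invocation, following the pattern used for the other backends):
//
//	go test -v -remote TestFileFabric: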

View File

@@ -5,25 +5,40 @@ import (
"context"
"crypto/tls"
"io"
"net"
"net/textproto"
"os"
"path"
"runtime"
"strings"
"sync"
"time"
"github.com/jlaffaye/ftp"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
)
const enc = encodings.FTP
var (
currentUser = env.CurrentUser()
)
const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
// Register with Fs
func init() {
@@ -31,75 +46,125 @@ func init() {
Name: "ftp",
Description: "FTP Connection",
NewFs: NewFs,
Options: []fs.Option{
{
Name: "host",
Help: "FTP host to connect to",
Required: true,
Examples: []fs.OptionExample{{
Value: "ftp.example.com",
Help: "Connect to ftp.example.com",
}},
}, {
Name: "user",
Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
}, {
Name: "port",
Help: "FTP port, leave blank to use default (21)",
}, {
Name: "pass",
Help: "FTP password",
IsPassword: true,
Required: true,
}, {
Name: "tls",
Help: "Use FTP over TLS (Implicit)",
Default: false,
}, {
Name: "concurrency",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited",
Default: 0,
Advanced: true,
}, {
Name: "no_check_certificate",
Help: "Do not verify the TLS certificate of the server",
Default: false,
Advanced: true,
}, {
Name: "disable_epsv",
Help: "Disable using EPSV even if server advertises support",
Default: false,
Advanced: true,
},
},
Options: []fs.Option{{
Name: "host",
Help: "FTP host to connect to",
Required: true,
Examples: []fs.OptionExample{{
Value: "ftp.example.com",
Help: "Connect to ftp.example.com",
}},
}, {
Name: "user",
Help: "FTP username, leave blank for current username, " + currentUser,
}, {
Name: "port",
Help: "FTP port, leave blank to use default (21)",
}, {
Name: "pass",
Help: "FTP password",
IsPassword: true,
Required: true,
}, {
Name: "tls",
Help: `Use Implicit FTPS (FTP over TLS)
When using implicit FTP over TLS the client connects using TLS
right from the start which breaks compatibility with
non-TLS-aware servers. This is usually served over port 990 rather
than port 21. Cannot be used in combination with explicit FTP.`,
Default: false,
}, {
Name: "explicit_tls",
Help: `Use Explicit FTPS (FTP over TLS)
When using explicit FTP over TLS the client explicitly requests
security from the server in order to upgrade a plain text connection
to an encrypted one. Cannot be used in combination with implicit FTP.`,
Default: false,
}, {
Name: "concurrency",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited",
Default: 0,
Advanced: true,
}, {
Name: "no_check_certificate",
Help: "Do not verify the TLS certificate of the server",
Default: false,
Advanced: true,
}, {
Name: "disable_epsv",
Help: "Disable using EPSV even if server advertises support",
Default: false,
Advanced: true,
}, {
Name: "disable_mlsd",
Help: "Disable using MLSD even if server advertises support",
Default: false,
Advanced: true,
}, {
Name: "idle_timeout",
Default: fs.Duration(60 * time.Second),
Help: `Max time before closing idle connections
If no connections have been returned to the connection pool in the time
given, rclone will empty the connection pool.
Set to 0 to keep connections indefinitely.
`,
Advanced: true,
}, {
Name: "close_timeout",
Help: "Maximum time to wait for a response to close.",
Default: fs.Duration(60 * time.Second),
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// The FTP protocol can't handle trailing spaces (for instance
// pureftpd turns them into _)
//
// proftpd can't handle '*' in file names
// pureftpd can't handle '[', ']' or '*'
Default: (encoder.Display |
encoder.EncodeRightSpace),
}},
})
}
// Options defines the configuration for this backend
type Options struct {
Host string `config:"host"`
User string `config:"user"`
Pass string `config:"pass"`
Port string `config:"port"`
TLS bool `config:"tls"`
Concurrency int `config:"concurrency"`
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
Host string `config:"host"`
User string `config:"user"`
Pass string `config:"pass"`
Port string `config:"port"`
TLS bool `config:"tls"`
ExplicitTLS bool `config:"explicit_tls"`
Concurrency int `config:"concurrency"`
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
DisableMLSD bool `config:"disable_mlsd"`
IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote FTP server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
ci *fs.ConfigInfo // global config
features *fs.Features // optional features
url string
user string
pass string
dialAddr string
poolMu sync.Mutex
pool []*ftp.ServerConn
drain *time.Timer // used to drain the pool when we stop using the connections
tokens *pacer.TokenDispenser
tlsConf *tls.Config
pacer *fs.Pacer // pacer for FTP connections
}
// Object describes an FTP file
@@ -139,39 +204,123 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// Open a new connection to the FTP server.
func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
fs.Debugf(f, "Connecting to FTP server")
ftpConfig := []ftp.DialOption{ftp.DialWithTimeout(fs.Config.ConnectTimeout)}
if f.opt.TLS {
tlsConfig := &tls.Config{
ServerName: f.opt.Host,
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
// Enable debugging output
type debugLog struct {
mu sync.Mutex
auth bool
}
// Write writes len(p) bytes from p to the underlying data stream. It returns
// the number of bytes written from p (0 <= n <= len(p)) and any error
// encountered that caused the write to stop early. Write must return a non-nil
// error if it returns n < len(p). Write must not modify the slice data, even
// temporarily.
//
// Implementations must not retain p.
//
// This writes debug info to the log
func (dl *debugLog) Write(p []byte) (n int, err error) {
dl.mu.Lock()
defer dl.mu.Unlock()
_, file, _, ok := runtime.Caller(1)
direction := "FTP Rx"
if ok && strings.Contains(file, "multi") {
direction = "FTP Tx"
}
lines := strings.Split(string(p), "\r\n")
if lines[len(lines)-1] == "" {
lines = lines[:len(lines)-1]
}
for _, line := range lines {
if !dl.auth && strings.HasPrefix(line, "PASS") {
fs.Debugf(direction, "PASS *****")
continue
}
ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
fs.Debugf(direction, "%q", line)
}
return len(p), nil
}
type dialCtx struct {
f *Fs
ctx context.Context
}
// dial a new connection with fshttp dialer
func (d *dialCtx) dial(network, address string) (net.Conn, error) {
conn, err := fshttp.NewDialer(d.ctx).Dial(network, address)
if err != nil {
return nil, err
}
if d.f.tlsConf != nil {
conn = tls.Client(conn, d.f.tlsConf)
}
return conn, err
}
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusNotAvailable:
return true, err
}
}
return fserrors.ShouldRetry(err), err
}
// Open a new connection to the FTP server.
func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
fs.Debugf(f, "Connecting to FTP server")
dCtx := dialCtx{f, ctx}
ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dCtx.dial)}
if f.opt.ExplicitTLS {
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
// Initial connection needs to be cleartext for explicit TLS
conn, err := fshttp.NewDialer(ctx).Dial("tcp", f.dialAddr)
if err != nil {
return nil, err
}
ftpConfig = append(ftpConfig, ftp.DialWithNetConn(conn))
}
if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
}
c, err := ftp.Dial(f.dialAddr, ftpConfig...)
if err != nil {
fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
return nil, errors.Wrap(err, "ftpConnection Dial")
if f.opt.DisableMLSD {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true))
}
err = c.Login(f.user, f.pass)
if err != nil {
_ = c.Quit()
fs.Errorf(f, "Error while Logging in into %s: %s", f.dialAddr, err)
return nil, errors.Wrap(err, "ftpConnection Login")
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
}
return c, nil
err = f.pacer.Call(func() (bool, error) {
c, err = ftp.Dial(f.dialAddr, ftpConfig...)
if err != nil {
return shouldRetry(ctx, err)
}
err = c.Login(f.user, f.pass)
if err != nil {
_ = c.Quit()
return shouldRetry(ctx, err)
}
return false, nil
})
if err != nil {
err = errors.Wrapf(err, "failed to make FTP connection to %q", f.dialAddr)
}
return c, err
}
// Get an FTP connection from the pool, or open a new one
func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
func (f *Fs) getFtpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.opt.Concurrency > 0 {
f.tokens.Get()
}
accounting.LimitTPS(ctx)
f.poolMu.Lock()
if len(f.pool) > 0 {
c = f.pool[0]
@@ -181,7 +330,11 @@ func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
if c != nil {
return c, nil
}
return f.ftpConnection()
c, err = f.ftpConnection(ctx)
if err != nil && f.opt.Concurrency > 0 {
f.tokens.Put()
}
return c, err
}
// Return an FTP connection to the pool
@@ -194,7 +347,13 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
if f.opt.Concurrency > 0 {
defer f.tokens.Put()
}
if pc == nil {
return
}
c := *pc
if c == nil {
return
}
*pc = nil
if err != nil {
// If not a regular FTP error code then check the connection
@@ -210,12 +369,34 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
}
f.poolMu.Lock()
f.pool = append(f.pool, c)
if f.opt.IdleTimeout > 0 {
f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer
}
f.poolMu.Unlock()
}
// Drain the pool of any connections
func (f *Fs) drainPool(ctx context.Context) (err error) {
f.poolMu.Lock()
defer f.poolMu.Unlock()
if f.opt.IdleTimeout > 0 {
f.drain.Stop()
}
if len(f.pool) != 0 {
fs.Debugf(f, "closing %d unused connections", len(f.pool))
}
for i, c := range f.pool {
if cErr := c.Quit(); cErr != nil {
err = cErr
}
f.pool[i] = nil
}
f.pool = nil
return err
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
ctx := context.Background()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
// Parse config into Options struct
opt := new(Options)
@@ -229,7 +410,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
}
user := opt.User
if user == "" {
user = os.Getenv("USER")
user = currentUser
}
port := opt.Port
if port == "" {
@@ -241,22 +422,40 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
if opt.TLS {
protocol = "ftps://"
}
if opt.TLS && opt.ExplicitTLS {
return nil, errors.New("Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
}
var tlsConfig *tls.Config
if opt.TLS || opt.ExplicitTLS {
tlsConfig = &tls.Config{
ServerName: opt.Host,
InsecureSkipVerify: opt.SkipVerifyTLSCert,
}
}
u := protocol + path.Join(dialAddr+"/", root)
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
ci: ci,
url: u,
user: user,
pass: pass,
dialAddr: dialAddr,
tokens: pacer.NewTokenDispenser(opt.Concurrency),
tlsConf: tlsConfig,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(f)
}).Fill(ctx, f)
// set the pool drainer timer going
if f.opt.IdleTimeout > 0 {
f.drain = time.AfterFunc(time.Duration(opt.IdleTimeout), func() { _ = f.drainPool(ctx) })
}
// Make a connection and pool it to return errors early
c, err := f.getFtpConnection()
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "NewFs")
}
@@ -283,6 +482,12 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
return f, err
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
return f.drainPool(ctx)
}
// translateErrorFile turns FTP errors into rclone errors if possible for a file
func translateErrorFile(err error) error {
switch errX := err.(type) {
@@ -308,26 +513,26 @@ func translateErrorDir(err error) error {
}
// entryToStandard converts an incoming ftp.Entry to Standard encoding
func entryToStandard(entry *ftp.Entry) {
func (f *Fs) entryToStandard(entry *ftp.Entry) {
// Skip . and .. as we don't want these encoded
if entry.Name == "." || entry.Name == ".." {
return
}
entry.Name = enc.ToStandardName(entry.Name)
entry.Target = enc.ToStandardPath(entry.Target)
entry.Name = f.opt.Enc.ToStandardName(entry.Name)
entry.Target = f.opt.Enc.ToStandardPath(entry.Target)
}
// dirFromStandardPath returns dir in encoded form.
func dirFromStandardPath(dir string) string {
func (f *Fs) dirFromStandardPath(dir string) string {
// Skip . and .. as we don't want these encoded
if dir == "." || dir == ".." {
return dir
}
return enc.FromStandardPath(dir)
return f.opt.Enc.FromStandardPath(dir)
}
// findItem finds a directory entry for the name in its parent directory
func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
fullPath := path.Join(f.root, remote)
if fullPath == "" || fullPath == "." || fullPath == "/" {
@@ -341,17 +546,17 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
dir := path.Dir(fullPath)
base := path.Base(fullPath)
c, err := f.getFtpConnection()
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "findItem")
}
files, err := c.List(dirFromStandardPath(dir))
files, err := c.List(f.dirFromStandardPath(dir))
f.putFtpConnection(&c, err)
if err != nil {
return nil, translateErrorFile(err)
}
for _, file := range files {
entryToStandard(file)
f.entryToStandard(file)
if file.Name == base {
return file, nil
}
@@ -363,7 +568,7 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
entry, err := f.findItem(remote)
entry, err := f.findItem(ctx, remote)
if err != nil {
return nil, err
}
@@ -385,8 +590,8 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
}
// dirExists checks whether the directory pointed to by remote exists
func (f *Fs) dirExists(remote string) (exists bool, err error) {
entry, err := f.findItem(remote)
func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) {
entry, err := f.findItem(ctx, remote)
if err != nil {
return false, errors.Wrap(err, "dirExists")
}
@@ -407,7 +612,7 @@ func (f *Fs) dirExists(remote string) (exists bool, err error) {
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
c, err := f.getFtpConnection()
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "list")
}
@@ -418,7 +623,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
resultchan := make(chan []*ftp.Entry, 1)
errchan := make(chan error, 1)
go func() {
result, err := c.List(dirFromStandardPath(path.Join(f.root, dir)))
result, err := c.List(f.dirFromStandardPath(path.Join(f.root, dir)))
f.putFtpConnection(&c, err)
if err != nil {
errchan <- err
@@ -428,7 +633,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}()
// Wait for List for up to Timeout seconds
timer := time.NewTimer(fs.Config.Timeout)
timer := time.NewTimer(f.ci.TimeoutOrInfinite())
select {
case listErr = <-errchan:
timer.Stop()
@@ -445,7 +650,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// doesn't exist, so check it really doesn't exist if no
// entries found.
if len(files) == 0 {
exists, err := f.dirExists(dir)
exists, err := f.dirExists(ctx, dir)
if err != nil {
return nil, errors.Wrap(err, "list")
}
@@ -455,7 +660,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
for i := range files {
object := files[i]
entryToStandard(object)
f.entryToStandard(object)
newremote := path.Join(dir, object.Name)
switch object.Type {
case ftp.EntryTypeFolder:
@@ -498,7 +703,7 @@ func (f *Fs) Precision() time.Duration {
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// fs.Debugf(f, "Trying to put file %s", src.Remote())
err := f.mkParentDir(src.Remote())
err := f.mkParentDir(ctx, src.Remote())
if err != nil {
return nil, errors.Wrap(err, "Put mkParentDir failed")
}
@@ -516,16 +721,16 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
}
// getInfo reads the FileInfo for a path
func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err error) {
// defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err)
dir := path.Dir(remote)
base := path.Base(remote)
c, err := f.getFtpConnection()
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "getInfo")
}
files, err := c.List(dirFromStandardPath(dir))
files, err := c.List(f.dirFromStandardPath(dir))
f.putFtpConnection(&c, err)
if err != nil {
return nil, translateErrorFile(err)
@@ -533,7 +738,7 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
for i := range files {
file := files[i]
entryToStandard(file)
f.entryToStandard(file)
if file.Name == base {
info := &FileInfo{
Name: remote,
@@ -548,12 +753,12 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
}
// mkdir makes the directory and parents using unrooted paths
func (f *Fs) mkdir(abspath string) error {
func (f *Fs) mkdir(ctx context.Context, abspath string) error {
abspath = path.Clean(abspath)
if abspath == "." || abspath == "/" {
return nil
}
fi, err := f.getInfo(abspath)
fi, err := f.getInfo(ctx, abspath)
if err == nil {
if fi.IsDir {
return nil
@@ -563,15 +768,15 @@ func (f *Fs) mkdir(abspath string) error {
return errors.Wrapf(err, "mkdir %q failed", abspath)
}
parent := path.Dir(abspath)
err = f.mkdir(parent)
err = f.mkdir(ctx, parent)
if err != nil {
return err
}
c, connErr := f.getFtpConnection()
c, connErr := f.getFtpConnection(ctx)
if connErr != nil {
return errors.Wrap(connErr, "mkdir")
}
err = c.MakeDir(dirFromStandardPath(abspath))
err = c.MakeDir(f.dirFromStandardPath(abspath))
f.putFtpConnection(&c, err)
switch errX := err.(type) {
case *textproto.Error:
@@ -587,27 +792,27 @@ func (f *Fs) mkdir(abspath string) error {
// mkParentDir makes the parent of remote if necessary and any
// directories above that
func (f *Fs) mkParentDir(remote string) error {
func (f *Fs) mkParentDir(ctx context.Context, remote string) error {
parent := path.Dir(remote)
return f.mkdir(path.Join(f.root, parent))
return f.mkdir(ctx, path.Join(f.root, parent))
}
// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
// defer fs.Trace(dir, "")("err=%v", &err)
root := path.Join(f.root, dir)
return f.mkdir(root)
return f.mkdir(ctx, root)
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
c, err := f.getFtpConnection()
c, err := f.getFtpConnection(ctx)
if err != nil {
return errors.Wrap(translateErrorFile(err), "Rmdir")
}
err = c.RemoveDir(dirFromStandardPath(path.Join(f.root, dir)))
err = c.RemoveDir(f.dirFromStandardPath(path.Join(f.root, dir)))
f.putFtpConnection(&c, err)
return translateErrorDir(err)
}
@@ -619,17 +824,17 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
err := f.mkParentDir(remote)
err := f.mkParentDir(ctx, remote)
if err != nil {
return nil, errors.Wrap(err, "Move mkParentDir failed")
}
c, err := f.getFtpConnection()
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "Move")
}
err = c.Rename(
enc.FromStandardPath(path.Join(srcObj.fs.root, srcObj.remote)),
enc.FromStandardPath(path.Join(f.root, remote)),
f.opt.Enc.FromStandardPath(path.Join(srcObj.fs.root, srcObj.remote)),
f.opt.Enc.FromStandardPath(path.Join(f.root, remote)),
)
f.putFtpConnection(&c, err)
if err != nil {
@@ -643,7 +848,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -660,7 +865,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
dstPath := path.Join(f.root, dstRemote)
// Check if destination exists
fi, err := f.getInfo(dstPath)
fi, err := f.getInfo(ctx, dstPath)
if err == nil {
if fi.IsDir {
return fs.ErrorDirExists
@@ -671,19 +876,19 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// Make sure the parent directory exists
err = f.mkdir(path.Dir(dstPath))
err = f.mkdir(ctx, path.Dir(dstPath))
if err != nil {
return errors.Wrap(err, "DirMove mkParentDir dst failed")
}
// Do the move
c, err := f.getFtpConnection()
c, err := f.getFtpConnection(ctx)
if err != nil {
return errors.Wrap(err, "DirMove")
}
err = c.Rename(
dirFromStandardPath(srcPath),
dirFromStandardPath(dstPath),
f.dirFromStandardPath(srcPath),
f.dirFromStandardPath(dstPath),
)
f.putFtpConnection(&c, err)
if err != nil {
@@ -761,27 +966,31 @@ func (f *ftpReadCloser) Close() error {
go func() {
errchan <- f.rc.Close()
}()
// Wait for Close for up to 60 seconds
timer := time.NewTimer(60 * time.Second)
// Wait for Close for up to 60 seconds by default
timer := time.NewTimer(time.Duration(f.f.opt.CloseTimeout))
select {
case err = <-errchan:
timer.Stop()
case <-timer.C:
// if timer fired assume no error but connection dead
fs.Errorf(f.f, "Timeout when waiting for connection Close")
f.f.putFtpConnection(nil, nil)
return nil
}
// if errors while reading or closing, dump the connection
if err != nil || f.err != nil {
_ = f.c.Quit()
f.f.putFtpConnection(nil, nil)
} else {
f.f.putFtpConnection(&f.c, nil)
}
// mask the error if it was caused by a premature close
// NB StatusAboutToSend is to work around a bug in pureftpd
// See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable:
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend:
err = nil
}
}
@@ -805,11 +1014,11 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
}
}
}
c, err := o.fs.getFtpConnection()
c, err := o.fs.getFtpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "open")
}
fd, err := c.RetrFrom(enc.FromStandardPath(path), uint64(offset))
fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
return nil, errors.Wrap(err, "open")
@@ -840,18 +1049,19 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
fs.Debugf(o, "Removed after failed upload: %v", err)
}
}
c, err := o.fs.getFtpConnection()
c, err := o.fs.getFtpConnection(ctx)
if err != nil {
return errors.Wrap(err, "Update")
}
err = c.Stor(enc.FromStandardPath(path), in)
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
if err != nil {
_ = c.Quit() // toss this connection to avoid sync errors
remove()
o.fs.putFtpConnection(nil, err)
return errors.Wrap(err, "update stor")
}
o.fs.putFtpConnection(&c, nil)
o.info, err = o.fs.getInfo(path)
o.info, err = o.fs.getInfo(ctx, path)
if err != nil {
return errors.Wrap(err, "update getinfo")
}
@@ -863,18 +1073,18 @@ func (o *Object) Remove(ctx context.Context) (err error) {
// defer fs.Trace(o, "")("err=%v", &err)
path := path.Join(o.fs.root, o.remote)
// Check if it's a directory or a file
info, err := o.fs.getInfo(path)
info, err := o.fs.getInfo(ctx, path)
if err != nil {
return err
}
if info.IsDir {
err = o.fs.Rmdir(ctx, o.remote)
} else {
c, err := o.fs.getFtpConnection()
c, err := o.fs.getFtpConnection(ctx)
if err != nil {
return errors.Wrap(err, "Remove")
}
err = c.Delete(enc.FromStandardPath(path))
err = c.Delete(o.fs.opt.Enc.FromStandardPath(path))
o.fs.putFtpConnection(&c, err)
}
return err
@@ -886,5 +1096,6 @@ var (
_ fs.Mover = &Fs{}
_ fs.DirMover = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Shutdowner = &Fs{}
_ fs.Object = &Object{}
)

View File

@@ -5,13 +5,44 @@ import (
"testing"
"github.com/rclone/rclone/backend/ftp"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFTP:",
RemoteName: "TestFTPProftpd:",
NilObject: (*ftp.Object)(nil),
})
}
func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFTPRclone:",
NilObject: (*ftp.Object)(nil),
})
}
func TestIntegration3(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFTPPureftpd:",
NilObject: (*ftp.Object)(nil),
})
}
// func TestIntegration4(t *testing.T) {
// if *fstest.RemoteName != "" {
// t.Skip("skipping as -remote is set")
// }
// fstests.Run(t, &fstests.Opt{
// RemoteName: "TestFTPVsftpd:",
// NilObject: (*ftp.Object)(nil),
// })
// }

View File

@@ -21,7 +21,6 @@ import (
"io/ioutil"
"log"
"net/http"
"os"
"path"
"strings"
"time"
@@ -32,12 +31,13 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"golang.org/x/oauth2"
@@ -69,8 +69,6 @@ var (
}
)
const enc = encodings.GoogleCloudStorage
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -78,33 +76,32 @@ func init() {
Prefix: "gcs",
Description: "Google Cloud Storage (this is not Google Drive)",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
Config: func(ctx context.Context, name string, m configmap.Mapper) {
saFile, _ := m.Get("service_account_file")
saCreds, _ := m.Get("service_account_credentials")
if saFile != "" || saCreds != "" {
anonymous, _ := m.Get("anonymous")
if saFile != "" || saCreds != "" || anonymous == "true" {
return
}
err := oauthutil.Config("google cloud storage", name, m, storageConfig)
err := oauthutil.Config(ctx, "google cloud storage", name, m, storageConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Google Application Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Google Application Client Secret\nLeave blank normally.",
}, {
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "project_number",
Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
}, {
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth,
}, {
Name: "anonymous",
Help: "Access public buckets and objects without credentials\nSet to 'true' if you just want to download files and don't configure credentials.",
Default: false,
}, {
Name: "object_acl",
Help: "Access Control List for new objects.",
@@ -244,24 +241,36 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, {
Value: "COLDLINE",
Help: "Coldline storage class",
}, {
Value: "ARCHIVE",
Help: "Archive storage class",
}, {
Value: "DURABLE_REDUCED_AVAILABILITY",
Help: "Durable reduced availability storage class",
}},
}},
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.Base |
encoder.EncodeCrLf |
encoder.EncodeInvalidUtf8),
}}...),
})
}
// Options defines the configuration for this backend
type Options struct {
ProjectNumber string `config:"project_number"`
ServiceAccountFile string `config:"service_account_file"`
ServiceAccountCredentials string `config:"service_account_credentials"`
ObjectACL string `config:"object_acl"`
BucketACL string `config:"bucket_acl"`
BucketPolicyOnly bool `config:"bucket_policy_only"`
Location string `config:"location"`
StorageClass string `config:"storage_class"`
ProjectNumber string `config:"project_number"`
ServiceAccountFile string `config:"service_account_file"`
ServiceAccountCredentials string `config:"service_account_credentials"`
Anonymous bool `config:"anonymous"`
ObjectACL string `config:"object_acl"`
BucketACL string `config:"bucket_acl"`
BucketPolicyOnly bool `config:"bucket_policy_only"`
Location string `config:"location"`
StorageClass string `config:"storage_class"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote storage server
@@ -320,7 +329,10 @@ func (f *Fs) Features() *fs.Features {
}
// shouldRetry determines whether a given err rates being retried
func shouldRetry(err error) (again bool, errOut error) {
func shouldRetry(ctx context.Context, err error) (again bool, errOut error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
again = false
if err != nil {
if fserrors.ShouldRetry(err) {
@@ -353,7 +365,7 @@ func parsePath(path string) (root string) {
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
return enc.FromStandardName(bucketName), enc.FromStandardPath(bucketPath)
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}
// split returns bucket and bucketPath from the object
@@ -361,12 +373,12 @@ func (o *Object) split() (bucket, bucketPath string) {
return o.fs.split(o.remote)
}
func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
func getServiceAccountClient(ctx context.Context, credentialsData []byte) (*http.Client, error) {
conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
if err != nil {
return nil, errors.Wrap(err, "error processing credentials")
}
ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
ctxWithSpecialClient := oauthutil.Context(ctx, fshttp.NewClient(ctx))
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}
@@ -377,8 +389,7 @@ func (f *Fs) setRoot(root string) {
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
var oAuthClient *http.Client
// Parse config into Options struct
@@ -396,19 +407,21 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// try loading service account credentials from env variable, then from a file
if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil {
return nil, errors.Wrap(err, "error opening service account credentials file")
}
opt.ServiceAccountCredentials = string(loadedCreds)
}
if opt.ServiceAccountCredentials != "" {
oAuthClient, err = getServiceAccountClient([]byte(opt.ServiceAccountCredentials))
if opt.Anonymous {
oAuthClient = fshttp.NewClient(ctx)
} else if opt.ServiceAccountCredentials != "" {
oAuthClient, err = getServiceAccountClient(ctx, []byte(opt.ServiceAccountCredentials))
if err != nil {
return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
}
} else {
oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
if err != nil {
ctx := context.Background()
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
@@ -422,7 +435,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
name: name,
root: root,
opt: *opt,
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(),
}
f.setRoot(root)
@@ -431,7 +444,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
}).Fill(f)
}).Fill(ctx, f)
// Create a new authorized Drive client.
f.client = oAuthClient
@@ -442,10 +455,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if f.rootBucket != "" && f.rootDirectory != "" {
// Check to see if the object exists
encodedDirectory := enc.FromStandardPath(f.rootDirectory)
encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
return shouldRetry(err)
return shouldRetry(ctx, err)
})
if err == nil {
newRoot := path.Dir(f.root)
@@ -511,7 +524,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
var objects *storage.Objects
err = f.pacer.Call(func() (bool, error) {
objects, err = list.Context(ctx).Do()
return shouldRetry(err)
return shouldRetry(ctx, err)
})
if err != nil {
if gErr, ok := err.(*googleapi.Error); ok {
@@ -527,7 +540,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
if !strings.HasSuffix(remote, "/") {
continue
}
remote = enc.ToStandardPath(remote)
remote = f.opt.Enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote)
continue
@@ -543,18 +556,18 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
}
for _, object := range objects.Items {
remote := enc.ToStandardPath(object.Name)
remote := f.opt.Enc.ToStandardPath(object.Name)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", object.Name)
continue
}
remote = remote[len(prefix):]
isDirectory := strings.HasSuffix(remote, "/")
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
if addBucket {
remote = path.Join(bucket, remote)
}
// is this a directory marker?
if isDirectory && object.Size == 0 {
if isDirectory {
continue // skip directory marker
}
err = fn(remote, object, false)
@@ -614,13 +627,13 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
var buckets *storage.Buckets
err = f.pacer.Call(func() (bool, error) {
buckets, err = listBuckets.Context(ctx).Do()
return shouldRetry(err)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}
for _, bucket := range buckets.Items {
d := fs.NewDir(enc.ToStandardName(bucket.Name), time.Time{})
d := fs.NewDir(f.opt.Enc.ToStandardName(bucket.Name), time.Time{})
entries = append(entries, d)
}
if buckets.NextPageToken == "" {
@@ -740,7 +753,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
// service account that only has the "Storage Object Admin" role. See #2193 for details.
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
return shouldRetry(err)
return shouldRetry(ctx, err)
})
if err == nil {
// Bucket already exists
@@ -775,7 +788,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
insertBucket.PredefinedAcl(f.opt.BucketACL)
}
_, err = insertBucket.Context(ctx).Do()
return shouldRetry(err)
return shouldRetry(ctx, err)
})
}, nil)
}
@@ -792,7 +805,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
return f.cache.Remove(bucket, func() error {
return f.pacer.Call(func() (bool, error) {
err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
return shouldRetry(err)
return shouldRetry(ctx, err)
})
})
}
@@ -802,7 +815,7 @@ func (f *Fs) Precision() time.Duration {
return time.Nanosecond
}
// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -830,20 +843,27 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
remote: remote,
}
var newObject *storage.Object
err = f.pacer.Call(func() (bool, error) {
copyObject := f.svc.Objects.Copy(srcBucket, srcPath, dstBucket, dstPath, nil)
if !f.opt.BucketPolicyOnly {
copyObject.DestinationPredefinedAcl(f.opt.ObjectACL)
rewriteRequest := f.svc.Objects.Rewrite(srcBucket, srcPath, dstBucket, dstPath, nil)
if !f.opt.BucketPolicyOnly {
rewriteRequest.DestinationPredefinedAcl(f.opt.ObjectACL)
}
var rewriteResponse *storage.RewriteResponse
for {
err = f.pacer.Call(func() (bool, error) {
rewriteResponse, err = rewriteRequest.Context(ctx).Do()
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}
newObject, err = copyObject.Context(ctx).Do()
return shouldRetry(err)
})
if err != nil {
return nil, err
if rewriteResponse.Done {
break
}
rewriteRequest.RewriteToken(rewriteResponse.RewriteToken)
fs.Debugf(dstObj, "Continuing rewrite %d bytes done", rewriteResponse.TotalBytesRewritten)
}
// Set the metadata for the new object while we have it
dstObj.setMetaData(newObject)
dstObj.setMetaData(rewriteResponse.Resource)
return dstObj, nil
}
@@ -924,7 +944,7 @@ func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, er
bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
return shouldRetry(err)
return shouldRetry(ctx, err)
})
if err != nil {
if gErr, ok := err.(*googleapi.Error); ok {
@@ -995,7 +1015,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = copyObject.Context(ctx).Do()
return shouldRetry(err)
return shouldRetry(ctx, err)
})
if err != nil {
return err
@@ -1011,11 +1031,10 @@ func (o *Object) Storable() bool {
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
req, err := http.NewRequest("GET", o.url, nil)
req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil)
if err != nil {
return nil, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
fs.FixRangeOption(options, o.bytes)
fs.OpenOptionAddHTTPHeaders(req.Header, options)
var res *http.Response
@@ -1027,7 +1046,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
_ = res.Body.Close() // ignore error
}
}
return shouldRetry(err)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
@@ -1057,6 +1076,35 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
ContentType: fs.MimeType(ctx, src),
Metadata: metadataFromModTime(modTime),
}
// Apply upload options
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "cache-control":
object.CacheControl = value
case "content-disposition":
object.ContentDisposition = value
case "content-encoding":
object.ContentEncoding = value
case "content-language":
object.ContentLanguage = value
case "content-type":
object.ContentType = value
case "x-goog-storage-class":
object.StorageClass = value
default:
const googMetaPrefix = "x-goog-meta-"
if strings.HasPrefix(lowerKey, googMetaPrefix) {
metaKey := lowerKey[len(googMetaPrefix):]
object.Metadata[metaKey] = value
} else {
fs.Errorf(o, "Don't know how to set key %q on upload", key)
}
}
}
var newObject *storage.Object
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
insertObject := o.fs.svc.Objects.Insert(bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
@@ -1064,7 +1112,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = insertObject.Context(ctx).Do()
return shouldRetry(err)
return shouldRetry(ctx, err)
})
if err != nil {
return err
@@ -1079,7 +1127,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
return shouldRetry(err)
return shouldRetry(ctx, err)
})
return err
}

View File

@@ -30,7 +30,7 @@ func TestAlbumsAdd(t *testing.T) {
albums.add(a1)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"one": {a1},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
@@ -39,7 +39,7 @@ func TestAlbumsAdd(t *testing.T) {
"one": a1,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one"},
"": {"one"},
}, albums.path)
a2 := &api.Album{
@@ -49,8 +49,8 @@ func TestAlbumsAdd(t *testing.T) {
albums.add(a2)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2},
"one": {a1},
"two": {a2},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
@@ -61,7 +61,7 @@ func TestAlbumsAdd(t *testing.T) {
"two": a2,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two"},
"": {"one", "two"},
}, albums.path)
// Add a duplicate
@@ -72,8 +72,8 @@ func TestAlbumsAdd(t *testing.T) {
albums.add(a2a)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one": {a1},
"two": {a2, a2a},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
@@ -86,7 +86,7 @@ func TestAlbumsAdd(t *testing.T) {
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}"},
"": {"one", "two {2}", "two {2a}"},
}, albums.path)
// Add a sub directory
@@ -97,9 +97,9 @@ func TestAlbumsAdd(t *testing.T) {
albums.add(a1sub)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
@@ -114,8 +114,8 @@ func TestAlbumsAdd(t *testing.T) {
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}"},
"one": []string{"sub"},
"": {"one", "two {2}", "two {2a}"},
"one": {"sub"},
}, albums.path)
// Add a weird path
@@ -126,10 +126,10 @@ func TestAlbumsAdd(t *testing.T) {
albums.add(a0)
assert.Equal(t, map[string][]*api.Album{
"{0}": []*api.Album{a0},
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
"{0}": {a0},
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"0": a0,
@@ -146,8 +146,8 @@ func TestAlbumsAdd(t *testing.T) {
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}", "{0}"},
"one": []string{"sub"},
"": {"one", "two {2}", "two {2a}", "{0}"},
"one": {"sub"},
}, albums.path)
}
@@ -181,9 +181,9 @@ func TestAlbumsDel(t *testing.T) {
albums.add(a1sub)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
@@ -198,16 +198,16 @@ func TestAlbumsDel(t *testing.T) {
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}"},
"one": []string{"sub"},
"": {"one", "two {2}", "two {2a}"},
"one": {"sub"},
}, albums.path)
albums.del(a1)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"2": a2,
@@ -220,16 +220,16 @@ func TestAlbumsDel(t *testing.T) {
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}"},
"one": []string{"sub"},
"": {"one", "two {2}", "two {2a}"},
"one": {"sub"},
}, albums.path)
albums.del(a2)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"2a": a2a,
@@ -240,16 +240,16 @@ func TestAlbumsDel(t *testing.T) {
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2a}"},
"one": []string{"sub"},
"": {"one", "two {2a}"},
"one": {"sub"},
}, albums.path)
albums.del(a2a)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1sub": a1sub,
@@ -258,16 +258,16 @@ func TestAlbumsDel(t *testing.T) {
"one/sub": a1sub,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one"},
"one": []string{"sub"},
"": {"one"},
"one": {"sub"},
}, albums.path)
albums.del(a1sub)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{}, albums.byID)
assert.Equal(t, map[string]*api.Album{}, albums.byTitle)

View File

@@ -17,7 +17,7 @@ type Error struct {
Details ErrorDetails `json:"error"`
}
// Error statisfies error interface
// Error satisfies error interface
func (e *Error) Error() string {
return fmt.Sprintf("%s (%d %s)", e.Details.Message, e.Details.Code, e.Details.Status)
}

View File

@@ -21,7 +21,6 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
@@ -79,7 +78,7 @@ func init() {
Prefix: "gphotos",
Description: "Google Photos",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
Config: func(ctx context.Context, name string, m configmap.Mapper) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -96,7 +95,7 @@ func init() {
}
// Do the oauth
err = oauthutil.Config("google photos", name, m, oauthConfig)
err = oauthutil.Config(ctx, "google photos", name, m, oauthConfig, nil)
if err != nil {
golog.Fatalf("Failed to configure token: %v", err)
}
@@ -110,13 +109,7 @@ func init() {
`)
},
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Google Application Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Google Application Client Secret\nLeave blank normally.",
}, {
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "read_only",
Default: false,
Help: `Set to make the Google Photos backend read only.
@@ -134,14 +127,38 @@ rclone mount needs to know the size of files in advance of reading
them, so setting this flag when using rclone mount is recommended if
you want to read the media.`,
Advanced: true,
}},
}, {
Name: "start_year",
Default: 2000,
Help: `Year limits the photos to be downloaded to those which are uploaded after the given year`,
Advanced: true,
}, {
Name: "include_archived",
Default: false,
Help: `Also view and download archived media.
By default rclone does not request archived media. Thus, when syncing,
archived media is not visible in directory listings or transferred.
Note that media in albums is always visible and synced, no matter
their archive status.
With this flag, archived media are always visible in directory
listings and transferred.
Without this flag, archived media will not be visible in directory
listings and won't be transferred.`,
Advanced: true,
}}...),
})
}
// Options defines the configuration for this backend
type Options struct {
ReadOnly bool `config:"read_only"`
ReadSize bool `config:"read_size"`
ReadOnly bool `config:"read_only"`
ReadSize bool `config:"read_size"`
StartYear int `config:"start_year"`
IncludeArchived bool `config:"include_archived"`
}
// Fs represents a remote storage server
@@ -202,6 +219,15 @@ func (f *Fs) dirTime() time.Time {
return f.startTime
}
// startYear returns the start year
func (f *Fs) startYear() int {
return f.opt.StartYear
}
func (f *Fs) includeArchived() bool {
return f.opt.IncludeArchived
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
@@ -214,7 +240,10 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
@@ -224,6 +253,10 @@ func errorHandler(resp *http.Response) error {
if err != nil {
body = nil
}
// Google sends 404 messages as images so be prepared for that
if strings.HasPrefix(resp.Header.Get("Content-Type"), "image/") {
body = []byte("Image not found or broken")
}
var e = api.Error{
Details: api.ErrorDetails{
Code: resp.StatusCode,
@@ -238,7 +271,7 @@ func errorHandler(resp *http.Response) error {
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -246,8 +279,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}
baseClient := fshttp.NewClient(fs.Config)
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
baseClient := fshttp.NewClient(ctx)
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Box")
}
@@ -264,14 +297,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
unAuth: rest.NewClient(baseClient),
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
ts: ts,
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
startTime: time.Now(),
albums: map[bool]*albums{},
uploaded: dirtree.New(),
}
f.features = (&fs.Features{
ReadMimeType: true,
}).Fill(f)
}).Fill(ctx, f)
f.srv.SetErrorHandler(errorHandler)
_, _, pattern := patterns.match(f.root, "", true)
@@ -280,7 +313,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
var leaf string
f.root, leaf = path.Split(f.root)
f.root = strings.TrimRight(f.root, "/")
_, err := f.NewObject(context.TODO(), leaf)
_, err := f.NewObject(ctx, leaf)
if err == nil {
return f, fs.ErrorIsFile
}
@@ -299,7 +332,7 @@ func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, e
var openIDconfig map[string]interface{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.unAuth.CallJSON(ctx, &opts, nil, &openIDconfig)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return "", errors.Wrap(err, "couldn't read openID config")
@@ -328,7 +361,7 @@ func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err erro
}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &userInfo)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't read user info")
@@ -359,7 +392,7 @@ func (f *Fs) Disconnect(ctx context.Context) (err error) {
var res interface{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &res)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't revoke token")
@@ -446,7 +479,7 @@ func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err erro
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list albums")
@@ -489,13 +522,19 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
}
filter.PageSize = listChunks
filter.PageToken = ""
if filter.AlbumID == "" { // album ID and filters cannot be set together, else error 400 INVALID_ARGUMENT
if filter.Filters == nil {
filter.Filters = &api.Filters{}
}
filter.Filters.IncludeArchivedMedia = &f.opt.IncludeArchived
}
lastID := ""
for {
var result api.MediaItems
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &filter, &result)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't list files")
@@ -639,7 +678,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, request, &result)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create album")
@@ -774,7 +813,7 @@ func (o *Object) Size() int64 {
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
fs.Debugf(o, "Reading size failed: %v", err)
@@ -825,7 +864,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &item)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't get media item")
@@ -902,7 +941,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
@@ -943,8 +982,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Upload the media item in exchange for an UploadToken
opts := rest.Opts{
Method: "POST",
Path: "/uploads",
Method: "POST",
Path: "/uploads",
Options: options,
ExtraHeaders: map[string]string{
"X-Goog-Upload-File-Name": fileName,
"X-Goog-Upload-Protocol": "raw",
@@ -956,10 +996,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
if err != nil {
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
}
token, err = rest.ReadBody(resp)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't upload file")
@@ -987,7 +1027,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var result api.BatchCreateResponse
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, request, &result)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to create media item")
@@ -1003,7 +1043,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Add upload to internal storage
if pattern.isUpload {
o.fs.uploadedMu.Lock()
o.fs.uploaded.AddEntry(o)
o.fs.uploadedMu.Unlock()
}
return nil
}
@@ -1030,7 +1072,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't delete item from album")


@@ -12,6 +12,7 @@ import (
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
@@ -26,17 +27,6 @@ const (
fileNameUpload = "rclone-test-image2.jpg"
)
// Wrapper to override the remote for an object
type overrideRemoteObject struct {
fs.Object
remote string
}
// Remote returns the overridden remote name
func (o *overrideRemoteObject) Remote() string {
return o.remote
}
func TestIntegration(t *testing.T) {
ctx := context.Background()
fstest.Initialise()
@@ -45,14 +35,14 @@ func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
*fstest.RemoteName = "TestGooglePhotos:"
}
f, err := fs.NewFs(*fstest.RemoteName)
f, err := fs.NewFs(ctx, *fstest.RemoteName)
if err == fs.ErrorNotFoundInConfigFile {
t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err))
}
require.NoError(t, err)
// Create local Fs pointing at testfiles
localFs, err := fs.NewFs("testfiles")
localFs, err := fs.NewFs(ctx, "testfiles")
require.NoError(t, err)
t.Run("CreateAlbum", func(t *testing.T) {
@@ -66,7 +56,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote})
dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()
@@ -125,7 +115,7 @@ func TestIntegration(t *testing.T) {
assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
})
// Check it is there in the date/month/year heirachy
// Check it is there in the date/month/year hierarchy
// 2013-07-13 is the creation date of the folder
checkPresent := func(t *testing.T, objPath string) {
entries, err := f.List(ctx, objPath)
@@ -165,7 +155,7 @@ func TestIntegration(t *testing.T) {
})
t.Run("NewFsIsFile", func(t *testing.T) {
fNew, err := fs.NewFs(*fstest.RemoteName + remote)
fNew, err := fs.NewFs(ctx, *fstest.RemoteName+remote)
assert.Equal(t, fs.ErrorIsFile, err)
leaf := path.Base(remote)
o, err := fNew.NewObject(ctx, leaf)
@@ -231,7 +221,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote})
dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()


@@ -23,6 +23,8 @@ type lister interface {
listAlbums(ctx context.Context, shared bool) (all *albums, err error)
listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
dirTime() time.Time
startYear() int
includeArchived() bool
}
// dirPattern describes a single directory pattern
@@ -52,6 +54,7 @@ var patterns = dirPatterns{
fs.NewDir(prefix+"album", f.dirTime()),
fs.NewDir(prefix+"shared-album", f.dirTime()),
fs.NewDir(prefix+"upload", f.dirTime()),
fs.NewDir(prefix+"feature", f.dirTime()),
}, nil
},
},
@@ -189,6 +192,28 @@ var patterns = dirPatterns{
re: `^shared-album/(.+?)/([^/]+)$`,
isFile: true,
},
{
re: `^feature$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
return fs.DirEntries{
fs.NewDir(prefix+"favorites", f.dirTime()),
}, nil
},
},
{
re: `^feature/favorites$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
filter := featureFilter(ctx, f, match)
if err != nil {
return nil, err
}
return f.listDir(ctx, prefix, filter)
},
},
{
re: `^feature/favorites/([^/]+)$`,
isFile: true,
},
}.mustCompile()
// mustCompile compiles the regexps in the dirPatterns
@@ -200,7 +225,7 @@ func (ds dirPatterns) mustCompile() dirPatterns {
return ds
}
// match finds the path passed in in the matching structure and
// match finds the path passed in the matching structure and
// returns the parameters and a pointer to the match, or nil.
func (ds dirPatterns) match(root string, itemPath string, isFile bool) (match []string, prefix string, pattern *dirPattern) {
itemPath = strings.Trim(itemPath, "/")
@@ -222,11 +247,10 @@ func (ds dirPatterns) match(root string, itemPath string, isFile bool) (match []
return nil, "", nil
}
// Return the years from 2000 to today
// FIXME make configurable?
// Return the years from startYear to today
func years(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
currentYear := f.dirTime().Year()
for year := 2000; year <= currentYear; year++ {
for year := f.startYear(); year <= currentYear; year++ {
entries = append(entries, fs.NewDir(prefix+fmt.Sprint(year), f.dirTime()))
}
return entries, nil
@@ -290,6 +314,24 @@ func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.S
return sf, nil
}
// featureFilter creates a filter for the Feature enum
//
// The API only supports one feature, FAVORITES, so hardcode that feature
//
// https://developers.google.com/photos/library/reference/rest/v1/mediaItems/search#FeatureFilter
func featureFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter) {
sf = api.SearchFilter{
Filters: &api.Filters{
FeatureFilter: &api.FeatureFilter{
IncludedFeatures: []string{
"FAVORITES",
},
},
},
}
return sf
}
// Turns an albumPath into entries
//
// These can either be synthetic directory entries if the album path
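
The years helper above now starts from the configurable start_year option instead of the hard-coded 2000, and the new feature/favorites directories reuse the same listing machinery. A standalone sketch of the year directory enumeration only (the corresponding flag name --gphotos-start-year is inferred from the "gphotos" prefix shown earlier and is an assumption):

package main

import (
    "fmt"
    "time"
)

// yearDirs returns one directory name per year from startYear up to the
// current year, the same enumeration the years() pattern handler performs.
func yearDirs(startYear int, now time.Time) []string {
    var dirs []string
    for year := startYear; year <= now.Year(); year++ {
        dirs = append(dirs, fmt.Sprint(year))
    }
    return dirs
}

func main() {
    now := time.Date(2021, 4, 8, 0, 0, 0, 0, time.UTC)
    fmt.Println(yearDirs(2018, now)) // [2018 2019 2020 2021]
}
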


@@ -59,6 +59,16 @@ func (f *testLister) dirTime() time.Time {
return startTime
}
// mock startYear for testing
func (f *testLister) startYear() int {
return 2000
}
// mock includeArchived for testing
func (f *testLister) includeArchived() bool {
return false
}
func TestPatternMatch(t *testing.T) {
for testNumber, test := range []struct {
// input
@@ -150,6 +160,38 @@ func TestPatternMatch(t *testing.T) {
wantPrefix: "file.jpg/",
wantPattern: &patterns[5],
},
{
root: "",
itemPath: "feature",
isFile: false,
wantMatch: []string{"feature"},
wantPrefix: "feature/",
wantPattern: &patterns[23],
},
{
root: "feature/favorites",
itemPath: "",
isFile: false,
wantMatch: []string{"feature/favorites"},
wantPrefix: "",
wantPattern: &patterns[24],
},
{
root: "feature",
itemPath: "favorites",
isFile: false,
wantMatch: []string{"feature/favorites"},
wantPrefix: "favorites/",
wantPattern: &patterns[24],
},
{
root: "feature/favorites",
itemPath: "file.jpg",
isFile: true,
wantMatch: []string{"feature/favorites/file.jpg", "file.jpg"},
wantPrefix: "file.jpg/",
wantPattern: &patterns[25],
},
} {
t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q,isFile=%v", testNumber, test.root, test.itemPath, test.isFile), func(t *testing.T) {
gotMatch, gotPrefix, gotPattern := patterns.match(test.root, test.itemPath, test.isFile)

backend/hdfs/fs.go (new file, 320 lines)

@@ -0,0 +1,320 @@
// +build !plan9
package hdfs
import (
"context"
"fmt"
"io"
"os"
"os/user"
"path"
"strings"
"time"
"github.com/colinmarc/hdfs/v2"
krb "github.com/jcmturner/gokrb5/v8/client"
"github.com/jcmturner/gokrb5/v8/config"
"github.com/jcmturner/gokrb5/v8/credentials"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
)
// Fs represents a HDFS server
type Fs struct {
name string
root string
features *fs.Features // optional features
opt Options // options for this backend
ci *fs.ConfigInfo // global config
client *hdfs.Client
}
// copy-paste from https://github.com/colinmarc/hdfs/blob/master/cmd/hdfs/kerberos.go
func getKerberosClient() (*krb.Client, error) {
configPath := os.Getenv("KRB5_CONFIG")
if configPath == "" {
configPath = "/etc/krb5.conf"
}
cfg, err := config.Load(configPath)
if err != nil {
return nil, err
}
// Determine the ccache location from the environment, falling back to the
// default location.
ccachePath := os.Getenv("KRB5CCNAME")
if strings.Contains(ccachePath, ":") {
if strings.HasPrefix(ccachePath, "FILE:") {
ccachePath = strings.SplitN(ccachePath, ":", 2)[1]
} else {
return nil, fmt.Errorf("unusable ccache: %s", ccachePath)
}
} else if ccachePath == "" {
u, err := user.Current()
if err != nil {
return nil, err
}
ccachePath = fmt.Sprintf("/tmp/krb5cc_%s", u.Uid)
}
ccache, err := credentials.LoadCCache(ccachePath)
if err != nil {
return nil, err
}
client, err := krb.NewFromCCache(ccache, cfg)
if err != nil {
return nil, err
}
return client, nil
}
// NewFs constructs an Fs from the path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
options := hdfs.ClientOptions{
Addresses: []string{opt.Namenode},
UseDatanodeHostname: false,
}
if opt.ServicePrincipalName != "" {
options.KerberosClient, err = getKerberosClient()
if err != nil {
return nil, fmt.Errorf("Problem with kerberos authentication: %s", err)
}
options.KerberosServicePrincipleName = opt.ServicePrincipalName
if opt.DataTransferProtection != "" {
options.DataTransferProtection = opt.DataTransferProtection
}
} else {
options.User = opt.Username
}
client, err := hdfs.NewClient(options)
if err != nil {
return nil, err
}
f := &Fs{
name: name,
root: root,
opt: *opt,
ci: fs.GetConfig(ctx),
client: client,
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
info, err := f.client.Stat(f.realpath(""))
if err == nil && !info.IsDir() {
f.root = path.Dir(f.root)
return f, fs.ErrorIsFile
}
return f, nil
}
// Name of this fs
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("hdfs://%s", f.opt.Namenode)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Hashes are not supported
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None)
}
// NewObject finds file at remote or return fs.ErrorObjectNotFound
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
realpath := f.realpath(remote)
fs.Debugf(f, "new [%s]", realpath)
info, err := f.ensureFile(realpath)
if err != nil {
return nil, err
}
return &Object{
fs: f,
remote: remote,
size: info.Size(),
modTime: info.ModTime(),
}, nil
}
// List the objects and directories in dir into entries.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
realpath := f.realpath(dir)
fs.Debugf(f, "list [%s]", realpath)
err = f.ensureDirectory(realpath)
if err != nil {
return nil, err
}
list, err := f.client.ReadDir(realpath)
if err != nil {
return nil, err
}
for _, x := range list {
stdName := f.opt.Enc.ToStandardName(x.Name())
remote := path.Join(dir, stdName)
if x.IsDir() {
entries = append(entries, fs.NewDir(remote, x.ModTime()))
} else {
entries = append(entries, &Object{
fs: f,
remote: remote,
size: x.Size(),
modTime: x.ModTime()})
}
}
return entries, nil
}
// Put the object
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o := &Object{
fs: f,
remote: src.Remote(),
}
err := o.Update(ctx, in, src, options...)
return o, err
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
}
// Mkdir makes a directory
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
fs.Debugf(f, "mkdir [%s]", f.realpath(dir))
return f.client.MkdirAll(f.realpath(dir), 0755)
}
// Rmdir deletes the directory
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
realpath := f.realpath(dir)
fs.Debugf(f, "rmdir [%s]", realpath)
err := f.ensureDirectory(realpath)
if err != nil {
return err
}
// do not remove empty directory
list, err := f.client.ReadDir(realpath)
if err != nil {
return err
}
if len(list) > 0 {
return fs.ErrorDirectoryNotEmpty
}
return f.client.Remove(realpath)
}
// Purge deletes all the files in the directory
func (f *Fs) Purge(ctx context.Context, dir string) error {
realpath := f.realpath(dir)
fs.Debugf(f, "purge [%s]", realpath)
err := f.ensureDirectory(realpath)
if err != nil {
return err
}
return f.client.RemoveAll(realpath)
}
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
info, err := f.client.StatFs()
if err != nil {
return nil, err
}
return &fs.Usage{
Total: fs.NewUsageValue(int64(info.Capacity)),
Used: fs.NewUsageValue(int64(info.Used)),
Free: fs.NewUsageValue(int64(info.Remaining)),
}, nil
}
func (f *Fs) ensureDirectory(realpath string) error {
info, err := f.client.Stat(realpath)
if e, ok := err.(*os.PathError); ok && e.Err == os.ErrNotExist {
return fs.ErrorDirNotFound
}
if err != nil {
return err
}
if !info.IsDir() {
return fs.ErrorDirNotFound
}
return nil
}
func (f *Fs) ensureFile(realpath string) (os.FileInfo, error) {
info, err := f.client.Stat(realpath)
if e, ok := err.(*os.PathError); ok && e.Err == os.ErrNotExist {
return nil, fs.ErrorObjectNotFound
}
if err != nil {
return nil, err
}
if info.IsDir() {
return nil, fs.ErrorObjectNotFound
}
return info, nil
}
func (f *Fs) realpath(dir string) string {
return f.opt.Enc.FromStandardPath(xPath(f.Root(), dir))
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
)
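
ensureDirectory and ensureFile above unwrap *os.PathError by hand and compare against os.ErrNotExist. A standalone sketch of the same check written with errors.As/errors.Is against a plain os.Stat error; whether the hdfs client wraps its errors the same way is an assumption:

package main

import (
    "errors"
    "fmt"
    "io/fs"
    "os"
)

func main() {
    _, err := os.Stat("/definitely/not/there")

    // Hand unwrapping, close to what the backend does above.
    var pathErr *fs.PathError
    if errors.As(err, &pathErr) && errors.Is(pathErr.Err, fs.ErrNotExist) {
        fmt.Println("missing (via *fs.PathError)")
    }

    // The shorter equivalent for errors produced by the os package.
    if errors.Is(err, os.ErrNotExist) {
        fmt.Println("missing (via errors.Is)")
    }
}
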

backend/hdfs/hdfs.go (new file, 86 lines)

@@ -0,0 +1,86 @@
// +build !plan9
package hdfs
import (
"path"
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/lib/encoder"
)
func init() {
fsi := &fs.RegInfo{
Name: "hdfs",
Description: "Hadoop distributed file system",
NewFs: NewFs,
Options: []fs.Option{{
Name: "namenode",
Help: "hadoop name node and port",
Required: true,
Examples: []fs.OptionExample{{
Value: "namenode:8020",
Help: "Connect to host namenode at port 8020",
}},
}, {
Name: "username",
Help: "hadoop user name",
Required: false,
Examples: []fs.OptionExample{{
Value: "root",
Help: "Connect to hdfs as root",
}},
}, {
Name: "service_principal_name",
Help: `Kerberos service principal name for the namenode
Enables KERBEROS authentication. Specifies the Service Principal Name
(<SERVICE>/<FQDN>) for the namenode.`,
Required: false,
Examples: []fs.OptionExample{{
Value: "hdfs/namenode.hadoop.docker",
Help: "Namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.",
}},
Advanced: true,
}, {
Name: "data_transfer_protection",
Help: `Kerberos data transfer protection: authentication|integrity|privacy
Specifies whether or not authentication, data signature integrity
checks, and wire encryption are required when communicating with the
datanodes. Possible values are 'authentication', 'integrity' and
'privacy'. Used only with KERBEROS enabled.`,
Required: false,
Examples: []fs.OptionExample{{
Value: "privacy",
Help: "Ensure authentication, integrity and encryption enabled.",
}},
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.Display | encoder.EncodeInvalidUtf8 | encoder.EncodeColon),
}},
}
fs.Register(fsi)
}
// Options for this backend
type Options struct {
Namenode string `config:"namenode"`
Username string `config:"username"`
ServicePrincipalName string `config:"service_principal_name"`
DataTransferProtection string `config:"data_transfer_protection"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// xPath make correct file path with leading '/'
func xPath(root string, tail string) string {
if !strings.HasPrefix(root, "/") {
root = "/" + root
}
return path.Join(root, tail)
}
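
xPath above simply forces an absolute root before joining, so every path handed to the HDFS client starts with a slash. A tiny usage sketch with made-up paths:

package main

import (
    "fmt"
    "path"
    "strings"
)

// xPath copies the helper above: ensure the root is absolute, then let
// path.Join clean and combine the two parts.
func xPath(root string, tail string) string {
    if !strings.HasPrefix(root, "/") {
        root = "/" + root
    }
    return path.Join(root, tail)
}

func main() {
    fmt.Println(xPath("data", "dir/file.txt")) // /data/dir/file.txt
    fmt.Println(xPath("/", ""))                // /
}
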

backend/hdfs/hdfs_test.go (new file, 20 lines)

@@ -0,0 +1,20 @@
// Test HDFS filesystem interface
// +build !plan9
package hdfs_test
import (
"testing"
"github.com/rclone/rclone/backend/hdfs"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestHdfs:",
NilObject: (*hdfs.Object)(nil),
})
}


@@ -0,0 +1,6 @@
// Build for hdfs for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build plan9
package hdfs

backend/hdfs/object.go (new file, 177 lines)

@@ -0,0 +1,177 @@
// +build !plan9
package hdfs
import (
"context"
"io"
"path"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/readers"
)
// Object describes an HDFS file
type Object struct {
fs *Fs
remote string
size int64
modTime time.Time
}
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.size
}
// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
realpath := o.fs.realpath(o.Remote())
err := o.fs.client.Chtimes(realpath, modTime, modTime)
if err != nil {
return err
}
o.modTime = modTime
return nil
}
// Storable returns whether this object is storable
func (o *Object) Storable() bool {
return true
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Remote()
}
// Hash is not supported
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
realpath := o.realpath()
fs.Debugf(o.fs, "open [%s]", realpath)
f, err := o.fs.client.Open(realpath)
if err != nil {
return nil, err
}
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
}
}
_, err = f.Seek(offset, io.SeekStart)
if err != nil {
return nil, err
}
if limit != -1 {
in = readers.NewLimitedReadCloser(f, limit)
} else {
in = f
}
return in, err
}
// Update object
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
realpath := o.fs.realpath(src.Remote())
dirname := path.Dir(realpath)
fs.Debugf(o.fs, "update [%s]", realpath)
err := o.fs.client.MkdirAll(dirname, 0755)
if err != nil {
return err
}
info, err := o.fs.client.Stat(realpath)
if err == nil {
err = o.fs.client.Remove(realpath)
if err != nil {
return err
}
}
out, err := o.fs.client.Create(realpath)
if err != nil {
return err
}
cleanup := func() {
rerr := o.fs.client.Remove(realpath)
if rerr != nil {
fs.Errorf(o.fs, "failed to remove [%v]: %v", realpath, rerr)
}
}
_, err = io.Copy(out, in)
if err != nil {
cleanup()
return err
}
err = out.Close()
if err != nil {
cleanup()
return err
}
info, err = o.fs.client.Stat(realpath)
if err != nil {
return err
}
err = o.SetModTime(ctx, src.ModTime(ctx))
if err != nil {
return err
}
o.size = info.Size()
return nil
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
realpath := o.fs.realpath(o.remote)
fs.Debugf(o.fs, "remove [%s]", realpath)
return o.fs.client.Remove(realpath)
}
func (o *Object) realpath() string {
return o.fs.opt.Enc.FromStandardPath(xPath(o.Fs().Root(), o.remote))
}
// Check the interfaces are satisfied
var (
_ fs.Object = (*Object)(nil)
)
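
Open above honours fs.SeekOption and fs.RangeOption by seeking to the offset and wrapping the file in a limited reader. A standalone sketch of that range-read pattern using only the standard library (strings.Reader stands in for the HDFS file handle):

package main

import (
    "fmt"
    "io"
    "strings"
)

// readRange seeks to offset and, when limit is non-negative, caps the bytes
// that can be read, mirroring the SeekOption/RangeOption handling above.
func readRange(r io.ReadSeeker, offset, limit int64) (io.Reader, error) {
    if _, err := r.Seek(offset, io.SeekStart); err != nil {
        return nil, err
    }
    if limit >= 0 {
        return io.LimitReader(r, limit), nil
    }
    return r, nil
}

func main() {
    src := strings.NewReader("hello, hdfs range reads")
    out, err := readRange(src, 7, 4)
    if err != nil {
        panic(err)
    }
    b, _ := io.ReadAll(out)
    fmt.Printf("%q\n", b) // "hdfs"
}
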


@@ -58,7 +58,7 @@ The input format is comma separated list of key,value pairs. Standard
For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
You can set multiple headers, eg '"Cookie","name=value","Authorization","xxx"'.
You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
`,
Default: fs.CommaSepList{},
Advanced: true,
@@ -115,8 +115,9 @@ type Options struct {
type Fs struct {
name string
root string
features *fs.Features // optional features
opt Options // options for this backend
features *fs.Features // optional features
opt Options // options for this backend
ci *fs.ConfigInfo // global config
endpoint *url.URL
endpointURL string // endpoint as a string
httpClient *http.Client
@@ -145,8 +146,7 @@ func statusError(res *http.Response, err error) error {
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -172,7 +172,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}
client := fshttp.NewClient(fs.Config)
client := fshttp.NewClient(ctx)
var isFile = false
if !strings.HasSuffix(u.String(), "/") {
@@ -183,9 +183,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return http.ErrUseLastResponse
}
// check to see if points to a file
req, err := http.NewRequest("HEAD", u.String(), nil)
req, err := http.NewRequestWithContext(ctx, "HEAD", u.String(), nil)
if err == nil {
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
addHeaders(req, opt)
res, err := noRedir.Do(req)
err = statusError(res, err)
@@ -210,17 +209,19 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
ci: ci,
httpClient: client,
endpoint: u,
endpointURL: u.String(),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(f)
}).Fill(ctx, f)
if isFile {
return f, fs.ErrorIsFile
}
@@ -389,11 +390,10 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
}
// Do the request
req, err := http.NewRequest("GET", URL, nil)
req, err := http.NewRequestWithContext(ctx, "GET", URL, nil)
if err != nil {
return nil, errors.Wrap(err, "readDir failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
f.addHeaders(req)
res, err := f.httpClient.Do(req)
if err == nil {
@@ -440,14 +440,15 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
var (
entriesMu sync.Mutex // to protect entries
wg sync.WaitGroup
in = make(chan string, fs.Config.Checkers)
checkers = f.ci.Checkers
in = make(chan string, checkers)
)
add := func(entry fs.DirEntry) {
entriesMu.Lock()
entries = append(entries, entry)
entriesMu.Unlock()
}
for i := 0; i < fs.Config.Checkers; i++ {
for i := 0; i < checkers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
@@ -544,11 +545,10 @@ func (o *Object) stat(ctx context.Context) error {
return nil
}
url := o.url()
req, err := http.NewRequest("HEAD", url, nil)
req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
if err != nil {
return errors.Wrap(err, "stat failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
o.fs.addHeaders(req)
res, err := o.fs.httpClient.Do(req)
if err == nil && res.StatusCode == http.StatusNotFound {
@@ -585,7 +585,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return errorReadOnly
}
// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc)
// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
func (o *Object) Storable() bool {
return true
}
@@ -593,11 +593,10 @@ func (o *Object) Storable() bool {
// Open a remote http file object for reading. Seek is supported
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
url := o.url()
req, err := http.NewRequest("GET", url, nil)
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
if err != nil {
return nil, errors.Wrap(err, "Open failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
// Add optional headers
for k, v := range fs.OpenOptionHeaders(options) {


@@ -15,7 +15,7 @@ import (
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/rest"
@@ -47,7 +47,7 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
ts := httptest.NewServer(handler)
// Configure the remote
config.LoadConfig()
configfile.LoadConfig(context.Background())
// fs.Config.LogLevel = fs.LogLevelDebug
// fs.Config.DumpHeaders = true
// fs.Config.DumpBodies = true
@@ -69,7 +69,7 @@ func prepare(t *testing.T) (fs.Fs, func()) {
m, tidy := prepareServer(t)
// Instantiate it
f, err := NewFs(remoteName, "", m)
f, err := NewFs(context.Background(), remoteName, "", m)
require.NoError(t, err)
return f, tidy
@@ -166,8 +166,7 @@ func TestNewObject(t *testing.T) {
require.NoError(t, err)
tFile := fi.ModTime()
dt, ok := fstest.CheckTimeEqualWithPrecision(tObj, tFile, time.Second)
assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))
fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)
// check object not found
o, err = f.NewObject(context.Background(), "not found.txt")
@@ -215,7 +214,7 @@ func TestIsAFileRoot(t *testing.T) {
m, tidy := prepareServer(t)
defer tidy()
f, err := NewFs(remoteName, "one%.txt", m)
f, err := NewFs(context.Background(), remoteName, "one%.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)
testListRoot(t, f, false)
@@ -225,7 +224,7 @@ func TestIsAFileSubDir(t *testing.T) {
m, tidy := prepareServer(t)
defer tidy()
f, err := NewFs(remoteName, "three/underthree.txt", m)
f, err := NewFs(context.Background(), remoteName, "three/underthree.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)
entries, err := f.List(context.Background(), "")


@@ -5,7 +5,7 @@ import (
"net/http"
"time"
"github.com/ncw/swift"
"github.com/ncw/swift/v2"
"github.com/rclone/rclone/fs"
)
@@ -21,10 +21,10 @@ func newAuth(f *Fs) *auth {
}
}
// Request constructs a http.Request for authentication
// Request constructs an http.Request for authentication
//
// returns nil for not needed
func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
func (a *auth) Request(ctx context.Context, c *swift.Connection) (r *http.Request, err error) {
const retries = 10
for try := 1; try <= retries; try++ {
err = a.f.getCredentials(context.TODO())
@@ -38,7 +38,7 @@ func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
}
// Response parses the result of an http request
func (a *auth) Response(resp *http.Response) error {
func (a *auth) Response(ctx context.Context, resp *http.Response) error {
return nil
}


@@ -4,7 +4,7 @@ package hubic
// This uses the normal swift mechanism to update the credentials and
// ignores the expires field returned by the Hubic API. This may need
// to be revisted after some actual experience.
// to be revisited after some actual experience.
import (
"context"
@@ -16,11 +16,10 @@ import (
"strings"
"time"
swiftLib "github.com/ncw/swift"
swiftLib "github.com/ncw/swift/v2"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/swift"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
@@ -39,7 +38,7 @@ var (
// Description of how to auth for this app
oauthConfig = &oauth2.Config{
Scopes: []string{
"credentials.r", // Read Openstack credentials
"credentials.r", // Read OpenStack credentials
},
Endpoint: oauth2.Endpoint{
AuthURL: "https://api.hubic.com/oauth/auth/",
@@ -57,28 +56,22 @@ func init() {
Name: "hubic",
Description: "Hubic",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
err := oauthutil.Config("hubic", name, m, oauthConfig)
Config: func(ctx context.Context, name string, m configmap.Mapper) {
err := oauthutil.Config(ctx, "hubic", name, m, oauthConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: append([]fs.Option{{
Name: config.ConfigClientID,
Help: "Hubic Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Hubic Client Secret\nLeave blank normally.",
}}, swift.SharedOptions...),
Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
})
}
// credentials is the JSON returned from the Hubic API to read the
// OpenStack credentials
type credentials struct {
Token string `json:"token"` // Openstack token
Endpoint string `json:"endpoint"` // Openstack endpoint
Expires string `json:"expires"` // Expires date - eg "2015-11-09T14:24:56+01:00"
Token string `json:"token"` // OpenStack token
Endpoint string `json:"endpoint"` // OpenStack endpoint
Expires string `json:"expires"` // Expires date - e.g. "2015-11-09T14:24:56+01:00"
}
// Fs represents a remote hubic
@@ -117,11 +110,10 @@ func (f *Fs) String() string {
//
// The credentials are read into the Fs
func (f *Fs) getCredentials(ctx context.Context) (err error) {
req, err := http.NewRequest("GET", "https://api.hubic.com/1.0/account/credentials", nil)
req, err := http.NewRequestWithContext(ctx, "GET", "https://api.hubic.com/1.0/account/credentials", nil)
if err != nil {
return err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
resp, err := f.client.Do(req)
if err != nil {
return err
@@ -153,8 +145,8 @@ func (f *Fs) getCredentials(ctx context.Context) (err error) {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
client, _, err := oauthutil.NewClient(name, m, oauthConfig)
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
client, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Hubic")
}
@@ -164,13 +156,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
// Make the swift Connection
ci := fs.GetConfig(ctx)
c := &swiftLib.Connection{
Auth: newAuth(f),
ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport
Transport: fshttp.NewTransport(fs.Config),
ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * ci.Timeout, // Use the timeouts in the transport
Transport: fshttp.NewTransport(ctx),
}
err = c.Authenticate()
err = c.Authenticate(ctx)
if err != nil {
return nil, errors.Wrap(err, "error authenticating swift connection")
}
@@ -183,7 +176,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
// Make inner swift Fs from the connection
swiftFs, err := swift.NewFsWithConnection(opt, name, root, c, true)
swiftFs, err := swift.NewFsWithConnection(ctx, opt, name, root, c, true)
if err != nil && err != fs.ErrorIsFile {
return nil, err
}


@@ -46,13 +46,57 @@ func (t Time) String() string { return time.Time(t).Format(timeFormat) }
// APIString returns Time string in Jottacloud API format
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
// LoginToken is struct representing the login token generated in the WebUI
type LoginToken struct {
Username string `json:"username"`
Realm string `json:"realm"`
WellKnownLink string `json:"well_known_link"`
AuthToken string `json:"auth_token"`
}
// WellKnown contains some configuration parameters for setting up endpoints
type WellKnown struct {
Issuer string `json:"issuer"`
AuthorizationEndpoint string `json:"authorization_endpoint"`
TokenEndpoint string `json:"token_endpoint"`
TokenIntrospectionEndpoint string `json:"token_introspection_endpoint"`
UserinfoEndpoint string `json:"userinfo_endpoint"`
EndSessionEndpoint string `json:"end_session_endpoint"`
JwksURI string `json:"jwks_uri"`
CheckSessionIframe string `json:"check_session_iframe"`
GrantTypesSupported []string `json:"grant_types_supported"`
ResponseTypesSupported []string `json:"response_types_supported"`
SubjectTypesSupported []string `json:"subject_types_supported"`
IDTokenSigningAlgValuesSupported []string `json:"id_token_signing_alg_values_supported"`
UserinfoSigningAlgValuesSupported []string `json:"userinfo_signing_alg_values_supported"`
RequestObjectSigningAlgValuesSupported []string `json:"request_object_signing_alg_values_supported"`
ResponseNodesSupported []string `json:"response_modes_supported"`
RegistrationEndpoint string `json:"registration_endpoint"`
TokenEndpointAuthMethodsSupported []string `json:"token_endpoint_auth_methods_supported"`
TokenEndpointAuthSigningAlgValuesSupported []string `json:"token_endpoint_auth_signing_alg_values_supported"`
ClaimsSupported []string `json:"claims_supported"`
ClaimTypesSupported []string `json:"claim_types_supported"`
ClaimsParameterSupported bool `json:"claims_parameter_supported"`
ScopesSupported []string `json:"scopes_supported"`
RequestParameterSupported bool `json:"request_parameter_supported"`
RequestURIParameterSupported bool `json:"request_uri_parameter_supported"`
CodeChallengeMethodsSupported []string `json:"code_challenge_methods_supported"`
TLSClientCertificateBoundAccessTokens bool `json:"tls_client_certificate_bound_access_tokens"`
IntrospectionEndpoint string `json:"introspection_endpoint"`
}
// TokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form.
type TokenJSON struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
AccessToken string `json:"access_token"`
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
RefreshExpiresIn int32 `json:"refresh_expires_in"`
RefreshToken string `json:"refresh_token"`
TokenType string `json:"token_type"`
IDToken string `json:"id_token"`
NotBeforePolicy int32 `json:"not-before-policy"`
SessionState string `json:"session_state"`
Scope string `json:"scope"`
}
// JSON structures returned by new API
@@ -109,9 +153,9 @@ type CustomerInfo struct {
AccountType string `json:"account_type"`
SubscriptionType string `json:"subscription_type"`
Usage int64 `json:"usage"`
Qouta int64 `json:"quota"`
Quota int64 `json:"quota"`
BusinessUsage int64 `json:"business_usage"`
BusinessQouta int64 `json:"business_quota"`
BusinessQuota int64 `json:"business_quota"`
WriteLocked bool `json:"write_locked"`
ReadLocked bool `json:"read_locked"`
LockedCause interface{} `json:"locked_cause"`
@@ -120,6 +164,12 @@ type CustomerInfo struct {
IOSHash string `json:"ios_hash"`
}
// TrashResponse is returned when emptying the Trash
type TrashResponse struct {
Folders int64 `json:"folders"`
Files int64 `json:"files"`
}
// XML structures returned by the old API
// Flag is a hacky type for checking if an attribute is present
@@ -336,7 +386,7 @@ type Error struct {
Cause string `xml:"cause"`
}
// Error returns a string for the error and statistifes the error interface
// Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
out := fmt.Sprintf("error %d", e.StatusCode)
if e.Message != "" {
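
The new LoginToken type above carries the fields rclone reads from the token generated in the Jottacloud web UI. A standalone sketch of decoding such a token, assuming it is base64-encoded JSON; the exact base64 variant and the sample values here are assumptions for illustration only:

package main

import (
    "encoding/base64"
    "encoding/json"
    "fmt"
)

// loginToken mirrors the LoginToken struct above.
type loginToken struct {
    Username      string `json:"username"`
    Realm         string `json:"realm"`
    WellKnownLink string `json:"well_known_link"`
    AuthToken     string `json:"auth_token"`
}

func main() {
    raw := `{"username":"jc123","realm":"jottacloud","well_known_link":"https://id.example/.well-known","auth_token":"secret"}`
    encoded := base64.RawURLEncoding.EncodeToString([]byte(raw))

    decoded, err := base64.RawURLEncoding.DecodeString(encoded)
    if err != nil {
        panic(err)
    }
    var t loginToken
    if err := json.Unmarshal(decoded, &t); err != nil {
        panic(err)
    }
    fmt.Println(t.Username, t.Realm)
}
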


@@ -4,7 +4,9 @@ import (
"bytes"
"context"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"io/ioutil"
@@ -26,47 +28,53 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
)
const enc = encodings.JottaCloud
// Globals
const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
defaultDevice = "Jotta"
defaultMountpoint = "Archive"
rootURL = "https://www.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com/"
baseURL = "https://www.jottacloud.com/"
tokenURL = "https://api.jottacloud.com/auth/v1/token"
registerURL = "https://api.jottacloud.com/auth/v1/register"
cachePrefix = "rclone-jcmd5-"
rcloneClientID = "nibfk8biu12ju7hpqomr8b1e40"
rcloneEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
configClientID = "client_id"
configClientSecret = "client_secret"
configDevice = "device"
configMountpoint = "mountpoint"
charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
defaultDevice = "Jotta"
defaultMountpoint = "Archive"
rootURL = "https://jfs.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com/"
baseURL = "https://www.jottacloud.com/"
defaultTokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
cachePrefix = "rclone-jcmd5-"
configDevice = "device"
configMountpoint = "mountpoint"
configTokenURL = "tokenURL"
configClientID = "client_id"
configClientSecret = "client_secret"
configVersion = 1
v1tokenURL = "https://api.jottacloud.com/auth/v1/token"
v1registerURL = "https://api.jottacloud.com/auth/v1/register"
v1ClientID = "nibfk8biu12ju7hpqomr8b1e40"
v1EncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
v1configVersion = 0
teliaCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
teliaCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
teliaCloudClientID = "desktop"
)
var (
// Description of how to auth for this app for a personal account
oauthConfig = &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: tokenURL,
TokenURL: tokenURL,
AuthURL: defaultTokenURL,
TokenURL: defaultTokenURL,
},
RedirectURL: oauthutil.RedirectLocalhostURL,
}
@@ -77,71 +85,42 @@ func init() {
// needs to be done early so we can use oauth during config
fs.Register(&fs.RegInfo{
Name: "jottacloud",
Description: "JottaCloud",
Description: "Jottacloud",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
ctx := context.TODO()
tokenString, ok := m.Get("token")
if ok && tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !config.Confirm() {
return
}
}
srv := rest.NewClient(fshttp.NewClient(fs.Config))
fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
if config.Confirm() {
deviceRegistration, err := registerDevice(ctx, srv)
Config: func(ctx context.Context, name string, m configmap.Mapper) {
refresh := false
if version, ok := m.Get("configVersion"); ok {
ver, err := strconv.Atoi(version)
if err != nil {
log.Fatalf("Failed to register device: %v", err)
log.Fatalf("Failed to parse config version - corrupted config")
}
m.Set(configClientID, deviceRegistration.ClientID)
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
fs.Debugf(nil, "Got clientID '%s' and clientSecret '%s'", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
refresh = (ver != configVersion) && (ver != v1configVersion)
}
clientID, ok := m.Get(configClientID)
if !ok {
clientID = rcloneClientID
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = rcloneEncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
fmt.Printf("Username> ")
username := config.ReadLine()
password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.")
token, err := doAuth(ctx, srv, username, password)
if err != nil {
log.Fatalf("Failed to get oauth token: %s", err)
}
err = oauthutil.PutToken(name, m, &token, true)
if err != nil {
log.Fatalf("Error while saving token: %s", err)
}
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm() {
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
if refresh {
fmt.Printf("Config outdated - refreshing\n")
} else {
tokenString, ok := m.Get("token")
if ok && tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !config.Confirm(false) {
return
}
}
}
srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
fmt.Printf("Choose authentication type:\n" +
"1: Standard authentication - use this if you're a normal Jottacloud user.\n" +
"2: Legacy authentication - this is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n" +
"3: Telia Cloud authentication - use this if you are using Telia Cloud.\n")
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
if err != nil {
log.Fatalf("Failed to setup mountpoint: %s", err)
}
m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint)
switch config.ChooseNumber("Your choice", 1, 3) {
case 1:
v2config(ctx, name, m)
case 2:
v1config(ctx, name, m)
case 3:
teliaCloudConfig(ctx, name, m)
}
},
Options: []fs.Option{{
@@ -150,13 +129,13 @@ func init() {
Default: fs.SizeSuffix(10 * 1024 * 1024),
Advanced: true,
}, {
Name: "hard_delete",
Help: "Delete files permanently rather than putting them into the trash.",
Name: "trashed_only",
Help: "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.",
Default: false,
Advanced: true,
}, {
Name: "unlink",
Help: "Remove existing public link to file/folder with link command rather than creating.\nDefault is false, meaning link command will create or retrieve public link.",
Name: "hard_delete",
Help: "Delete files permanently rather than putting them into the trash.",
Default: false,
Advanced: true,
}, {
@@ -164,18 +143,29 @@ func init() {
Help: "Files bigger than this can be resumed if the upload fail's.",
Default: fs.SizeSuffix(10 * 1024 * 1024),
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// Encode invalid UTF-8 bytes as xml doesn't handle them properly.
//
// Also: '*', '/', ':', '<', '>', '?', '\"', '\x00', '|'
Default: (encoder.Display |
encoder.EncodeWin | // :?"*<>|
encoder.EncodeInvalidUtf8),
}},
})
}
// Options defines the configuration for this backend
type Options struct {
Device string `config:"device"`
Mountpoint string `config:"mountpoint"`
MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
HardDelete bool `config:"hard_delete"`
Unlink bool `config:"unlink"`
UploadThreshold fs.SizeSuffix `config:"upload_resume_limit"`
Device string `config:"device"`
Mountpoint string `config:"mountpoint"`
MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
TrashedOnly bool `config:"trashed_only"`
HardDelete bool `config:"hard_delete"`
UploadThreshold fs.SizeSuffix `config:"upload_resume_limit"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote jottacloud
@@ -227,7 +217,7 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// parsePath parses an box 'url'
// parsePath parses a box 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
@@ -245,16 +235,124 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
func teliaCloudConfig(ctx context.Context, name string, m configmap.Mapper) {
teliaCloudOauthConfig := &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: teliaCloudAuthURL,
TokenURL: teliaCloudTokenURL,
},
ClientID: teliaCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
}
err := oauthutil.Config(ctx, "jottacloud", name, m, teliaCloudOauthConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
return
}
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, teliaCloudOauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
}
srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
if err != nil {
log.Fatalf("Failed to setup mountpoint: %s", err)
}
m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint)
}
m.Set("configVersion", strconv.Itoa(configVersion))
m.Set(configClientID, teliaCloudClientID)
m.Set(configTokenURL, teliaCloudTokenURL)
}
// v1config configure a jottacloud backend using legacy authentication
func v1config(ctx context.Context, name string, m configmap.Mapper) {
srv := rest.NewClient(fshttp.NewClient(ctx))
fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
if config.Confirm(false) {
deviceRegistration, err := registerDevice(ctx, srv)
if err != nil {
log.Fatalf("Failed to register device: %v", err)
}
m.Set(configClientID, deviceRegistration.ClientID)
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
fs.Debugf(nil, "Got clientID '%s' and clientSecret '%s'", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
}
clientID, ok := m.Get(configClientID)
if !ok {
clientID = v1ClientID
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = v1EncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
oauthConfig.Endpoint.AuthURL = v1tokenURL
oauthConfig.Endpoint.TokenURL = v1tokenURL
fmt.Printf("Username> ")
username := config.ReadLine()
password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.")
token, err := doAuthV1(ctx, srv, username, password)
if err != nil {
log.Fatalf("Failed to get oauth token: %s", err)
}
err = oauthutil.PutToken(name, m, &token, true)
if err != nil {
log.Fatalf("Error while saving token: %s", err)
}
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
}
srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
if err != nil {
log.Fatalf("Failed to setup mountpoint: %s", err)
}
m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint)
}
m.Set("configVersion", strconv.Itoa(v1configVersion))
}
// registerDevice register a new device for use with the jottacloud API
func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegistrationResponse, err error) {
// random generator to generate random device names
seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
randomDeviceNamePartLength := 21
randomDeviceNamePart := make([]byte, randomDeviceNamePartLength)
charset := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
for i := range randomDeviceNamePart {
randomDeviceNamePart[i] = charset[seededRand.Intn(len(charset))]
}
@@ -266,7 +364,7 @@ func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegis
opts := rest.Opts{
Method: "POST",
RootURL: registerURL,
RootURL: v1registerURL,
ContentType: "application/x-www-form-urlencoded",
ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
Parameters: values,
@@ -277,8 +375,8 @@ func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegis
return deviceRegistration, err
}
// doAuth runs the actual token request
func doAuth(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
// doAuthV1 runs the actual token request for V1 authentication
func doAuthV1(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
// prepare out token request with username and password
values := url.Values{}
values.Set("grant_type", "PASSWORD")
@@ -307,7 +405,7 @@ func doAuth(ctx context.Context, srv *rest.Client, username, password string) (t
authCode = strings.Replace(authCode, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
resp, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
_, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
}
}
}
@@ -319,6 +417,106 @@ func doAuth(ctx context.Context, srv *rest.Client, username, password string) (t
return token, err
}
// v2config configure a jottacloud backend using the modern JottaCli token based authentication
func v2config(ctx context.Context, name string, m configmap.Mapper) {
srv := rest.NewClient(fshttp.NewClient(ctx))
fmt.Printf("Generate a personal login token here: https://www.jottacloud.com/web/secure\n")
fmt.Printf("Login Token> ")
loginToken := config.ReadLine()
m.Set(configClientID, "jottacli")
m.Set(configClientSecret, "")
token, err := doAuthV2(ctx, srv, loginToken, m)
if err != nil {
log.Fatalf("Failed to get oauth token: %s", err)
}
err = oauthutil.PutToken(name, m, &token, true)
if err != nil {
log.Fatalf("Error while saving token: %s", err)
}
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
}
srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
if err != nil {
log.Fatalf("Failed to setup mountpoint: %s", err)
}
m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint)
}
m.Set("configVersion", strconv.Itoa(configVersion))
}
// doAuthV2 runs the actual token request for V2 authentication
func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m configmap.Mapper) (token oauth2.Token, err error) {
loginTokenBytes, err := base64.RawURLEncoding.DecodeString(loginTokenBase64)
if err != nil {
return token, err
}
// decode login token
var loginToken api.LoginToken
decoder := json.NewDecoder(bytes.NewReader(loginTokenBytes))
err = decoder.Decode(&loginToken)
if err != nil {
return token, err
}
// retrieve endpoint urls
opts := rest.Opts{
Method: "GET",
RootURL: loginToken.WellKnownLink,
}
var wellKnown api.WellKnown
_, err = srv.CallJSON(ctx, &opts, nil, &wellKnown)
if err != nil {
return token, err
}
// save the tokenurl
oauthConfig.Endpoint.AuthURL = wellKnown.TokenEndpoint
oauthConfig.Endpoint.TokenURL = wellKnown.TokenEndpoint
m.Set(configTokenURL, wellKnown.TokenEndpoint)
// prepare out token request with username and password
values := url.Values{}
values.Set("client_id", "jottacli")
values.Set("grant_type", "password")
values.Set("password", loginToken.AuthToken)
values.Set("scope", "offline_access+openid")
values.Set("username", loginToken.Username)
values.Encode()
opts = rest.Opts{
Method: "POST",
RootURL: oauthConfig.Endpoint.AuthURL,
ContentType: "application/x-www-form-urlencoded",
Body: strings.NewReader(values.Encode()),
}
// do the first request
var jsonToken api.TokenJSON
_, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
if err != nil {
return token, err
}
token.AccessToken = jsonToken.AccessToken
token.RefreshToken = jsonToken.RefreshToken
token.TokenType = jsonToken.TokenType
token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)
return token, err
}
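A rough, self-contained sketch of the first step doAuthV2 performs above - decoding the base64url-encoded login token; the struct and its JSON field names here are illustrative stand-ins for api.LoginToken, not copied from it:
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"log"
)

// loginToken is a hypothetical stand-in for api.LoginToken: a personal
// login token is a base64url blob wrapping JSON that carries the
// username, a one-time auth token and a well-known URL used for
// endpoint discovery.
type loginToken struct {
	Username      string `json:"username"`
	AuthToken     string `json:"auth_token"`
	WellKnownLink string `json:"well_known_link"`
}

func main() {
	// Made-up token content, encoded the same way doAuthV2 decodes it.
	raw := base64.RawURLEncoding.EncodeToString([]byte(
		`{"username":"user@example.com","auth_token":"abc123","well_known_link":"https://id.example.com/.well-known/openid-configuration"}`))

	data, err := base64.RawURLEncoding.DecodeString(raw)
	if err != nil {
		log.Fatal(err)
	}
	var lt loginToken
	if err := json.Unmarshal(data, &lt); err != nil {
		log.Fatal(err)
	}
	fmt.Println(lt.Username, lt.WellKnownLink)
}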
// setupMountpoint sets up a custom device and mountpoint if desired by the user
func setupMountpoint(ctx context.Context, srv *rest.Client, apiSrv *rest.Client) (device, mountpoint string, err error) {
cust, err := getCustomerInfo(ctx, apiSrv)
@@ -407,7 +605,7 @@ func (f *Fs) setEndpointURL() {
if f.opt.Mountpoint == "" {
f.opt.Mountpoint = defaultMountpoint
}
f.endpointURL = urlPathEscape(path.Join(f.user, f.opt.Device, f.opt.Mountpoint))
f.endpointURL = path.Join(f.user, f.opt.Device, f.opt.Mountpoint)
}
// readMetaDataForPath reads the metadata from the path
@@ -420,7 +618,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Jo
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if apiErr, ok := err.(*api.Error); ok {
@@ -456,17 +654,17 @@ func errorHandler(resp *http.Response) error {
return errResponse
}
// Jottacloud want's '+' to be URL encoded even though the RFC states it's not reserved
// Jottacloud wants '+' to be URL encoded even though the RFC states it's not reserved
func urlPathEscape(in string) string {
return strings.Replace(rest.URLPathEscape(in), "+", "%2B", -1)
}
// filePathRaw returns an unescaped file path (f.root, file)
func (f *Fs) filePathRaw(file string) string {
return path.Join(f.endpointURL, enc.FromStandardPath(path.Join(f.root, file)))
return path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(path.Join(f.root, file)))
}
// filePath returns a escaped file path (f.root, file)
// filePath returns an escaped file path (f.root, file)
func (f *Fs) filePath(file string) string {
return urlPathEscape(f.filePathRaw(file))
}
@@ -478,7 +676,7 @@ func (f *Fs) filePath(file string) string {
// This filter catches all refresh requests, reads the body,
// changes the case and then sends it on
func grantTypeFilter(req *http.Request) {
if tokenURL == req.URL.String() {
if v1tokenURL == req.URL.String() {
// read the entire body
refreshBody, err := ioutil.ReadAll(req.Body)
if err != nil {
@@ -495,8 +693,7 @@ func grantTypeFilter(req *http.Request) {
}
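A minimal sketch of the kind of body rewrite such a request filter performs, assuming the v1 token endpoint wants the grant_type value upper-cased; the exact replacement string is an assumption here, not taken from this diff:
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

// rewriteGrantType reads the form body of a token refresh request and
// upper-cases the grant_type value before the request is sent on.
// Illustrative only - the real filter is hooked into rclone's HTTP
// transport via SetRequestFilter as shown above.
func rewriteGrantType(req *http.Request) {
	body, err := ioutil.ReadAll(req.Body)
	if err != nil {
		return
	}
	_ = req.Body.Close()
	body = bytes.Replace(body,
		[]byte("grant_type=refresh_token"),
		[]byte("grant_type=REFRESH_TOKEN"), 1)
	req.Body = ioutil.NopCloser(bytes.NewReader(body))
	req.ContentLength = int64(len(body))
}

func main() {
	req, _ := http.NewRequest("POST", "https://example.com/token",
		strings.NewReader("grant_type=refresh_token&refresh_token=xyz"))
	rewriteGrantType(req)
	b, _ := ioutil.ReadAll(req.Body)
	fmt.Println(string(b)) // grant_type=REFRESH_TOKEN&refresh_token=xyz
}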
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -504,54 +701,89 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}
rootIsDir := strings.HasSuffix(root, "/")
root = parsePath(root)
clientID, ok := m.Get(configClientID)
// Check config version
var ver int
version, ok := m.Get("configVersion")
if ok {
ver, err = strconv.Atoi(version)
if err != nil {
return nil, errors.New("Failed to parse config version")
}
ok = (ver == configVersion) || (ver == v1configVersion)
}
if !ok {
clientID = rcloneClientID
return nil, errors.New("Outdated config - please reconfigure this backend")
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = rcloneEncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
// the oauth client for the api servers needs
// a filter to fix the grant_type issues (see above)
baseClient := fshttp.NewClient(fs.Config)
if do, ok := baseClient.Transport.(interface {
SetRequestFilter(f func(req *http.Request))
}); ok {
do.SetRequestFilter(grantTypeFilter)
} else {
fs.Debugf(name+":", "Couldn't add request filter - uploads will fail")
baseClient := fshttp.NewClient(ctx)
if ver == configVersion {
oauthConfig.ClientID = "jottacli"
// if custom endpoints are set use them else stick with defaults
if tokenURL, ok := m.Get(configTokenURL); ok {
oauthConfig.Endpoint.TokenURL = tokenURL
// jottacloud is weird. we need to use the tokenURL as authURL
oauthConfig.Endpoint.AuthURL = tokenURL
}
} else if ver == v1configVersion {
clientID, ok := m.Get(configClientID)
if !ok {
clientID = v1ClientID
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = v1EncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
oauthConfig.Endpoint.TokenURL = v1tokenURL
oauthConfig.Endpoint.AuthURL = v1tokenURL
// add the request filter to fix token refresh
if do, ok := baseClient.Transport.(interface {
SetRequestFilter(f func(req *http.Request))
}); ok {
do.SetRequestFilter(grantTypeFilter)
} else {
fs.Debugf(name+":", "Couldn't add request filter - uploads will fail")
}
}
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
// Create OAuth Client
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
if err != nil {
return nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
}
rootIsDir := strings.HasSuffix(root, "/")
root = parsePath(root)
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CaseInsensitive: true,
CanHaveEmptyDirectories: true,
ReadMimeType: true,
WriteMimeType: true,
}).Fill(f)
WriteMimeType: false,
}).Fill(ctx, f)
f.srv.SetErrorHandler(errorHandler)
if opt.TrashedOnly { // we cannot support showing Trashed Files when using ListR right now
f.features.ListR = nil
}
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath(ctx, "")
if err == fs.ErrorNotAFile {
err = nil
}
return err
})
@@ -625,7 +857,7 @@ func (f *Fs) CreateDir(ctx context.Context, path string) (jf *api.JottaFolder, e
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &jf)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
//fmt.Printf("...Error %v\n", err)
@@ -654,7 +886,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
var result api.JottaFolder
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
@@ -667,26 +899,32 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return nil, errors.Wrap(err, "couldn't list files")
}
if result.Deleted {
if bool(result.Deleted) && !f.opt.TrashedOnly {
return nil, fs.ErrorDirNotFound
}
for i := range result.Folders {
item := &result.Folders[i]
if item.Deleted {
if !f.opt.TrashedOnly && bool(item.Deleted) {
continue
}
remote := path.Join(dir, enc.ToStandardName(item.Name))
remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name))
d := fs.NewDir(remote, time.Time(item.ModifiedAt))
entries = append(entries, d)
}
for i := range result.Files {
item := &result.Files[i]
if item.Deleted || item.State != "COMPLETED" {
continue
if f.opt.TrashedOnly {
if !item.Deleted || item.State != "COMPLETED" {
continue
}
} else {
if item.Deleted || item.State != "COMPLETED" {
continue
}
}
remote := path.Join(dir, enc.ToStandardName(item.Name))
remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name))
o, err := f.newObjectWithInfo(ctx, remote, item)
if err != nil {
continue
@@ -711,7 +949,7 @@ func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolde
if folder.Deleted {
return nil
}
folderPath := enc.ToStandardPath(path.Join(folder.Path, folder.Name))
folderPath := f.opt.Enc.ToStandardPath(path.Join(folder.Path, folder.Name))
folderPathLength := len(folderPath)
var remoteDir string
if folderPathLength > pathPrefixLength {
@@ -729,7 +967,7 @@ func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolde
if file.Deleted || file.State != "COMPLETED" {
continue
}
remoteFile := path.Join(remoteDir, enc.ToStandardName(file.Name))
remoteFile := path.Join(remoteDir, f.opt.Enc.ToStandardName(file.Name))
o, err := f.newObjectWithInfo(ctx, remoteFile, file)
if err != nil {
return err
@@ -760,7 +998,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
var result api.JottaFolder // Could be JottaFileDirList, but JottaFolder is close enough
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
if apiErr, ok := err.(*api.Error); ok {
@@ -866,7 +1104,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't purge directory")
@@ -888,8 +1126,8 @@ func (f *Fs) Precision() time.Duration {
}
// Purge deletes all the files and the container
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
}
// copyOrMoves copies or moves directories or files depending on the method parameter
@@ -900,12 +1138,12 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
Parameters: url.Values{},
}
opts.Parameters.Set(method, "/"+path.Join(f.endpointURL, enc.FromStandardPath(path.Join(f.root, dest))))
opts.Parameters.Set(method, "/"+path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(path.Join(f.root, dest))))
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
@@ -913,7 +1151,7 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
return info, nil
}
// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -943,7 +1181,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
//return f.newObjectWithInfo(remote, &result)
}
// Move src to this remote using server side move operations.
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -974,7 +1212,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1007,7 +1245,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return fs.ErrorDirExists
}
_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, enc.FromStandardPath(srcPath))+"/", dstRemote)
_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)
if err != nil {
return errors.Wrap(err, "couldn't move directory")
@@ -1016,14 +1254,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
opts := rest.Opts{
Method: "GET",
Path: f.filePath(remote),
Parameters: url.Values{},
}
if f.opt.Unlink {
if unlink {
opts.Parameters.Set("mode", "disableShare")
} else {
opts.Parameters.Set("mode", "enableShare")
@@ -1033,7 +1271,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
var result api.JottaFile
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if apiErr, ok := err.(*api.Error); ok {
@@ -1043,12 +1281,12 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
}
}
if err != nil {
if f.opt.Unlink {
if unlink {
return "", errors.Wrap(err, "couldn't remove public link")
}
return "", errors.Wrap(err, "couldn't create public link")
}
if f.opt.Unlink {
if unlink {
if result.PublicSharePath != "" {
return "", errors.Errorf("couldn't remove public link - %q", result.PublicSharePath)
}
@@ -1078,6 +1316,22 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return usage, nil
}
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) error {
opts := rest.Opts{
Method: "POST",
Path: "files/v1/purge_trash",
}
var info api.TrashResponse
_, err := f.apiSrv.CallJSON(ctx, &opts, nil, &info)
if err != nil {
return errors.Wrap(err, "couldn't empty trash")
}
return nil
}
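Since this adds the fs.CleanUpper interface, the Jottacloud trash can now be emptied from the command line in the usual way (the remote name is illustrative):

    rclone cleanup jottacloud: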
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
@@ -1103,7 +1357,7 @@ func (o *Object) Remote() string {
return o.remote
}
// filePath returns a escaped file path (f.root, remote)
// filePath returns an escaped file path (f.root, remote)
func (o *Object) filePath() string {
return o.fs.filePath(o.remote)
}
@@ -1151,7 +1405,7 @@ func (o *Object) readMetaData(ctx context.Context, force bool) (err error) {
if err != nil {
return err
}
if info.Deleted {
if bool(info.Deleted) && !o.fs.opt.TrashedOnly {
return fs.ErrorObjectNotFound
}
return o.setMetaData(info)
@@ -1195,7 +1449,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
@@ -1208,7 +1462,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// The cleanup function should be called when out is finished with
// regardless of whether this function returned an error or not.
func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader, cleanup func(), err error) {
// we need a MD5
// we need an MD5
md5Hasher := md5.New()
// use the teeReader to write to the local file AND calculate the MD5 while doing so
teeReader := io.TeeReader(in, md5Hasher)
@@ -1266,6 +1520,8 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
o.fs.tokenRenewer.Start()
defer o.fs.tokenRenewer.Stop()
size := src.Size()
md5String, err := src.Hash(ctx, hash.MD5)
if err != nil || md5String == "" {
@@ -1288,6 +1544,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
opts := rest.Opts{
Method: "POST",
Path: "files/v1/allocate",
Options: options,
ExtraHeaders: make(map[string]string),
}
fileDate := api.Time(src.ModTime(ctx)).APIString()
@@ -1298,20 +1555,20 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Created: fileDate,
Modified: fileDate,
Md5: md5String,
Path: path.Join(o.fs.opt.Mountpoint, enc.FromStandardPath(path.Join(o.fs.root, o.remote))),
Path: path.Join(o.fs.opt.Mountpoint, o.fs.opt.Enc.FromStandardPath(path.Join(o.fs.root, o.remote))),
}
// send it
var response api.AllocateFileResponse
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.apiSrv.CallJSON(ctx, &opts, &request, &response)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return err
}
// If the file state is INCOMPLETE and CORRPUT, try to upload a then
// If the file state is INCOMPLETE or CORRUPT, try to upload the rest of it
if response.State != "COMPLETED" {
// how much do we still have to upload?
remainingBytes := size - response.ResumePos
@@ -1370,7 +1627,7 @@ func (o *Object) Remove(ctx context.Context) error {
return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallXML(ctx, &opts, nil, nil)
return shouldRetry(resp, err)
return shouldRetry(ctx, resp, err)
})
}
@@ -1384,6 +1641,7 @@ var (
_ fs.ListRer = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
)


@@ -12,67 +12,74 @@ import (
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
httpclient "github.com/koofr/go-httpclient"
koofrclient "github.com/koofr/go-koofrclient"
)
const enc = encodings.Koofr
// Register Fs with rclone
func init() {
fs.Register(&fs.RegInfo{
Name: "koofr",
Description: "Koofr",
NewFs: NewFs,
Options: []fs.Option{
{
Name: "endpoint",
Help: "The Koofr API endpoint to use",
Default: "https://app.koofr.net",
Required: true,
Advanced: true,
}, {
Name: "mountid",
Help: "Mount ID of the mount to use. If omitted, the primary mount is used.",
Required: false,
Default: "",
Advanced: true,
}, {
Name: "setmtime",
Help: "Does the backend support setting modification time. Set this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
Default: true,
Required: true,
Advanced: true,
}, {
Name: "user",
Help: "Your Koofr user name",
Required: true,
}, {
Name: "password",
Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)",
IsPassword: true,
Required: true,
},
},
Options: []fs.Option{{
Name: "endpoint",
Help: "The Koofr API endpoint to use",
Default: "https://app.koofr.net",
Required: true,
Advanced: true,
}, {
Name: "mountid",
Help: "Mount ID of the mount to use. If omitted, the primary mount is used.",
Required: false,
Default: "",
Advanced: true,
}, {
Name: "setmtime",
Help: "Does the backend support setting modification time. Set this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
Default: true,
Required: true,
Advanced: true,
}, {
Name: "user",
Help: "Your Koofr user name",
Required: true,
}, {
Name: "password",
Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)",
IsPassword: true,
Required: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8),
}},
})
}
// Options represent the configuration of the Koofr backend
type Options struct {
Endpoint string `config:"endpoint"`
MountID string `config:"mountid"`
User string `config:"user"`
Password string `config:"password"`
SetMTime bool `config:"setmtime"`
Endpoint string `config:"endpoint"`
MountID string `config:"mountid"`
User string `config:"user"`
Password string `config:"password"`
SetMTime bool `config:"setmtime"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// A Fs is a representation of a remote Koofr Fs
// An Fs is a representation of a remote Koofr Fs
type Fs struct {
name string
mountID string
@@ -243,13 +250,13 @@ func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// fullPath constructs a full, absolute path from a Fs root relative path,
// fullPath constructs a full, absolute path from an Fs root relative path,
func (f *Fs) fullPath(part string) string {
return enc.FromStandardPath(path.Join("/", f.root, part))
return f.opt.Enc.FromStandardPath(path.Join("/", f.root, part))
}
// NewFs constructs a new filesystem given a root path and configuration options
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
@@ -259,7 +266,9 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
if err != nil {
return nil, err
}
client := koofrclient.NewKoofrClient(opt.Endpoint, false)
httpClient := httpclient.New()
httpClient.Client = fshttp.NewClient(ctx)
client := koofrclient.NewKoofrClientWithHTTPClient(opt.Endpoint, httpClient)
basicAuth := fmt.Sprintf("Basic %s",
base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))
client.HTTPClient.Headers.Set("Authorization", basicAuth)
@@ -278,7 +287,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
DuplicateFiles: false,
BucketBased: false,
CanHaveEmptyDirectories: true,
}).Fill(f)
}).Fill(ctx, f)
for _, m := range mounts {
if opt.MountID != "" {
if m.Id == opt.MountID {
@@ -296,7 +305,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
}
return nil, errors.New("Failed to find mount " + opt.MountID)
}
rootFile, err := f.client.FilesInfo(f.mountID, enc.FromStandardPath("/"+f.root))
rootFile, err := f.client.FilesInfo(f.mountID, f.opt.Enc.FromStandardPath("/"+f.root))
if err == nil && rootFile.Type != "dir" {
f.root = dir(f.root)
err = fs.ErrorIsFile
@@ -314,7 +323,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
entries = make([]fs.DirEntry, len(files))
for i, file := range files {
remote := path.Join(dir, enc.ToStandardName(file.Name))
remote := path.Join(dir, f.opt.Enc.ToStandardName(file.Name))
if file.Type == "dir" {
entries[i] = fs.NewDir(remote, time.Unix(0, 0))
} else {
@@ -412,7 +421,7 @@ func translateErrorsObject(err error) error {
}
// mkdir creates a directory at the given remote path. Creates ancestors if
// neccessary
// necessary
func (f *Fs) mkdir(fullPath string) error {
if fullPath == "/" {
return nil
@@ -594,7 +603,7 @@ func createLink(c *koofrclient.KoofrClient, mountID string, path string) (*link,
}
// PublicLink creates a public link to the remote path
func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
linkData, err := createLink(f.client, f.mountID, f.fullPath(remote))
if err != nil {
return "", translateErrorsDir(err)


@@ -4,6 +4,7 @@ package local
import (
"context"
"os"
"syscall"
"github.com/pkg/errors"
@@ -15,6 +16,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var s syscall.Statfs_t
err := syscall.Statfs(f.root, &s)
if err != nil {
if os.IsNotExist(err) {
return nil, fs.ErrorDirNotFound
}
return nil, errors.Wrap(err, "failed to read disk usage")
}
bs := int64(s.Bsize) // nolint: unconvert


@@ -2,8 +2,10 @@
package local
import (
"github.com/rclone/rclone/fs/encodings"
)
import "github.com/rclone/rclone/lib/encoder"
const enc = encodings.LocalMacOS
// This is the encoding used by the local backend for macOS
//
// macOS can't store invalid UTF-8, it converts them into %XX encoding
const defaultEnc = (encoder.Base |
encoder.EncodeInvalidUtf8)


@@ -2,8 +2,7 @@
package local
import (
"github.com/rclone/rclone/fs/encodings"
)
import "github.com/rclone/rclone/lib/encoder"
const enc = encodings.LocalUnix
// This is the encoding used by the local backend for non-Windows platforms
const defaultEnc = encoder.Base


@@ -2,8 +2,32 @@
package local
import (
"github.com/rclone/rclone/fs/encodings"
)
import "github.com/rclone/rclone/lib/encoder"
const enc = encodings.LocalWindows
// This is the encoding used by the local backend for Windows platforms
//
// List of replaced characters:
// < (less than) -> '＜' // FULLWIDTH LESS-THAN SIGN
// > (greater than) -> '＞' // FULLWIDTH GREATER-THAN SIGN
// : (colon) -> '：' // FULLWIDTH COLON
// " (double quote) -> '＂' // FULLWIDTH QUOTATION MARK
// \ (backslash) -> '＼' // FULLWIDTH REVERSE SOLIDUS
// | (vertical line) -> '｜' // FULLWIDTH VERTICAL LINE
// ? (question mark) -> '？' // FULLWIDTH QUESTION MARK
// * (asterisk) -> '＊' // FULLWIDTH ASTERISK
//
// Additionally names can't end with a period (.) or space ( ).
// List of replaced characters:
// . (period) -> '．' // FULLWIDTH FULL STOP
// (space) -> '␠' // SYMBOL FOR SPACE
//
// Also encode invalid UTF-8 bytes as Go can't convert them to UTF-16.
//
// https://docs.microsoft.com/de-de/windows/desktop/FileIO/naming-a-file#naming-conventions
const defaultEnc = (encoder.Base |
encoder.EncodeWin |
encoder.EncodeBackSlash |
encoder.EncodeCtl |
encoder.EncodeRightSpace |
encoder.EncodeRightPeriod |
encoder.EncodeInvalidUtf8)
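A hedged sketch of how this encoder behaves in practice, using the FromStandardPath/ToStandardPath helpers from lib/encoder that appear elsewhere in this diff; the path is made up:
package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/encoder"
)

func main() {
	// Same flag combination as defaultEnc above.
	enc := encoder.Base |
		encoder.EncodeWin |
		encoder.EncodeBackSlash |
		encoder.EncodeCtl |
		encoder.EncodeRightSpace |
		encoder.EncodeRightPeriod |
		encoder.EncodeInvalidUtf8

	p := `backup/report:final?.txt.`
	encoded := enc.FromStandardPath(p)
	fmt.Println(encoded)                     // reserved characters and the trailing dot become fullwidth forms
	fmt.Println(enc.ToStandardPath(encoded)) // round-trips back to the original path
}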


@@ -1,4 +1,4 @@
// +build windows plan9
// +build windows plan9 js
package local


@@ -1,4 +1,4 @@
// +build !windows,!plan9
// +build !windows,!plan9,!js
package local


@@ -10,7 +10,6 @@ import (
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
@@ -20,10 +19,12 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/readers"
)
@@ -39,9 +40,11 @@ func init() {
Name: "local",
Description: "Local Disk",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows",
Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows",
Advanced: runtime.GOOS != "windows",
Examples: []fs.OptionExample{{
Value: "true",
Help: "Disables long file names",
@@ -68,6 +71,20 @@ points, as you explicitly acknowledge that they should be skipped.`,
Default: false,
NoPrefix: true,
Advanced: true,
}, {
Name: "zero_size_links",
Help: `Assume the Stat size of links is zero (and read them instead)
On some virtual filesystems (such as LucidLink), reading a link size via a Stat call always returns 0.
However, on Unix it reads as the length of the text in the link. This may cause errors like this when
syncing:
Failed to copy: corrupted on transfer: sizes differ 0 vs 13
Setting this flag causes rclone to read the link and use that as the size of the link
instead of 0 which in most cases fixes the problem.`,
Default: false,
Advanced: true,
}, {
Name: "no_unicode_normalization",
Help: `Don't apply unicode normalization to paths and filenames (Deprecated)
@@ -85,9 +102,26 @@ Normally rclone checks the size and modification time of files as they
are being uploaded and aborts with a message which starts "can't copy
- source file is being updated" if the file changes during upload.
However on some file systems this modification time check may fail (eg
However on some file systems this modification time check may fail (e.g.
[Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this
check can be disabled with this flag.`,
check can be disabled with this flag.
If this flag is set, rclone will use its best efforts to transfer a
file which is being updated. If the file is only having things
appended to it (e.g. a log) then rclone will transfer the log file with
the size it had the first time rclone saw it.
If the file is being modified throughout (not just appended to) then
the transfer may fail with a hash check failure.
In detail, once the file has had stat() called on it for the first
time we:
- Only transfer the size that stat gave
- Only checksum the size that stat gave
- Don't update the stat info for the file
`,
Default: false,
Advanced: true,
}, {
@@ -115,6 +149,43 @@ Windows/macOS and case sensitive for everything else. Use this flag
to override the default choice.`,
Default: false,
Advanced: true,
}, {
Name: "no_preallocate",
Help: `Disable preallocation of disk space for transferred files
Preallocation of disk space helps prevent filesystem fragmentation.
However, some virtual filesystem layers (such as Google Drive File
Stream) may incorrectly set the actual file size equal to the
preallocated space, causing checksum and file size checks to fail.
Use this flag to disable preallocation.`,
Default: false,
Advanced: true,
}, {
Name: "no_sparse",
Help: `Disable sparse files for multi-thread downloads
On Windows platforms rclone will make sparse files when doing
multi-thread downloads. This avoids long pauses on large files where
the OS zeros the file. However sparse files may be undesirable as they
cause disk fragmentation and can be slow to work with.`,
Default: false,
Advanced: true,
}, {
Name: "no_set_modtime",
Help: `Disable setting modtime
Normally rclone updates modification time of files after they are done
uploading. This can cause permissions issues on Linux platforms when
the user rclone is running as does not own the file uploaded, such as
when copying to a CIFS mount owned by another user. If this option is
enabled, rclone will no longer update the modtime after copying a file.`,
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: defaultEnc,
}},
}
fs.Register(fsi)
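The options added above surface as --local-* flags; a hedged example combining several of the newly added ones (only --local-no-sparse is spelled out verbatim later in this diff, the other flag names follow rclone's usual option-to-flag mapping):

    rclone copy /data remote:backup --local-no-preallocate --local-no-set-modtime --local-zero-size-links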
@@ -122,15 +193,20 @@ to override the default choice.`,
// Options defines the configuration for this backend
type Options struct {
FollowSymlinks bool `config:"copy_links"`
TranslateSymlinks bool `config:"links"`
SkipSymlinks bool `config:"skip_links"`
NoUTFNorm bool `config:"no_unicode_normalization"`
NoCheckUpdated bool `config:"no_check_updated"`
NoUNC bool `config:"nounc"`
OneFileSystem bool `config:"one_file_system"`
CaseSensitive bool `config:"case_sensitive"`
CaseInsensitive bool `config:"case_insensitive"`
FollowSymlinks bool `config:"copy_links"`
TranslateSymlinks bool `config:"links"`
SkipSymlinks bool `config:"skip_links"`
ZeroSizeLinks bool `config:"zero_size_links"`
NoUTFNorm bool `config:"no_unicode_normalization"`
NoCheckUpdated bool `config:"no_check_updated"`
NoUNC bool `config:"nounc"`
OneFileSystem bool `config:"one_file_system"`
CaseSensitive bool `config:"case_sensitive"`
CaseInsensitive bool `config:"case_insensitive"`
NoPreAllocate bool `config:"no_preallocate"`
NoSparse bool `config:"no_sparse"`
NoSetModTime bool `config:"no_set_modtime"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a local filesystem rooted at root
@@ -146,20 +222,22 @@ type Fs struct {
warned map[string]struct{} // whether we have warned about this string
// do os.Lstat or os.Stat
lstat func(name string) (os.FileInfo, error)
objectHashesMu sync.Mutex // global lock for Object.hashes
lstat func(name string) (os.FileInfo, error)
objectMetaMu sync.RWMutex // global lock for Object metadata
}
// Object represents a local filesystem object
type Object struct {
fs *Fs // The Fs this object is part of
remote string // The remote path (encoded path)
path string // The local path (OS path)
size int64 // file metadata - always present
mode os.FileMode
modTime time.Time
hashes map[hash.Type]string // Hashes
translatedLink bool // Is this object a translated link
fs *Fs // The Fs this object is part of
remote string // The remote path (encoded path)
path string // The local path (OS path)
// When using these items the fs.objectMetaMu must be held
size int64 // file metadata - always present
mode os.FileMode
modTime time.Time
hashes map[hash.Type]string // Hashes
// these are read only and don't need the mutex held
translatedLink bool // Is this object a translated link
}
// ------------------------------------------------------------
@@ -167,7 +245,7 @@ type Object struct {
var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
// NewFs constructs an Fs from the path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -189,12 +267,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
dev: devUnset,
lstat: os.Lstat,
}
f.root = cleanRootPath(root, f.opt.NoUNC)
f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc)
f.features = (&fs.Features{
CaseInsensitive: f.caseInsensitive(),
CanHaveEmptyDirectories: true,
IsLocal: true,
}).Fill(f)
SlowHash: true,
}).Fill(ctx, f)
if opt.FollowSymlinks {
f.lstat = os.Stat
}
@@ -234,7 +313,7 @@ func (f *Fs) Name() string {
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return enc.ToStandardPath(filepath.ToSlash(f.root))
return f.opt.Enc.ToStandardPath(filepath.ToSlash(f.root))
}
// String converts this Fs to a string
@@ -350,7 +429,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
err = errors.Wrapf(err, "failed to open directory %q", dir)
fs.Errorf(dir, "%v", err)
if isPerm {
accounting.Stats(ctx).Error(fserrors.NoRetryError(err))
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(err))
err = nil // ignore error but fail sync
}
return nil, err
@@ -386,7 +465,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if fierr != nil {
err = errors.Wrapf(err, "failed to read directory %q", namepath)
fs.Errorf(dir, "%v", fierr)
accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
continue
}
fis = append(fis, fi)
@@ -405,11 +484,11 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
localPath := filepath.Join(fsDirPath, name)
fi, err = os.Stat(localPath)
if os.IsNotExist(err) {
// Skip bad symlinks
if os.IsNotExist(err) || isCircularSymlinkError(err) {
// Skip bad symlinks and circular symlinks
err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
fs.Errorf(newRemote, "Listing error: %v", err)
accounting.Stats(ctx).Error(err)
err = accounting.Stats(ctx).Error(err)
continue
}
if err != nil {
@@ -443,7 +522,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
func (f *Fs) cleanRemote(dir, filename string) (remote string) {
remote = path.Join(dir, enc.ToStandardName(filename))
remote = path.Join(dir, f.opt.Enc.ToStandardName(filename))
if !utf8.ValidString(filename) {
f.warnedMu.Lock()
@@ -457,7 +536,7 @@ func (f *Fs) cleanRemote(dir, filename string) (remote string) {
}
func (f *Fs) localPath(name string) string {
return filepath.Join(f.root, filepath.FromSlash(enc.FromStandardPath(name)))
return filepath.Join(f.root, filepath.FromSlash(f.opt.Enc.FromStandardPath(name)))
}
// Put the Object to the local filesystem
@@ -503,6 +582,10 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// Precision of the file system
func (f *Fs) Precision() (precision time.Duration) {
if f.opt.NoSetModTime {
return fs.ModTimeNotSupported
}
f.precisionOk.Do(func() {
f.precision = f.readPrecision()
})
@@ -561,23 +644,28 @@ func (f *Fs) readPrecision() (precision time.Duration) {
return
}
// Purge deletes all the files and directories
// Purge deletes all the files in the directory
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
fi, err := f.lstat(f.root)
func (f *Fs) Purge(ctx context.Context, dir string) error {
dir = f.localPath(dir)
fi, err := f.lstat(dir)
if err != nil {
// already purged
if os.IsNotExist(err) {
return fs.ErrorDirNotFound
}
return err
}
if !fi.Mode().IsDir() {
return errors.Errorf("can't purge non directory: %q", f.root)
return errors.Errorf("can't purge non directory: %q", dir)
}
return os.RemoveAll(f.root)
return os.RemoveAll(dir)
}
// Move src to this remote using server side move operations.
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -595,6 +683,9 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Temporary Object under construction
dstObj := f.newObject(remote)
dstObj.fs.objectMetaMu.RLock()
dstObjMode := dstObj.mode
dstObj.fs.objectMetaMu.RUnlock()
// Check it is a file if it exists
err := dstObj.lstat()
@@ -602,7 +693,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// OK
} else if err != nil {
return nil, err
} else if !dstObj.fs.isRegular(dstObj.mode) {
} else if !dstObj.fs.isRegular(dstObjMode) {
// It isn't a file
return nil, errors.New("can't move file onto non-file")
}
@@ -638,7 +729,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -689,6 +780,50 @@ func (f *Fs) Hashes() hash.Set {
return hash.Supported()
}
var commandHelp = []fs.CommandHelp{
{
Name: "noop",
Short: "A null operation for testing backend commands",
Long: `This is a test command which has some options
you can use to change the output.`,
Opts: map[string]string{
"echo": "echo the input arguments",
"error": "return an error based on option value",
},
},
}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
switch name {
case "noop":
if txt, ok := opt["error"]; ok {
if txt == "" {
txt = "unspecified error"
}
return nil, errors.New(txt)
}
if _, ok := opt["echo"]; ok {
out := map[string]interface{}{}
out["name"] = name
out["arg"] = arg
out["opt"] = opt
return out, nil
}
return nil, nil
default:
return nil, fs.ErrorCommandNotFound
}
}
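With fs.Commander implemented, the noop test command can be driven from the CLI; a hedged example of the invocation shape (the remote spelling is illustrative):

    rclone backend noop local:/tmp a b -o echo=true

which, per the code above, should echo back a JSON object containing the command name, the arguments ["a", "b"] and the echo option.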
// ------------------------------------------------------------
// Fs returns the parent Fs
@@ -712,19 +847,31 @@ func (o *Object) Remote() string {
// Hash returns the requested hash of a file as a lowercase hex string
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
// Check that the underlying file hasn't changed
o.fs.objectMetaMu.RLock()
oldtime := o.modTime
oldsize := o.size
o.fs.objectMetaMu.RUnlock()
err := o.lstat()
var changed bool
if err != nil {
return "", errors.Wrap(err, "hash: failed to stat")
if os.IsNotExist(errors.Cause(err)) {
// If file not found then we assume any accumulated
// hashes are OK - this will error on Open
changed = true
} else {
return "", errors.Wrap(err, "hash: failed to stat")
}
} else {
o.fs.objectMetaMu.RLock()
changed = !o.modTime.Equal(oldtime) || oldsize != o.size
o.fs.objectMetaMu.RUnlock()
}
o.fs.objectHashesMu.Lock()
hashes := o.hashes
o.fs.objectMetaMu.RLock()
hashValue, hashFound := o.hashes[r]
o.fs.objectHashesMu.Unlock()
o.fs.objectMetaMu.RUnlock()
if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil || !hashFound {
if changed || !hashFound {
var in io.ReadCloser
if !o.translatedLink {
@@ -736,9 +883,14 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
} else {
in, err = o.openTranslatedLink(0, -1)
}
// If not checking for updates, only read size given
if o.fs.opt.NoCheckUpdated {
in = readers.NewLimitedReadCloser(in, o.size)
}
if err != nil {
return "", errors.Wrap(err, "hash: failed to open")
}
var hashes map[hash.Type]string
hashes, err = hash.StreamTypes(in, hash.NewHashSet(r))
closeErr := in.Close()
if err != nil {
@@ -748,29 +900,36 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
return "", errors.Wrap(closeErr, "hash: failed to close")
}
hashValue = hashes[r]
o.fs.objectHashesMu.Lock()
o.fs.objectMetaMu.Lock()
if o.hashes == nil {
o.hashes = hashes
} else {
o.hashes[r] = hashValue
}
o.fs.objectHashesMu.Unlock()
o.fs.objectMetaMu.Unlock()
}
return hashValue, nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
o.fs.objectMetaMu.RLock()
defer o.fs.objectMetaMu.RUnlock()
return o.size
}
// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
o.fs.objectMetaMu.RLock()
defer o.fs.objectMetaMu.RUnlock()
return o.modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if o.fs.opt.NoSetModTime {
return nil
}
var err error
if o.translatedLink {
err = lChtimes(o.path, modTime, modTime)
@@ -786,7 +945,9 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
// Storable returns a boolean showing if this object is storable
func (o *Object) Storable() bool {
o.fs.objectMetaMu.RLock()
mode := o.mode
o.fs.objectMetaMu.RUnlock()
if mode&os.ModeSymlink != 0 && !o.fs.opt.TranslateSymlinks {
if !o.fs.opt.SkipSymlinks {
fs.Logf(o, "Can't follow symlink without -L/--copy-links")
@@ -819,11 +980,15 @@ func (file *localOpenFile) Read(p []byte) (n int, err error) {
if err != nil {
return 0, errors.Wrap(err, "can't read status of source file while transferring")
}
if file.o.size != fi.Size() {
return 0, errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size())
file.o.fs.objectMetaMu.RLock()
oldtime := file.o.modTime
oldsize := file.o.size
file.o.fs.objectMetaMu.RUnlock()
if oldsize != fi.Size() {
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", oldsize, fi.Size()))
}
if !file.o.modTime.Equal(fi.ModTime()) {
return 0, errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime())
if !oldtime.Equal(fi.ModTime()) {
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", oldtime, fi.ModTime()))
}
}
@@ -840,9 +1005,9 @@ func (file *localOpenFile) Close() (err error) {
err = file.in.Close()
if err == nil {
if file.hash.Size() == file.o.Size() {
file.o.fs.objectHashesMu.Lock()
file.o.fs.objectMetaMu.Lock()
file.o.hashes = file.hash.Sums()
file.o.fs.objectHashesMu.Unlock()
file.o.fs.objectMetaMu.Unlock()
}
}
return err
@@ -867,7 +1032,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.size)
offset, limit = x.Decode(o.Size())
case *fs.HashesOption:
if x.Hashes.Count() > 0 {
hasher, err = hash.NewMultiHasherTypes(x.Hashes)
@@ -882,6 +1047,13 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
}
// If not checking updated then limit to current size. This means if
// file is being extended, readers will read a o.Size() bytes rather
// than the new size making for a consistent upload.
if limit < 0 && o.fs.opt.NoCheckUpdated {
limit = o.size
}
// Handle a translated link
if o.translatedLink {
return o.openTranslatedLink(offset, limit)
@@ -956,12 +1128,28 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if !o.translatedLink {
f, err := file.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return err
if runtime.GOOS == "windows" && os.IsPermission(err) {
// If permission denied on Windows might be trying to update a
// hidden file, in which case try opening without CREATE
// See: https://stackoverflow.com/questions/13215716/ioerror-errno-13-permission-denied-when-trying-to-open-hidden-file-in-w-mod
f, err = file.OpenFile(o.path, os.O_WRONLY|os.O_TRUNC, 0666)
if err != nil {
return err
}
} else {
return err
}
}
// Pre-allocate the file for performance reasons
err = preAllocate(src.Size(), f)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
if !o.fs.opt.NoPreAllocate {
// Pre-allocate the file for performance reasons
err = file.PreAllocate(src.Size(), f)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
if err == file.ErrDiskFull {
_ = f.Close()
return err
}
}
}
out = f
} else {
@@ -1008,9 +1196,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// All successful so update the hashes
if hasher != nil {
o.fs.objectHashesMu.Lock()
o.fs.objectMetaMu.Lock()
o.hashes = hasher.Sums()
o.fs.objectHashesMu.Unlock()
o.fs.objectMetaMu.Unlock()
}
// Set the mtime
@@ -1023,6 +1211,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return o.lstat()
}
var sparseWarning sync.Once
// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
@@ -1046,29 +1236,50 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
return nil, err
}
// Pre-allocate the file for performance reasons
err = preAllocate(size, out)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
if !f.opt.NoPreAllocate {
err = file.PreAllocate(size, out)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
}
}
if !f.opt.NoSparse && file.SetSparseImplemented {
sparseWarning.Do(func() {
fs.Infof(nil, "Writing sparse files: use --local-no-sparse or --multi-thread-streams 0 to disable")
})
// Set the file to be a sparse file (important on Windows)
err = file.SetSparse(out)
if err != nil {
fs.Errorf(o, "Failed to set sparse: %v", err)
}
}
return out, nil
}
// setMetadata sets the file info from the os.FileInfo passed in
func (o *Object) setMetadata(info os.FileInfo) {
// Don't overwrite the info if we don't need to
// this avoids upsetting the race detector
if o.size != info.Size() {
o.size = info.Size()
// if not checking updated then don't update the stat
if o.fs.opt.NoCheckUpdated && !o.modTime.IsZero() {
return
}
if !o.modTime.Equal(info.ModTime()) {
o.modTime = info.ModTime()
}
if o.mode != info.Mode() {
o.mode = info.Mode()
o.fs.objectMetaMu.Lock()
o.size = info.Size()
o.modTime = info.ModTime()
o.mode = info.Mode()
o.fs.objectMetaMu.Unlock()
// On Windows links read as 0 size so set the correct size here
// Optionally, users can turn this feature on with the zero_size_links flag
if (runtime.GOOS == "windows" || o.fs.opt.ZeroSizeLinks) && o.translatedLink {
linkdst, err := os.Readlink(o.path)
if err != nil {
fs.Errorf(o, "Failed to read link size: %v", err)
} else {
o.size = int64(len(linkdst))
}
}
}
// Stat a Object into info
// Stat an Object into info
func (o *Object) lstat() error {
info, err := o.fs.lstat(o.path)
if err == nil {
@@ -1082,22 +1293,22 @@ func (o *Object) Remove(ctx context.Context) error {
return remove(o.path)
}
func cleanRootPath(s string, noUNC bool) string {
func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
if runtime.GOOS == "windows" {
s = filepath.ToSlash(s)
vol := filepath.VolumeName(s)
s = vol + enc.FromStandardPath(s[len(vol):])
s = filepath.FromSlash(s)
if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
s = filepath.ToSlash(s)
vol := filepath.VolumeName(s)
s = vol + enc.FromStandardPath(s[len(vol):])
s = filepath.FromSlash(s)
if !noUNC {
// Convert to UNC
s = uncPath(s)
s = file.UNCPath(s)
}
return s
}
@@ -1111,28 +1322,6 @@ func cleanRootPath(s string, noUNC bool) string {
return s
}
// Pattern to match a windows absolute path: "c:\" and similar
var isAbsWinDrive = regexp.MustCompile(`^[a-zA-Z]\:\\`)
// uncPath converts an absolute Windows path
// to a UNC long path.
func uncPath(l string) string {
// If prefix is "\\", we already have a UNC path or server.
if strings.HasPrefix(l, `\\`) {
// If already long path, just keep it
if strings.HasPrefix(l, `\\?\`) {
return l
}
// Trim "\\" from path and add UNC prefix.
return `\\?\UNC\` + strings.TrimPrefix(l, `\\`)
}
if isAbsWinDrive.MatchString(l) {
return `\\?\` + l
}
return l
}
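For reference, the conversions this removed helper performed (and which file.UNCPath now handles), as can be read directly from the code above:

    C:\Users\me\file.txt      ->  \\?\C:\Users\me\file.txt
    \\server\share\file.txt   ->  \\?\UNC\server\share\file.txt
    \\?\C:\already\long.txt   ->  unchanged (already a long path)
    relative\path.txt         ->  unchanged (no drive prefix, not UNC)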
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
@@ -1140,6 +1329,7 @@ var (
_ fs.PutStreamer = &Fs{}
_ fs.Mover = &Fs{}
_ fs.DirMover = &Fs{}
_ fs.Commander = &Fs{}
_ fs.OpenWriterAter = &Fs{}
_ fs.Object = &Object{}
)
