mirror of https://github.com/rclone/rclone.git synced 2025-12-06 00:03:32 +00:00

Compare commits

...

400 Commits

Author SHA1 Message Date
Nick Craig-Wood
544757a33b cache: set timeout to 24 hour - FIXME DO NOT MERGE 2021-02-05 19:09:46 +00:00
Nick Craig-Wood
bfcf6baf93 lib/cache: fix locking so we don't try to create the item many times
Before this fix, if several Get requests were submitted very quickly,
this could run the item create function multiple times due to the
unlock of the mutex in the creation code.

This fixes the problem by having a mutex in each cache entry which is
held when the item is being created.
2021-02-05 19:08:00 +00:00
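
A minimal Go sketch of the per-entry locking described above (not the actual lib/cache code; the Cache and entry types and the Get signature are invented for illustration):

    package cache

    import "sync"

    // entry holds a cached value plus a mutex which is held while the
    // value is being created, so concurrent Gets for the same key wait
    // for the first creation instead of running create again.
    type entry struct {
        mu    sync.Mutex
        value interface{}
        ok    bool
    }

    // Cache maps keys to entries; the top-level mutex only guards the map.
    type Cache struct {
        mu      sync.Mutex
        entries map[string]*entry
    }

    func New() *Cache {
        return &Cache{entries: map[string]*entry{}}
    }

    // Get returns the value for key, calling create at most once even
    // when many goroutines ask for the same key at the same time.
    func (c *Cache) Get(key string, create func() (interface{}, error)) (interface{}, error) {
        c.mu.Lock()
        e, found := c.entries[key]
        if !found {
            e = &entry{}
            c.entries[key] = e
        }
        c.mu.Unlock()

        e.mu.Lock()
        defer e.mu.Unlock()
        if e.ok {
            return e.value, nil
        }
        v, err := create()
        if err != nil {
            return nil, err
        }
        e.value, e.ok = v, true
        return v, nil
    }
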
Nick Craig-Wood
b2b5b7598c build: ensure go1.16 go gets the correct versions of tools
go1.16 uses GO111MODULE=on by default which meant we were picking up
v1 of nfpm instead of the intended v2.
2021-02-04 17:34:30 +00:00
Nick Craig-Wood
5f943aabc8 build: Use GO386=softfloat instead of deprecated GO386=387 for 386 builds 2021-02-04 16:37:23 +00:00
Nick Craig-Wood
84c785bc36 build: use go1.16-rc1 as default compiler 2021-02-04 16:08:51 +00:00
Nick Craig-Wood
993146375e build: update all dependencies 2021-02-03 21:34:38 +00:00
Nick Craig-Wood
bbe791a886 swift: update github.com/ncw/swift to v2.0.0
The update to v2 of the swift library introduces a context parameter
to each function. This required a lot of mostly mechanical changes
adding context parameters.

See: https://github.com/ncw/swift/issues/159
See: https://github.com/ncw/swift/issues/161
2021-02-03 20:23:37 +00:00
Nick Craig-Wood
1545ace8f2 build: remove go1.13 build constraints 2021-02-03 17:46:08 +00:00
Nick Craig-Wood
bcac8fdc83 Use http.NewRequestWithContext where possible after go1.13 minimum version 2021-02-03 17:41:27 +00:00
Nick Craig-Wood
15e1a6bee7 build: raise minimum go version to go1.13 2021-02-03 17:41:06 +00:00
Nick Craig-Wood
9710ded60f b2: automatically raise upload cutoff to avoid spurious error
Before this change, if --b2-chunk-size was raised above 200M then this
error would be produced:

    b2: upload cutoff: 200M is less than chunk size 1G

This change automatically raises --b2-upload-cutoff to be the value
of --b2-chunk-size if it is below it, which stops this error being
generated.

Fixes #4475
2021-02-03 16:29:32 +00:00
Nick Craig-Wood
5f3672102c s3: add --s3-no-head to reducing costs docs - Fixes #2163 2021-02-03 16:18:29 +00:00
Nick Craig-Wood
644cc69108 build: update GitHub release tool to use gh and put a link to changelog
Fixes #4994
2021-02-03 14:44:40 +00:00
Nick Craig-Wood
1415666074 lsjson: fix unterminated JSON in the presence of errors
See: https://forum.rclone.org/t/rclone-lsjson-invalid-json-produced-no-at-the-end/22046
2021-02-02 17:46:56 +00:00
Alex JOST
bae550c71e docs: Changelog: Correct link to digitalis.io 2021-02-02 17:26:41 +00:00
Nick Craig-Wood
beff081abb Start v1.55.0-DEV development 2021-02-02 16:30:58 +00:00
Nick Craig-Wood
7f5ee5d81f Version v1.54.0 2021-02-02 14:17:09 +00:00
Nick Craig-Wood
8b41dfa50a s3: add --s3-no-head parameter to minimise transactions on upload
See: https://forum.rclone.org/t/prevent-head-on-amazon-s3-family/21935
2021-02-02 10:07:48 +00:00
Nick Craig-Wood
0d8bcc08da Revert "fs/accounting: make edge bandwidth limiters have smaller bursts to make smoother" #4987
This reverts commit 463a18aa07.

In practice this caused Windows to have a max bandwidth limit of 8
MiB/s. This is because of the limited resolution (1ms) of the Windows
timer.

This needs fixing in a different way.

See: https://forum.rclone.org/t/bug-report-for-v1-54-0-beta-5133/22015
2021-02-01 22:23:50 +00:00
buengese
d3b7f14b66 bump sgzip to v1.1.0 - fixes #4970 2021-02-01 18:34:42 +00:00
Nick Craig-Wood
f66928a846 drive: fix copyid command with a bare filename: can't use empty string as a path
Before this change, running

    rclone backend copyid drive: ID file.txt

Failed with the error

    command "copyid" failed: failed copying "ID" "file.txt": can't use empty string as a path

This fixes the problem.
2021-02-01 15:17:25 +00:00
Nick Craig-Wood
3b1122c888 azureblob: require go1.14+ to compile due to SDK changes 2021-01-30 18:01:12 +00:00
Nick Craig-Wood
463a18aa07 fs/accounting: make edge bandwidth limiters have smaller bursts to make smoother
This change decreases the edge limiter burst size which dramatically
increases the smoothness of the bandwidth limiting.

The core bandwidth limiter remains with a large burst so it isn't
affected by double rate limiting on the edge limiters.

See: #4395
See: https://forum.rclone.org/t/bwlimit-is-not-really-smooth/20947
2021-01-30 17:39:30 +00:00
Nick Craig-Wood
0a932dc1f2 Add --bwlimit for upload and download #1873 2021-01-30 17:39:30 +00:00
Nick Craig-Wood
8856e0e559 fshttp: Obey bwlimit in http Transport - fixes #4395
This change uses the bwlimit code to apply limits to the receive and
transmit data functions in the HTTP Transport.

This means that all HTTP transactions will have limiting applied -
this includes listings for example.

For HTTP based transports this makes the limiting in Accounting
redundant and possibly counter-productive
2021-01-30 17:39:30 +00:00
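
An illustrative sketch of applying a bandwidth limit at the connection level so that every HTTP transaction (including listings) is limited. This is not rclone's fshttp code; limitedConn and newLimitedTransport are invented names, and the sketch uses golang.org/x/time/rate:

    package main

    import (
        "context"
        "net"
        "net/http"
        "time"

        "golang.org/x/time/rate"
    )

    // limitedConn wraps a net.Conn and charges every Read and Write against
    // shared token buckets, so all HTTP traffic through the transport is
    // limited, not just file transfers.
    type limitedConn struct {
        net.Conn
        rx, tx *rate.Limiter
    }

    func (c *limitedConn) Read(p []byte) (int, error) {
        n, err := c.Conn.Read(p)
        if n > 0 {
            _ = c.rx.WaitN(context.Background(), n)
        }
        return n, err
    }

    func (c *limitedConn) Write(p []byte) (int, error) {
        n, err := c.Conn.Write(p)
        if n > 0 {
            _ = c.tx.WaitN(context.Background(), n)
        }
        return n, err
    }

    // newLimitedTransport returns an http.Transport whose connections are
    // limited to roughly bytesPerSec in each direction.
    func newLimitedTransport(bytesPerSec float64) *http.Transport {
        burst := int(bytesPerSec)
        if burst < 64*1024 {
            burst = 64 * 1024 // keep the burst above typical read buffer sizes
        }
        rx := rate.NewLimiter(rate.Limit(bytesPerSec), burst)
        tx := rate.NewLimiter(rate.Limit(bytesPerSec), burst)
        return &http.Transport{
            DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
                d := net.Dialer{Timeout: 30 * time.Second}
                conn, err := d.DialContext(ctx, network, addr)
                if err != nil {
                    return nil, err
                }
                return &limitedConn{Conn: conn, rx: rx, tx: tx}, nil
            },
        }
    }

    func main() {
        client := &http.Client{Transport: newLimitedTransport(1 << 20)} // ~1 MiB/s
        _, _ = client.Get("https://example.com/")
    }
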
Nick Craig-Wood
3b6df71838 accounting: refactor bwlimit code to allow for multiple slots 2021-01-30 17:39:30 +00:00
Nick Craig-Wood
31de631b22 vendor: run go mod tidy 2021-01-30 17:28:27 +00:00
Nick Craig-Wood
189ef5f257 azureblob: fix memory usage by upgrading the SDK and implementing a TransferManager
In the Azure SDK there was a bug which caused excessive memory use
when doing repeated transfers:

https://github.com/Azure/azure-storage-blob-go/issues/233

This patch updates the SDK to v0.13.0 which allowed us to implement a
custom TransferManager which integrates with rclone's memory pool.

This fixes the excessive memory consumption.

See: https://forum.rclone.org/t/ask-for-settings-recommendation-for-azureblob/21505/
2021-01-30 17:26:59 +00:00
Nick Craig-Wood
2f67681e3b cmount: don't attempt to unmount if fs has been destroyed already #4957 2021-01-30 17:19:28 +00:00
Nick Craig-Wood
41127965b0 fstest: add Onedrive Business and Onedrive China to the integration tests 2021-01-30 16:32:32 +00:00
Nick Craig-Wood
8171671d82 Add Bob Pusateri to contributors 2021-01-30 16:24:54 +00:00
Nick Craig-Wood
75617c0c3b Add Pau Rodriguez-Estivill to contributors 2021-01-30 16:24:54 +00:00
Nick Craig-Wood
8b9d23916b Add Nicolas Rueff to contributors 2021-01-30 16:24:54 +00:00
Bob Pusateri
e43b79e33d azureblob: add examples for access tier
Azure Blob access tier values are case-sensitive, though this is
not indicated anywhere in the documentation. Adding examples
with proper casing.
2021-01-30 16:21:34 +00:00
albertony
459cc70a50 vfs: fix invalid cache path on windows when using :backend: as remote
The initial ':' is included in the ad-hoc remote name, but is an illegal
character in a Windows path. It is replaced with '^', which is legal in
filesystems but illegal in regular remote names, so a name conflict is avoided.

Fixes #4544
2021-01-30 16:18:15 +00:00
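
A small sketch of the substitution described above (the cacheDirName helper is a hypothetical name, not the actual VFS code, and only the leading ':' of an ad-hoc remote is handled here):

    package main

    import (
        "fmt"
        "strings"
    )

    // cacheDirName makes an ad-hoc ":backend" remote name usable as a
    // Windows directory name: the leading ':' is not a legal path
    // character, so swap it for '^', which cannot start a normal remote
    // name and therefore cannot collide with one.
    func cacheDirName(remote string) string {
        if strings.HasPrefix(remote, ":") {
            return "^" + remote[1:]
        }
        return remote
    }

    func main() {
        fmt.Println(cacheDirName(":local")) // ^local
    }
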
Ivan Andreev
20578f3f89 Add Sakuragawa Misty to contributors 2021-01-29 23:05:49 +03:00
NyaMisty
15da53696e onedrive: add support for china region operated by 21vianet #4963 (#4963)
fixes #3804
obsoletes #3973
obsoletes #4072
2021-01-29 23:04:21 +03:00
Ivan Andreev
2bddba118e fstest: apply shellcheck on rclone-serve.bash (#4975) 2021-01-29 19:07:17 +03:00
Ivan Andreev
c7e5976e11 build: replace go 1.16-beta1 by 1.16-rc1 (#4974) 2021-01-29 19:05:46 +03:00
Pau Rodriguez-Estivill
f0bf9cfda1 drive: add xdg office icons to xdg desktop files 2021-01-28 17:12:48 +00:00
Nguyễn Hữu Luân
671dd047f7 swift: ensure partially uploaded large files are uploaded unless --swift-leave-parts-on-error
This makes sure that partially uploaded large files are removed
unless the `--swift-leave-parts-on-error` flag is supplied.

- refactor swift.go
- add unit test for swift with chunk
- add unit test for large object with fail case
- add "-" to white list char during encode.
2021-01-28 17:09:41 +00:00
negative0
6272ca74bc plugins: Move plugins cache path initialization to initPluginsOrError.
Fixes #4951.
2021-01-28 16:58:23 +00:00
Nicolas Rueff
f5af761466 gphotos: new flag --gphotos-include-archived - fixes #4728
New flag --gphotos-include-archived to download and view archived media when needed.
2021-01-28 16:51:31 +00:00
Nick Craig-Wood
06f1c0c61c move: fix data loss when moving the same object
This checks to see if IDs are the same of the source and destination
object before deleting one of them and potentially causing data loss.

See: https://forum.rclone.org/t/files-deleted-and-not-moved/21830
2021-01-28 16:14:16 +00:00
Nick Craig-Wood
e6a9f005d6 sftp: implement --sftp-use-fstat
See: https://forum.rclone.org/t/sftp-fails-to-sync-to-local-failed-to-copy-file-does-not-exist/21759
2021-01-28 16:07:26 +00:00
Nick Craig-Wood
8f6f4b053c obscure: make rclone obscure - ignore newline at end of line
See: https://forum.rclone.org/t/authentification-issues-with-webdav-server/21891
2021-01-28 15:54:41 +00:00
Nick Craig-Wood
fe15a2eeeb Add Riccardo Iaconelli to contributors 2021-01-28 15:54:41 +00:00
Nick Craig-Wood
019667170f Add Zach Kipp to contributors 2021-01-28 15:54:41 +00:00
albertony
7a496752f3 fs: add support for flag --no-console on windows to hide the console window 2021-01-27 18:44:35 +00:00
Yury Stankevich
b569dc11a0 hdfs: support kerberos authentication #42 2021-01-27 18:16:58 +00:00
Riccardo Iaconelli
df4e6079f1 local: new flag --local-zero-size-links to fix sync on some virtual filesystems
Assume the Stat size of links is zero (and read them instead)

On some virtual filesystems (such as LucidLink), reading a link size via a
Stat call always returns 0.
However, on unix it reads as the length of the text in the link. This may
cause errors like this when syncing:

    Failed to copy: corrupted on transfer: sizes differ 0 vs 13

Setting this flag causes rclone to read the link and use that as the size of
the link instead of 0 which in most cases fixes the problem.

Fixes #4950

Signed-off-by: Riccardo Iaconelli <riccardo@kde.org>
2021-01-27 18:13:16 +00:00
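
An illustrative sketch of the behaviour the flag enables, assuming a hypothetical objectSize helper (not rclone's local backend code): when the flag is set and the path is a symlink, report the length of the link target rather than the possibly zero Lstat size.

    package main

    import (
        "fmt"
        "os"
    )

    // objectSize returns the size to report for path. With zeroSizeLinks
    // set, symlinks are sized by reading the link target, which works even
    // on filesystems where Lstat reports 0 for links.
    func objectSize(path string, zeroSizeLinks bool) (int64, error) {
        fi, err := os.Lstat(path)
        if err != nil {
            return 0, err
        }
        if zeroSizeLinks && fi.Mode()&os.ModeSymlink != 0 {
            target, err := os.Readlink(path)
            if err != nil {
                return 0, err
            }
            return int64(len(target)), nil
        }
        return fi.Size(), nil
    }

    func main() {
        size, err := objectSize("/tmp/example-link", true)
        fmt.Println(size, err)
    }
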
Zach Kipp
6156f90601 Fix test failure in different local time zones
TestParseDuration relied on an elapsed time calculation which
would vary based on the system local time. Fix the test by not relying
on the system time location. Also make the test more deterministic
by injecting time in tests rather than using system time.

Fixes #4529.
2021-01-27 15:05:35 +00:00
Louis Koo
cdaea62932 s3: fix copy multipart with v2 auth failing with 'SignatureDoesNotMatch'
Signed-off-by: zhuc <zhucan.k8s@gmail.com>
2021-01-27 14:43:02 +00:00
Nick Craig-Wood
78afe01d15 filefabric: fix finding directories in a case insensitive way #4830 2021-01-27 14:28:17 +00:00
Nick Craig-Wood
4eac88babf premiumizeme: fix finding directories in a case insensitive way #4830 2021-01-27 14:28:17 +00:00
Nick Craig-Wood
b4217fabd3 opendrive: fix finding directories in a case insensitive way #4830 2021-01-27 14:28:17 +00:00
Nick Craig-Wood
92b9dabf3c fstests: only test with ASCII uppercase for case insensitive tests
Upper/lower case handling for multilingual characters is provider dependent,
so we don't go there in the tests.
2021-01-27 14:28:17 +00:00
Antoine GIRARD
4323ff8a63 docs: fix broken link in serve sftp/ftp #4968 2021-01-27 13:41:32 +03:00
Nick Craig-Wood
3e188495f5 sugarsync: fix finding directories in a case insensitive way #4830 2021-01-26 14:48:33 +00:00
Nick Craig-Wood
acb9e17eb3 box: fix finding directories in a case insensitive way #4830 2021-01-26 14:48:33 +00:00
Nick Craig-Wood
c8ab4f1d02 fstests: add a test for finding a directory in a case insensitive way #4830 2021-01-26 14:48:06 +00:00
Nick Craig-Wood
e776a1b122 fstests: don't run encoding tests on -short 2021-01-26 14:46:23 +00:00
Nick Craig-Wood
c57af26de9 docs: add note about --check-first being useful with --order-by 2021-01-26 12:12:26 +00:00
Nick Craig-Wood
7d89912666 Add lluuaapp to contributors 2021-01-26 12:12:26 +00:00
Martin Michlmayr
cd075f1703 docs: fix markup of arguments #4276
Command line arguments have to be marked as code.
2021-01-25 22:40:46 +03:00
lluuaapp
35b2ca642c b2: fixed possible crash when accessing Backblaze b2 remote 2021-01-25 17:48:40 +00:00
edwardxml
127f48e8ad docs: Rewrite rclone filtering documentation
This is an attempt at rewriting the rclone filter documentation page.

I have drawn largely from what appears to be the strong original
structure of the page; existing text, and forum comments.

The term flag is used throughout rather than differentiating `--`
options with more complex arguments. That diverges from some standard
practice but is consistent with messages in the rclone binary and `go`
documentation.

The term directory not folder is used throughout.

I tried referring to objects more broadly rather than files and it
just did not seem to work. Apart from a note at the top the
explanations refer entirely to paths, directories and files. My
justification is that bucket store users understand the concept of
files. Not all users of directory aware storage are so familiar with
objects, keys and metadata.

Many of the changes I have made involve moving issues into what seemed
to me to be more relevant parts of the original page structure. I
still find the content repetitious and overly long but that may be
inevitable when users can only be expected to read the section of the
page they think most relevant.

I have eliminated the rsync section from the original structure. It is
hard enough explaining how rclone filters work without also setting
out how they do not. Comment on sync is instead relegated to a
paragraph in the directory filter section.

The structure of the page is intended to work with a hugo toc card
from html Header2 to Header3.

My original intention was to establish a separate examples section. I
have instead retained examples in each section, added to them and
tried to make clear what is documentation and what is example.

The changes draw on GitHub and Forum issues too numerous to mention,
for instance:

https://forum.rclone.org/t/certain-exclusion-flags-seem-to-be-ignored/20049/2

I am **especially** grateful for
https://forum.rclone.org/t/object-key-remote-directory-filter-clarification/20386/2
for making sense of directory filters for me.

@ncw has a fun (and useful) online filter app at
https://filterdemo.rclone.org/ I have not referred to it at this stage
though I particularly like the fact that it is tied to the same
codebase as an rclone version.

I have added cautions about mixing the `--filter...` flags with
`--exclude...` or `--include...`. The same issues seem to arise as
already recognised between the latter two.

The formal summary of glob syntax introduced at the top of the page is
shamelessly stolen from https://godoc.org/github.com/gobwas/glob

I have tried not to alter too many header descriptions and thereby
break existing links to them.

The reference to 'lass' in the example has been retained to confuse
all those not of Scottish or Yorkshire heritage.

Some of my activity was to remove ambiguity and I anticipate
suggestions to roll that back where it has become overly complex.

I tried particularly to bring together and make clear material about
directory filters. It was previously scattered throughout the page and
I couldn't understand it. I am particularly grateful for the
explanations I received about directory filters though any remaining
errors are entirely my own.

Removed erroneous references to non-existent `--filter...` flags.

In some ways the best person to write this page would be one with no
knowledge whatsoever of how rclone filters work. The further I got
into it the better qualified I found myself to be.

E&OE
2021-01-22 16:59:22 +00:00
Nick Craig-Wood
3e986cdf54 dedup: add warning if used on a remote which can't have duplicate names 2021-01-22 15:25:35 +00:00
Nick Craig-Wood
b80d498304 vfs: fix file leaks with --vfs-cache-mode full and --buffer-size 0
Before this change using --vfs-cache-mode full and --buffer-size 0
together caused the vfs downloader to open more and more downloaders.

This is fixed by introducing a minimum size of 1M for the window to
look for an existing downloader.

Fixes #4892
2021-01-21 18:35:04 +00:00
Nick Craig-Wood
757e696a6b Add Sơn Trần-Nguyễn to contributors 2021-01-21 18:35:04 +00:00
Nick Craig-Wood
e3979131f2 Add CokeMine to contributors 2021-01-21 18:35:04 +00:00
Nick Craig-Wood
a774f6bfdb qingstor: fix rclone cleanup
This patch changes to using the default page limit for listing
unfinished multipart uploads rather than 1000. 1000 is the maximum
specified in the docs, but setting anything larger than 200 gives an
error.
2021-01-21 17:35:31 +00:00
Nick Craig-Wood
d7cd35e2ca qingstor: fix error propagation in CleanUp
Before this change errors cleaning multiple buckets were passing silently
2021-01-21 17:35:31 +00:00
Aleksandar Jankovic
38e70f1797 rc/jobs: add listener for finished jobs
Add jobs.OnFinish method to register listener that will trigger when job
is finished.

Includes fix for stopping listeners.
2021-01-21 13:43:41 +00:00
albertony
3b49440c25 crypt: docs: extended description 2021-01-21 13:40:12 +00:00
Sơn Trần-Nguyễn
7c0287b824 docs: fix references to upstreams in union example 2021-01-21 13:33:16 +00:00
edwardxml
f97c2c85bd docs: update ftp with note about active mode
See: https://forum.rclone.org/t/copy-or-sync-from-ftp-server-results-in-source-directory-not-found/20636/4
2021-01-21 13:21:31 +00:00
Nick Craig-Wood
14c0d8a93e cryptdecode: fix formatting 2021-01-21 10:39:51 +00:00
Evan Harris
768ad4de2a docs: Updated crypt/cryptdecode docs with additional info 2021-01-21 09:55:20 +00:00
CokeMine
817987dfc4 docs: Update Onedrive max file size limit 2021-01-21 09:50:48 +00:00
buengese
eb090d3544 compress: check type assertion in SetTier - fixes #4941 2021-01-20 22:59:14 +00:00
Nick Craig-Wood
4daf8b7083 Changelog updates from Version v1.53.4 2021-01-20 22:34:04 +00:00
Nick Craig-Wood
0be69018b8 drive: log that emptying the trash can take some time - fixes #4915 2021-01-19 18:09:36 +00:00
Nick Craig-Wood
9b9ab5f3e8 gcs: Fix Entry doesn't belong in directory "" (same as directory) - ignoring
This change allows directory markers to be non-zero in size.

See: https://forum.rclone.org/t/public-gcs-bucket-and-entry-doesnt-belong-in-directory-same-as-directory/21753/
2021-01-19 16:50:37 +00:00
Nick Craig-Wood
072464cbdb gcs: fix anonymous client to use rclone's HTTP client 2021-01-19 16:50:37 +00:00
Nick Craig-Wood
b0491dec88 Add new email address for buengese to contributors 2021-01-19 16:50:28 +00:00
Nick Craig-Wood
ccfefedb47 Add Patrik Nordlén to contributors 2021-01-19 16:48:37 +00:00
Nick Craig-Wood
2fffcf9e7f Add Janne Johansson to contributors 2021-01-19 16:48:37 +00:00
buengese
a39a5d261c docs/compress: put a warning at the top 2021-01-18 21:42:58 +01:00
buengese
45b57822d5 compress: improve testing 2021-01-18 21:42:58 +01:00
buengese
d8984cd37f compress: correctly handle wrapping of remotes without PutStream
Also fixes ObjectInfo wrapping for Hash and Size - fixes #4928
2021-01-18 21:42:58 +01:00
Patrik Nordlén
80e63af470 jottacloud: Add support for Telia Cloud (#4930) 2021-01-17 02:38:57 +01:00
Janne Johansson
db2c38b21b docs: fix small spelling nit, file -> find in FAQ 2021-01-13 17:36:57 +00:00
Nick Craig-Wood
cef51d58ac jottacloud: fix token refresh failed: is not a regular file error
Before this change the jottacloud token renewer would run and give the
error:

    Token refresh failed: is not a regular file

This is because the refresh runs on the root and it isn't a file.

This was fixed by ignoring that specific error.

See: https://forum.rclone.org/t/jottacloud-crypt-3-gb-copy-runs-for-a-week-without-completing/21173
2021-01-12 17:09:44 +00:00
Nick Craig-Wood
e0b5a13a13 jottacloud: fix token renewer to fix long uploads
See: https://forum.rclone.org/t/jottacloud-crypt-3-gb-copy-runs-for-a-week-without-completing/21173
2021-01-11 16:44:11 +00:00
Nick Craig-Wood
de21356154 Add Denis Neuling to contributors 2021-01-11 16:44:11 +00:00
Ivan Andreev
35a4de2030 chunker: fix case-insensitive NewObject, test metadata detection #4902
- fix test case FsNewObjectCaseInsensitive (PR #4830)
- continue PR #4917, add comments in metadata detection code
- add warning about metadata detection in user documentation
- change metadata size limits, make room for future development
- hide critical chunker parameters from command line
2021-01-10 22:29:24 +03:00
Ivan Andreev
847625822f chunker: improve detection of incompatible metadata #4917
Before this patch chunker required that there is at least one
data chunk to start checking for a composite object.

Now if chunker finds at least one potential temporary or control
chunk, it marks found files as a suspected composite object.
When later rclone tries a concrete operation on the object,
it performs postponed metadata read and decides: is this a native
composite object, incompatible metadata or just garbage.
2021-01-10 21:55:15 +03:00
Nick Craig-Wood
3877df4e62 s3: update help for --s3-no-check-bucket #4913 2021-01-10 17:54:19 +00:00
Denis Neuling
ec73d2fb9a azure-blob-storage: utilize streaming capabilities - #1614 2021-01-10 17:02:42 +00:00
Nick Craig-Wood
a7689d7023 rcserver: fix 500 error when marshalling errors from core/command
Before this change attempting to return an error from core/command
failed with a 500 error and a message about unmarshalable types.

This is because it was attempting to marshal the input parameters
which get _response added to them which contains an unmarshalable
field.

This was fixed by using the original parameters in the error response
rather than the one modified during the error handling.

This also adds end to end tests for the streaming facilities as used
in core/command.
2021-01-10 16:34:46 +00:00
Nick Craig-Wood
847a44e7ad fs/rc: add Copy method to rc.Params 2021-01-10 16:34:46 +00:00
Nick Craig-Wood
b3710c962e rc: fix core/command giving 500 internal error - fixes #4914
Before this change calling core/command gave the error

    error: response object is required expecting *http.ResponseWriter value for key "_response" (was *http.response)

This was because the http.ResponseWriter is an interface not an object.

Removing the `*` fixes the problem.

This also indicates that this bit of code wasn't properly tested.
2021-01-10 16:34:46 +00:00
Nick Craig-Wood
35ccfe1721 Add kice to contributors 2021-01-10 16:34:46 +00:00
kice
ef2bfb9718 onedrive: Support addressing site by server-relative URL (#4761) 2021-01-09 03:26:42 +08:00
Nick Craig-Wood
a97effa27c build: add go1.16beta1 to the build matrix 2021-01-08 12:22:37 +00:00
Nick Craig-Wood
01adee7554 build: raise minimum go version to go1.12 2021-01-08 12:17:09 +00:00
Alex Chen
78a76b0d29 onedrive: remove % and # from the set of encoded characters (#4909)
onedrive: remove % and # from the set of encoded characters

This fixes #4700, fixes #4184, fixes #2920.
2021-01-08 12:07:17 +00:00
Nick Craig-Wood
e775328523 ftp,sftp: Make --tpslimit apply - fixes #4906 2021-01-08 10:29:57 +00:00
Nick Craig-Wood
50344e7792 accounting: factor --tpslimit code into accounting from fshttp 2021-01-08 10:29:57 +00:00
Nick Craig-Wood
d58fdb10db onedrive: enhance link creation with expiry, scope, type and password
This change makes the --expire flag in `rclone link` work.

It also adds the new flags

    --onedrive-link-type
    --onedrive-link-scope
    --onedrive-link-password

See: https://forum.rclone.org/t/create-share-link-within-the-organization-only/21498
2021-01-08 09:22:50 +00:00
Nick Craig-Wood
feaacfd226 hdfs: correct username parameter in integration tests 2021-01-08 09:05:25 +00:00
Nick Craig-Wood
e3c238ac95 build: add -buildmode to cross-compile.go
This builds on

768e4c4735 build: Temporary fix for Windows build errors

But passes the -buildmode flag down to the cross-compile.go command
too.
2021-01-08 08:58:50 +00:00
Nick Craig-Wood
752997c5e8 Add Yury Stankevich to contributors 2021-01-08 08:58:50 +00:00
Yury Stankevich
71edc75ca6 HDFS (Hadoop Distributed File System) implementation - #42
This includes an HDFS docker image to use with the integration tests.

Co-authored-by: Ivan Andreev <ivandeex@gmail.com>
Co-authored-by: Nick Craig-Wood <nick@craig-wood.com>
2021-01-07 09:48:51 +00:00
Ivan Andreev
768e4c4735 build: Temporary fix for Windows build errors
Applies a temporary fix similar to https://github.com/grafana/grafana/pull/28557
before go 1.15.6+ fixes https://github.com/golang/go/issues/40795
2021-01-07 09:48:51 +00:00
Nick Craig-Wood
c553ad5158 serve sftp: fix authentication on one connection blocking others - fixes #4882
Before this change, if one connection was authenticating this would
block any others from authenticating.

This was due to ssh.NewServerConn not being called in a go routine
after the Accept call.

This is fixed by running the ssh authentication in a go routine.

Thanks to @FiloSottile for advice on how to fix this.

See: https://github.com/golang/go/issues/43521
2021-01-06 15:34:07 +00:00
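
A sketch of the accept-loop shape described above using golang.org/x/crypto/ssh. The acceptLoop and handleChannel names are hypothetical and this is not the actual serve sftp code; the point is only that the handshake runs in a goroutine per connection.

    package main

    import (
        "net"

        "golang.org/x/crypto/ssh"
    )

    // handleChannel is a hypothetical per-channel handler; a real server
    // would serve SFTP on "session" channels here.
    func handleChannel(newChan ssh.NewChannel) {
        newChan.Reject(ssh.UnknownChannelType, "not implemented in this sketch")
    }

    // acceptLoop accepts TCP connections and runs the SSH handshake in a
    // goroutine, so one slow or stalled authentication cannot block other
    // clients from connecting.
    func acceptLoop(listener net.Listener, config *ssh.ServerConfig) error {
        for {
            conn, err := listener.Accept()
            if err != nil {
                return err
            }
            go func(c net.Conn) {
                defer c.Close()
                sconn, chans, reqs, err := ssh.NewServerConn(c, config)
                if err != nil {
                    return // handshake or authentication failed
                }
                defer sconn.Close()
                go ssh.DiscardRequests(reqs)
                for newChan := range chans {
                    handleChannel(newChan)
                }
            }(conn)
        }
    }

    func main() {} // sketch only; setting up the listener and ServerConfig is omitted
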
Alex Chen
c66b901320 onedrive: (business only) workaround to replace existing file on server-side copy (#4904) 2021-01-06 10:50:37 +08:00
Nick Craig-Wood
dd67a3d5f5 operations: add size if known to skipped items and JSON log - fixes #4624 2021-01-05 19:44:25 +00:00
Nick Craig-Wood
e972f2c98a log: make it easier to add parameters to JSON logging 2021-01-05 19:44:25 +00:00
Nick Craig-Wood
acbcb1ea9d Add Ilyess Bachiri to contributors 2021-01-05 19:44:25 +00:00
Nick Craig-Wood
d4444375ac Add Kerry Su to contributors 2021-01-05 19:44:25 +00:00
Cnly
00bf40a8ef onedrive: fix server-side copy completely disabled on OneDrive for Business
This fixes a little problem in PR #4903, which is a fix for #4342
2021-01-06 02:57:51 +08:00
Ilyess Bachiri
5d1f947f32 docs: fix a typo in the dedupe docs 2021-01-05 15:46:57 +00:00
Alex Chen
b594cb9430 onedrive: fall back to normal copy if server-side copy unavailable (#4903)
Fixes #4342 by:
* Disabling server-side copy if either drive isn't OneDrive Personal
* Falling back to normal copy if server-side copy fails
2021-01-05 21:26:00 +08:00
Kerry Su
add7a35e55 b2: docs for download_url with private buckets
The current authentication scheme works without creating
a public download endpoint for a private bucket as in the B2 official blog.
On the contrary, if the existing authorization header gets duplicated
in the Cloudflare Workers script, one might receive 401 Unauthorized errors.
2021-01-02 11:33:48 +00:00
Nick Craig-Wood
2af7b61fc3 cmd: fix wording for no retries message
See: https://forum.rclone.org/t/immutable-should-set-retries-1-when-source-and-dest-are-different/21326/
2021-01-02 11:20:02 +00:00
Nick Craig-Wood
cb97c2b0d3 azureblob: fix crash on startup
This was introduced by accidental code deletion in

08b9ede217 azureblob: add support for managed identities
2020-12-31 18:39:09 +00:00
Nick Craig-Wood
35da38e93f operations: fix --immutable error message
This was accidentally changed in

53a1a0e3ef accounting: add reference to completed transfers
2020-12-31 18:16:51 +00:00
Nick Craig-Wood
963c0f28b9 sync: Only print "There was nothing to transfer" if no errors
See: https://forum.rclone.org/t/immutable-should-set-retries-1-when-source-and-dest-are-different/21326
2020-12-31 18:16:51 +00:00
Nick Craig-Wood
b3815dc0c2 sync: fix --immutable errors retrying many times
See: https://forum.rclone.org/t/immutable-should-set-retries-1-when-source-and-dest-are-different/21326
2020-12-31 18:16:51 +00:00
Nick Craig-Wood
8053fc4e16 fs: correct default implementation of fs.CountError 2020-12-31 18:16:51 +00:00
buengese
66c3f2f31f new backend: zoho workdrive - fixes #4533 2020-12-30 17:56:08 +00:00
Nick Craig-Wood
62c9074132 compress: fix icon in docs 2020-12-30 17:56:08 +00:00
Nick Craig-Wood
a854cb9617 webdav: add "Depth: 0" to GET requests to fix bitrix
See: https://forum.rclone.org/t/bitrix24-de-remote-support/21112/
2020-12-30 10:14:50 +00:00
Nick Craig-Wood
fbf9942fe7 Add Ingo Weiss to contributors 2020-12-30 10:14:50 +00:00
Nick Craig-Wood
f425950a52 fs: Always show stats when using --dry-run or --interactive #4624 2020-12-29 21:11:12 +00:00
Ingo Weiss
1d40bc1901 fs: Accumulate stats when using --dry-run
Fixes #4624
2020-12-29 21:11:12 +00:00
Nick Craig-Wood
ba51409c3c sftp: implement keyboard interactive authentication - fixes #4177
Some ssh servers are set up with keyboard interactive authentication
which previously the sftp backend was ignoring.
2020-12-29 19:48:09 +00:00
Nick Craig-Wood
a64fc05385 Add Benjamin Gustin to contributors 2020-12-29 19:48:09 +00:00
Benjamin Gustin
4d54454900 fs/log: don't compile systemd log integration for non unix systems 2020-12-28 23:07:12 +00:00
Nick Craig-Wood
5601652d65 ncdu: add ! (error) and . (unreadable) file flags to go with e (empty) 2020-12-28 17:25:46 +00:00
Adam Plánský
b218bc5bed ncdu: add empty folder flag into ncdu browser
Implemented an empty folder flag for the ncdu browser interface. If there is
an empty folder in the list, the flag e is prepended before the size. If there
is no empty folder, this flag is omitted. It has the same behaviour as the
original ncdu browser. (https://dev.yorhel.nl/ncdu/man)
2020-12-28 17:25:46 +00:00
Nick Craig-Wood
65eee674b9 webdav: fix Open Range requests to fix 4shared mount
Before this change the webdav backend didn't truncate Range requests
to the size of the object. Most webdav providers are OK with this (it
is RFC compliant), but it causes 4shared to return 500 internal error.

Because Range requests are used in mounting, this meant that mounting
didn't work for 4shared.

This change truncates the Range request to the size of the object.

See: https://forum.rclone.org/t/cant-copy-use-files-on-webdav-mount-4shared-that-have-foreign-characters/21334/
2020-12-28 15:45:40 +00:00
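
A tiny sketch of the clamping described above (clampRange is a hypothetical helper, not the actual webdav backend code):

    package main

    import "fmt"

    // clampRange truncates a byte range so it never extends past the end of
    // the object; asking for bytes beyond the end is RFC compliant but makes
    // some providers (e.g. 4shared) return 500.
    func clampRange(start, end, size int64) (int64, int64) {
        if end >= size {
            end = size - 1
        }
        if start > end {
            start = end
        }
        return start, end
    }

    func main() {
        fmt.Println(clampRange(0, 999999, 100)) // 0 99
    }
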
Nick Craig-Wood
72eb74e94a Add Claudio Bantaloukas to contributors 2020-12-28 15:45:40 +00:00
Nick Craig-Wood
6bfec25165 Add Mitsuo Heijo to contributors 2020-12-28 15:45:39 +00:00
Nick Craig-Wood
1c61d51448 Add Brad Ackerman to contributors 2020-12-28 15:45:39 +00:00
Claudio Bantaloukas
f7fe1d766b cmd/ncdu: highlight read errors instead of aborting - fixes #4014
When a directory cannot be walked because of a permissions error
(or any error for that matter), ncdu mode keeps track of the error
and highlights directories that could not be read.

Previously, the error would cause ncdu to abort.

Now, directories with unreadable sub-directories are displayed in yellow and
a message warns that the total may be underestimated.

Unreadable directories themselves are displayed in red along with the error message
2020-12-28 14:08:12 +00:00
albertony
55aec19389 mount: docs: add section about windows filesystem permissions 2020-12-28 13:59:34 +00:00
albertony
9db51117dc vfs: docs: add note on os specific option 2020-12-28 13:59:34 +00:00
albertony
a9c9467210 vfs: docs: add note about use of --transfers in section about performance 2020-12-28 13:59:34 +00:00
albertony
f50e15c77c mount: docs: add note about mounted file system size 2020-12-28 13:59:34 +00:00
albertony
e3191d096f mount: just set default options without checking if customized by user, because it will be overridden anyway 2020-12-28 13:59:34 +00:00
albertony
07c40780b3 mount: also detect if uid or gid are set in same option string: -o uid=123,gid=456 2020-12-28 13:59:34 +00:00
albertony
67b82b4a28 mount: docs: update documentation according to new syntax on windows 2020-12-28 13:59:34 +00:00
albertony
5f47e1e034 mount: cleanup os specific option handling and documentation 2020-12-28 13:59:34 +00:00
albertony
e92cb9e8f8 mount: more user friendly mounting as network drive on windows
Add --network-mode option to activate mounting as a network drive without having to set a volume prefix.
Add support for automatic drive letter assignment (not specific to network drive mounting).
Allow a full network share UNC path in --volname, which will also implicitly activate network drive mounting.
Allow a full network share UNC path as the mountpoint, which will also implicitly activate network drive mounting; the specified path will be used as the volume prefix and the remote will be mounted on an automatically assigned drive letter instead.
2020-12-28 13:59:34 +00:00
Mitsuo Heijo
9ea990d5a2 azureblob: update azure-storage-blob-go to v0.12.0
See https://github.com/Azure/azure-storage-blob-go/blob/master/ChangeLog.md#version-0120
2020-12-28 13:29:38 +00:00
Brad Ackerman
08b9ede217 azureblob: add support for managed identities
Fixes #3213
2020-12-28 13:23:35 +00:00
Nguyễn Hữu Luân
6342499c47 swift: fix deletion of parts of Static Large Object (SLO)
Before this change, deleting SLO objects could leave the parts of the object behind.
2020-12-28 13:21:11 +00:00
Nick Craig-Wood
f347a198f7 azureblob: delete archive tier blobs before update if --azureblob-archive-tier-delete
Before this change, attempting to update an archive tier blob failed
with a 409 error message:

    409 This operation is not permitted on an archived blob.

This change detects if we are overwriting a blob and either generates
the error (if `--azureblob-archive-tier-delete` is not set):

    can't update archive tier blob without --azureblob-archive-tier-delete

Or deletes the blob first before uploading it again (if
`--azureblob-archive-tier-delete` is set).

Fixes #4819
2020-12-28 12:31:24 +00:00
Nick Craig-Wood
060642ad14 flags: improve error message when reading environment vars #4888
The message now includes the flag name to help the user work out what
is happening.

    Invalid value for environment variable "RCLONE_VERSION" when setting default
    for --version: strconv.ParseBool: parsing "yes": invalid syntax
2020-12-28 12:26:23 +00:00
Nick Craig-Wood
629c0d0f65 serve http: fix serving files of unknown length
Before this change serving files of unknown length were always
returned as 0 length files.

This change serves them correctly, but does not support Range:
requests on them.

See: https://forum.rclone.org/t/serve-http-behavior-when-the-size-is-unknown/21319
2020-12-27 22:01:41 +00:00
Nick Craig-Wood
f7404f52e7 azureblob: fix crash when listing outside a SAS URL's root - fixes #4851
Before this change if you attempted to list a remote set up with a SAS
URL outside its container then it would crash the Azure SDK.

A check is done to make sure the root is inside the container when
starting the backend which is usually enough, but when two SAS URL
based remotes are mounted in a union, the union backend attempts to
read paths outside the named container. This was causing a mysterious
crash in the Azure SDK.

This fixes the problem by checking to see if the container in the
listing is the one in the SAS URL before listing the directory and
returning directory not found if it isn't.
2020-12-27 15:55:00 +00:00
Nick Craig-Wood
74a321e156 Add gtorelly to contributors 2020-12-27 15:55:00 +00:00
Nick Craig-Wood
fce885c0cd Add Milly to contributors 2020-12-27 15:55:00 +00:00
Nick Craig-Wood
4028a245b0 Add kelv to contributors 2020-12-27 15:55:00 +00:00
Nick Craig-Wood
c5b07a6714 Add lostheli to contributors 2020-12-27 15:55:00 +00:00
Nick Craig-Wood
b0965bf34f Add Nathan Collins to contributors 2020-12-27 15:54:33 +00:00
Nick Craig-Wood
1eaca9fb45 Add Bob Bagwill to contributors 2020-12-27 15:54:33 +00:00
gtorelly
d833e49db9 docs: Update onedrive setup guide
I followed this guide and successfully set up OneDrive on rclone, but there is a detail which stumped me for some time.
You cannot copy and paste `http://localhost:53682/` from this document to Microsoft's website; you must type it, otherwise it is not recognized as a valid address. I edited the line with this information.
2020-12-27 15:47:56 +00:00
Milly
3aee544cee docs: fix invalid option name in example of crypt config 2020-12-27 15:46:23 +00:00
kelv
9e87f5090f s3: add requester pays option - fixes #301 2020-12-27 15:43:44 +00:00
lostheli
c8cfa43ccc Add a download flag to hashsum and related commands to force rclone to download and hash files locally
This commit modifies the operations.hashSum function by adding an alternate code path. This code path is triggered by passing downloadFlag = True. When activated, rclone will download files from the remote and hash them locally. downloadFlag = False preserves the existing behavior of using the remote to retrieve the hash.

This commit modifies HashLister to support the new hashSum method as well as consolidating the roles of HashLister, HashListerBase64, Md5sum, and Sha1sum.  The printing of hashes from the function defined in HashLister has been revised to work with --progress.  There are light changes to operations.syncFprintf and cmd.startProgress for this.

The unit test operations_test.TestHashSums is modified to support this change and test the download functionality.

The command functions hashsum, md5sum, sha1sum, and dbhashsum are modified to support this change.  A download flag has been added and an output-file flag has been added.  The output-file flag writes hashes to a file instead of stdout to avoid the need to redirect stdout.
2020-12-27 15:40:44 +00:00
negative0
ed7af3f370 plugins: Create plugins files only if webui is enabled. Fixes #4592. May fix #4600. 2020-12-27 15:05:41 +00:00
Nathan Collins
be19d6a403 fshttp: prevent overlap of HTTP headers in logs 2020-12-27 12:44:46 +00:00
Bob Bagwill
46858ee6fe docs: fix typo in FAQ 2020-12-27 12:43:30 +00:00
Nick Craig-Wood
a94e4d803b build: update nfpm syntax to fix build of .deb/.rpm packages 2020-12-26 18:36:16 +00:00
Nick Craig-Wood
dcbe62ab0a build: fix brew install --cask syntax for macOS build 2020-12-26 17:23:43 +00:00
Nick Craig-Wood
121b981b49 build: revert GitHub actions brew fix since this is now fixed
Revert "build: work around GitHub actions brew problem"

This reverts commit a2fa1370c5.
2020-12-26 16:32:26 +00:00
Nick Craig-Wood
73bb9322f5 rc: prefer actual listener address if using ":port" or "addr:0" only
Before this change rclone would turn `localhost:8888` into
`127.0.0.1:8888` which apparently does not work with some browsers.

See: https://github.com/rclone/rclone-webui-react/issues/117
2020-12-26 16:28:54 +00:00
Nick Craig-Wood
bdc2278a30 alias: fix tests after parsing of ... change #4862
This was broken in ea8d13d841

    fs: Fix parsing of .. when joining remotes
2020-12-21 18:23:16 +00:00
Nick Craig-Wood
ea8d13d841 fs: Fix parsing of .. when joining remotes - Fixes #4862
Before this fix setting an alias of `s3:bucket` then using `alias:..`
would use the current working directory!

This fix corrects the path parsing. This parsing is also used in
wrapping backends like crypt, chunker, union etc.

It does not allow looking above the root of the alias, so `alias:..`
now lists `s3:bucket` as you might expect if you did `cd /` then
`ls ..`.
2020-12-18 13:06:39 +00:00
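
A minimal sketch of the path-joining behaviour described above, clamping ".." at the root so it cannot escape the remote (joinRemotePath is an invented name, not rclone's actual parsing code):

    package main

    import (
        "fmt"
        "path"
        "strings"
    )

    // joinRemotePath joins sub onto root without letting ".." escape above
    // the root, so "alias:.." stays within the alias rather than falling
    // back to the current working directory.
    func joinRemotePath(root, sub string) string {
        joined := path.Join("/", root, sub) // Clean()s and resolves ".."
        return strings.TrimPrefix(joined, "/")
    }

    func main() {
        fmt.Println(joinRemotePath("bucket/dir", "..")) // "bucket"
        fmt.Println(joinRemotePath("bucket", "../..")) // "" (clamped at the root)
    }
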
Nick Craig-Wood
e45716cac2 mount: add "." and ".." to directories to match cmount and expectations
See: https://forum.rclone.org/t/empty-directorys-size-for-a-mounted-crypt-remote/21077
2020-12-17 12:14:22 +00:00
Nick Craig-Wood
c98dd8755c log: fix enabling systemd logging when using --log-file
This also moves all the systemd logging decisions to fs/log
2020-12-17 11:55:27 +00:00
Nick Craig-Wood
5ae5e1dd56 docs: add an extra paragraph with links to rclone rc and the HTTP API
See: https://forum.rclone.org/t/rcd-endpoint-documenation/20949
2020-12-11 10:58:59 +00:00
Nick Craig-Wood
4f8ee736b1 vfs: make cache dir absolute before using it to fix path too long errors
If --cache-dir is passed in as a relative path, then rclone will not
be able to turn it into a UNC path under Windows, which means that
file names longer than 260 chars will fail when stored in the cache.

This patch makes the --cache-dir path absolute before using it.

See: https://forum.rclone.org/t/handling-of-long-paths-on-windows-260-characters/20913
2020-12-11 10:00:51 +00:00
Nick Craig-Wood
816e68a274 Add Laurens Janssen to contributors 2020-12-11 10:00:51 +00:00
Laurens Janssen
6ab6c8eefa gcs: Storage class object header support - fixes #3043 2020-12-10 20:06:49 +00:00
Nick Craig-Wood
cb16f42075 b2: Make NewObject use less expensive API calls
Before this change when NewObject was called the b2 backend would list
the directory that the object was in in order to find it.

Unfortunately list calls are Class C transactions and cost more.

This patch switches to using HEAD requests instead. These are Class B
transactions. It is then necessary to parse the headers from response
back into the data that we get from the listing. However B2 returns
exactly the same data, just in a different form.

Rclone will use the old directory listing method when looking for
files with versions as these can't be found via a HEAD request.

This change will particularly benefit --files-from, rclone serve
restic but most operations will see some benefit.
2020-12-09 20:00:22 +00:00
Nick Craig-Wood
7ae84a3c91 Add James Lim to contributors 2020-12-09 20:00:22 +00:00
James Lim
2fd543c989 azure: add support for service principals - fixes #3230
Before: users can only connect to Azure blob containers using the access keys
from the storage account.

After: users can additionally choose connect to Azure blob containers
using service principals. This uses OAuth2 under the hood to exchange
a client ID and client secret for a short-lived access token.

Ref:
- https://github.com/rclone/rclone/issues/3230
- https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-app?tabs=dotnet#well-known-values-for-authentication-with-azure-ad
- https://docs.microsoft.com/en-us/azure/developer/go/azure-sdk-authorization#available-authentication-types-and-methods
- https://gist.github.com/ItalyPaleAle/ec6498bfa81a96f9ca27a2da6f60a770
2020-12-09 17:52:15 +00:00
Nick Craig-Wood
50cf97fc72 sugarsync: fix NewObject for files that differ in case #4830 2020-12-07 17:38:22 +00:00
Nick Craig-Wood
4acd68188b box: fix NewObject for files that differ in case #4830 2020-12-07 17:38:22 +00:00
Nick Craig-Wood
b81b6da3fc fstest: add test for reading object in a case insensitive way #4830 2020-12-07 17:38:22 +00:00
Nick Craig-Wood
56ad6aac4d test_all: remove duplicated config for filefabric backend 2020-12-07 17:38:22 +00:00
Caleb Case
1efb8ea280 backend/tardigrade: Upgrade to uplink v1.4.1
Uplink v1.4.1 provides two important improvements for rclone:

* Fix for a connection handling issue where an open project could
  potentially become unusable because the underlying connection had
  failed.
* Fix for concurrent use issue in drpc.
2020-12-06 11:45:47 +00:00
Matteo Pietro Dazzi
9cfc01f791 build: upgrade docker buildx action 2020-12-06 11:43:34 +00:00
Nick Craig-Wood
86014cebd7 dedupe: add --dedupe-mode list to just list dupes, changing nothing 2020-12-02 16:52:12 +00:00
Nick Craig-Wood
507f861c67 dedupe: add --by-hash to dedupe on hash not file name - fixes #1674 2020-12-02 16:52:12 +00:00
Nick Craig-Wood
e073720a8f dropbox: enable short lived access tokens #4792
Starting September 30th, 2021, the Dropbox OAuth flow will no longer
return long-lived access tokens. It will instead return short-lived
access tokens, and optionally return refresh tokens.

This patch adds the token_access_type=offline parameter which causes
dropbox to return short lived tokens now.
2020-12-02 16:50:16 +00:00
Nick Craig-Wood
ce7cdadb71 Add zhucan to contributors 2020-12-02 16:50:16 +00:00
zhucan
a223b78872 fs: support multi-threads to head dst object
Signed-off-by: zhuc <zhucan.k8s@gmail.com>
2020-12-02 16:26:37 +00:00
buengese
d5181118cc compress: finish docs 2020-12-02 16:30:02 +01:00
buengese
886b3abac1 compress: fix broken tests 2020-12-02 16:30:02 +01:00
Nick Craig-Wood
250f8d9371 drive: allow shortcut resolution and creation to be retried
This was an oversight in the original code - these operations should
always have been retriable.
2020-12-02 15:28:38 +00:00
Anagh Kumar Baranwal
8a429d12cf s3: Added error handling for error code 429 indicating too many requests
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2020-12-01 18:13:31 +00:00
Nick Craig-Wood
8bf4697dc2 serve http/webdav: redirect requests to the base url without the /
When using `--baseurl` before this patch, if a request was made to the
base URL without a trailing / then rclone would return a 404 error.

Unfortunately GVFS / Nautilus makes the request without the /
regardless of what the user put in.

This patch redirects the request to the base URL with a /. So if the
user was using `--baseurl rclone` then a request to
http://localhost/rclone would be redirected with a 308 response to
http://localhost/rclone/

Fixes #4814
2020-12-01 18:00:07 +00:00
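
A sketch of the redirect behaviour described above using the standard library mux (withBaseURL is a hypothetical helper, not rclone's serving code):

    package main

    import (
        "fmt"
        "net/http"
    )

    // withBaseURL mounts handler under baseURL and redirects a request for
    // the bare base URL (no trailing slash) to baseURL+"/" with a 308, so
    // clients such as GVFS/Nautilus that drop the slash still work.
    func withBaseURL(baseURL string, handler http.Handler) http.Handler {
        mux := http.NewServeMux()
        mux.Handle(baseURL+"/", http.StripPrefix(baseURL, handler))
        mux.HandleFunc(baseURL, func(w http.ResponseWriter, r *http.Request) {
            http.Redirect(w, r, baseURL+"/", http.StatusPermanentRedirect)
        })
        return mux
    }

    func main() {
        files := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            fmt.Fprintln(w, "served:", r.URL.Path)
        })
        http.ListenAndServe("localhost:8080", withBaseURL("/rclone", files))
    }
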
Nick Craig-Wood
584523672c dropbox: test file name length before upload to fix upload loop
Before this change rclone would upload the whole of multipart files
before receiving a message from dropbox that the path was too long.

This change hard codes the 255 rune limit and checks that before
uploading any files.

Fixes #4805
2020-12-01 17:56:36 +00:00
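
An illustrative sketch of checking the 255 rune limit before uploading anything (checkPathLength and maxNameLength are invented names, not the actual dropbox backend code):

    package main

    import (
        "fmt"
        "strings"
        "unicode/utf8"
    )

    // maxNameLength mirrors the hard coded 255 rune limit mentioned above.
    const maxNameLength = 255

    // checkPathLength rejects a path whose components are too long before
    // any bytes are uploaded, instead of after a full multipart upload.
    func checkPathLength(remote string) error {
        for _, component := range strings.Split(remote, "/") {
            if utf8.RuneCountInString(component) > maxNameLength {
                return fmt.Errorf("path component %q is longer than %d runes", component, maxNameLength)
            }
        }
        return nil
    }

    func main() {
        fmt.Println(checkPathLength("dir/" + strings.Repeat("x", 300) + ".txt"))
    }
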
Nick Craig-Wood
a9585efd64 dropbox: make malformed_path errors from too long files not retriable
Before this change, rclone would retry files with filenames that were
too long again and again.

This change recognises the malformed_path error that is returned and
marks it as not to be retried, which stops unnecessary retrying of the file.

See #4805
2020-12-01 17:56:36 +00:00
Nick Craig-Wood
f6b1f05e0f dropbox: tidy repeated error message 2020-12-01 17:56:36 +00:00
Nick Craig-Wood
cc8538e0d1 gcs: fix server side copy of large objects - fixes #3724
Before this change rclone was using the copy endpoint to copy large objects.

This can fail for large objects with this error:

    Error 413: Copy spanning locations and/or storage classes could
    not complete within 30 seconds. Please use the Rewrite method

This change makes Copy use the Rewrite method as suggested by the
error message which should be good for any size of copy.
2020-11-30 16:20:30 +00:00
Nick Craig-Wood
f7d9b15707 cmount: don't call host.Umount if a signal has been received
Before this change cgofuse and libatexit would race to see who could
unmount the file system, with unpredictable results. On Linux it could
report an error or not, depending.

This change checks to see if umount is being called from a signal and
if so leaves the unmounting to cgofuse/libfuse.

See #4804
2020-11-29 17:44:00 +00:00
Nick Craig-Wood
83406bc473 atexit: add Signalled() function - set if called from a signal #4804 2020-11-29 17:44:00 +00:00
Nick Craig-Wood
1cfce703b2 mountlib: make sure we don't call umount more than once #4804
Before this change when using CTRL-C with rclone cmount the
mount would be unmounted twice.
2020-11-29 17:44:00 +00:00
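
A small sketch of guarding the unmount with sync.Once so it can be reached from both the signal handler and normal shutdown but only runs once (safeUnmount and doUnmount are hypothetical names, not mountlib's actual code):

    package main

    import (
        "fmt"
        "sync"
    )

    var unmountOnce sync.Once

    // doUnmount is a hypothetical stand-in for the real OS-level unmount.
    func doUnmount(mountpoint string) error {
        fmt.Println("unmounting", mountpoint)
        return nil
    }

    // safeUnmount can be reached both from the CTRL-C signal handler and
    // from normal shutdown; sync.Once guarantees the real unmount runs
    // exactly once.
    func safeUnmount(mountpoint string) (err error) {
        unmountOnce.Do(func() {
            err = doUnmount(mountpoint)
        })
        return err
    }

    func main() {
        _ = safeUnmount("/mnt/remote") // unmounts
        _ = safeUnmount("/mnt/remote") // no-op
    }
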
Nick Craig-Wood
3b24a4cada yandex: set Features.WriteMimeType=false as Yandex ignores mime types
Yandex appears to ignore mime types set as part of the PUT request or
as part of a PATCH request.

The docs make no mention of being able to set a mime type, so set
WriteMimeType=false indicating the backend can't set mime types on
uploaded files.
2020-11-29 17:22:43 +00:00
Nick Craig-Wood
135adb426e filefabric: set Features.Read/WriteMimeType as both supported 2020-11-29 17:22:43 +00:00
Nick Craig-Wood
987dac9fe5 fichier: set Features.ReadMimeType=true as Object.MimeType is supported 2020-11-29 17:22:43 +00:00
Nick Craig-Wood
7fde48a805 dropbox: set Features.ReadMimeType=false as Object.MimeType not supported 2020-11-29 17:22:43 +00:00
Nick Craig-Wood
ce9028bb5b chunker: set Features.ReadMimeType=false as Object.MimeType not supported 2020-11-29 17:22:43 +00:00
buengese
52688a63c6 jottacloud: don't erroneously report support for writing mime types - fixes #4817 2020-11-29 18:11:43 +01:00
Durval Menezes
8904e81cdf Fixed verbal tense
"which are uploaded" -> "which were uploaded" (minor peeve)
2020-11-29 17:32:49 +03:00
Nick Craig-Wood
bcbe393af3 sftp: implement Shutdown method 2020-11-27 17:35:01 +00:00
Nick Craig-Wood
47aada16a0 fs: add Shutdown optional method for backends 2020-11-27 17:35:01 +00:00
Nick Craig-Wood
c22d04aa30 filter: deglobalise to put filter config into the context #4685 2020-11-27 17:28:42 +00:00
Nick Craig-Wood
354b4f19ec rc: add context to flag Reload function #4685 2020-11-27 17:28:42 +00:00
Durval Menezes
0ed1857fa9 webdav: updated docs to show streaming to nextcloud is working 2020-11-27 16:57:43 +00:00
Nick Craig-Wood
dfadd98969 azureblob,memory,pcloud: fix setting of mime types
Before this change the backend was reading the mime type of the
destination object instead of the source object when uploading.

This changes fixes the problem and introduces an integration test for
it.

See: https://forum.rclone.org/t/is-there-a-way-to-get-rclone-copy-to-preserve-metadata/20682/2
2020-11-27 14:40:05 +00:00
edwardxml
19a8b66cee docs: update rclone about docs
Create a full loop of documentation for rclone about, backends overview
and individual backend pages.

Discussion:
https://github.com/rclone/rclone/pull/4774 relates

Previously pull was requested, in part, under ref
https://github.com/rclone/rclone/pull/4801

Notes:
Introduce a tentative draft see-link format at the end of sections to try
rather than lots of in-para links.

Update about.go incl link to list of backends not supporting about.

In list of backends not supporting about, include link to about command
reference.

I appreciate there may be decisions to make going forward about whether
command links should be code formatted, and using proper pretty url
links, but I have fudged that for now.

Update backend pages that do not support about with wording used
previously for ftp - it is in passive voice but I can live with it. (my
own wording and fault). The note is applied to a limitations section. If
one does not already exist it is created (even if there are other
limitations with their own sections)
2020-11-27 14:08:52 +00:00
Anagh Kumar Baranwal
07dee18d6b cmount: Add optional brew tag to throw an error when using mount in the
binaries installed via Homebrew - Fixes #4775

Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2020-11-26 22:56:59 +00:00
Maciej Zimnoch
70e8b11805 accounting: fix data race in Transferred()
startedTransfers is accessed by multiple threads, and it wasn't
protected by a mutex in the Transferred() func.

Fixes #4799
2020-11-26 22:54:38 +00:00
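
A simplified sketch of the race and its fix (StatsInfo here is a stand-in type, not the actual accounting code): every access to the shared slice takes the mutex, including the read in Transferred().

    package main

    import (
        "fmt"
        "sync"
    )

    // StatsInfo is a simplified stand-in: the slice is appended to from
    // many goroutines, so every access needs the mutex.
    type StatsInfo struct {
        mu               sync.Mutex
        startedTransfers []string
    }

    func (s *StatsInfo) AddTransfer(name string) {
        s.mu.Lock()
        defer s.mu.Unlock()
        s.startedTransfers = append(s.startedTransfers, name)
    }

    // Transferred takes the mutex before reading the slice; the missing
    // lock here was the data race being fixed.
    func (s *StatsInfo) Transferred() []string {
        s.mu.Lock()
        defer s.mu.Unlock()
        out := make([]string, len(s.startedTransfers))
        copy(out, s.startedTransfers)
        return out
    }

    func main() {
        s := &StatsInfo{}
        s.AddTransfer("file.txt")
        fmt.Println(s.Transferred())
    }
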
Nick Craig-Wood
9d574c0d63 fshttp: read config from ctx not passed in ConfigInfo #4685 2020-11-26 16:40:12 +00:00
Nick Craig-Wood
2e21c58e6a fs: deglobalise the config #4685
This is done by making fs.Config private and attaching it to the
context instead.

The Config should be obtained with fs.GetConfig and fs.AddConfig
should be used to get a new mutable config that can be changed.
2020-11-26 16:40:12 +00:00
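
A simplified sketch of the context-carried config pattern described above. rclone's real fs.GetConfig and fs.AddConfig differ in detail; the Config type and key here are invented for illustration:

    package main

    import (
        "context"
        "fmt"
    )

    // Config is a simplified stand-in for the global configuration.
    type Config struct {
        Transfers int
    }

    type configKey struct{}

    // GetConfig returns the config stored in ctx, or a default one.
    func GetConfig(ctx context.Context) *Config {
        if c, ok := ctx.Value(configKey{}).(*Config); ok {
            return c
        }
        return &Config{Transfers: 4}
    }

    // AddConfig returns a new context carrying a mutable copy of the
    // current config, so callers can change settings without touching a
    // global variable.
    func AddConfig(ctx context.Context) (context.Context, *Config) {
        c := *GetConfig(ctx) // copy
        return context.WithValue(ctx, configKey{}, &c), &c
    }

    func main() {
        ctx, c := AddConfig(context.Background())
        c.Transfers = 8
        fmt.Println(GetConfig(ctx).Transfers) // 8
    }
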
Nick Craig-Wood
506342317b s3: update docs with a Reducing Costs section - Fixes #2889 2020-11-26 15:05:26 +00:00
Nick Craig-Wood
979bb07c86 filefabric: Implement the Enterprise File Fabric backend
Missing features
- M-Stream support
- Oauth-like flow (soon being changed to oauth)
2020-11-25 21:11:29 +00:00
Nick Craig-Wood
dfeae0e70a Revert "sharefile: fix backend due to API swapping integers for strings"
The API seems to have reverted to what it was before

This reverts commit 095c7bd801.
2020-11-25 20:52:57 +00:00
Nick Craig-Wood
f43a9ac17e pcloud: only use SHA1 hashes in EU region
Apparently only SHA1 hashes are supported in the EU region for
pcloud. This has been confirmed by pCloud support. The EU regions also
support SHA256 hashes which we don't support yet.

https://forum.rclone.org/t/pcloud-to-local-no-hashes-in-common/19440
2020-11-25 20:46:38 +00:00
Nick Craig-Wood
c3ac9319f4 build: attempt to fix docker build by upgrading ilteoood/docker_buildx 2020-11-25 13:01:59 +00:00
Nick Craig-Wood
76ee3060d1 s3: Add MD5 metadata to objects uploaded with SSE-AWS/SSE-C
Before this change, small objects uploaded with SSE-AWS/SSE-C would
not have MD5 sums.

This change adds metadata for these objects in the same way that the
metadata is stored for multipart uploaded objects.

See: #1824 #2827
2020-11-25 12:28:02 +00:00
Nick Craig-Wood
4bb241c435 s3: store md5 in the Object rather than the ETag
This enables us to set the md5 to cache it.

See: #1824 #2827
2020-11-25 12:28:02 +00:00
Nick Craig-Wood
a06f4c2514 s3: fix hashes on small files with aws:kms and sse-c
If rclone is configured for server side encryption - either aws:kms or
sse-c (but not sse-s3) then don't treat the ETags returned on objects
as MD5 hashes.

This fixes being able to upload small files.

Fixes #1824
2020-11-25 12:28:02 +00:00
Nick Craig-Wood
53aa03cc44 s3: complete sse-c implementation
This now can complete all operations with SSE-C enabled.

Fixes #2827
See: https://forum.rclone.org/t/issues-with-aws-s3-sse-c-getting-strange-log-entries-and-errors/20553
2020-11-25 12:28:02 +00:00
Nick Craig-Wood
1ce0b45965 build: Stop tagged releases making a current beta - fixes #4789
Before this change stable releases updated the current beta which mean
confusingly the current beta release would jump backwards from
1.54.0-beta to 1.53.3-beta say.

This commit stops any tagged build making a current beta release. They
will still make beta releases, they just won't update the
rclone*current*.zip and version.txt files.

This also means that a .0 release will not make a current beta like it
does at the moment.
2020-11-25 09:48:29 +00:00
Nick Craig-Wood
7078311a84 compress: add integration tests 2020-11-23 18:02:22 +00:00
Nick Craig-Wood
ef9b717961 Add Marcin Zelent to contributors 2020-11-23 18:02:22 +00:00
Nick Craig-Wood
09246ed9d5 Add Deepak Sah to contributors 2020-11-23 18:02:22 +00:00
Ankur Gupta
33ea55efed fs: parseduration: fixed tests to use UTC time 2020-11-23 17:11:56 +00:00
albertony
79474b2e4c docs: update documentation of commands delete,purge,rmdir,rmdirs 2020-11-23 17:10:08 +00:00
edwardxml
fb001b6c01 docs: improve "rclone about" docs and relate to union mfs limitation in ftp remote
See: https://forum.rclone.org/t/rclone-union-mfs-most-free-space-not-working-for-ftp/20346
2020-11-23 16:34:27 +00:00
Marcin Zelent
2896f51a22 docs: Fixed links to VFS File Caching section 2020-11-23 16:30:58 +00:00
Deepak Sah
5b9115d87a serve ftp: add options to enable TLS - fixes #3640 2020-11-23 16:07:51 +00:00
Nick Craig-Wood
211b08f771 Changelog updates from Version v1.53.3 2020-11-19 17:57:27 +00:00
Nick Craig-Wood
f0905499e3 random: seed math/rand in one place with crypto strong seed #4783
This shouldn't be read as encouraging the use of math/rand instead of
crypto/rand in security sensitive contexts, rather as a safer default
if that does happen by accident.
2020-11-18 17:48:44 +00:00
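
A minimal sketch of seeding math/rand once with a cryptographically strong value, as described above (seedMathRand is an invented name, not the actual random package code):

    package main

    import (
        cryptorand "crypto/rand"
        "encoding/binary"
        mathrand "math/rand"
    )

    // seedMathRand seeds math/rand with a value read from crypto/rand, so
    // accidental use of math/rand does not produce predictable output. It
    // is not a substitute for crypto/rand in security sensitive code.
    func seedMathRand() error {
        var buf [8]byte
        if _, err := cryptorand.Read(buf[:]); err != nil {
            return err
        }
        mathrand.Seed(int64(binary.LittleEndian.Uint64(buf[:])))
        return nil
    }

    func main() {
        _ = seedMathRand()
    }
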
Nick Craig-Wood
7985df3768 random: fix incorrect use of math/rand instead of crypto/rand CVE-2020-28924
For implications see the linked issue.

Fixes #4783
2020-11-18 12:03:01 +00:00
Nick Craig-Wood
095c7bd801 sharefile: fix backend due to API swapping integers for strings
For some reason the API started returning some integers as strings in
JSON. This is probably OK in Javascript but it upsets Go.

This is easily fixed with the `json:"name,size"` struct tag.
2020-11-13 14:37:43 +00:00
Nick Craig-Wood
23469c9c7c ftp: add --ftp-disable-mlsd option to ignore MLSD for really old servers
This is useful for servers which advertise MLSD (eg some versions of
Serv-U) but don't support it properly.

See: https://forum.rclone.org/t/double-folder-names-on-target-destination-paths-ftp/18822
See: https://github.com/jlaffaye/ftp/pull/196
2020-11-13 11:25:34 +00:00
Nick Craig-Wood
2347762b0d vfs: fix "file already exists" error for stale cache files
Before this change if a file was uploaded through a mount, then
deleted externally, trying to upload that file again could give EEXIST
"file already exists".

This was because the file already existing in the cache was confusing
rclone into thinking it already had the file.

The fix is to check that if rclone has a stale cache file then to
ignore it in this situation.

See: https://forum.rclone.org/t/rclone-cant-reuse-filenames/20400
2020-11-13 10:32:21 +00:00
buengese
636fb5344a drive: implement CleanUp workaround for team drives - fixes #2418 2020-11-13 03:30:28 +01:00
buengese
aaa8b7738a compress: initial documentation 2020-11-13 02:31:59 +01:00
buengese
bc4282e49e compress: added experimental compression remote - implements #2098, #1356, #675
This remote implements transparent compression using gzip. It uses JSON for storing metadata.

Co-authored-by: id01 <gaviniboom@gmail.com>
2020-11-13 02:31:59 +01:00
buengese
2812816142 hash: add MultiHasher.Sum() to retrieve a single specific hash 2020-11-13 02:31:59 +01:00
Nick Craig-Wood
ceeac84cfe serve restic: implement object cache
This caches all the objects returned from the List call. This makes
opening them much quicker so speeds up prune and restores. It also
uses fewer transactions. It can be disabled with
`--cache-objects=false`.

This was discovered when using the B2 backend, where the budget was
being blown on list object calls which can be avoided with a bit of
caching.

For a typical 1 million file backup of a laptop or server this will only
use a small amount of extra memory.
2020-11-12 17:58:46 +00:00
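A minimal sketch of the caching idea (illustrative Go, not the serve restic implementation; the type and method names are invented):

    package main

    import (
        "fmt"
        "sync"
    )

    // objectCache remembers objects seen in a List call so later opens can be
    // served without another transaction against the backend.
    type objectCache struct {
        mu    sync.Mutex
        items map[string][]byte
    }

    func newObjectCache() *objectCache {
        return &objectCache{items: make(map[string][]byte)}
    }

    func (c *objectCache) add(name string, data []byte) {
        c.mu.Lock()
        defer c.mu.Unlock()
        c.items[name] = data
    }

    func (c *objectCache) get(name string) ([]byte, bool) {
        c.mu.Lock()
        defer c.mu.Unlock()
        data, ok := c.items[name]
        return data, ok
    }

    func main() {
        c := newObjectCache()
        c.add("keys/abc123", []byte("..."))
        if data, ok := c.get("keys/abc123"); ok {
            fmt.Println("served from cache:", len(data), "bytes")
        }
    }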
Nick Craig-Wood
83d48f65b6 Add Manish Gupta to contributors 2020-11-12 17:58:46 +00:00
Manish Gupta
95d0410baa local: continue listing files/folders when a circular symlink is detected
Before this change a circular symlink would cause rclone to error out from the listings.

After this change rclone will skip a circular symlink and carry on with the
listing, producing an error at the end.

Fixes #4743
2020-11-12 11:32:55 +00:00
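A rough illustration of the behaviour (plain Go, not the local backend's code): resolve symlinks during the listing, skip ones that fail to resolve (for example a loop), and report the first such error at the end.

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // listDir lists a directory, skipping symlinks that cannot be resolved
    // (circular or broken) and returning the first such error at the end.
    func listDir(dir string) error {
        entries, err := os.ReadDir(dir)
        if err != nil {
            return err
        }
        var firstErr error
        for _, e := range entries {
            p := filepath.Join(dir, e.Name())
            if e.Type()&os.ModeSymlink != 0 {
                if _, err := filepath.EvalSymlinks(p); err != nil {
                    fmt.Fprintf(os.Stderr, "skipping %s: %v\n", p, err)
                    if firstErr == nil {
                        firstErr = err
                    }
                    continue
                }
            }
            fmt.Println(p)
        }
        return firstErr
    }

    func main() {
        _ = listDir(".")
    }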
albertony
2708a7569e mount: docs: make note about mounting as network drive less confusing 2020-11-11 20:37:57 +00:00
Nick Craig-Wood
45e8bea8d0 testserver: Make Test{FTP,SFTP,Webdav}Rclone run the current rclone
Before this change the tests were run against the previous stable
rclone/rclone docker image.

This unfortunately masked errors in the integration test server.

This change uses the currently installed rclone to run "rclone serve
ftp" etc. This is installed out of the current code by the integration
test server so will make a better test.
2020-11-10 18:01:15 +00:00
Nick Craig-Wood
f980f230c5 vfs: fix virtual entries causing deleted files to still appear
Before this change, if a file was created on a remote but deleted
externally from that remote then there was potential for the delete to
never be noticed.

The sequence of events was:

- Create file on VFS - creates virtual directory entry
- File deleted externally to remote before the directory refreshed
- Now the file has a virtual add but is not in the listings so will never disappear

This patch fixes it by removing all virtual directory entries except
the following when the directory is re-read.

- On remotes which can't have empty directories: virtual directory
  adds are not flushed. These will remain virtual as long as the
  directory is empty.

- For virtual file add: files that are in the process of being
  uploaded are not flushed

This patch also adds the distinction between virtually added files and
directories.

It also refactors the virtual directory logic to make it easier to follow.

Fixes #4446
2020-11-10 16:47:25 +00:00
Nick Craig-Wood
e204f89685 servetest: add -sub-run flag for running a subset of the backend tests
Use like this (eg in cmd/serve/sftp)

    go test -v -run TestSftp/Normal -sub-run "TestIntegration/FsMkdir/FsPutFiles/FsDirMove"
2020-11-10 16:47:25 +00:00
Nick Craig-Wood
f7efce594b config: add context.Context #3257 #4685
This add config to the Config callback in the backends and the related
config functions.
2020-11-09 18:05:54 +00:00
Nick Craig-Wood
1fb6ad700f accounting: add context.Context #3257 #4685 2020-11-09 18:05:54 +00:00
Nick Craig-Wood
e3fe31f7cb fs: add context.Context to fs.GetModifyWindow #3257 #4685 2020-11-09 18:05:54 +00:00
Nick Craig-Wood
8b96933e58 fs: Add context to fs.Features.Fill & fs.Features.Mask #3257 #4685 2020-11-09 18:05:54 +00:00
Nick Craig-Wood
d69b96a94c test: Add context to mockfs.NewFs #3257 #4685 2020-11-09 18:05:54 +00:00
Nick Craig-Wood
d846210978 fs: Add context to NewFs #3257 #4685
This adds a context.Context parameter to NewFs and related calls.

This is necessary as part of reading config from the context -
backends need to be able to read the global config.
2020-11-09 18:05:54 +00:00
Anagh Kumar Baranwal
30c8b1b84f fs: Fix nil pointer on copy & move operations directly to remote
Fix the copy and move operations that broke in 127f0fc when copying directly
to a remote without a specific destination.

Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2020-11-09 12:36:10 +00:00
Nick Craig-Wood
43e0929339 vfs: fix vfs/refresh calls with fs= parameter
Before this change rclone gave an error when the fs parameter was
provided.

This change removes the fs parameter from the parameters once it has
been read which avoids the error.

See: https://forum.rclone.org/t/precaching-with-vfs-refresh-fails-with-an-error-when-having-multiple-cloud-drives/20267
2020-11-07 14:26:33 +00:00
albertony
6c70c42577 jottacloud: docs: add heading to the example section so that it does not seem to be part of the legacy setup section 2020-11-06 17:52:50 +00:00
Adam Plánský
cd2c06f2a7 testserver: speed up seafile integration test
Before every seafile integration test we run the Seafile server in a docker
environment. We don't have to sleep for 60 seconds to have everything ready before running the integration test; we can assume everything is ready once the Seafile webserver returns status code 200. The Seafile Dockerfile (https://github.com/haiwen/seafile-docker/blob/master/image/seafile/Dockerfile) runs scripts/start.py, which waits for mysql via wait_for_mysql() before calling init_seafile_server() (https://github.com/haiwen/seafile-docker/blob/master/scripts/start.py)
2020-11-03 16:31:39 +00:00
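A small sketch of the "poll until the webserver answers 200" approach (illustrative Go; the URL and timeout are example values):

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    // waitForServer polls url once a second until it returns 200 OK or the
    // timeout expires, replacing a fixed sleep.
    func waitForServer(url string, timeout time.Duration) error {
        deadline := time.Now().Add(timeout)
        for time.Now().Before(deadline) {
            resp, err := http.Get(url)
            if err == nil {
                resp.Body.Close()
                if resp.StatusCode == http.StatusOK {
                    return nil
                }
            }
            time.Sleep(time.Second)
        }
        return fmt.Errorf("server %s not ready after %s", url, timeout)
    }

    func main() {
        if err := waitForServer("http://127.0.0.1:8080", 60*time.Second); err != nil {
            fmt.Println(err)
        }
    }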
Nick Craig-Wood
af55a74bd2 stats: add counter for deleted directories - fixes #4676 2020-11-03 11:47:00 +00:00
Nick Craig-Wood
d00c126cef operations: fix --cutoff-mode hard not cutting off immediately
This failure was noted on the integration tests server.

The fix was to be more careful about which error message was emitted
with which --cutoff-mode
2020-11-02 17:13:19 +00:00
Nick Craig-Wood
bedf6e90d2 onedrive: warn on gateway timeout errors
It seems that when doing chunked uploads to onedrive, if the chunks
take more than 3 minutes or so to upload then they may timeout with
error 504 Gateway Timeout.

This change produces an error (just once) suggesting lowering
`--onedrive-chunk-size` or decreasing `--transfers`.

This is easy to replicate with:

    rclone copy -Pvv --bwlimit 0.05M 20M onedrive:20M

See: https://forum.rclone.org/t/default-onedrive-chunk-size-does-not-work/20010/
2020-11-02 16:53:35 +00:00
Nick Craig-Wood
e8c84d8b53 Add Adam Plánský to contributors 2020-11-02 16:53:35 +00:00
Adam Plánský
f89ff3872d ncdu: add toggle option for average size in directory - key 'a'
Add a toggle option to show the average size in a directory. This toggle
function is for ncdu and is bound to key 'a'.
2020-10-30 15:33:54 +00:00
Adam Plánský
127f0fc64c operations: move and copy log name of the destination object in verbose
If an object is moved or copied, rclone in verbose mode prints the name of
the destination object in the info log.
2020-10-30 15:31:54 +00:00
edwardxml
0cfa89f316 docs: ftp: put limitations in a single section
The topic is mostly about limitations, so all of these are grouped together with a section hyperlink near the top of the page. The intention is to avoid potential duplication and make it more straightforward (there is now one place, and it is essentially just a list, so the wording doesn't need to be elegant) to add notes about limitations in future, harvested from rclone forum postings.

Minor wording alterations that do not intend to change meaning
2020-10-30 14:59:36 +00:00
Nick Craig-Wood
bfcd4113c3 mount: implement mknod to make NFS file creation work - fixes #2115
It turns out that NFS calls mknod in FUSE even though we have create
defined. This was causing EIO errors when creating files.

This patch fixes it by implementing mknod. The way it is implemented
means that to write to an NFS file system you'll need --vfs-cache-mode
writes.
2020-10-29 15:12:36 +00:00
Nick Craig-Wood
0e7fc7613f mount: make mount be cmount under macOS #4393
This also adds an alias to the mount command so it responds as `rclone
cmount` as well as `rclone mount`.
2020-10-29 13:34:39 +00:00
Nick Craig-Wood
8ac2f52b6e mount: disable bazil/fuse based mount on macOS #4393
The library is no longer supported on macOS.
2020-10-29 13:34:39 +00:00
Nick Craig-Wood
1973fc1ecc azureblob: update lib from v0.10.0 to v0.11.0 and fix API breakage
See: https://github.com/Azure/azure-storage-blob-go/issues/226
2020-10-29 13:34:39 +00:00
Nick Craig-Wood
7c39a13281 build: update all dependencies 2020-10-29 13:34:39 +00:00
Nick Craig-Wood
c5c503cbbe Add Adam Plánský to contributors 2020-10-29 13:34:39 +00:00
Josh Soref
d09488b829 docs: update: add Tencent
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-28 18:16:23 +00:00
Josh Soref
0a6196716c docs: style: avoid double-nesting parens
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-28 18:16:23 +00:00
Josh Soref
8bc9b2b883 docs: grammar: examples are examples
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-28 18:16:23 +00:00
Josh Soref
a15f50254a docs: grammar: if, then
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-28 18:16:23 +00:00
Josh Soref
5d4f77a022 docs: grammar: Oxford comma
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-28 18:16:23 +00:00
Josh Soref
a089de0964 docs: grammar: uncountable: links
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-28 18:16:23 +00:00
Josh Soref
3068ae8447 docs: grammar: count agreement: files
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-28 18:16:23 +00:00
Josh Soref
67ff153b0c docs: grammar: article: a-file
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-28 18:16:23 +00:00
Josh Soref
3e1cb8302a docs: spelling: etc.
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-28 18:16:23 +00:00
Josh Soref
e4a87f772f docs: spelling: e.g.
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-28 18:16:23 +00:00
Josh Soref
d4f38d45a5 docs: spelling: high-speed
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-28 18:16:23 +00:00
Josh Soref
bbe7eb35f1 docs: spelling: server-side
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-28 18:16:23 +00:00
edwardxml
87e54f2dde ftp: update wording for flags
Minor wording change to the help for the explicit and implicit FTPS flags, making them more consistent with each other. Add 's' to 'request' because only one 'client' is mentioned.
2020-10-28 15:45:52 +00:00
edwardxml
3f3afe489f docs: mount: minor changes to wording 2020-10-28 15:43:49 +00:00
edwardxml
70b21d9c87 docs: Update mount docs
Eliminate repeated word 'mode'
2020-10-28 15:42:33 +00:00
Adam Plánský
e00bf3d723 ncdu: add sort by average size in directory
Add keyboard shortcut 'A' which sorts by average size in directory.

If files/folders have the same avgSize, sort by actual size.

Fixes: #4699
2020-10-27 13:28:38 +00:00
Nick Craig-Wood
605f2b819a build: fix nfpm install 2020-10-27 13:15:21 +00:00
Nick Craig-Wood
bf2b975359 build: update nfpm and github-release install method to go modules 2020-10-26 19:07:59 +00:00
Nick Craig-Wood
00a5086ff2 Remove accidentally committed binary and fix formatting
These were both committed in

b7253fc1c1 mount: docs: add note that allow-root and allow-other is not relevant on windows
2020-10-26 19:07:42 +00:00
Ivan Andreev
be6a888e50 chunker: skip long local hashing, hash in-transit (fixes #4021)
PR 4614
2020-10-26 20:18:07 +03:00
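The general "hash in transit" technique in Go looks something like the following (an illustrative sketch using MD5 and io.TeeReader, not the chunker backend's code):

    package main

    import (
        "crypto/md5"
        "encoding/hex"
        "fmt"
        "io"
        "strings"
    )

    // copyAndHash copies src to dst while feeding the same bytes into an MD5
    // hash, so no separate pre-read of the source is needed.
    func copyAndHash(dst io.Writer, src io.Reader) (string, error) {
        h := md5.New()
        if _, err := io.Copy(dst, io.TeeReader(src, h)); err != nil {
            return "", err
        }
        return hex.EncodeToString(h.Sum(nil)), nil
    }

    func main() {
        var dst strings.Builder
        sum, err := copyAndHash(&dst, strings.NewReader("hello"))
        if err != nil {
            panic(err)
        }
        fmt.Println(sum)
    }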
Ivan Andreev
dad8447423 mailru: avoid prehashing of large local files
PR 4617
2020-10-26 20:16:52 +03:00
Ivan Andreev
65ff109065 mailru: accept special folders eg camera-upload
Fixes #4025
PR 4690
2020-10-26 20:04:31 +03:00
albertony
b7253fc1c1 mount: docs: add note that allow-root and allow-other is not relevant on windows 2020-10-26 16:21:43 +00:00
Nick Craig-Wood
d143f576c6 Changelog updates from Version v1.53.2 2020-10-26 15:44:26 +00:00
Nick Craig-Wood
a152351a71 build: stop using set-env and set-path in the GitHub actions
A security problem was discovered when using set-env and
set-path. These have been deprecated by GitHub and a new mechanism
introduced.

This patch switches to using the new mechanism which will stop GitHub
warning about the use of the old mechanism.

See: https://github.com/actions/toolkit/security/advisories/GHSA-mfwh-5m23-j46w
2020-10-26 11:19:06 +00:00
Nick Craig-Wood
a2fa1370c5 build: work around GitHub actions brew problem
Brew was failing with

    fatal: 'origin' does not appear to be a git repository
    fatal: Could not read from remote repository.

See: https://github.com/actions/virtual-environments/issues/1811
See: https://github.com/actions/virtual-environments/issues/1869
2020-10-25 18:26:01 +00:00
Nick Craig-Wood
bed83b0b64 test: add ListRetries config parameter to integration tests
Occasionally the b2 tests fail because the integration tests don't
retry hard enough with their new setting of -list-retries 3. Override
this setting to 5 for the b2 tests only.
2020-10-25 18:10:50 +00:00
Nick Craig-Wood
cf0bdad5de union: create root directories if none exist
This fixes the TestUnion: integration test if the /tmp/union[123] dirs
don't exist.
2020-10-25 18:10:49 +00:00
Nick Craig-Wood
85d35ef03c test: remove TestS3Ceph: and TestSwiftCeph: from integration tests
Unfortunately we don't have access to this server any more
2020-10-25 18:10:49 +00:00
Nick Craig-Wood
514d10b314 Add Ingo to contributors 2020-10-25 18:10:49 +00:00
Ingo
5164c3d2d0 genautocomplete: add support to output to stdout 2020-10-22 17:28:33 +01:00
albertony
ffdd0719e7 jottacloud: avoid double url escaping of device/mountpoint - fixes #4697 2020-10-20 17:43:49 +02:00
Nick Craig-Wood
4e2b5389d7 check: make the error count match up in the log message
Before this change we counted the final summary error as an error,
producing confusing log messages like:

    Failed to check with 54 errors: last error was: 53 differences found

This change marks the summary error as already being counted, so the
error message becomes:

    Failed to check with 53 errors: last error was: 53 differences found

This change also returns a listing failure in preference to a summary error.

See: https://forum.rclone.org/t/slow-checksum-validation/19763/22
2020-10-15 12:57:48 +01:00
Nick Craig-Wood
dc4e63631f Add David to contributors 2020-10-15 12:57:48 +01:00
Nick Craig-Wood
275bf456d3 Add Josh Soref to contributors 2020-10-15 12:57:48 +01:00
Nick Craig-Wood
7dfa871095 Add Dan Hipschman to contributors 2020-10-15 12:57:48 +01:00
Nick Craig-Wood
70cc88de22 Add Ameer Dawood to contributors 2020-10-15 12:57:48 +01:00
Nick Craig-Wood
4bc0f46955 Add Dov Murik to contributors 2020-10-15 12:57:48 +01:00
Anagh Kumar Baranwal
5b09599a23 drive: Added flag --drive-stop-on-download-limit to stop transfers when the download limit is exceeded
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2020-10-14 15:51:01 +01:00
David
f4dd8e3fe8 dedupe: minor clarification in docs
dedupe will not delete equal files if they are located in another folder.
2020-10-14 15:31:47 +01:00
Josh Soref
d0888edc0a Spelling fixes
Fix spelling of: above, already, anonymous, associated,
authentication, bandwidth, because, between, blocks, calculate,
candidates, cautious, changelog, cleaner, clipboard, command,
completely, concurrently, considered, constructs, corrupt, current,
daemon, dependencies, deprecated, directory, dispatcher, download,
eligible, ellipsis, encrypter, endpoint, entrieslist, essentially,
existing writers, existing, expires, filesystem, flushing, frequently,
hierarchy, however, implementation, implements, inaccurate,
individually, insensitive, longer, maximum, metadata, modified,
multipart, namedirfirst, nextcloud, obscured, opened, optional,
owncloud, pacific, passphrase, password, permanently, persimmon,
positive, potato, protocol, quota, receiving, recommends, referring,
requires, revisited, satisfied, satisfies, satisfy, semver,
serialized, session, storage, strategies, stringlist, successful,
supported, surprise, temporarily, temporary, transactions, unneeded,
update, uploads, wrapped

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-14 15:21:31 +01:00
Nick Craig-Wood
51a230d7fd fs: Implement UnWrapObjectInfo for getting original object out of src objects 2020-10-14 15:20:06 +01:00
Anagh Kumar Baranwal
fc5b14b620 s3: Added --s3-disable-http2 to disable http/2
Fixes #4673

Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2020-10-13 17:11:22 +01:00
Stephen Harris
bbddadbd04 sftp: remember entered password in AskPass mode
As reported in

  https://github.com/rclone/rclone/issues/4660#issuecomment-705502792

After switching to a password callback function, if the ssh connection
aborts and needs to be reconnected then the user is re-prompted for their
password.  Instead we now remember the password they entered and just give
that back.  We do lose the ability for them to correct mistakes, but that's
the situation from before switching to callbacks.  We keep the benefits
of not asking for passwords until the SSH connection succeeds (right
known_hosts entry, for example).

This required a small refactor of how `f := &Fs{}` was built, so we can
store the saved password in the Fs object
2020-10-13 16:53:11 +01:00
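The shape of the fix can be sketched as a callback that prompts once and replays the saved password on reconnects (illustrative Go; askPassword is a stand-in for the AskPass prompt, not rclone's function):

    package main

    import "fmt"

    // askPassword stands in for prompting the user interactively.
    func askPassword() string { return "secret" }

    // cachedPasswordCallback prompts only on the first call and returns the
    // remembered password on later calls (e.g. after a reconnect).
    func cachedPasswordCallback() func() (string, error) {
        var saved string
        return func() (string, error) {
            if saved == "" {
                saved = askPassword()
            }
            return saved, nil
        }
    }

    func main() {
        cb := cachedPasswordCallback()
        p1, _ := cb()
        p2, _ := cb() // no second prompt
        fmt.Println(p1 == p2)
    }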
Nick Craig-Wood
7428e47ebc local: fix sizes and syncing with --links option on Windows - fixes #4581
Before this change rclone returned the size from the Stat call of the
link. On Windows this always reads as 0, whereas on Unix it reads as
the length of the text in the link. This caused errors like this when
syncing:

    Failed to copy: corrupted on transfer: sizes differ 0 vs 13

This change causes Windows platforms to read the link and use that as
the size of the link instead of 0 which fixes the problem.
2020-10-13 16:29:56 +01:00
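The idea can be sketched as reading the link target text and using its length as the size (illustrative Go, not the local backend's code):

    package main

    import (
        "fmt"
        "os"
    )

    // linkSize returns the length of the symlink's target text, which matches
    // what gets transferred when the link is represented as a text file.
    func linkSize(path string) (int64, error) {
        target, err := os.Readlink(path)
        if err != nil {
            return 0, err
        }
        return int64(len(target)), nil
    }

    func main() {
        if size, err := linkSize("mylink"); err == nil {
            fmt.Println("size:", size)
        }
    }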
Nick Craig-Wood
72083c65ad cmd: make backend env vars show in help as the defaults for backend flags
Before this change

    RCLONE_DRIVE_CHUNK_SIZE=111M rclone help flags | grep drive-chunk-size

Would show the default value, not the setting of RCLONE_DRIVE_CHUNK_SIZE
as the non backend flags do.

This change makes it work as expected by setting the default of the
option to the environment variable.

Fixes #4659
2020-10-13 15:43:58 +01:00
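A minimal sketch of the mechanism with the standard flag package (illustrative only; rclone's flag handling is more involved):

    package main

    import (
        "flag"
        "fmt"
        "os"
    )

    func main() {
        // If the backend environment variable is set, it becomes the default
        // that the help output shows for the corresponding flag.
        def := "8M"
        if v := os.Getenv("RCLONE_DRIVE_CHUNK_SIZE"); v != "" {
            def = v
        }
        chunkSize := flag.String("drive-chunk-size", def, "Upload chunk size")
        flag.Parse()
        fmt.Println("chunk size:", *chunkSize)
    }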
Dan Hipschman
70f92fd6b3 crypt: small simplification, no functionality change 2020-10-12 17:20:39 +01:00
Nick Craig-Wood
a86cedbc24 vfs: Fix --no-modtime to not attempt to set modtimes (as documented)
See: https://forum.rclone.org/t/rclone-mount-with-azure-blob-archive-tier/19414
2020-10-09 17:01:16 +01:00
Nick Craig-Wood
0906f8dd3b onedrive: fix disk usage for sharepoint
Some onedrive sharepoints appear to return all 0s for quota

    "quota":{"deleted":0,"remaining":0,"total":0,"used":0}

This commit detects this and returns unknown for all quota parts.

See: https://forum.rclone.org/t/zero-size-volume-when-mounting-onedrive-sharepoint/19597
2020-10-09 14:11:56 +01:00
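The detection can be sketched like this (illustrative Go; the struct is a stand-in, not the backend's types):

    package main

    import "fmt"

    type quota struct {
        Deleted, Remaining, Total, Used int64
    }

    // usage returns nil pointers when every quota field is zero, signalling
    // that the values should be reported as unknown rather than a 0-size volume.
    func usage(q quota) (total, used *int64) {
        if q.Deleted == 0 && q.Remaining == 0 && q.Total == 0 && q.Used == 0 {
            return nil, nil
        }
        return &q.Total, &q.Used
    }

    func main() {
        t, u := usage(quota{})
        fmt.Println(t == nil, u == nil) // true true -> unknown
    }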
buengese
664213cedb jottacloud: remove clientSecret from config when upgrading to token based authentication - #4645 2020-10-08 11:51:17 +02:00
Ameer Dawood
75a7226174 mount: docs: correction of repeated word 2020-10-07 14:25:31 +01:00
Stephen Harris
9e925becb6 sftp: defer asking for user passwords until the SSH connection succeeds
Issue: 4660
    https://github.com/rclone/rclone/issues/4660

Unexpected side effect: a wrong password allows the user to retry!
2020-10-07 12:01:17 +01:00
Anagh Kumar Baranwal
e3a5bb9b48 s3: Add missing regions for AWS
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2020-10-06 16:54:42 +01:00
Dov Murik
b7eeb0e260 docs: Box: explain about the backslash-like unicode character
Add the full name of the backslash-lookalike unicode character.
2020-10-06 16:47:49 +01:00
Nick Craig-Wood
84d64ddabc Add LaSombra to contributors 2020-10-06 16:42:28 +01:00
Nick Craig-Wood
6c9f92aee6 Add Hekmon to contributors 2020-10-06 16:42:28 +01:00
Nick Craig-Wood
893297760b Add gyutw to contributors 2020-10-06 16:42:28 +01:00
Leo Luan
c5c56cda02 vfs: Add a missed update of used cache space
The missed update can cause incorrect before-cleaning cache stats
and a premature condition broadcast in purgeOld before the cache
space use is reduced below the quota.
2020-10-06 16:35:23 +01:00
Leo Luan
2295123cad vfs: Add exponential backoff during ENOSPC retries
Add an exponentially increasing delay during retries upon ENOSPC errors
to avoid exhausting the 10 retries too soon when the cache space
recovered from item resets is not yet visible from the file system
or is consumed by other large cache writes.
2020-10-06 16:35:23 +01:00
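Generic exponential backoff looks roughly like this (illustrative Go; the retry count and base delay are example values, not the ones used in the VFS cache):

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // retryWithBackoff retries op, doubling the delay after each failure so
    // later attempts give the system more time to recover.
    func retryWithBackoff(op func() error, retries int, base time.Duration) error {
        delay := base
        var err error
        for i := 0; i < retries; i++ {
            if err = op(); err == nil {
                return nil
            }
            time.Sleep(delay)
            delay *= 2
        }
        return err
    }

    func main() {
        attempt := 0
        err := retryWithBackoff(func() error {
            attempt++
            if attempt < 3 {
                return errors.New("no space left on device")
            }
            return nil
        }, 10, 10*time.Millisecond)
        fmt.Println(err, "after", attempt, "attempts")
    }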
Leo Luan
ff0280c0cb vfs: Fix missed concurrency control between some item operations and reset
Item reset is invoked by the cache cleaner for synchronous recovery
from ENOSPC errors. The reset operation removes the cache file and
closes/reopens the downloaders.  Although most parts of reset and
other item operations are done with the item mutex held, the mutex
is released during fd.WriteAt and downloaders calls. We used preAccess
and postAccess calls to serialize Reset, ReadAt, and Open, but missed
some other item operations. The patch adds preAccess/postAccess
calls in Sync, Truncate, Close, WriteAt, and rename.
2020-10-06 16:35:23 +01:00
Leo Luan
64d736a57b vfs: Fix a race condition in retryFailedResets
A failed item reset is saved in the errItems for retryFailedResets
to process.  If the item gets closed before the retry, the item may
have been removed from the c.item array. Previous code did not
account for this condition. This patch adds the check for the
existence of the retry items in retryFailedResets.
2020-10-06 16:35:23 +01:00
Leo Luan
5f1d5a1897 vfs: Fix a deadlock vulnerability in downloaders.Close
The downloaders.Close() call acquires the downloaders' mutex before
calling the wait group wait and the main downloaders thread has a
periodical (5 seconds interval) call to kick its waiters and the
waiter dispatch function tries to get the mutex. So a deadlock can
occur if the Close() call starts and gets the mutex while the main
downloader thread has already got the timer's tick and proceeded to
call kickWaiters. The deadlock happens when the Close call gets
the mutex between the timer's tick and the main downloader thread
getting it. This is a pretty short window, which probably
explains why the problem has not surfaced before, maybe
something like tens of nanoseconds out of 5 seconds (~10^-8).
It took 5 days of continued stressing the Close calls for the
deadlock to appear.
2020-10-06 16:35:23 +01:00
LaSombra
aac2406e19 cmd: add --progress-terminal-title to print ETA to terminal title
Adds a flag, --progress-terminal-title, that when used with --progress,
will print the string `ETA: %s` to the terminal title.

This also adds WriteTerminalTitle to lib/terminal
2020-10-06 16:34:26 +01:00
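Writing to the terminal title generally uses the xterm OSC escape; a hedged sketch of what such a helper might look like (rclone wraps this in lib/terminal, and the exact sequence used there is assumed here):

    package main

    import "fmt"

    // writeTerminalTitle sets the window title using the common
    // xterm-compatible escape sequence: OSC 0 ; title BEL.
    func writeTerminalTitle(title string) {
        fmt.Printf("\033]0;%s\007", title)
    }

    func main() {
        writeTerminalTitle("ETA: 1m30s")
    }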
Stephen Harris
6dc28ef50a sftp: Allow user to optionally check the server's host key to add security
Based on Issue 4087
  https://github.com/rclone/rclone/issues/4087

Current behaviour is insecure.  If the user specifies this value then we
switch to validating the server hostkey and so can detect server changes
or MITM-type attacks.
2020-10-06 16:27:42 +01:00
Hekmon
66def93373 mount cmd: update systemd status with cache stats 2020-10-06 16:21:30 +01:00
Hekmon
c58023a9ba enhance systemd integration
* log level identification
* manual activation with flag
* automatic systemd launch detection
2020-10-06 16:21:30 +01:00
buengese
3edc9ff0b0 jottacloud: remove DirMove workaround as it's not required anymore - also fixes #4655 2020-10-05 20:13:05 +02:00
edwardxml
8e8ae1edc7 crypt: update docs
Mostly proposed changes to tense, clarity and point of view.

There is still some duplication, and further examples would bring additional benefits.
2020-10-05 17:19:00 +01:00
Nick Craig-Wood
20b00db390 operations: fix spurious "--checksum is in use but the source and destination have no hashes in common"
Before this change rclone would emit the message

    --checksum is in use but the source and destination have no hashes in common; falling back to --size-only

When the source or destination hash was missing as well as when the
source and destination had no hashes in common.

This first case is very confusing for users when the source and
destination do have a hash in common.

This change fixes that and makes sure the error message is not emitted
on missing hashes even when there is a hash in common.

See: https://forum.rclone.org/t/source-and-destination-have-no-hashes-in-common-for-unencrypted-drive-to-local-sync/19531
2020-10-05 16:07:05 +01:00
Nick Craig-Wood
db4bbf9521 operations: fix use of --suffix without --backup-dir
As part of the original work adding this feature it was overlooked
that this didn't actually work for full rclone copy/sync.

This commit fixes the problem and adds a test to make sure it stays
working.

See: https://forum.rclone.org/t/suffix-not-working-on-folder-upload-via-ssh-sftp/19526
2020-10-04 16:40:20 +01:00
Nick Craig-Wood
2b7994e739 operations: add logs when need to upload files to set mod times #1505 2020-09-29 17:04:29 +01:00
gyutw
e7fbdac8e0 fichier: increase maximum file size from 100GB to 300GB - fixes #4634 2020-09-28 20:27:17 +02:00
Nick Craig-Wood
41ec712aa9 ftp,sftp: fix docs for usernames
- factor env.CurrentUser out of backend/sftp
- Use env.CurrentUser in ftp and sftp
- fix docs to have correct username
2020-09-27 11:44:05 +01:00
Stephen Harris
17acae2b00 sftp: allow cert based auth via optional pubkey
Discussion at
  https://forum.rclone.org/t/ssh-certificate-based-authentication-does-not-work/19222

Basically we allow the user to specify their own public key cert rather
than letting the SSH client extract the pubkey from the private key.
This allows certificate based authentication to work.
2020-09-27 11:10:13 +01:00
Nick Craig-Wood
57261c7e97 mount: docs: remove incorrect statement about --vfs-cache-mode full
See: https://forum.rclone.org/t/is-this-documentation-correct/19376
2020-09-27 11:04:59 +01:00
Ivan Andreev
d8239e0194 mailru: remove deprecated protocol quirks 2020-09-26 15:38:32 +03:00
Ivan Andreev
004c3796de chunker: disable ListR to fix missing files on GDrive (workaround #3972) 2020-09-26 15:19:16 +03:00
Ivan Andreev
18c7549770 mailru: fix invalid timestamp on corrupted files (fixes #4229) 2020-09-26 15:12:30 +03:00
Nick Craig-Wood
e5190f14ce drive: implement "rclone backend copyid" command for copying files by ID
This allows files to be copied by ID from google drive. These can be
copied to any rclone remote and if the remote is a google drive then
server side copy will be attempted.

Fixes #3625
2020-09-25 17:53:51 +01:00
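As a usage sketch (the exact argument form should be checked against the drive backend docs), the invocation looks something like:

    rclone backend copyid drive: <file-id> remote:path/to/destination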
Nick Craig-Wood
433b73a5a8 accounting: stabilize display order of transfers on Windows
Before this change we sorted transfers in the stats list solely on
time started. However if --check-first was in use then lots of
transfers could be started in the same millisecond. Because Windows
time resolution is only 1 ms, this caused the entries to sort as equal and
bounce around in the list.

This change fixes the sort so that if the time is equal it uses the
name which should stabilize the order.

Fixes #4599
2020-09-24 19:10:37 +01:00
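The tie-break can be sketched with sort.Slice (illustrative Go; the transfer type here is invented):

    package main

    import (
        "fmt"
        "sort"
        "time"
    )

    type transfer struct {
        name    string
        started time.Time
    }

    // sortTransfers orders by start time, falling back to the name when two
    // transfers started in the same instant (e.g. the same 1 ms Windows tick).
    func sortTransfers(ts []transfer) {
        sort.Slice(ts, func(i, j int) bool {
            if !ts[i].started.Equal(ts[j].started) {
                return ts[i].started.Before(ts[j].started)
            }
            return ts[i].name < ts[j].name
        })
    }

    func main() {
        now := time.Now()
        ts := []transfer{{"b.txt", now}, {"a.txt", now}}
        sortTransfers(ts)
        fmt.Println(ts[0].name, ts[1].name) // a.txt b.txt
    }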
Nick Craig-Wood
ab88a3341f Add Russell Cattelan to contributors 2020-09-24 19:10:37 +01:00
Nick Craig-Wood
181da3ce9b Add Christopher Stewart to contributors 2020-09-24 19:10:37 +01:00
Russell Cattelan
b14a58c9b8 cmd/mount2: fix the swapped UID / GID values 2020-09-23 23:06:33 +01:00
buengese
60cc2cba1f sftp: always convert the checksum to lower case - fixes #4518 2020-09-22 03:15:09 +02:00
Ivan Andreev
c797494d88 Merge pull request #4608 from ivandeex/pr-chunker-crypt
chunker: fix upload over crypt (fixes #4570)
2020-09-18 17:58:44 +03:00
Ivan Andreev
e2a57182be mailru: re-enable fixed chunker tests
This reverts commit 9d3d397f50.
2020-09-18 17:56:34 +03:00
Ivan Andreev
8928441466 mailru: fix range requests after june changes on server 2020-09-18 17:56:34 +03:00
Ivan Andreev
0e8965060f mailru: fix uploads after recent changes on server
similar fix: 5efa9958f1
2020-09-18 17:56:34 +03:00
Christopher Stewart
f3cf6fcdd7 s3: fix spelling mistake
Fix spelling mistake "patific" => "pacific"
2020-09-18 12:03:13 +01:00
Nick Craig-Wood
18ccf0f871 vfs: detect and recover from a file being removed externally from the cache
Before this change if a file was removed from the cache while rclone
is running then rclone would not notice and proceed to re-create it
full of zeros.

This change notices files that we expect to have data in going missing
and, if they do, logs an ERROR and recovers.

Deleting files from the cache manually while rclone is running isn't
recommended!

See: https://forum.rclone.org/t/corrupted-data-streaming-after-vfs-meta-files-removed/18997
Fixes #4602
2020-09-18 10:30:02 +01:00
Nick Craig-Wood
313647bcf3 Add Muffin King to contributors 2020-09-18 10:30:02 +01:00
Muffin King
61fe068c90 seafile: fix accessing libraries > 2GB on 32 bit systems - fixes #4588 2020-09-15 21:55:10 +02:00
Nick Craig-Wood
5c49096e11 accounting: fix incorrect speed and transferTime in core/stats
Before this change the code which summed up the existing transfers
over all the stats groups forgot to add the old transfer time and old
transfers in.

This meant that the speed and elapsedTime got increasingly inaccurate
over time due to the transfers being culled from the list but their
time not being accounted for.

This change adds the old transfers into the sum which fixes the
problem.

This was only a problem over the rc.

Fixes #4569
2020-09-15 12:01:18 +01:00
Nick Craig-Wood
a73c78545d Changelog updates from Version v1.53.1 2020-09-13 10:27:24 +01:00
Nick Craig-Wood
e0fd560711 build: update not-in-stable after semver changes 2020-09-12 12:49:16 +01:00
Nick Craig-Wood
6a56ac1032 vfs,local: Log an ERROR if we fail to set the file to be sparse
See: https://forum.rclone.org/t/rclone-1-53-release/18880/73
2020-09-11 15:36:47 +01:00
Nick Craig-Wood
96299629b4 Add wjielai to contributors 2020-09-11 15:36:38 +01:00
Nick Craig-Wood
75de30cfa8 sync: fix --cutoff-mode soft & cautious so it doesn't end the transfer early
Before this fix --cutoff-mode soft and cautious would emit a Fatal
error which stopped the sync immediately.

This fix introduces a new error which is checked in the sync error
processing which stops the sync gracefully.

Fixes #4576
2020-09-09 12:53:21 +01:00
buengese
233bed6a73 dropbox: implement IDer - fixes #2928 2020-09-08 19:04:32 +02:00
buengese
b3964efe4d docs/dropbox: update docs with information regarding the new flags to access shared files and folders 2020-09-08 19:02:35 +02:00
buengese
575f061629 dropbox: add support for viewing shared files and folders 2020-09-08 19:02:35 +02:00
Evan Harris
640d7d3b4e opendrive: Do not retry 400 errors
This type of error is unlikely to be an error that can be resolved by a retry,
and is triggered in #2296 by files with a timestamp before the unix epoch.
2020-09-08 17:15:35 +01:00
Evan Harris
e92294b482 docs: Updated mount command to reflect that it requires Go 1.13 or newer 2020-09-08 16:40:43 +01:00
wjielai
22937e8982 docs: add Tencent COS to s3 provider list - fixes #4468
* add Tencent COS to s3 provider list.

Co-authored-by: wjielai <wjielai@tencent.com>
2020-09-08 16:34:25 +01:00
edwardxml
c3d1474eb9 docs: Add full stops for consistency in rclone --help
closes #4560 closes #4561 closes #4562 closes #4563 closes #4564
2020-09-08 16:26:09 +01:00
Nick Craig-Wood
e2426ea87b docs: note --log-file does append 2020-09-08 16:13:33 +01:00
Nick Craig-Wood
e58a61175f build: fix architecture name in ARMv7 build - fixes #4571
After introducing the arm-v7 build we were accidentally making debs
and rpms with the architecture arm-v7.

This fixes the problem by stripping the version off.
2020-09-08 16:10:52 +01:00
Nick Craig-Wood
05bea46c3e accounting: remove new line from end of --stats-one-line display 2020-09-08 16:10:52 +01:00
Chaitanya Bankanhal
c8a719ae0d webui: Prompt user for updating webui if an update is available 2020-09-07 16:45:00 +01:00
482 changed files with 68144 additions and 35000 deletions

View File

@@ -33,18 +33,18 @@ The Rclone Developers
#### Which OS you are using and how many bits (eg Windows 7, 64 bit)
#### Which OS you are using and how many bits (e.g. Windows 7, 64 bit)
#### Which cloud storage system are you using? (eg Google Drive)
#### Which cloud storage system are you using? (e.g. Google Drive)
#### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
#### The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
#### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
#### A log from the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)

View File

@@ -19,12 +19,12 @@ jobs:
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'go1.11', 'go1.12', 'go1.13', 'go1.14']
job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'go1.13', 'go1.14', 'go1.15']
include:
- job_name: linux
os: ubuntu-latest
go: '1.15.x'
go: '1.16.0-rc1'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
@@ -34,7 +34,7 @@ jobs:
- job_name: mac
os: macOS-latest
go: '1.15.x'
go: '1.16.0-rc1'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
@@ -43,40 +43,32 @@ jobs:
- job_name: windows_amd64
os: windows-latest
go: '1.15.x'
go: '1.16.0-rc1'
gotags: cmount
build_flags: '-include "^windows/amd64" -cgo'
build_args: '-buildmode exe'
quicktest: true
racequicktest: true
deploy: true
- job_name: windows_386
os: windows-latest
go: '1.15.x'
go: '1.16.0-rc1'
gotags: cmount
goarch: '386'
cgo: '1'
build_flags: '-include "^windows/386" -cgo'
build_args: '-buildmode exe'
quicktest: true
deploy: true
- job_name: other_os
os: ubuntu-latest
go: '1.15.x'
go: '1.16.0-rc1'
build_flags: '-exclude "^(windows/|darwin/amd64|linux/)"'
compile_all: true
deploy: true
- job_name: go1.11
os: ubuntu-latest
go: '1.11.x'
quicktest: true
- job_name: go1.12
os: ubuntu-latest
go: '1.12.x'
quicktest: true
- job_name: go1.13
os: ubuntu-latest
go: '1.13.x'
@@ -88,6 +80,12 @@ jobs:
quicktest: true
racequicktest: true
- job_name: go1.15
os: ubuntu-latest
go: '1.15.x'
quicktest: true
racequicktest: true
name: ${{ matrix.job_name }}
runs-on: ${{ matrix.os }}
@@ -107,10 +105,11 @@ jobs:
- name: Set environment variables
shell: bash
run: |
echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi
echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
echo 'BUILD_ARGS=${{ matrix.build_args }}' >> $GITHUB_ENV
if [[ "${{ matrix.goarch }}" != "" ]]; then echo 'GOARCH=${{ matrix.goarch }}' >> $GITHUB_ENV ; fi
if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi
- name: Install Libraries on Linux
shell: bash
@@ -125,7 +124,7 @@ jobs:
shell: bash
run: |
brew update
brew cask install osxfuse
brew install --cask osxfuse
if: matrix.os == 'macOS-latest'
- name: Install Libraries on Windows
@@ -133,10 +132,10 @@ jobs:
run: |
$ProgressPreference = 'SilentlyContinue'
choco install -y winfsp zip
Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
echo "CPATH=C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
if ($env:GOARCH -eq "386") {
choco install -y mingw --forcex86 --force
Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
echo "C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
}
# Copy mingw32-make.exe to make.exe so the same command line
# can be used on Windows as on macOS and Linux
@@ -223,8 +222,8 @@ jobs:
- name: Set environment variables
shell: bash
run: |
echo '::set-env name=GOPATH::${{ runner.workspace }}'
echo '::add-path::${{ runner.workspace }}/bin'
echo 'GOPATH=${{ runner.workspace }}' >> $GITHUB_ENV
echo '${{ runner.workspace }}/bin' >> $GITHUB_PATH
- name: Cross-compile rclone
run: |

View File

@@ -15,7 +15,7 @@ jobs:
with:
fetch-depth: 0
- name: Build and publish image
uses: ilteoood/docker_buildx@439099796bfc03dd9cedeb72a0c7cb92be5cc92c
uses: ilteoood/docker_buildx@1.1.0
with:
tag: beta
imageName: rclone/rclone

View File

@@ -23,7 +23,7 @@ jobs:
id: actual_major_version
run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
- name: Build and publish image
uses: ilteoood/docker_buildx@439099796bfc03dd9cedeb72a0c7cb92be5cc92c
uses: ilteoood/docker_buildx@1.1.0
with:
tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
imageName: rclone/rclone

View File

@@ -12,10 +12,10 @@ When filing an issue, please include the following information if
possible as well as a description of the problem. Make sure you test
with the [latest beta of rclone](https://beta.rclone.org/):
* Rclone version (eg output from `rclone -V`)
* Which OS you are using and how many bits (eg Windows 7, 64 bit)
* The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
* A log of the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
* Rclone version (e.g. output from `rclone -V`)
* Which OS you are using and how many bits (e.g. Windows 7, 64 bit)
* The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
* A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
* if the log contains secrets then edit the file with a text editor first to obscure them
## Submitting a pull request ##
@@ -48,7 +48,7 @@ When ready - run the unit tests for the code you changed
go test -v
Note that you may need to make a test remote, eg `TestSwift` for some
Note that you may need to make a test remote, e.g. `TestSwift` for some
of the unit tests.
Note the top level Makefile targets
@@ -86,7 +86,7 @@ git reset --soft HEAD~2 # This squashes the 2 latest commits together.
git status # Check what will happen, if you made a mistake resetting, you can run git reset 'HEAD@{1}' to undo.
git commit # Add a new commit message.
git push --force # Push the squashed commit to your GitHub repo.
# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also reccommends wizardzines.com
# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also recommends wizardzines.com
```
## CI for your fork ##
@@ -170,7 +170,7 @@ with modules beneath.
* log - logging facilities
* march - iterates directories in lock step
* object - in memory Fs objects
* operations - primitives for sync, eg Copy, Move
* operations - primitives for sync, e.g. Copy, Move
* sync - sync directories
* walk - walk a directory
* fstest - provides integration test framework
@@ -178,7 +178,7 @@ with modules beneath.
* mockdir - mocks an fs.Directory
* mockobject - mocks an fs.Object
* test_all - Runs integration tests for everything
* graphics - the images used in the website etc
* graphics - the images used in the website, etc.
* lib - libraries used by the backend
* atexit - register functions to run when rclone exits
* dircache - directory ID to name caching
@@ -202,12 +202,12 @@ for the flag help, the remainder is shown to the user in `rclone
config` and is added to the docs with `make backenddocs`.
The only documentation you need to edit are the `docs/content/*.md`
files. The MANUAL.*, rclone.1, web site etc are all auto generated
files. The MANUAL.*, rclone.1, web site, etc. are all auto generated
from those during the release process. See the `make doc` and `make
website` targets in the Makefile if you are interested in how. You
don't need to run these when adding a feature.
Documentation for rclone sub commands is with their code, eg
Documentation for rclone sub commands is with their code, e.g.
`cmd/ls/ls.go`.
Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
@@ -364,7 +364,7 @@ See the [testing](#testing) section for more information on integration tests.
Add your fs to the docs - you'll need to pick an icon for it from
[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
alphabetical order of full name of remote (eg `drive` is ordered as
alphabetical order of full name of remote (e.g. `drive` is ordered as
`Google Drive`) but with the local file system last.
* `README.md` - main GitHub page

View File

@@ -11,7 +11,7 @@ Current active maintainers of rclone are:
| Fabian Möller | @B4dM4n | |
| Alex Chen | @Cnly | onedrive backend |
| Sandeep Ummadi | @sandeepkru | azureblob backend |
| Sebastian Bünger | @buengese | jottacloud & yandex backends |
| Sebastian Bünger | @buengese | jottacloud, yandex & compress backends |
| Ivan Andreev | @ivandeex | chunker & mailru backends |
| Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend |
@@ -37,7 +37,7 @@ Rclone uses the labels like this:
* `good first issue` - mark these if you find a small self contained issue - these get shown to new visitors to the project
* `help` wanted - mark these if you find a self contained issue - these get shown to new visitors to the project
* `IMPORTANT` - note to maintainers not to forget to fix this for the release
* `maintenance` - internal enhancement, code re-organisation etc
* `maintenance` - internal enhancement, code re-organisation, etc.
* `Needs Go 1.XX` - waiting for that version of Go to be released
* `question` - not a `bug` or `enhancement` - direct to the forum for next time
* `Remote: XXX` - which rclone backend this affects
@@ -45,7 +45,7 @@ Rclone uses the labels like this:
If it turns out to be a bug or an enhancement it should be tagged as such, with the appropriate other tags. Don't forget the "good first issue" tag to give new contributors something easy to do to get going.
When a ticket is tagged it should be added to a milestone, either the next release, the one after, Soon or Help Wanted. Bugs can be added to the "Known Bugs" milestone if they aren't planned to be fixed or need to wait for something (eg the next go release).
When a ticket is tagged it should be added to a milestone, either the next release, the one after, Soon or Help Wanted. Bugs can be added to the "Known Bugs" milestone if they aren't planned to be fixed or need to wait for something (e.g. the next go release).
The milestones have these meanings:

MANUAL.html (20498 lines changed), MANUAL.md (5211 lines changed) and MANUAL.txt (21034 lines changed) regenerated; diffs suppressed because they are too large.

View File

@@ -46,13 +46,13 @@ endif
.PHONY: rclone test_all vars version
rclone:
go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS)
mkdir -p `go env GOPATH`/bin/
cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
test_all:
go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all
vars:
@echo SHELL="'$(SHELL)'"
@@ -93,8 +93,7 @@ build_dep:
# Get the release dependencies we only install on linux
release_dep_linux:
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
go run bin/get-github-release.go -extract github-release aktau/github-release 'linux-amd64-github-release.tar.bz2'
cd /tmp && go get github.com/goreleaser/nfpm/v2/...
# Get the release dependencies we only install on Windows
release_dep_windows:
@@ -188,10 +187,10 @@ upload_github:
./bin/upload-github $(TAG)
cross: doc
go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)
go run bin/cross-compile.go -release current $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
beta:
go run bin/cross-compile.go $(BUILDTAGS) $(TAG)
go run bin/cross-compile.go $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
@echo Beta release ready at https://pub.rclone.org/$(TAG)/
@@ -199,23 +198,23 @@ log_since_last_release:
git log $(LAST_TAG)..
compile_all:
go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG)
go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
ci_upload:
sudo chown -R $$USER build
find build -type l -delete
gzip -r9v build
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
ifndef BRANCH_PATH
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
endif
@echo Beta release ready at $(BETA_URL)/testbuilds
ci_beta:
git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
endif
@echo Beta release ready at $(BETA_URL)
@@ -233,7 +232,7 @@ tag: retag doc
@echo "Edit the new changelog in docs/content/changelog.md"
@echo "Then commit all the changes"
@echo git commit -m \"Version $(VERSION)\" -a -v
@echo "And finally run make retag before make cross etc"
@echo "And finally run make retag before make cross, etc."
retag:
@echo "Version is $(VERSION)"

View File

@@ -30,11 +30,13 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* GetSky [:page_facing_up:](https://rclone.org/jottacloud/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
* HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
@@ -64,9 +66,11 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
* Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
* The local filesystem [:page_facing_up:](https://rclone.org/local/)
Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
@@ -81,6 +85,7 @@ Please see [the full list of all storage providers and their features](https://r
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
* Can sync to and from network, e.g. two different cloud accounts
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
* Optional transparent compression ([Compress](https://rclone.org/compress/))
* Optional encryption ([Crypt](https://rclone.org/crypt/))
* Optional cache ([Cache](https://rclone.org/cache/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))

View File

@@ -4,7 +4,7 @@ This file describes how to make the various kinds of releases
## Extra required software for making a release
* [github-release](https://github.com/aktau/github-release) for uploading packages
* [gh the github cli](https://github.com/cli/cli) for uploading packages
* pandoc for making the html and man pages
## Making a release
@@ -21,7 +21,7 @@ This file describes how to make the various kinds of releases
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX.0"
* make retag
* git push --tags origin master
* git push --follow-tags origin
* # Wait for the GitHub builds to complete then...
* make fetch_binaries
* make tarball
@@ -48,8 +48,8 @@ If rclone needs a point release due to some horrendous bug:
Set vars
* BASE_TAG=v1.XX # eg v1.52
* NEW_TAG=${BASE_TAG}.Y # eg v1.52.1
* BASE_TAG=v1.XX # e.g. v1.52
* NEW_TAG=${BASE_TAG}.Y # e.g. v1.52.1
* echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1
First make the release branch. If this is a second point release then
@@ -65,9 +65,8 @@ Now
* git cherry-pick any fixes
* Do the steps as above
* make startstable
* NB this overwrites the current beta so we need to do this - FIXME is this true any more?
* git co master
* # cherry pick the changes to the changelog
* `#` cherry pick the changes to the changelog - check the diff to make sure it is correct
* git checkout ${BASE_TAG}-stable docs/content/changelog.md
* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
* git push

View File

@@ -1 +1 @@
v1.54.0
v1.55.0

View File

@@ -1,6 +1,7 @@
package alias
import (
"context"
"errors"
"strings"
@@ -34,7 +35,7 @@ type Options struct {
// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -47,5 +48,5 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if strings.HasPrefix(opt.Remote, name+":") {
return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
}
return cache.Get(fspath.JoinRootPath(opt.Remote, root))
return cache.Get(ctx, fspath.JoinRootPath(opt.Remote, root))
}

View File

@@ -19,7 +19,7 @@ var (
)
func prepare(t *testing.T, root string) {
config.LoadConfig()
config.LoadConfig(context.Background())
// Configure the remote
config.FileSet(remoteName, "type", "alias")
@@ -54,21 +54,22 @@ func TestNewFS(t *testing.T) {
{"four/under four.txt", 9, false},
}},
{"four", "..", "", true, []testEntry{
{"four", -1, true},
{"one%.txt", 6, false},
{"three", -1, true},
{"two.html", 7, false},
{"five", -1, true},
{"under four.txt", 9, false},
}},
{"four", "../three", "", true, []testEntry{
{"", "../../three", "", true, []testEntry{
{"underthree.txt", 9, false},
}},
{"four", "../../five", "", true, []testEntry{
{"underfive.txt", 6, false},
}},
} {
what := fmt.Sprintf("test %d remoteRoot=%q, fsRoot=%q, fsList=%q", testi, test.remoteRoot, test.fsRoot, test.fsList)
remoteRoot, err := filepath.Abs(filepath.FromSlash(path.Join("test/files", test.remoteRoot)))
require.NoError(t, err, what)
prepare(t, remoteRoot)
f, err := fs.NewFs(fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
require.NoError(t, err, what)
gotEntries, err := f.List(context.Background(), test.fsList)
require.NoError(t, err, what)
@@ -90,7 +91,7 @@ func TestNewFS(t *testing.T) {
func TestNewFSNoRemote(t *testing.T) {
prepare(t, "")
f, err := fs.NewFs(fmt.Sprintf("%s:", remoteName))
f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:", remoteName))
require.Error(t, err)
require.Nil(t, f)
@@ -98,7 +99,7 @@ func TestNewFSNoRemote(t *testing.T) {
func TestNewFSInvalidRemote(t *testing.T) {
prepare(t, "not_existing_test_remote:")
f, err := fs.NewFs(fmt.Sprintf("%s:", remoteName))
f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:", remoteName))
require.Error(t, err)
require.Nil(t, f)

View File

@@ -9,13 +9,16 @@ import (
_ "github.com/rclone/rclone/backend/box"
_ "github.com/rclone/rclone/backend/cache"
_ "github.com/rclone/rclone/backend/chunker"
_ "github.com/rclone/rclone/backend/compress"
_ "github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/drive"
_ "github.com/rclone/rclone/backend/dropbox"
_ "github.com/rclone/rclone/backend/fichier"
_ "github.com/rclone/rclone/backend/filefabric"
_ "github.com/rclone/rclone/backend/ftp"
_ "github.com/rclone/rclone/backend/googlecloudstorage"
_ "github.com/rclone/rclone/backend/googlephotos"
_ "github.com/rclone/rclone/backend/hdfs"
_ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/hubic"
_ "github.com/rclone/rclone/backend/jottacloud"
@@ -40,4 +43,5 @@ import (
_ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/webdav"
_ "github.com/rclone/rclone/backend/yandex"
_ "github.com/rclone/rclone/backend/zoho"
)

View File

@@ -70,8 +70,8 @@ func init() {
Prefix: "acd",
Description: "Amazon Drive",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
err := oauthutil.Config("amazon cloud drive", name, m, acdConfig, nil)
Config: func(ctx context.Context, name string, m configmap.Mapper) {
err := oauthutil.Config(ctx, "amazon cloud drive", name, m, acdConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
@@ -144,6 +144,7 @@ type Fs struct {
name string // name of this remote
features *fs.Features // optional features
opt Options // options for this Fs
ci *fs.ConfigInfo // global config
c *acd.Client // the connection to the acd server
noAuthClient *http.Client // unauthenticated http client
root string // the path we are working on
@@ -239,8 +240,7 @@ func filterRequest(req *http.Request) {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -248,7 +248,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}
root = parsePath(root)
baseClient := fshttp.NewClient(fs.Config)
baseClient := fshttp.NewClient(ctx)
if do, ok := baseClient.Transport.(interface {
SetRequestFilter(f func(req *http.Request))
}); ok {
@@ -256,25 +256,27 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
} else {
fs.Debugf(name+":", "Couldn't add request filter - large file downloads will fail")
}
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, acdConfig, baseClient)
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, acdConfig, baseClient)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Amazon Drive")
}
c := acd.NewClient(oAuthClient)
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
ci: ci,
c: c,
pacer: fs.NewPacer(pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
noAuthClient: fshttp.NewClient(fs.Config),
pacer: fs.NewPacer(ctx, pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
noAuthClient: fshttp.NewClient(ctx),
}
f.features = (&fs.Features{
CaseInsensitive: true,
ReadMimeType: true,
CanHaveEmptyDirectories: true,
}).Fill(f)
}).Fill(ctx, f)
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
@@ -502,7 +504,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if err != nil {
return nil, err
}
maxTries := fs.Config.LowLevelRetries
maxTries := f.ci.LowLevelRetries
var iErr error
for tries := 1; tries <= maxTries; tries++ {
entries = nil
@@ -523,7 +525,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
entries = append(entries, o)
default:
// ignore ASSET etc
// ignore ASSET, etc.
}
return false
})
@@ -680,7 +682,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return err
}
// Move src to this remote using server side move operations.
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -717,7 +719,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
dstObj fs.Object
srcErr, dstErr error
)
for i := 1; i <= fs.Config.LowLevelRetries; i++ {
for i := 1; i <= f.ci.LowLevelRetries; i++ {
_, srcErr = srcObj.fs.NewObject(ctx, srcObj.remote) // try reading the object
if srcErr != nil && srcErr != fs.ErrorObjectNotFound {
// exit if error on source
@@ -732,7 +734,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// finished if src not found and dst found
break
}
fs.Debugf(src, "Wait for directory listing to update after move %d/%d", i, fs.Config.LowLevelRetries)
fs.Debugf(src, "Wait for directory listing to update after move %d/%d", i, f.ci.LowLevelRetries)
time.Sleep(1 * time.Second)
}
return dstObj, dstErr
@@ -745,7 +747,7 @@ func (f *Fs) DirCacheFlush() {
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -893,7 +895,7 @@ func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
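The change running through this file (and every backend below) is that the calling context is now threaded into the constructor instead of being created internally: NewFs takes a ctx, fshttp.NewClient, the pacer and Fill receive it, and per-context config comes from fs.GetConfig(ctx). A minimal, self-contained sketch of that pattern follows; it is not rclone code, and the Client type, NewClient and the probe URL are invented for illustration only.

package main

import (
    "context"
    "fmt"
    "net/http"
    "time"
)

// Client stands in for a backend Fs.
type Client struct {
    hc *http.Client
}

// NewClient mirrors the new NewFs(ctx, ...) shape: the context comes from
// the caller and is used for any setup requests rather than being created
// with context.Background() inside the constructor.
func NewClient(ctx context.Context) (*Client, error) {
    hc := &http.Client{Timeout: 10 * time.Second}
    req, err := http.NewRequestWithContext(ctx, "GET", "https://example.com/ping", nil)
    if err != nil {
        return nil, err
    }
    if resp, err := hc.Do(req); err == nil {
        resp.Body.Close() // a real constructor would inspect the probe response
    }
    return &Client{hc: hc}, nil
}

func main() {
    // The caller owns the context, so cancellation and timeouts propagate
    // into everything the constructor does.
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()
    c, err := NewClient(ctx)
    if err != nil {
        fmt.Println("setup failed:", err)
        return
    }
    fmt.Println("client ready, timeout:", c.hc.Timeout)
}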

View File

@@ -1,17 +1,17 @@
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
// +build !plan9,!solaris,!js,go1.13
// +build !plan9,!solaris,!js,go1.14
package azureblob
import (
"bytes"
"context"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
@@ -21,9 +21,9 @@ import (
"github.com/Azure/azure-pipeline-go/pipeline"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
@@ -33,10 +33,9 @@ import (
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/readers"
"golang.org/x/sync/errgroup"
)
const (
@@ -47,15 +46,12 @@ const (
modTimeKey = "mtime"
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
maxTotalParts = 50000 // in multipart upload
storageDefaultBaseURL = "blob.core.windows.net"
// maxUncommittedSize = 9 << 30 // can't upload bigger than this
defaultChunkSize = 4 * fs.MebiByte
maxChunkSize = 100 * fs.MebiByte
defaultUploadCutoff = 256 * fs.MebiByte
maxUploadCutoff = 256 * fs.MebiByte
defaultAccessTier = azblob.AccessTierNone
maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
defaultChunkSize = 4 * fs.MebiByte
maxChunkSize = 100 * fs.MebiByte
uploadConcurrency = 4
defaultAccessTier = azblob.AccessTierNone
maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
// Default storage account, key and blob endpoint for emulator support,
// though it is a base64 key checked in here, it is publicly available secret.
emulatorAccount = "devstoreaccount1"
@@ -65,6 +61,10 @@ const (
memoryPoolUseMmap = false
)
var (
errCantUpdateArchiveTierBlobs = fserrors.NoRetryError(errors.New("can't update archive tier blob without --azureblob-archive-tier-delete"))
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -74,12 +74,51 @@ func init() {
Options: []fs.Option{{
Name: "account",
Help: "Storage Account Name (leave blank to use SAS URL or Emulator)",
}, {
Name: "service_principal_file",
Help: `Path to file containing credentials for use with a service principal.
Leave blank normally. Needed only if you want to use a service principal instead of interactive login.
$ az ad sp create-for-rbac --name "<name>" \
--role "Storage Blob Data Owner" \
--scopes "/subscriptions/<subscription>/resourceGroups/<resource-group>/providers/Microsoft.Storage/storageAccounts/<storage-account>/blobServices/default/containers/<container>" \
> azure-principal.json
See [Use Azure CLI to assign an Azure role for access to blob and queue data](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli)
for more details.
`,
}, {
Name: "key",
Help: "Storage Account Key (leave blank to use SAS URL or Emulator)",
}, {
Name: "sas_url",
Help: "SAS URL for container level access only\n(leave blank if using account/key or Emulator)",
}, {
Name: "use_msi",
Help: `Use a managed service identity to authenticate (only works in Azure)
When true, use a [managed service identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/)
to authenticate to Azure Storage instead of a SAS token or account key.
If the VM(SS) on which this program is running has a system-assigned identity, it will
be used by default. If the resource has no system-assigned but exactly one user-assigned identity,
the user-assigned identity will be used by default. If the resource has multiple user-assigned
identities, the identity to use must be explicitly specified using exactly one of the msi_object_id,
msi_client_id, or msi_mi_res_id parameters.`,
Default: false,
}, {
Name: "msi_object_id",
Help: "Object ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_mi_res_id specified.",
Advanced: true,
}, {
Name: "msi_client_id",
Help: "Object ID of the user-assigned MSI to use, if any. Leave blank if msi_object_id or msi_mi_res_id specified.",
Advanced: true,
}, {
Name: "msi_mi_res_id",
Help: "Azure resource ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_object_id specified.",
Advanced: true,
}, {
Name: "use_emulator",
Help: "Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint)",
@@ -90,8 +129,7 @@ func init() {
Advanced: true,
}, {
Name: "upload_cutoff",
Help: "Cutoff for switching to chunked upload (<= 256MB).",
Default: defaultUploadCutoff,
Help: "Cutoff for switching to chunked upload (<= 256MB). (Deprecated)",
Advanced: true,
}, {
Name: "chunk_size",
@@ -129,6 +167,24 @@ If blobs are in "archive tier" at remote, trying to perform data transfer
operations from remote will not be allowed. User should first restore by
tiering blob to "Hot" or "Cool".`,
Advanced: true,
}, {
Name: "archive_tier_delete",
Default: false,
Help: fmt.Sprintf(`Delete archive tier blobs before overwriting.
Archive tier blobs cannot be updated. So without this flag, if you
attempt to update an archive tier blob, then rclone will produce the
error:
%v
With this flag set then before rclone attempts to overwrite an archive
tier blob, it will delete the existing blob before uploading its
replacement. This has the potential for data loss if the upload fails
(unlike updating a normal blob) and also may cost more since deleting
archive tier blobs early may be chargeable.
`, errCantUpdateArchiveTierBlobs),
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Don't store MD5 checksum with object metadata.
@@ -167,19 +223,24 @@ This option controls how often unused buffers will be removed from the pool.`,
// Options defines the configuration for this backend
type Options struct {
Account string `config:"account"`
Key string `config:"key"`
Endpoint string `config:"endpoint"`
SASURL string `config:"sas_url"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
ListChunkSize uint `config:"list_chunk"`
AccessTier string `config:"access_tier"`
UseEmulator bool `config:"use_emulator"`
DisableCheckSum bool `config:"disable_checksum"`
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
Enc encoder.MultiEncoder `config:"encoding"`
Account string `config:"account"`
ServicePrincipalFile string `config:"service_principal_file"`
Key string `config:"key"`
UseMSI bool `config:"use_msi"`
MSIObjectID string `config:"msi_object_id"`
MSIClientID string `config:"msi_client_id"`
MSIResourceID string `config:"msi_mi_res_id"`
Endpoint string `config:"endpoint"`
SASURL string `config:"sas_url"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
ListChunkSize uint `config:"list_chunk"`
AccessTier string `config:"access_tier"`
ArchiveTierDelete bool `config:"archive_tier_delete"`
UseEmulator bool `config:"use_emulator"`
DisableCheckSum bool `config:"disable_checksum"`
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote azure server
@@ -187,6 +248,7 @@ type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed config options
ci *fs.ConfigInfo // global config
features *fs.Features // optional features
client *http.Client // http client we are using
svcURL *azblob.ServiceURL // reference to serviceURL
@@ -197,6 +259,7 @@ type Fs struct {
isLimited bool // if limited to one container
cache *bucket.Cache // cache for container creation status
pacer *fs.Pacer // To pace and retry the API calls
imdsPacer *fs.Pacer // Same but for IMDS
uploadToken *pacer.TokenDispenser // control concurrency
pool *pool.Pool // memory pool
}
@@ -274,7 +337,7 @@ func validateAccessTier(tier string) bool {
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
401, // Unauthorized (eg "Token has expired")
401, // Unauthorized (e.g. "Token has expired")
408, // Request Timeout
429, // Rate exceeded.
500, // Get occasional 500 Internal Server Error
@@ -299,6 +362,8 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
return true, err
}
}
} else if httpErr, ok := err.(httpError); ok {
return fserrors.ShouldRetryHTTP(httpErr.Response, retryErrorCodes), err
}
return fserrors.ShouldRetry(err), err
}
@@ -322,21 +387,6 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
return
}
func checkUploadCutoff(cs fs.SizeSuffix) error {
if cs > maxUploadCutoff {
return errors.Errorf("%v must be less than or equal to %v", cs, maxUploadCutoff)
}
return nil
}
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadCutoff(cs)
if err == nil {
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
}
return
}
// httpClientFactory creates a Factory object that sends HTTP requests
// to rclone's http.Client.
//
@@ -353,6 +403,50 @@ func httpClientFactory(client *http.Client) pipeline.Factory {
})
}
type servicePrincipalCredentials struct {
AppID string `json:"appId"`
Password string `json:"password"`
Tenant string `json:"tenant"`
}
const azureActiveDirectoryEndpoint = "https://login.microsoftonline.com/"
const azureStorageEndpoint = "https://storage.azure.com/"
// newServicePrincipalTokenRefresher takes the client ID and secret, and returns a refresh-able access token.
func newServicePrincipalTokenRefresher(ctx context.Context, credentialsData []byte) (azblob.TokenRefresher, error) {
var spCredentials servicePrincipalCredentials
if err := json.Unmarshal(credentialsData, &spCredentials); err != nil {
return nil, errors.Wrap(err, "error parsing credentials from JSON file")
}
oauthConfig, err := adal.NewOAuthConfig(azureActiveDirectoryEndpoint, spCredentials.Tenant)
if err != nil {
return nil, errors.Wrap(err, "error creating oauth config")
}
// Create service principal token for Azure Storage.
servicePrincipalToken, err := adal.NewServicePrincipalToken(
*oauthConfig,
spCredentials.AppID,
spCredentials.Password,
azureStorageEndpoint)
if err != nil {
return nil, errors.Wrap(err, "error creating service principal token")
}
// Wrap token inside a refresher closure.
var tokenRefresher azblob.TokenRefresher = func(credential azblob.TokenCredential) time.Duration {
if err := servicePrincipalToken.Refresh(); err != nil {
panic(err)
}
refreshedToken := servicePrincipalToken.Token()
credential.SetToken(refreshedToken.AccessToken)
exp := refreshedToken.Expires().Sub(time.Now().Add(2 * time.Minute))
return exp
}
return tokenRefresher, nil
}
// newPipeline creates a Pipeline using the specified credentials and options.
//
// this code was copied from azblob.NewPipeline
@@ -379,8 +473,7 @@ func (f *Fs) setRoot(root string) {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -388,10 +481,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "azure: upload cutoff")
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "azure: chunk size")
@@ -410,21 +499,25 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
}
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
opt: *opt,
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
client: fshttp.NewClient(fs.Config),
ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
imdsPacer: fs.NewPacer(ctx, pacer.NewAzureIMDS()),
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
client: fshttp.NewClient(ctx),
cache: bucket.NewCache(),
cntURLcache: make(map[string]*azblob.ContainerURL, 1),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
fs.Config.Transfers,
ci.Transfers,
opt.MemoryPoolUseMmap,
),
}
f.imdsPacer.SetRetries(5) // per IMDS documentation
f.setRoot(root)
f.features = (&fs.Features{
ReadMimeType: true,
@@ -433,7 +526,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
BucketBasedRootOK: true,
SetTier: true,
GetTier: true,
}).Fill(f)
}).Fill(ctx, f)
var (
u *url.URL
@@ -451,6 +544,76 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
serviceURL = azblob.NewServiceURL(*u, pipeline)
case opt.UseMSI:
var token adal.Token
var userMSI *userMSI = &userMSI{}
if len(opt.MSIClientID) > 0 || len(opt.MSIObjectID) > 0 || len(opt.MSIResourceID) > 0 {
// Specifying a user-assigned identity. Exactly one of the above IDs must be specified.
// Validate and ensure exactly one is set. (To do: better validation.)
if len(opt.MSIClientID) > 0 {
if len(opt.MSIObjectID) > 0 || len(opt.MSIResourceID) > 0 {
return nil, errors.New("more than one user-assigned identity ID is set")
}
userMSI.Type = msiClientID
userMSI.Value = opt.MSIClientID
}
if len(opt.MSIObjectID) > 0 {
if len(opt.MSIClientID) > 0 || len(opt.MSIResourceID) > 0 {
return nil, errors.New("more than one user-assigned identity ID is set")
}
userMSI.Type = msiObjectID
userMSI.Value = opt.MSIObjectID
}
if len(opt.MSIResourceID) > 0 {
if len(opt.MSIClientID) > 0 || len(opt.MSIObjectID) > 0 {
return nil, errors.New("more than one user-assigned identity ID is set")
}
userMSI.Type = msiResourceID
userMSI.Value = opt.MSIResourceID
}
} else {
userMSI = nil
}
err = f.imdsPacer.Call(func() (bool, error) {
// Retry as specified by the documentation:
// https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token#retry-guidance
token, err = GetMSIToken(ctx, userMSI)
return f.shouldRetry(err)
})
if err != nil {
return nil, errors.Wrapf(err, "Failed to acquire MSI token")
}
u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
if err != nil {
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
}
credential := azblob.NewTokenCredential(token.AccessToken, func(credential azblob.TokenCredential) time.Duration {
fs.Debugf(f, "Token refresher called.")
var refreshedToken adal.Token
err := f.imdsPacer.Call(func() (bool, error) {
refreshedToken, err = GetMSIToken(ctx, userMSI)
return f.shouldRetry(err)
})
if err != nil {
// Failed to refresh.
return 0
}
credential.SetToken(refreshedToken.AccessToken)
now := time.Now().UTC()
// Refresh one minute before expiry.
refreshAt := refreshedToken.Expires().UTC().Add(-1 * time.Minute)
fs.Debugf(f, "Acquired new token that expires at %v; refreshing in %d s", refreshedToken.Expires(),
int(refreshAt.Sub(now).Seconds()))
if now.After(refreshAt) {
// Acquired a causality violation.
return 0
}
return refreshAt.Sub(now)
})
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
serviceURL = azblob.NewServiceURL(*u, pipeline)
case opt.Account != "" && opt.Key != "":
credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
if err != nil {
@@ -482,8 +645,27 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
} else {
serviceURL = azblob.NewServiceURL(*u, pipeline)
}
case opt.ServicePrincipalFile != "":
// Create a standard URL.
u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
if err != nil {
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
}
// Try loading service principal credentials from file.
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServicePrincipalFile))
if err != nil {
return nil, errors.Wrap(err, "error opening service principal credentials file")
}
// Create a token refresher from service principal credentials.
tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, loadedCreds)
if err != nil {
return nil, errors.Wrap(err, "failed to create a service principal token")
}
options := azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}}
pipe := f.newPipeline(azblob.NewTokenCredential("", tokenRefresher), options)
serviceURL = azblob.NewServiceURL(*u, pipe)
default:
return nil, errors.New("Need account+key or connectionString or sasURL")
return nil, errors.New("No authentication method configured")
}
f.svcURL = &serviceURL
@@ -524,7 +706,7 @@ func (f *Fs) cntURL(container string) (containerURL *azblob.ContainerURL) {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *azblob.BlobItem) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(remote string, info *azblob.BlobItemInternal) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -581,7 +763,7 @@ func isDirectoryMarker(size int64, metadata azblob.Metadata, remote string) bool
}
// listFn is called from list to handle an object
type listFn func(remote string, object *azblob.BlobItem, isDirectory bool) error
type listFn func(remote string, object *azblob.BlobItemInternal, isDirectory bool) error
// list lists the objects into the function supplied from
// the container and root supplied
@@ -680,7 +862,7 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
}
// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(remote string, object *azblob.BlobItem, isDirectory bool) (fs.DirEntry, error) {
func (f *Fs) itemToDirEntry(remote string, object *azblob.BlobItemInternal, isDirectory bool) (fs.DirEntry, error) {
if isDirectory {
d := fs.NewDir(remote, time.Time{})
return d, nil
@@ -692,9 +874,27 @@ func (f *Fs) itemToDirEntry(remote string, object *azblob.BlobItem, isDirectory
return o, nil
}
// Check to see if this is a limited container and the container is not found
func (f *Fs) containerOK(container string) bool {
if !f.isLimited {
return true
}
f.cntURLcacheMu.Lock()
defer f.cntURLcacheMu.Unlock()
for limitedContainer := range f.cntURLcache {
if container == limitedContainer {
return true
}
}
return false
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
err = f.list(ctx, container, directory, prefix, addContainer, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
if !f.containerOK(container) {
return nil, fs.ErrorDirNotFound
}
err = f.list(ctx, container, directory, prefix, addContainer, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItemInternal, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
@@ -775,7 +975,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
container, directory := f.split(dir)
list := walk.NewListRHelper(callback)
listR := func(container, directory, prefix string, addContainer bool) error {
return f.list(ctx, container, directory, prefix, addContainer, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
return f.list(ctx, container, directory, prefix, addContainer, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItemInternal, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
@@ -802,6 +1002,9 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
f.cache.MarkOK(container)
}
} else {
if !f.containerOK(container) {
return fs.ErrorDirNotFound
}
err = listR(container, directory, f.rootDirectory, f.rootContainer == "")
if err != nil {
return err
@@ -903,7 +1106,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
// isEmpty checks to see if a given (container, directory) is empty and returns an error if not
func (f *Fs) isEmpty(ctx context.Context, container, directory string) (err error) {
empty := true
err = f.list(ctx, container, directory, f.rootDirectory, f.rootContainer == "", true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
err = f.list(ctx, container, directory, f.rootDirectory, f.rootContainer == "", true, 1, func(remote string, object *azblob.BlobItemInternal, isDirectory bool) error {
empty = false
return nil
})
@@ -976,7 +1179,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.deleteContainer(ctx, container)
}
// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -1008,7 +1211,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var startCopy *azblob.BlobStartCopyFromURLResponse
err = f.pacer.Call(func() (bool, error) {
startCopy, err = dstBlobURL.StartCopyFromURL(ctx, *source, nil, azblob.ModifiedAccessConditions{}, options)
startCopy, err = dstBlobURL.StartCopyFromURL(ctx, *source, nil, azblob.ModifiedAccessConditions{}, options, azblob.AccessTierType(f.opt.AccessTier), nil)
return f.shouldRetry(err)
})
if err != nil {
@@ -1018,7 +1221,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
copyStatus := startCopy.CopyStatus()
for copyStatus == azblob.CopyStatusPending {
time.Sleep(1 * time.Second)
getMetadata, err := dstBlobURL.GetProperties(ctx, options)
getMetadata, err := dstBlobURL.GetProperties(ctx, options, azblob.ClientProvidedKeyOptions{})
if err != nil {
return nil, err
}
@@ -1036,7 +1239,7 @@ func (f *Fs) getMemoryPool(size int64) *pool.Pool {
return pool.New(
time.Duration(f.opt.MemoryPoolFlushTime),
int(size),
fs.Config.Transfers,
f.ci.Transfers,
f.opt.MemoryPoolUseMmap,
)
}
@@ -1123,7 +1326,7 @@ func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetProper
return nil
}
func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItem) (err error) {
func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItemInternal) (err error) {
metadata := info.Metadata
size := *info.Properties.ContentLength
if isDirectoryMarker(size, metadata, o.remote) {
@@ -1169,7 +1372,7 @@ func (o *Object) readMetaData() (err error) {
ctx := context.Background()
var blobProperties *azblob.BlobGetPropertiesResponse
err = o.fs.pacer.Call(func() (bool, error) {
blobProperties, err = blob.GetProperties(ctx, options)
blobProperties, err = blob.GetProperties(ctx, options, azblob.ClientProvidedKeyOptions{})
return o.fs.shouldRetry(err)
})
if err != nil {
@@ -1204,7 +1407,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
blob := o.getBlobReference()
err := o.fs.pacer.Call(func() (bool, error) {
_, err := blob.SetMetadata(ctx, o.meta, azblob.BlobAccessConditions{})
_, err := blob.SetMetadata(ctx, o.meta, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
return o.fs.shouldRetry(err)
})
if err != nil {
@@ -1245,15 +1448,15 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
blob := o.getBlobReference()
ac := azblob.BlobAccessConditions{}
var dowloadResponse *azblob.DownloadResponse
var downloadResponse *azblob.DownloadResponse
err = o.fs.pacer.Call(func() (bool, error) {
dowloadResponse, err = blob.Download(ctx, offset, count, ac, false)
downloadResponse, err = blob.Download(ctx, offset, count, ac, false, azblob.ClientProvidedKeyOptions{})
return o.fs.shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to open for download")
}
in = dowloadResponse.Body(azblob.RetryReaderOptions{})
in = downloadResponse.Body(azblob.RetryReaderOptions{})
return in, nil
}
@@ -1278,12 +1481,6 @@ func init() {
}
}
// readSeeker joins an io.Reader and an io.Seeker
type readSeeker struct {
io.Reader
io.Seeker
}
// increment the slice passed in as LSB binary
func increment(xs []byte) {
for i, digit := range xs {
@@ -1296,153 +1493,69 @@ func increment(xs []byte) {
}
}
var warnStreamUpload sync.Once
// poolWrapper wraps a pool.Pool as an azblob.TransferManager
type poolWrapper struct {
pool *pool.Pool
bufToken chan struct{}
runToken chan struct{}
}
// uploadMultipart uploads a file using multipart upload
//
// Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, blob *azblob.BlobURL, httpHeaders *azblob.BlobHTTPHeaders) (err error) {
// Calculate correct chunkSize
chunkSize := int64(o.fs.opt.ChunkSize)
totalParts := -1
// Note that the max size of file is 4.75 TB (100 MB X 50,000
// blocks) and this is smaller than the max uncommitted block
// size (9.52 TB) so we do not need to part commit block lists
// or garbage collect uncommitted blocks.
//
// See: https://docs.microsoft.com/en-gb/rest/api/storageservices/put-block
// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
// buffers here (default 4MB). With a maximum number of parts (50,000) this will be a file of
// 195GB which seems like a not too unreasonable limit.
if size == -1 {
warnStreamUpload.Do(func() {
fs.Logf(o, "Streaming uploads using chunk size %v will have maximum file size of %v",
o.fs.opt.ChunkSize, fs.SizeSuffix(chunkSize*maxTotalParts))
})
} else {
// Adjust partSize until the number of parts is small enough.
if size/chunkSize >= maxTotalParts {
// Calculate partition size rounded up to the nearest MB
chunkSize = (((size / maxTotalParts) >> 20) + 1) << 20
}
if chunkSize > int64(maxChunkSize) {
return errors.Errorf("can't upload as it is too big %v - takes more than %d chunks of %v", fs.SizeSuffix(size), totalParts, fs.SizeSuffix(chunkSize/2))
}
totalParts = int(size / chunkSize)
if size%chunkSize != 0 {
totalParts++
}
// newPoolWrapper creates an azblob.TransferManager that will use a
// pool.Pool with maximum concurrency as specified.
func (f *Fs) newPoolWrapper(concurrency int) azblob.TransferManager {
return &poolWrapper{
pool: f.pool,
bufToken: make(chan struct{}, concurrency),
runToken: make(chan struct{}, concurrency),
}
}
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", totalParts, fs.SizeSuffix(chunkSize))
// Get implements TransferManager.Get().
func (pw *poolWrapper) Get() []byte {
pw.bufToken <- struct{}{}
return pw.pool.Get()
}
// unwrap the accounting from the input, we use wrap to put it
// back on after the buffering
in, wrap := accounting.UnWrap(in)
// Put implements TransferManager.Put().
func (pw *poolWrapper) Put(b []byte) {
pw.pool.Put(b)
<-pw.bufToken
}
// Upload the chunks
var (
g, gCtx = errgroup.WithContext(ctx)
remaining = size // remaining size in file for logging only, -1 if size < 0
position = int64(0) // position in file
memPool = o.fs.getMemoryPool(chunkSize) // pool to get memory from
finished = false // set when we have read EOF
blocks []string // list of blocks for finalize
blockBlobURL = blob.ToBlockBlobURL() // Get BlockBlobURL, we will use default pipeline here
ac = azblob.LeaseAccessConditions{} // Use default lease access conditions
binaryBlockID = make([]byte, 8) // block counter as LSB first 8 bytes
)
for part := 0; !finished; part++ {
// Get a block of memory from the pool and a token which limits concurrency
o.fs.uploadToken.Get()
buf := memPool.Get()
// Run implements TransferManager.Run().
func (pw *poolWrapper) Run(f func()) {
pw.runToken <- struct{}{}
go func() {
f()
<-pw.runToken
}()
}
free := func() {
memPool.Put(buf) // return the buf
o.fs.uploadToken.Put() // return the token
}
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
free()
break
}
// Read the chunk
n, err := readers.ReadFill(in, buf) // this can never return 0, nil
if err == io.EOF {
if n == 0 { // end if no data
free()
break
}
finished = true
} else if err != nil {
free()
return errors.Wrap(err, "multipart upload failed to read source")
}
buf = buf[:n]
// increment the blockID and save the blocks for finalize
increment(binaryBlockID)
blockID := base64.StdEncoding.EncodeToString(binaryBlockID)
blocks = append(blocks, blockID)
// Transfer the chunk
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, totalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
g.Go(func() (err error) {
defer free()
// Upload the block, with MD5 for check
md5sum := md5.Sum(buf)
transactionalMD5 := md5sum[:]
err = o.fs.pacer.Call(func() (bool, error) {
bufferReader := bytes.NewReader(buf)
wrappedReader := wrap(bufferReader)
rs := readSeeker{wrappedReader, bufferReader}
_, err = blockBlobURL.StageBlock(ctx, blockID, &rs, ac, transactionalMD5)
return o.fs.shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "multipart upload failed to upload part")
}
return nil
})
// ready for next block
if size >= 0 {
remaining -= chunkSize
}
position += chunkSize
}
err = g.Wait()
if err != nil {
return err
}
// Finalise the upload session
err = o.fs.pacer.Call(func() (bool, error) {
_, err := blockBlobURL.CommitBlockList(ctx, blocks, *httpHeaders, o.meta, azblob.BlobAccessConditions{})
return o.fs.shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "multipart upload failed to finalize")
}
return nil
// Close implements TransferManager.Close().
func (pw *poolWrapper) Close() {
}
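The poolWrapper above replaces the hand-rolled multipart uploader: azblob's UploadStreamToBlockBlob now drives the upload and asks the TransferManager for buffers (Get/Put) and goroutines (Run), while the two token channels cap how many of each are outstanding. Below is a standalone sketch of that token-channel pattern only; it is not rclone code, the memory pool is replaced by plain allocations, and a WaitGroup is added so the example can wait for completion.

package main

import (
    "fmt"
    "sync"
)

type transferManager struct {
    bufToken chan struct{} // limits buffers handed out by Get
    runToken chan struct{} // limits goroutines started by Run
    wg       sync.WaitGroup
}

func newTransferManager(concurrency int) *transferManager {
    return &transferManager{
        bufToken: make(chan struct{}, concurrency),
        runToken: make(chan struct{}, concurrency),
    }
}

// Get blocks once `concurrency` buffers are in flight, then hands one out.
func (tm *transferManager) Get() []byte {
    tm.bufToken <- struct{}{}
    return make([]byte, 4<<20) // 4 MiB chunk, standing in for pool.Get()
}

// Put returns a buffer and releases its token.
func (tm *transferManager) Put(b []byte) {
    _ = b // a real pool would recycle the buffer here
    <-tm.bufToken
}

// Run starts f on its own goroutine, blocking once `concurrency` are running.
func (tm *transferManager) Run(f func()) {
    tm.runToken <- struct{}{}
    tm.wg.Add(1)
    go func() {
        defer tm.wg.Done()
        f()
        <-tm.runToken
    }()
}

func main() {
    tm := newTransferManager(4)
    for part := 0; part < 10; part++ {
        buf := tm.Get()
        p := part
        tm.Run(func() {
            fmt.Printf("uploading part %d (%d bytes)\n", p, len(buf))
            tm.Put(buf)
        })
    }
    tm.wg.Wait()
}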
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
if o.accessTier == azblob.AccessTierArchive {
if o.fs.opt.ArchiveTierDelete {
fs.Debugf(o, "deleting archive tier blob before updating")
err = o.Remove(ctx)
if err != nil {
return errors.Wrap(err, "failed to delete archive blob before updating")
}
} else {
return errCantUpdateArchiveTierBlobs
}
}
container, _ := o.split()
err = o.fs.makeContainer(ctx, container)
if err != nil {
return err
}
size := src.Size()
// Update Mod time
o.updateMetadataWithModTime(src.ModTime(ctx))
if err != nil {
@@ -1451,11 +1564,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
blob := o.getBlobReference()
httpHeaders := azblob.BlobHTTPHeaders{}
httpHeaders.ContentType = fs.MimeType(ctx, o)
// Compute the Content-MD5 of the file, for multiparts uploads it
httpHeaders.ContentType = fs.MimeType(ctx, src)
// Compute the Content-MD5 of the file. As we stream all uploads it
// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
// Note: If multipart, an MD5 checksum will also be computed for each uploaded block
// in order to validate its integrity during transport
if !o.fs.opt.DisableCheckSum {
if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
@@ -1469,30 +1581,17 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
putBlobOptions := azblob.UploadStreamToBlockBlobOptions{
BufferSize: int(o.fs.opt.ChunkSize),
MaxBuffers: 4,
MaxBuffers: uploadConcurrency,
Metadata: o.meta,
BlobHTTPHeaders: httpHeaders,
}
// FIXME Until https://github.com/Azure/azure-storage-blob-go/pull/75
// is merged the SDK can't upload a single blob of exactly the chunk
// size, so upload with a multipart upload to work around.
// See: https://github.com/rclone/rclone/issues/2653
multipartUpload := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
if size == int64(o.fs.opt.ChunkSize) {
multipartUpload = true
fs.Debugf(o, "Setting multipart upload for file of chunk size (%d) to work around SDK bug", size)
TransferManager: o.fs.newPoolWrapper(uploadConcurrency),
}
// Don't retry, return a retry error instead
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
if multipartUpload {
// If a large file upload in chunks
err = o.uploadMultipart(ctx, in, size, &blob, &httpHeaders)
} else {
// Write a small blob in one transaction
blockBlobURL := blob.ToBlockBlobURL()
_, err = azblob.UploadStreamToBlockBlob(ctx, in, blockBlobURL, putBlobOptions)
}
// Stream contents of the reader object to the given blob URL
blockBlobURL := blob.ToBlockBlobURL()
_, err = azblob.UploadStreamToBlockBlob(ctx, in, blockBlobURL, putBlobOptions)
return o.fs.shouldRetry(err)
})
if err != nil {

View File

@@ -1,4 +1,4 @@
// +build !plan9,!solaris,!js,go1.13
// +build !plan9,!solaris,!js,go1.14
package azureblob

View File

@@ -1,14 +1,16 @@
// Test AzureBlob filesystem interface
// +build !plan9,!solaris,!js,go1.13
// +build !plan9,!solaris,!js,go1.14
package azureblob
import (
"context"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
)
// TestIntegration runs integration tests against the remote
@@ -27,11 +29,36 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
)
// TestServicePrincipalFileSuccess checks that, given a proper JSON file, we can create a token.
func TestServicePrincipalFileSuccess(t *testing.T) {
ctx := context.TODO()
credentials := `
{
"appId": "my application (client) ID",
"password": "my secret",
"tenant": "my active directory tenant ID"
}
`
tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
if assert.NoError(t, err) {
assert.NotNil(t, tokenRefresher)
}
}
// TestServicePrincipalFileFailure checks that, given a JSON file with a missing secret, it returns an error.
func TestServicePrincipalFileFailure(t *testing.T) {
ctx := context.TODO()
credentials := `
{
"appId": "my application (client) ID",
"tenant": "my active directory tenant ID"
}
`
_, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
assert.Error(t, err)
assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
}

View File

@@ -1,6 +1,6 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build plan9 solaris js !go1.13
// +build plan9 solaris js !go1.14
package azureblob

backend/azureblob/imds.go Normal file
View File

@@ -0,0 +1,137 @@
// +build !plan9,!solaris,!js,go1.14
package azureblob
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fshttp"
)
const (
azureResource = "https://storage.azure.com"
imdsAPIVersion = "2018-02-01"
msiEndpointDefault = "http://169.254.169.254/metadata/identity/oauth2/token"
)
// This custom type is used to add the port the test server has bound to
// to the request context.
type testPortKey string
type msiIdentifierType int
const (
msiClientID msiIdentifierType = iota
msiObjectID
msiResourceID
)
type userMSI struct {
Type msiIdentifierType
Value string
}
type httpError struct {
Response *http.Response
}
func (e httpError) Error() string {
return fmt.Sprintf("HTTP error %v (%v)", e.Response.StatusCode, e.Response.Status)
}
// GetMSIToken attempts to obtain an MSI token from the Azure Instance
// Metadata Service.
func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
// Attempt to get an MSI token; silently continue if unsuccessful.
// This code has been lovingly stolen from azcopy's OAuthTokenManager.
result := adal.Token{}
req, err := http.NewRequestWithContext(ctx, "GET", msiEndpointDefault, nil)
if err != nil {
fs.Debugf(nil, "Failed to create request: %v", err)
return result, err
}
params := req.URL.Query()
params.Set("resource", azureResource)
params.Set("api-version", imdsAPIVersion)
// Specify user-assigned identity if requested.
if identity != nil {
switch identity.Type {
case msiClientID:
params.Set("client_id", identity.Value)
case msiObjectID:
params.Set("object_id", identity.Value)
case msiResourceID:
params.Set("mi_res_id", identity.Value)
default:
// If this happens, the calling function and this one don't agree on
// what valid ID types exist.
return result, fmt.Errorf("unknown MSI identity type specified")
}
}
req.URL.RawQuery = params.Encode()
// The Metadata header is required by all calls to IMDS.
req.Header.Set("Metadata", "true")
// If this function is run in a test, query the test server instead of IMDS.
testPort, isTest := ctx.Value(testPortKey("testPort")).(int)
if isTest {
req.URL.Host = fmt.Sprintf("localhost:%d", testPort)
req.Host = req.URL.Host
}
// Send request
httpClient := fshttp.NewClient(ctx)
resp, err := httpClient.Do(req)
if err != nil {
return result, errors.Wrap(err, "MSI is not enabled on this VM")
}
defer func() { // resp and Body should not be nil
_, err = io.Copy(ioutil.Discard, resp.Body)
if err != nil {
fs.Debugf(nil, "Unable to drain IMDS response: %v", err)
}
err = resp.Body.Close()
if err != nil {
fs.Debugf(nil, "Unable to close IMDS response: %v", err)
}
}()
// Check if the status code indicates success
// The request returns 200 currently, add 201 and 202 as well for possible extension.
switch resp.StatusCode {
case 200, 201, 202:
break
default:
body, _ := ioutil.ReadAll(resp.Body)
fs.Errorf(nil, "Couldn't obtain OAuth token from IMDS; server returned status code %d and body: %v", resp.StatusCode, string(body))
return result, httpError{Response: resp}
}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return result, errors.Wrap(err, "Couldn't read IMDS response")
}
// Remove BOM, if any. azcopy does this so I'm following along.
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
// This would be a good place to persist the token if a large number of rclone
// invocations are being made in a short amount of time. If the token is
// persisted, the azureblob code will need to check for expiry before every
// storage API call.
err = json.Unmarshal(b, &result)
if err != nil {
return result, errors.Wrap(err, "Couldn't unmarshal IMDS response")
}
return result, nil
}
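The request GetMSIToken builds boils down to a single GET against the instance metadata endpoint with the mandatory "Metadata: true" header. The sketch below issues the same call outside rclone; it only succeeds when run on an Azure VM/VMSS that has a managed identity, and the user-assigned identity parameters are left as an optional assumption in a comment.

package main

import (
    "context"
    "fmt"
    "io/ioutil"
    "net/http"
    "net/url"
    "time"
)

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    req, err := http.NewRequestWithContext(ctx, "GET",
        "http://169.254.169.254/metadata/identity/oauth2/token", nil)
    if err != nil {
        panic(err)
    }
    q := url.Values{}
    q.Set("resource", "https://storage.azure.com")
    q.Set("api-version", "2018-02-01")
    // For a user-assigned identity, also set exactly one of
    // client_id, object_id or mi_res_id here.
    req.URL.RawQuery = q.Encode()
    req.Header.Set("Metadata", "true") // required by IMDS

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        fmt.Println("IMDS not reachable (not running on an Azure VM?):", err)
        return
    }
    defer resp.Body.Close()
    body, _ := ioutil.ReadAll(resp.Body)
    fmt.Println(resp.Status)
    fmt.Println(string(body)) // JSON containing the access token and its expiry
}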

View File

@@ -0,0 +1,117 @@
// +build !plan9,!solaris,!js,go1.14
package azureblob
import (
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func handler(t *testing.T, actual *map[string]string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
require.NoError(t, err)
parameters := r.URL.Query()
(*actual)["path"] = r.URL.Path
(*actual)["Metadata"] = r.Header.Get("Metadata")
(*actual)["method"] = r.Method
for paramName := range parameters {
(*actual)[paramName] = parameters.Get(paramName)
}
// Make response.
response := adal.Token{}
responseBytes, err := json.Marshal(response)
require.NoError(t, err)
_, err = w.Write(responseBytes)
require.NoError(t, err)
}
}
func TestManagedIdentity(t *testing.T) {
// test user-assigned identity specifiers to use
testMSIClientID := "d859b29f-5c9c-42f8-a327-ec1bc6408d79"
testMSIObjectID := "9ffeb650-3ca0-4278-962b-5a38d520591a"
testMSIResourceID := "/subscriptions/fe714c49-b8a4-4d49-9388-96a20daa318f/resourceGroups/somerg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/someidentity"
tests := []struct {
identity *userMSI
identityParameterName string
expectedAbsent []string
}{
{&userMSI{msiClientID, testMSIClientID}, "client_id", []string{"object_id", "mi_res_id"}},
{&userMSI{msiObjectID, testMSIObjectID}, "object_id", []string{"client_id", "mi_res_id"}},
{&userMSI{msiResourceID, testMSIResourceID}, "mi_res_id", []string{"object_id", "client_id"}},
{nil, "(default)", []string{"object_id", "client_id", "mi_res_id"}},
}
alwaysExpected := map[string]string{
"path": "/metadata/identity/oauth2/token",
"resource": "https://storage.azure.com",
"Metadata": "true",
"api-version": "2018-02-01",
"method": "GET",
}
for _, test := range tests {
actual := make(map[string]string, 10)
testServer := httptest.NewServer(handler(t, &actual))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, test.identity)
require.NoError(t, err)
// Validate expected query parameters present
expected := make(map[string]string)
for k, v := range alwaysExpected {
expected[k] = v
}
if test.identity != nil {
expected[test.identityParameterName] = test.identity.Value
}
for key := range expected {
value, exists := actual[key]
if assert.Truef(t, exists, "test of %s: query parameter %s was not passed",
test.identityParameterName, key) {
assert.Equalf(t, expected[key], value,
"test of %s: parameter %s has incorrect value", test.identityParameterName, key)
}
}
// Validate unexpected query parameters absent
for _, key := range test.expectedAbsent {
_, exists := actual[key]
assert.Falsef(t, exists, "query parameter %s was unexpectedly passed", key)
}
}
}
func errorHandler(resultCode int) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Test error generated", resultCode)
}
}
func TestIMDSErrors(t *testing.T) {
errorCodes := []int{404, 429, 500}
for _, code := range errorCodes {
testServer := httptest.NewServer(errorHandler(code))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, nil)
require.Error(t, err)
httpErr, ok := err.(httpError)
require.Truef(t, ok, "HTTP error %d did not result in an httpError object", code)
assert.Equalf(t, httpErr.Response.StatusCode, code, "desired error %d but didn't get it", code)
}
}

View File

@@ -44,8 +44,10 @@ const (
timeHeader = headerPrefix + timeKey
sha1Key = "large_file_sha1"
sha1Header = "X-Bz-Content-Sha1"
sha1InfoHeader = headerPrefix + sha1Key
testModeHeader = "X-Bz-Test-Mode"
idHeader = "X-Bz-File-Id"
nameHeader = "X-Bz-File-Name"
timestampHeader = "X-Bz-Upload-Timestamp"
retryAfterHeader = "Retry-After"
minSleep = 10 * time.Millisecond
maxSleep = 5 * time.Minute
@@ -121,7 +123,7 @@ This value should be set no larger than 4.657GiB (== 5GB).`,
Name: "copy_cutoff",
Help: `Cutoff for switching to multipart copy
Any files larger than this that need to be server side copied will be
Any files larger than this that need to be server-side copied will be
copied in chunks of this size.
The minimum is 0 and the maximum is 4.6GB.`,
@@ -153,7 +155,9 @@ to start uploading.`,
This is usually set to a Cloudflare CDN URL as Backblaze offers
free egress for data downloaded through the Cloudflare network.
This is probably only useful for a public bucket.
Rclone works with private buckets by sending an "Authorization" header.
If the custom endpoint rewrites the requests for authentication,
e.g., in Cloudflare Workers, this header needs to be handled properly.
Leave blank if you want to use the endpoint provided by Backblaze.`,
Advanced: true,
}, {
@@ -214,6 +218,7 @@ type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed config options
ci *fs.ConfigInfo // global config
features *fs.Features // optional features
srv *rest.Client // the connection to the b2 server
rootBucket string // bucket part of root (if any)
@@ -290,7 +295,7 @@ func (o *Object) split() (bucket, bucketPath string) {
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
401, // Unauthorized (eg "Token has expired")
401, // Unauthorized (e.g. "Token has expired")
408, // Request Timeout
429, // Rate exceeded.
500, // Get occasional 500 Internal Server Error
@@ -391,14 +396,17 @@ func (f *Fs) setRoot(root string) {
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if opt.UploadCutoff < opt.ChunkSize {
opt.UploadCutoff = opt.ChunkSize
fs.Infof(nil, "b2: raising upload cutoff to chunk size: %v", opt.UploadCutoff)
}
err = checkUploadCutoff(opt, opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "b2: upload cutoff")
@@ -416,20 +424,22 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if opt.Endpoint == "" {
opt.Endpoint = defaultEndpoint
}
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
opt: *opt,
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
ci: ci,
srv: rest.NewClient(fshttp.NewClient(ctx)).SetErrorHandler(errorHandler),
cache: bucket.NewCache(),
_bucketID: make(map[string]string, 1),
_bucketType: make(map[string]string, 1),
uploads: make(map[string][]*api.GetUploadURLResponse),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
fs.Config.Transfers,
ci.Transfers,
opt.MemoryPoolUseMmap,
),
}
@@ -439,7 +449,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
}).Fill(f)
}).Fill(ctx, f)
// Set the test flag if required
if opt.TestMode != "" {
testMode := strings.TrimSpace(opt.TestMode)
@@ -702,7 +712,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
remote := file.Name[len(prefix):]
// Check for directory
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
if isDirectory {
if isDirectory && len(remote) > 1 {
remote = remote[:len(remote)-1]
}
if addBucket {
@@ -1168,10 +1178,10 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
}
// Delete Config.Transfers in parallel
toBeDeleted := make(chan *api.File, fs.Config.Transfers)
toBeDeleted := make(chan *api.File, f.ci.Transfers)
var wg sync.WaitGroup
wg.Add(fs.Config.Transfers)
for i := 0; i < fs.Config.Transfers; i++ {
wg.Add(f.ci.Transfers)
for i := 0; i < f.ci.Transfers; i++ {
go func() {
defer wg.Done()
for object := range toBeDeleted {
@@ -1183,7 +1193,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
err = f.deleteByID(ctx, object.ID, object.Name)
checkErr(err)
tr.Done(err)
tr.Done(ctx, err)
}
}()
}
@@ -1211,7 +1221,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
toBeDeleted <- object
}
last = remote
tr.Done(nil)
tr.Done(ctx, nil)
}
return nil
}))
@@ -1234,7 +1244,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
return f.purge(ctx, "", true)
}
// copy does a server side copy from dstObj <- srcObj
// copy does a server-side copy from dstObj <- srcObj
//
// If newInfo is nil then the metadata will be copied otherwise it
// will be replaced with newInfo
@@ -1291,7 +1301,7 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
return dstObj.decodeMetaDataFileInfo(&response)
}
// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -1440,7 +1450,7 @@ func (o *Object) Size() int64 {
// Make sure it is lower case
//
// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
// Some tools (eg Cyberduck) use this
// Some tools (e.g. Cyberduck) use this
func cleanSHA1(sha1 string) (out string) {
out = strings.ToLower(sha1)
const unverified = "unverified:"
@@ -1494,8 +1504,11 @@ func (o *Object) decodeMetaDataFileInfo(info *api.FileInfo) (err error) {
return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
}
// getMetaData gets the metadata from the object unconditionally
func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
// getMetaDataListing gets the metadata from the object unconditionally from the listing
//
// Note that listing is a class C transaction which costs more than
// the B transaction used in getMetaData
func (o *Object) getMetaDataListing(ctx context.Context) (info *api.File, err error) {
bucket, bucketPath := o.split()
maxSearched := 1
var timestamp api.Timestamp
@@ -1528,6 +1541,19 @@ func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
return info, nil
}
// getMetaData gets the metadata from the object unconditionally
func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
// If using versions and have a version suffix, need to list the directory to find the correct versions
if o.fs.opt.Versions {
timestamp, _ := api.RemoveVersion(o.remote)
if !timestamp.IsZero() {
return o.getMetaDataListing(ctx)
}
}
_, info, err = o.getOrHead(ctx, "HEAD", nil)
return info, err
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// Sets
@@ -1657,12 +1683,11 @@ func (file *openFile) Close() (err error) {
// Check it satisfies the interfaces
var _ io.ReadCloser = &openFile{}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
fs.FixRangeOption(options, o.size)
func (o *Object) getOrHead(ctx context.Context, method string, options []fs.OpenOption) (resp *http.Response, info *api.File, err error) {
opts := rest.Opts{
Method: "GET",
Options: options,
Method: method,
Options: options,
NoResponse: method == "HEAD",
}
// Use downloadUrl from backblaze if downloadUrl is not set
@@ -1680,37 +1705,67 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
bucket, bucketPath := o.split()
opts.Path += "/file/" + urlEncode(o.fs.opt.Enc.FromStandardName(bucket)) + "/" + urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath))
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return o.fs.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to open for download")
// 404 for files, 400 for directories
if resp != nil && (resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusBadRequest) {
return nil, nil, fs.ErrorObjectNotFound
}
return nil, nil, errors.Wrapf(err, "failed to %s for download", method)
}
// Parse the time out of the headers if possible
err = o.parseTimeString(resp.Header.Get(timeHeader))
// NB resp may be Open here - don't return err != nil without closing
// Convert the Headers into an api.File
var uploadTimestamp api.Timestamp
err = uploadTimestamp.UnmarshalJSON([]byte(resp.Header.Get(timestampHeader)))
if err != nil {
fs.Debugf(o, "Bad "+timestampHeader+" header: %v", err)
}
var Info = make(map[string]string)
for k, vs := range resp.Header {
k = strings.ToLower(k)
for _, v := range vs {
if strings.HasPrefix(k, headerPrefix) {
Info[k[len(headerPrefix):]] = v
}
}
}
info = &api.File{
ID: resp.Header.Get(idHeader),
Name: resp.Header.Get(nameHeader),
Action: "upload",
Size: resp.ContentLength,
UploadTimestamp: uploadTimestamp,
SHA1: resp.Header.Get(sha1Header),
ContentType: resp.Header.Get("Content-Type"),
Info: Info,
}
return resp, info, nil
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
fs.FixRangeOption(options, o.size)
resp, info, err := o.getOrHead(ctx, "GET", options)
if err != nil {
return nil, err
}
// Don't check length or hash or metadata on partial content
if resp.StatusCode == http.StatusPartialContent {
return resp.Body, nil
}
err = o.decodeMetaData(info)
if err != nil {
_ = resp.Body.Close()
return nil, err
}
// Read sha1 from header if it isn't set
if o.sha1 == "" {
o.sha1 = resp.Header.Get(sha1Header)
fs.Debugf(o, "Reading sha1 from header - %q", o.sha1)
// if sha1 header is "none" (in big files), then need
// to read it from the metadata
if o.sha1 == "none" {
o.sha1 = resp.Header.Get(sha1InfoHeader)
fs.Debugf(o, "Reading sha1 from info - %q", o.sha1)
}
o.sha1 = cleanSHA1(o.sha1)
}
// Don't check length or hash on partial content
if resp.StatusCode == http.StatusPartialContent {
return resp.Body, nil
}
return newOpenFile(o, resp), nil
}
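With this change getMetaData resolves an object with a single HEAD (a cheaper class B transaction) instead of listing the directory (class C), and getOrHead rebuilds the api.File from the X-Bz-* response headers. Below is a standalone sketch of that lookup against the B2 download API; it is not rclone code, and the URL and authorization token are placeholders rather than working values.

package main

import (
    "fmt"
    "net/http"
)

func main() {
    // Placeholder values - substitute a real download URL and account token.
    url := "https://f000.backblazeb2.com/file/my-bucket/path/to/file"
    req, err := http.NewRequest("HEAD", url, nil)
    if err != nil {
        panic(err)
    }
    req.Header.Set("Authorization", "ACCOUNT_AUTH_TOKEN")

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    // One HEAD returns everything the old directory listing was used for.
    fmt.Println("status:      ", resp.Status)
    fmt.Println("file id:     ", resp.Header.Get("X-Bz-File-Id"))
    fmt.Println("file name:   ", resp.Header.Get("X-Bz-File-Name"))
    fmt.Println("sha1:        ", resp.Header.Get("X-Bz-Content-Sha1"))
    fmt.Println("uploaded at: ", resp.Header.Get("X-Bz-Upload-Timestamp"))
    fmt.Println("size (bytes):", resp.ContentLength)
}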

View File

@@ -84,20 +84,20 @@ func init() {
Name: "box",
Description: "Box",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
Config: func(ctx context.Context, name string, m configmap.Mapper) {
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
boxAccessToken, boxAccessTokenOk := m.Get("access_token")
var err error
// If using box config.json, use JWT auth
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
err = refreshJWTToken(jsonFile, boxSubType, name, m)
err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
if err != nil {
log.Fatalf("Failed to configure token with jwt authentication: %v", err)
}
// Else, if not using an access token, use oauth2
} else if boxAccessToken == "" || !boxAccessTokenOk {
err = oauthutil.Config("box", name, m, oauthConfig, nil)
err = oauthutil.Config(ctx, "box", name, m, oauthConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token with oauth authentication: %v", err)
}
@@ -153,7 +153,7 @@ func init() {
})
}
func refreshJWTToken(jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
jsonFile = env.ShellExpand(jsonFile)
boxConfig, err := getBoxConfig(jsonFile)
if err != nil {
@@ -169,7 +169,7 @@ func refreshJWTToken(jsonFile string, boxSubType string, name string, m configma
}
signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig)
client := fshttp.NewClient(fs.Config)
client := fshttp.NewClient(ctx)
err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
return err
}
@@ -339,7 +339,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
}
found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
if item.Name == leaf {
if strings.EqualFold(item.Name, leaf) {
info = item
return true
}
@@ -372,8 +372,7 @@ func errorHandler(resp *http.Response) error {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -387,28 +386,29 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
root = parsePath(root)
client := fshttp.NewClient(fs.Config)
client := fshttp.NewClient(ctx)
var ts *oauthutil.TokenSource
// If not using an accessToken, create an oauth client and tokensource
if opt.AccessToken == "" {
client, ts, err = oauthutil.NewClient(name, m, oauthConfig)
client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Box")
}
}
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(client).SetRoot(rootURL),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
}
f.features = (&fs.Features{
CaseInsensitive: true,
CanHaveEmptyDirectories: true,
}).Fill(f)
}).Fill(ctx, f)
f.srv.SetErrorHandler(errorHandler)
// If using an accessToken, set the Authorization header
@@ -424,7 +424,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// should do so whether there are uploads pending or not.
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
err := refreshJWTToken(jsonFile, boxSubType, name, m)
err := refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
return err
})
f.tokenRenewer.Start()
@@ -463,7 +463,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
return nil, err
}
f.features.Fill(&tempF)
f.features.Fill(ctx, &tempF)
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182
@@ -514,7 +514,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
if item.Name == leaf {
if strings.EqualFold(item.Name, leaf) {
pathIDOut = item.ID
return true
}
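Both the readMetaDataForPath and FindLeaf callbacks above now compare names with strings.EqualFold because Box treats file names case-insensitively; an exact == comparison would miss an entry whose case differs from the requested leaf. For example:

package main

import (
    "fmt"
    "strings"
)

func main() {
    leaf := "Report.PDF"   // name we are looking for
    listed := "report.pdf" // name Box returns in the listing
    fmt.Println(listed == leaf)                  // false: exact match misses it
    fmt.Println(strings.EqualFold(listed, leaf)) // true: case-insensitive match finds it
}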
@@ -791,7 +791,7 @@ func (f *Fs) Precision() time.Duration {
return time.Second
}
// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -909,7 +909,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return usage, nil
}
// Move src to this remote using server side move operations.
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -945,7 +945,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1013,7 +1013,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return info.SharedLink.URL, err
}
// deletePermanently permenently deletes a trashed file
// deletePermanently permanently deletes a trashed file
func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
opts := rest.Opts{
Method: "DELETE",

View File

@@ -1,4 +1,4 @@
// multpart upload for box
// multipart upload for box
package box

View File

@@ -68,7 +68,7 @@ func init() {
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to cache.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Help: "Remote to cache.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Required: true,
}, {
Name: "plex_url",
@@ -109,7 +109,7 @@ will need to be cleared or unexpected EOF errors will occur.`,
}},
}, {
Name: "info_age",
Help: `How long to cache file structure information (directory listings, file size, times etc).
Help: `How long to cache file structure information (directory listings, file size, times, etc.).
If all write operations are done through the cache then you can safely make
this value very large as the cache store will also be updated in real time.`,
Default: DefCacheInfoAge,
@@ -340,7 +340,7 @@ func parseRootPath(path string) (string, error) {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -362,7 +362,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
}
remotePath := fspath.JoinRootPath(opt.Remote, rootPath)
wrappedFs, wrapErr := cache.Get(remotePath)
wrappedFs, wrapErr := cache.Get(ctx, remotePath)
if wrapErr != nil && wrapErr != fs.ErrorIsFile {
return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
}
@@ -479,7 +479,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
}
f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
f.tempFs, err = cache.Get(f.opt.TempWritePath)
f.tempFs, err = cache.Get(ctx, f.opt.TempWritePath)
if err != nil {
return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
}
@@ -506,13 +506,13 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
pollInterval := make(chan time.Duration, 1)
pollInterval <- time.Duration(f.opt.ChunkCleanInterval)
doChangeNotify(context.Background(), f.receiveChangeNotify, pollInterval)
doChangeNotify(ctx, f.receiveChangeNotify, pollInterval)
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
DuplicateFiles: false, // storage doesn't permit this
}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
// override only those features that use a temp fs and it doesn't support them
//f.features.ChangeNotify = f.ChangeNotify
if f.opt.TempWritePath != "" {
@@ -581,7 +581,7 @@ Some valid examples are:
"0:10" -> the first ten chunks
Any parameter with a key that starts with "file" can be used to
specify files to fetch, eg
specify files to fetch, e.g.
rclone rc cache/fetch chunks=0 file=hello file2=home/goodbye
@@ -1236,7 +1236,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
fs.Debugf(f, "move dir '%s'/'%s' -> '%s'/'%s'", src.Root(), srcRemote, f.Root(), dstRemote)
@@ -1517,7 +1517,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
return f.put(ctx, in, src, options, do)
}
// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
fs.Debugf(f, "copy obj '%s' -> '%s'", src, remote)
@@ -1594,7 +1594,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return co, nil
}
// Move src to this remote using server side move operations.
// Move src to this remote using server-side move operations.
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
fs.Debugf(f, "moving obj '%s' -> %s", src, remote)
@@ -1895,6 +1895,16 @@ func (f *Fs) Disconnect(ctx context.Context) error {
return do(ctx)
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
do := f.Fs.Features().Shutdown
if do == nil {
return nil
}
return do(ctx)
}
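Shutdown simply forwards to the wrapped remote when it implements the optional interface, and the assertion list below now declares the cache Fs as an fs.Shutdowner. A caller discovers such optional interfaces with a type assertion; a minimal sketch, with everything except the method shape being illustrative:

package main

import (
    "context"
    "fmt"
)

// shutdowner mirrors the shape of the optional interface; the real rclone
// types are not reproduced here.
type shutdowner interface {
    Shutdown(ctx context.Context) error
}

type fakeBackend struct{}

func (fakeBackend) Shutdown(ctx context.Context) error {
    fmt.Println("closing cached connections and background tasks")
    return nil
}

// shutdownIfPossible calls Shutdown only on backends that offer it.
func shutdownIfPossible(ctx context.Context, backend interface{}) error {
    if s, ok := backend.(shutdowner); ok {
        return s.Shutdown(ctx)
    }
    return nil // nothing to do for backends without background state
}

func main() {
    _ = shutdownIfPossible(context.Background(), fakeBackend{})
}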
var commandHelp = []fs.CommandHelp{
{
Name: "stats",
@@ -1939,4 +1949,5 @@ var (
_ fs.Disconnecter = (*Fs)(nil)
_ fs.Commander = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
)

View File

@@ -925,14 +925,15 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
require.NoError(t, err)
fs.Config.LowLevelRetries = 1
ci := fs.GetConfig(context.Background())
ci.LowLevelRetries = 1
// Instantiate root
if purge {
boltDb.PurgeTempUploads()
_ = os.RemoveAll(path.Join(runInstance.tmpUploadDir, id))
}
f, err := cache.NewFs(remote, id, m)
f, err := cache.NewFs(context.Background(), remote, id, m)
require.NoError(t, err)
cfs, err := r.getCacheFs(f)
require.NoError(t, err)

View File

@@ -42,7 +42,7 @@ import (
// used mostly for consistency checks (lazily for performance reasons).
// Other formats can be developed that use an external meta store
// free of these limitations, but this needs some support from
// rclone core (eg. metadata store interfaces).
// rclone core (e.g. metadata store interfaces).
//
// The following types of chunks are supported:
// data and control, active and temporary.
@@ -97,7 +97,8 @@ var (
//
// And still chunker's primary function is to chunk large files
// rather than serve as a generic metadata container.
const maxMetadataSize = 255
const maxMetadataSize = 1023
const maxMetadataSizeWritten = 255
// Current/highest supported metadata format.
const metadataVersion = 1
@@ -121,6 +122,8 @@ const maxTransactionProbes = 100
// standard chunker errors
var (
ErrChunkOverflow = errors.New("chunk number overflow")
ErrMetaTooBig = errors.New("metadata is too big")
ErrMetaUnknown = errors.New("unknown metadata, please upgrade rclone")
)
// variants of baseMove's parameter delMode
@@ -140,7 +143,7 @@ func init() {
Name: "remote",
Required: true,
Help: `Remote to chunk/unchunk.
Normally should contain a ':' and a path, eg "myremote:path/to/dir",
Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
"myremote:bucket" or maybe "myremote:" (not recommended).`,
}, {
Name: "chunk_size",
@@ -150,6 +153,7 @@ Normally should contain a ':' and a path, eg "myremote:path/to/dir",
}, {
Name: "name_format",
Advanced: true,
Hide: fs.OptionHideCommandLine,
Default: `*.rclone_chunk.###`,
Help: `String format of chunk file names.
The two placeholders are: base file name (*) and chunk number (#...).
@@ -160,12 +164,14 @@ Possible chunk files are ignored if their name does not match given format.`,
}, {
Name: "start_from",
Advanced: true,
Hide: fs.OptionHideCommandLine,
Default: 1,
Help: `Minimum valid chunk number. Usually 0 or 1.
By default chunk numbers start from 1.`,
}, {
Name: "meta_format",
Advanced: true,
Hide: fs.OptionHideCommandLine,
Default: "simplejson",
Help: `Format of the metadata object or "none". By default "simplejson".
Metadata is a small JSON file named after the composite file.`,
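For reference, the default name_format `*.rclone_chunk.###` substitutes the base file name for `*` and a zero-padded chunk number for the run of `#`, so with the default start_from = 1 a file backup.tar gets chunks like backup.tar.rclone_chunk.001, backup.tar.rclone_chunk.002 and so on. A simplified sketch of that substitution (not the backend's makeChunkName, which also handles control chunks and transaction IDs):

package main

import (
    "fmt"
    "regexp"
    "strings"
)

var hashRun = regexp.MustCompile(`#+`)

// chunkName expands a pattern such as "*.rclone_chunk.###" for one chunk.
func chunkName(pattern, base string, chunkNo int) string {
    name := strings.Replace(pattern, "*", base, 1)
    return hashRun.ReplaceAllStringFunc(name, func(run string) string {
        return fmt.Sprintf("%0*d", len(run), chunkNo) // pad to the number of '#'s
    })
}

func main() {
    fmt.Println(chunkName("*.rclone_chunk.###", "backup.tar", 1))  // backup.tar.rclone_chunk.001
    fmt.Println(chunkName("*.rclone_chunk.###", "backup.tar", 12)) // backup.tar.rclone_chunk.012
}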
@@ -223,7 +229,7 @@ It has the following fields: ver, size, nchunks, md5, sha1.`,
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -248,12 +254,12 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
}
// Look for a file first
remotePath := fspath.JoinRootPath(basePath, rpath)
baseFs, err := cache.Get(baseName + remotePath)
baseFs, err := cache.Get(ctx, baseName+remotePath)
if err != fs.ErrorIsFile && err != nil {
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", baseName+remotePath)
}
if !operations.CanServerSideMove(baseFs) {
return nil, errors.New("can't use chunker on a backend which doesn't support server side move or copy")
return nil, errors.New("can't use chunker on a backend which doesn't support server-side move or copy")
}
f := &Fs{
@@ -276,7 +282,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
// (yet can't satisfy fstest.CheckListing, will ignore)
if err == nil && !f.useMeta && strings.Contains(rpath, "/") {
firstChunkPath := f.makeChunkName(remotePath, 0, "", "")
_, testErr := cache.Get(baseName + firstChunkPath)
_, testErr := cache.Get(ctx, baseName+firstChunkPath)
if testErr == fs.ErrorIsFile {
err = testErr
}
@@ -289,12 +295,14 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
f.features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: true,
ReadMimeType: true,
ReadMimeType: false, // Object.MimeType not supported
WriteMimeType: true,
BucketBased: true,
CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: true,
}).Fill(f).Mask(baseFs).WrapsFs(f, baseFs)
}).Fill(ctx, f).Mask(ctx, baseFs).WrapsFs(f, baseFs)
f.features.Disable("ListR") // Recursive listing may cause chunker to skip files
return f, err
}
@@ -462,7 +470,7 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
// filePath can be name, relative or absolute path of main file.
//
// chunkNo must be a zero based index of data chunk.
// Negative chunkNo eg. -1 indicates a control chunk.
// Negative chunkNo e.g. -1 indicates a control chunk.
// ctrlType is type of control chunk (must be valid).
// ctrlType must be "" for data chunks.
//
@@ -691,43 +699,50 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
switch entry := dirOrObject.(type) {
case fs.Object:
remote := entry.Remote()
if mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(remote); mainRemote != "" {
if xactID != "" {
if revealHidden {
fs.Infof(f, "ignore temporary chunk %q", remote)
}
break
mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(remote)
if mainRemote == "" {
// this is meta object or standalone file
object := f.newObject("", entry, nil)
byRemote[remote] = object
tempEntries = append(tempEntries, object)
break
}
// this is some kind of chunk
// metaobject should have been created above if present
isSpecial := xactID != "" || ctrlType != ""
mainObject := byRemote[mainRemote]
if mainObject == nil && f.useMeta && !isSpecial {
fs.Debugf(f, "skip orphan data chunk %q", remote)
break
}
if mainObject == nil && !f.useMeta {
// this is the "nometa" case
// create dummy chunked object without metadata
mainObject = f.newObject(mainRemote, nil, nil)
byRemote[mainRemote] = mainObject
if !badEntry[mainRemote] {
tempEntries = append(tempEntries, mainObject)
}
if ctrlType != "" {
if revealHidden {
fs.Infof(f, "ignore control chunk %q", remote)
}
break
}
if isSpecial {
if revealHidden {
fs.Infof(f, "ignore non-data chunk %q", remote)
}
mainObject := byRemote[mainRemote]
if mainObject == nil && f.useMeta {
fs.Debugf(f, "skip chunk %q without meta object", remote)
break
}
if mainObject == nil {
// useMeta is false - create chunked object without metadata
mainObject = f.newObject(mainRemote, nil, nil)
byRemote[mainRemote] = mainObject
if !badEntry[mainRemote] {
tempEntries = append(tempEntries, mainObject)
}
}
if err := mainObject.addChunk(entry, chunkNo); err != nil {
if f.opt.FailHard {
return nil, err
}
badEntry[mainRemote] = true
// need to read metadata to determine the actual object type
// no need to read it if the metaobject is too big or absent,
// relying on the fact that, before validate() is called,
// the `size` field caches the metaobject size, if any
if f.useMeta && mainObject != nil && mainObject.size <= maxMetadataSize {
mainObject.unsure = true
}
break
}
object := f.newObject("", entry, nil)
byRemote[remote] = object
tempEntries = append(tempEntries, object)
if err := mainObject.addChunk(entry, chunkNo); err != nil {
if f.opt.FailHard {
return nil, err
}
badEntry[mainRemote] = true
}
case fs.Directory:
isSubdir[entry.Remote()] = true
wrapDir := fs.NewDirCopy(ctx, entry)
@@ -782,14 +797,22 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
// but opening even a small file can be slow on some backends.
//
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.scanObject(ctx, remote, false)
}
// scanObject is like NewObject with optional quick scan mode.
// The quick mode avoids directory requests other than `List`,
// ignores non-chunked objects and skips chunk size checks.
func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.Object, error) {
if err := f.forbidChunk(false, remote); err != nil {
return nil, errors.Wrap(err, "can't access")
}
var (
o *Object
baseObj fs.Object
err error
o *Object
baseObj fs.Object
err error
sameMain bool
)
if f.useMeta {
@@ -803,6 +826,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// as a hard limit. Anything larger than that is treated as a
// non-chunked file without even checking its contents, so it's
// paramount to prevent metadata from exceeding the maximum size.
// Anything smaller is additionally checked for format.
o = f.newObject("", baseObj, nil)
if o.size > maxMetadataSize {
return o, nil
@@ -832,18 +856,34 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return nil, errors.Wrap(err, "can't detect composite file")
}
caseInsensitive := f.features.CaseInsensitive
for _, dirOrObject := range entries {
entry, ok := dirOrObject.(fs.Object)
if !ok {
continue
}
entryRemote := entry.Remote()
if !strings.Contains(entryRemote, remote) {
if !caseInsensitive && !strings.Contains(entryRemote, remote) {
continue // bypass regexp to save cpu
}
mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(entryRemote)
if mainRemote == "" || mainRemote != remote || ctrlType != "" || xactID != "" {
continue // skip non-conforming, temporary and control chunks
if mainRemote == "" {
continue // skip non-chunks
}
if caseInsensitive {
sameMain = strings.EqualFold(mainRemote, remote)
} else {
sameMain = mainRemote == remote
}
if !sameMain {
continue // skip alien chunks
}
if ctrlType != "" || xactID != "" {
if f.useMeta {
// temporary/control chunk calls for lazy metadata read
o.unsure = true
}
continue
}
//fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
if err := o.addChunk(entry, chunkNo); err != nil {
@@ -853,7 +893,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if o.main == nil && (o.chunks == nil || len(o.chunks) == 0) {
// Scanning hasn't found data chunks with conforming names.
if f.useMeta {
if f.useMeta || quickScan {
// Metadata is required but absent and there are no chunks.
return nil, fs.ErrorObjectNotFound
}
@@ -876,23 +916,48 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// file without metadata. Validate it and update the total data size.
// As an optimization, skip metadata reading here - we will call
// readMetadata lazily when needed (reading can be expensive).
if err := o.validate(); err != nil {
return nil, err
if !quickScan {
if err := o.validate(); err != nil {
return nil, err
}
}
return o, nil
}
// readMetadata reads composite object metadata and caches results,
// in case of critical errors metadata is not cached.
// Returns ErrMetaUnknown if an unsupported metadata format is detected.
// If the object is not chunked but was marked by List or NewObject for recheck,
// readMetadata will attempt to parse it as composite, falling back
// to the non-chunked representation if the attempt fails.
func (o *Object) readMetadata(ctx context.Context) error {
// return quickly if metadata is absent or has been already cached
if !o.f.useMeta {
o.isFull = true
}
if o.isFull {
return nil
}
if !o.isComposite() || !o.f.useMeta {
if !o.isComposite() && !o.unsure {
// this for sure is a non-chunked standalone file
o.isFull = true
return nil
}
// validate metadata
metaObject := o.main
if metaObject.Size() > maxMetadataSize {
if o.unsure {
// this is not metadata but a foreign object
o.unsure = false
o.chunks = nil // make isComposite return false
o.isFull = true // cache results
return nil
}
return ErrMetaTooBig
}
// size is within limits, perform consistency checks
reader, err := metaObject.Open(ctx)
if err != nil {
return err
@@ -905,8 +970,22 @@ func (o *Object) readMetadata(ctx context.Context) error {
switch o.f.opt.MetaFormat {
case "simplejson":
metaInfo, err := unmarshalSimpleJSON(ctx, metaObject, metadata, true)
if err != nil {
metaInfo, madeByChunker, err := unmarshalSimpleJSON(ctx, metaObject, metadata)
if o.unsure {
o.unsure = false
if !madeByChunker {
// this is not metadata but a foreign object
o.chunks = nil // make isComposite return false
o.isFull = true // cache results
return nil
}
}
switch err {
case nil:
// fall thru
case ErrMetaTooBig, ErrMetaUnknown:
return err // return these errors unwrapped for unit tests
default:
return errors.Wrap(err, "invalid metadata")
}
if o.size != metaInfo.Size() || len(o.chunks) != metaInfo.nChunks {
@@ -916,12 +995,36 @@ func (o *Object) readMetadata(ctx context.Context) error {
o.sha1 = metaInfo.sha1
}
o.isFull = true
o.isFull = true // cache results
return nil
}
// put implements Put, PutStream, PutUnchecked, Update
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options []fs.OpenOption, basePut putFn) (obj fs.Object, err error) {
func (f *Fs) put(
ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options []fs.OpenOption,
basePut putFn, action string, target fs.Object) (obj fs.Object, err error) {
// Perform consistency checks
if err := f.forbidChunk(src, remote); err != nil {
return nil, errors.Wrap(err, action+" refused")
}
if target == nil {
// Get target object with a quick directory scan
// skip metadata check if target object does not exist.
// ignore not-chunked objects, skip chunk size checks.
if obj, err := f.scanObject(ctx, remote, true); err == nil {
target = obj
}
}
if target != nil {
obj := target.(*Object)
if err := obj.readMetadata(ctx); err == ErrMetaUnknown {
// refuse to update a file of unsupported format
return nil, errors.Wrap(err, "refusing to "+action)
}
}
// Prepare to upload
c := f.newChunkingReader(src)
wrapIn := c.wrapStream(ctx, in, src)
@@ -958,6 +1061,8 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
}
info := f.wrapInfo(src, chunkRemote, size)
// Refill chunkLimit and let basePut repeatedly call chunkingReader.Read()
c.chunkLimit = c.chunkSize
// TODO: handle range/limit options
chunk, errChunk := basePut(ctx, wrapIn, info, options...)
if errChunk != nil {
@@ -990,7 +1095,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
}
// Wrapped remote may or may not have seen EOF from chunking reader,
// eg. the box multi-uploader reads exactly the chunk size specified
// e.g. the box multi-uploader reads exactly the chunk size specified
// and skips the "EOF" read. Hence, switch to next limit here.
if !(c.chunkLimit == 0 || c.chunkLimit == c.chunkSize || c.sizeTotal == -1 || c.done) {
silentlyRemove(ctx, chunk)
@@ -1009,8 +1114,8 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
// Check for input that looks like valid metadata
needMeta := len(c.chunks) > 1
if c.readCount <= maxMetadataSize && len(c.chunks) == 1 {
_, err := unmarshalSimpleJSON(ctx, c.chunks[0], c.smallHead, false)
needMeta = err == nil
_, madeByChunker, _ := unmarshalSimpleJSON(ctx, c.chunks[0], c.smallHead)
needMeta = madeByChunker
}
// Finalize small object as non-chunked.
@@ -1126,6 +1231,12 @@ func (c *chunkingReader) wrapStream(ctx context.Context, in io.Reader, src fs.Ob
switch {
case c.fs.useMD5:
srcObj := fs.UnWrapObjectInfo(src)
if srcObj != nil && srcObj.Fs().Features().SlowHash {
fs.Debugf(src, "skip slow MD5 on source file, hashing in-transit")
c.hasher = md5.New()
break
}
if c.md5, _ = src.Hash(ctx, hash.MD5); c.md5 == "" {
if c.fs.hashFallback {
c.sha1, _ = src.Hash(ctx, hash.SHA1)
@@ -1134,6 +1245,12 @@ func (c *chunkingReader) wrapStream(ctx context.Context, in io.Reader, src fs.Ob
}
}
case c.fs.useSHA1:
srcObj := fs.UnWrapObjectInfo(src)
if srcObj != nil && srcObj.Fs().Features().SlowHash {
fs.Debugf(src, "skip slow SHA1 on source file, hashing in-transit")
c.hasher = sha1.New()
break
}
if c.sha1, _ = src.Hash(ctx, hash.SHA1); c.sha1 == "" {
if c.fs.hashFallback {
c.md5, _ = src.Hash(ctx, hash.MD5)
@@ -1166,10 +1283,14 @@ func (c *chunkingReader) updateHashes() {
func (c *chunkingReader) Read(buf []byte) (bytesRead int, err error) {
if c.chunkLimit <= 0 {
// Chunk complete - switch to next one.
// We might not get here because some remotes (eg. box multi-uploader)
// Note #1:
// We might not get here because some remotes (e.g. box multi-uploader)
// read the specified size exactly and skip the concluding EOF Read.
// Then a check in the put loop will kick in.
c.chunkLimit = c.chunkSize
// Note #2:
// The crypt backend after receiving EOF here will call Read again
// and we must insist on returning EOF, so we postpone refilling
// chunkLimit to the main loop.
return 0, io.EOF
}
if int64(len(buf)) > c.chunkLimit {
@@ -1202,7 +1323,7 @@ func (c *chunkingReader) accountBytes(bytesRead int64) {
}
}
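The SlowHash branches above switch to hashing in transit: instead of asking the source backend for an MD5 or SHA-1 up front, the chunking reader installs a local hasher and feeds it as the data streams through. The same idea in miniature, using io.TeeReader rather than the chunkingReader machinery:

package main

import (
    "crypto/md5"
    "encoding/hex"
    "fmt"
    "io"
    "io/ioutil"
    "strings"
)

func main() {
    src := strings.NewReader("example payload")
    hasher := md5.New()
    // Every byte read from tee is also written to the hasher.
    tee := io.TeeReader(src, hasher)
    if _, err := io.Copy(ioutil.Discard, tee); err != nil { // the upload would read from tee here
        panic(err)
    }
    fmt.Println(hex.EncodeToString(hasher.Sum(nil)))
}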
// dummyRead updates accounting, hashsums etc by simulating reads
// dummyRead updates accounting, hashsums, etc. by simulating reads
func (c *chunkingReader) dummyRead(in io.Reader, size int64) error {
if c.hasher == nil && c.readCount+size > maxMetadataSize {
c.accountBytes(size)
@@ -1253,29 +1374,16 @@ func (f *Fs) removeOldChunks(ctx context.Context, remote string) {
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if err := f.forbidChunk(src, src.Remote()); err != nil {
return nil, errors.Wrap(err, "refusing to put")
}
return f.put(ctx, in, src, src.Remote(), options, f.base.Put)
return f.put(ctx, in, src, src.Remote(), options, f.base.Put, "put", nil)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if err := f.forbidChunk(src, src.Remote()); err != nil {
return nil, errors.Wrap(err, "refusing to upload")
}
return f.put(ctx, in, src, src.Remote(), options, f.base.Features().PutStream)
return f.put(ctx, in, src, src.Remote(), options, f.base.Features().PutStream, "upload", nil)
}
// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
if err := o.f.forbidChunk(o, o.Remote()); err != nil {
return errors.Wrap(err, "update refused")
}
if err := o.readMetadata(ctx); err != nil {
// refuse to update a file of unsupported format
return errors.Wrap(err, "refusing to update")
}
basePut := o.f.base.Put
if src.Size() < 0 {
basePut = o.f.base.Features().PutStream
@@ -1283,7 +1391,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return errors.New("wrapped file system does not support streaming uploads")
}
}
oNew, err := o.f.put(ctx, in, src, o.Remote(), options, basePut)
oNew, err := o.f.put(ctx, in, src, o.Remote(), options, basePut, "update", o)
if err == nil {
*o = *oNew.(*Object)
}
@@ -1367,7 +1475,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// However, if rclone dies unexpectedly, it can leave hidden temporary
// chunks, which cannot be discovered using the `list` command.
// Remove does not try to search for such chunks or to delete them.
// Sometimes this can lead to strange results eg. when `list` shows that
// Sometimes this can lead to strange results e.g. when `list` shows that
// directory is empty but `rmdir` refuses to remove it because on the
// level of wrapped remote it's actually *not* empty.
// As a workaround users can use `purge` to forcibly remove it.
@@ -1397,7 +1505,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
// to corrupt file in hard mode. Hence, refuse to Remove, too.
return errors.Wrap(err, "refuse to corrupt")
}
if err := o.readMetadata(ctx); err != nil {
if err := o.readMetadata(ctx); err == ErrMetaUnknown {
// Proceed but warn user that unexpected things can happen.
fs.Errorf(o, "Removing a file with unsupported metadata: %v", err)
}
@@ -1425,6 +1533,11 @@ func (f *Fs) copyOrMove(ctx context.Context, o *Object, remote string, do copyMo
if err := f.forbidChunk(o, remote); err != nil {
return nil, errors.Wrapf(err, "can't %s", opName)
}
if err := o.readMetadata(ctx); err != nil {
// Refuse to copy/move composite files with invalid or future
// metadata format which might involve unsupported chunk types.
return nil, errors.Wrapf(err, "can't %s this file", opName)
}
if !o.isComposite() {
fs.Debugf(o, "%s non-chunked object...", opName)
oResult, err := do(ctx, o.mainChunk(), remote) // chain operation to a single wrapped chunk
@@ -1433,11 +1546,6 @@ func (f *Fs) copyOrMove(ctx context.Context, o *Object, remote string, do copyMo
}
return f.newObject("", oResult, nil), nil
}
if err := o.readMetadata(ctx); err != nil {
// Refuse to copy/move composite files with invalid or future
// metadata format which might involve unsupported chunk types.
return nil, errors.Wrapf(err, "can't %s this file", opName)
}
fs.Debugf(o, "%s %d data chunks...", opName, len(o.chunks))
mainRemote := o.remote
@@ -1519,6 +1627,8 @@ func (f *Fs) okForServerSide(ctx context.Context, src fs.Object, opName string)
diff = "chunk sizes"
case f.opt.NameFormat != obj.f.opt.NameFormat:
diff = "chunk name formats"
case f.opt.StartFrom != obj.f.opt.StartFrom:
diff = "chunk numbering"
case f.opt.MetaFormat != obj.f.opt.MetaFormat:
diff = "meta formats"
}
@@ -1528,6 +1638,10 @@ func (f *Fs) okForServerSide(ctx context.Context, src fs.Object, opName string)
return
}
if obj.unsure {
// if unsure, re-read metadata to establish whether the object is composite
_ = obj.readMetadata(ctx)
}
requireMetaHash := obj.isComposite() && f.opt.MetaFormat == "simplejson"
if !requireMetaHash && !f.hashAll {
ok = true // hash is not required for metadata
@@ -1558,7 +1672,7 @@ func (f *Fs) okForServerSide(ctx context.Context, src fs.Object, opName string)
return
}
// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -1579,7 +1693,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return f.copyOrMove(ctx, obj, remote, baseCopy, md5, sha1, "copy")
}
// Move src to this remote using server side move operations.
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -1624,7 +1738,7 @@ func (f *Fs) baseMove(ctx context.Context, src fs.Object, remote string, delMode
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1704,6 +1818,16 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
do(ctx, wrappedNotifyFunc, pollIntervalChan)
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
do := f.base.Features().Shutdown
if do == nil {
return nil
}
return do(ctx)
}
// Object represents a composite file wrapping one or more data chunks
type Object struct {
remote string
@@ -1711,6 +1835,7 @@ type Object struct {
chunks []fs.Object // active data chunks if file is composite, or wrapped file as a single chunk if meta format is 'none'
size int64 // cached total size of chunks in a composite file or -1 for non-chunked files
isFull bool // true if metadata has been read
unsure bool // true if need to read metadata to detect object type
md5 string
sha1 string
f *Fs
@@ -1732,6 +1857,9 @@ func (o *Object) addChunk(chunk fs.Object, chunkNo int) error {
copy(newChunks, o.chunks)
o.chunks = newChunks
}
if o.chunks[chunkNo] != nil {
return fmt.Errorf("duplicate chunk number %d", chunkNo+o.f.opt.StartFrom)
}
o.chunks[chunkNo] = chunk
return nil
}
@@ -1861,15 +1989,16 @@ func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
// on the level of wrapped remote but chunker is unaware of that.
//
func (o *Object) Hash(ctx context.Context, hashType hash.Type) (string, error) {
if err := o.readMetadata(ctx); err != nil {
return "", err // valid metadata is required to get hash, abort
}
if !o.isComposite() {
// First, chain to the wrapped non-chunked file if possible.
if value, err := o.mainChunk().Hash(ctx, hashType); err == nil && value != "" {
return value, nil
}
}
if err := o.readMetadata(ctx); err != nil {
return "", err // valid metadata is required to get hash, abort
}
// Try hash from metadata if the file is composite or if wrapped remote fails.
switch hashType {
case hash.MD5:
@@ -1894,13 +2023,13 @@ func (o *Object) UnWrap() fs.Object {
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
if !o.isComposite() {
return o.mainChunk().Open(ctx, options...) // chain to wrapped non-chunked file
}
if err := o.readMetadata(ctx); err != nil {
// refuse to open unsupported format
return nil, errors.Wrap(err, "can't open")
}
if !o.isComposite() {
return o.mainChunk().Open(ctx, options...) // chain to wrapped non-chunked file
}
var openOptions []fs.OpenOption
var offset, limit int64 = 0, -1
@@ -2158,72 +2287,74 @@ func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1 s
SHA1: sha1,
}
data, err := json.Marshal(&metadata)
if err == nil && data != nil && len(data) >= maxMetadataSize {
if err == nil && data != nil && len(data) >= maxMetadataSizeWritten {
// be a nitpicker, never produce something you can't consume
return nil, errors.New("metadata can't be this big, please report to rclone developers")
}
return data, err
}
// unmarshalSimpleJSON
// unmarshalSimpleJSON parses metadata.
//
// In case of errors, returns a flag telling whether the input was
// produced by an incompatible version of rclone or wasn't metadata at all.
// Only metadata format version 1 is supported atm.
// Future releases will transparently migrate older metadata objects.
// New format will have a higher version number and cannot be correctly
// handled by current implementation.
// The version check below will then explicitly ask user to upgrade rclone.
//
func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte, strictChecks bool) (info *ObjectInfo, err error) {
func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) {
// Be strict about JSON format
// to reduce possibility that a random small file resembles metadata.
if data != nil && len(data) > maxMetadataSize {
return nil, errors.New("too big")
if data != nil && len(data) > maxMetadataSizeWritten {
return nil, false, ErrMetaTooBig
}
if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
return nil, errors.New("invalid json")
return nil, false, errors.New("invalid json")
}
var metadata metaSimpleJSON
err = json.Unmarshal(data, &metadata)
if err != nil {
return nil, err
return nil, false, err
}
// Basic fields are strictly required
// to reduce possibility that a random small file resembles metadata.
if metadata.Version == nil || metadata.Size == nil || metadata.ChunkNum == nil {
return nil, errors.New("missing required field")
return nil, false, errors.New("missing required field")
}
// Perform strict checks, avoid corruption of future metadata formats.
if *metadata.Version < 1 {
return nil, errors.New("wrong version")
return nil, false, errors.New("wrong version")
}
if *metadata.Size < 0 {
return nil, errors.New("negative file size")
return nil, false, errors.New("negative file size")
}
if *metadata.ChunkNum < 0 {
return nil, errors.New("negative number of chunks")
return nil, false, errors.New("negative number of chunks")
}
if *metadata.ChunkNum > maxSafeChunkNumber {
return nil, ErrChunkOverflow
return nil, true, ErrChunkOverflow // produced by incompatible version of rclone
}
if metadata.MD5 != "" {
_, err = hex.DecodeString(metadata.MD5)
if len(metadata.MD5) != 32 || err != nil {
return nil, errors.New("wrong md5 hash")
return nil, false, errors.New("wrong md5 hash")
}
}
if metadata.SHA1 != "" {
_, err = hex.DecodeString(metadata.SHA1)
if len(metadata.SHA1) != 40 || err != nil {
return nil, errors.New("wrong sha1 hash")
return nil, false, errors.New("wrong sha1 hash")
}
}
// ChunkNum is allowed to be 0 in future versions
if *metadata.ChunkNum < 1 && *metadata.Version <= metadataVersion {
return nil, errors.New("wrong number of chunks")
return nil, false, errors.New("wrong number of chunks")
}
// Non-strict mode also accepts future metadata versions
if *metadata.Version > metadataVersion && strictChecks {
return nil, fmt.Errorf("version %d is not supported, please upgrade rclone", metadata.Version)
if *metadata.Version > metadataVersion {
return nil, true, ErrMetaUnknown // produced by incompatible version of rclone
}
var nilFs *Fs // nil object triggers appropriate type method
@@ -2231,7 +2362,7 @@ func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte,
info.nChunks = *metadata.ChunkNum
info.md5 = metadata.MD5
info.sha1 = metadata.SHA1
return info, nil
return info, true, nil
}
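The simplejson metadata object that unmarshalSimpleJSON validates is a tiny JSON document with the fields ver, size, nchunks and optional md5/sha1 (the same keys appear in the hunk header above and in the test fixture further down). A minimal sketch of decoding it with pointer fields so missing required keys can be detected, independent of the chunker's own types:

package main

import (
    "encoding/json"
    "fmt"
)

type simpleMeta struct {
    Version  *int   `json:"ver"`
    Size     *int64 `json:"size"`
    ChunkNum *int   `json:"nchunks"`
    MD5      string `json:"md5"`
    SHA1     string `json:"sha1"`
}

func main() {
    data := []byte(`{"ver":1,"size":9,"nchunks":3,"sha1":"0707f2970043f9f7c22029482db27733deaec029"}`)
    var m simpleMeta
    if err := json.Unmarshal(data, &m); err != nil {
        fmt.Println("not chunker metadata:", err)
        return
    }
    if m.Version == nil || m.Size == nil || m.ChunkNum == nil {
        fmt.Println("missing required field")
        return
    }
    fmt.Printf("format v%d: %d bytes in %d chunks\n", *m.Version, *m.Size, *m.ChunkNum)
}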
func silentlyRemove(ctx context.Context, o fs.Object) {
@@ -2278,6 +2409,7 @@ var (
_ fs.Abouter = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.ObjectInfo = (*ObjectInfo)(nil)
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)

View File

@@ -13,6 +13,7 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
@@ -663,6 +664,80 @@ func testMetadataInput(t *testing.T, f *Fs) {
runSubtest(futureMeta, "future")
}
// test that chunker refuses to modify objects with future/unknown metadata
func testFutureProof(t *testing.T, f *Fs) {
if f.opt.MetaFormat == "none" {
t.Skip("this test requires metadata support")
}
saveOpt := f.opt
ctx := context.Background()
f.opt.FailHard = true
const dir = "future"
const file = dir + "/test"
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
}()
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
putPart := func(name string, part int, data, msg string) {
if part > 0 {
name = f.makeChunkName(name, part-1, "", "")
}
item := fstest.Item{Path: name, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true)
assert.NotNil(t, obj, msg)
}
// simulate chunked object from future
meta := `{"ver":999,"nchunks":3,"size":9,"garbage":"litter","sha1":"0707f2970043f9f7c22029482db27733deaec029"}`
putPart(file, 0, meta, "metaobject")
putPart(file, 1, "abc", "chunk1")
putPart(file, 2, "def", "chunk2")
putPart(file, 3, "ghi", "chunk3")
// List should succeed
ls, err := f.List(ctx, dir)
assert.NoError(t, err)
assert.Equal(t, 1, len(ls))
assert.Equal(t, int64(9), ls[0].Size())
// NewObject should succeed
obj, err := f.NewObject(ctx, file)
assert.NoError(t, err)
assert.Equal(t, file, obj.Remote())
assert.Equal(t, int64(9), obj.Size())
// Hash must fail
_, err = obj.Hash(ctx, hash.SHA1)
assert.Equal(t, ErrMetaUnknown, err)
// Move must fail
mobj, err := operations.Move(ctx, f, nil, file+"2", obj)
assert.Nil(t, mobj)
assert.Error(t, err)
if err != nil {
assert.Contains(t, err.Error(), "please upgrade rclone")
}
// Put must fail
oi := object.NewStaticObjectInfo(file, modTime, 3, true, nil, nil)
buf := bytes.NewBufferString("abc")
_, err = f.Put(ctx, buf, oi)
assert.Error(t, err)
// Rcat must fail
in := ioutil.NopCloser(bytes.NewBufferString("abc"))
robj, err := operations.Rcat(ctx, f, file, in, modTime)
assert.Nil(t, robj)
assert.NotNil(t, err)
if err != nil {
assert.Contains(t, err.Error(), "please upgrade rclone")
}
}
// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
t.Run("PutLarge", func(t *testing.T) {
@@ -686,6 +761,9 @@ func (f *Fs) InternalTest(t *testing.T) {
t.Run("MetadataInput", func(t *testing.T) {
testMetadataInput(t, f)
})
t.Run("FutureProof", func(t *testing.T) {
testFutureProof(t, f)
})
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -15,10 +15,10 @@ import (
// Command line flags
var (
// Invalid characters are not supported by some remotes, eg. Mailru.
// Invalid characters are not supported by some remotes, e.g. Mailru.
// We enable testing with invalid characters when -remote is not set, so
// chunker overlays a local directory, but invalid characters are disabled
// by default when -remote is set, eg. when test_all runs backend tests.
// by default when -remote is set, e.g. when test_all runs backend tests.
// You can still test with invalid characters using the below flag.
UseBadChars = flag.Bool("bad-chars", false, "Set to test bad characters in file names when -remote is set")
)

1 backend/compress/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
test

1416 backend/compress/compress.go Normal file

File diff suppressed because it is too large

View File

@@ -0,0 +1,65 @@
// Test Compress filesystem interface
package compress
import (
"os"
"path/filepath"
"testing"
_ "github.com/rclone/rclone/backend/drive"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/swift"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
opt := fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
UnimplementableObjectMethods: []string{}}
fstests.Run(t, &opt)
}
// TestRemoteGzip tests GZIP compression
func TestRemoteGzip(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip")
name := "TestCompressGzip"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
UnimplementableObjectMethods: []string{
"GetTier",
"SetTier",
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "compress"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "compression_mode", Value: "gzip"},
},
})
}
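The ExtraConfig items above are equivalent to a hand-written remote definition; a hypothetical rclone.conf stanza for the same gzip setup would look roughly like this (the path is illustrative):

[TestCompressGzip]
type = compress
remote = /tmp/rclone-compress-test-gzip
compression_mode = gzip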

View File

@@ -147,7 +147,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
// If salt is "" we use a fixed salt just to make attackers' lives
// slightly harder than using no salt.
//
// Note that empty passsword makes all 0x00 keys which is used in the
// Note that empty password makes all 0x00 keys which is used in the
// tests.
func (c *Cipher) Key(password, salt string) (err error) {
const keySize = len(c.dataKey) + len(c.nameKey) + len(c.nameTweak)
@@ -633,11 +633,8 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
}
// possibly err != nil here, but we will process the
// data and the next call to ReadFull will return 0, err
// Write nonce to start of block
copy(fh.buf, fh.nonce[:])
// Encrypt the block using the nonce
block := fh.buf
secretbox.Seal(block[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
fh.bufIndex = 0
fh.bufSize = blockHeaderSize + n
fh.nonce.increment()
@@ -782,8 +779,7 @@ func (fh *decrypter) fillBuffer() (err error) {
return ErrorEncryptedFileBadHeader
}
// Decrypt the block using the nonce
block := fh.buf
_, ok := secretbox.Open(block[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
_, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
if !ok {
if err != nil {
return err // return pending error as it is likely more accurate
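The change above drops the intermediate block alias and seals or opens each block directly into fh.buf[:0], reusing the buffer's backing array. A minimal standalone sketch of that pattern with NaCl secretbox, assuming a 24-byte nonce and 32-byte key:

package main

import (
    "crypto/rand"
    "fmt"

    "golang.org/x/crypto/nacl/secretbox"
)

func main() {
    var key [32]byte
    var nonce [24]byte
    if _, err := rand.Read(key[:]); err != nil {
        panic(err)
    }
    if _, err := rand.Read(nonce[:]); err != nil {
        panic(err)
    }
    plain := []byte("hello")
    buf := make([]byte, 0, len(plain)+secretbox.Overhead)
    // Seal appends to buf[:0], so the ciphertext reuses buf's backing array.
    sealed := secretbox.Seal(buf[:0], plain, &nonce, &key)
    opened, ok := secretbox.Open(nil, sealed, &nonce, &key)
    fmt.Println(ok, string(opened))
}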

View File

@@ -30,7 +30,7 @@ func init() {
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Required: true,
}, {
Name: "filename_encryption",
@@ -76,7 +76,7 @@ NB If filename_encryption is "off" then this option will do nothing.`,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Allow server side operations (eg copy) to work across different crypt configs.
Help: `Allow server-side operations (e.g. copy) to work across different crypt configs.
Normally this option is not what you want, but if you have two crypts
pointing to the same backend you can use it.
@@ -144,7 +144,7 @@ func NewCipher(m configmap.Mapper) (*Cipher, error) {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -159,21 +159,21 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
}
// Make sure to remove trailing . reffering to the current dir
// Make sure to remove trailing . referring to the current dir
if path.Base(rpath) == "." {
rpath = strings.TrimSuffix(rpath, ".")
}
// Look for a file first
var wrappedFs fs.Fs
if rpath == "" {
wrappedFs, err = cache.Get(remote)
wrappedFs, err = cache.Get(ctx, remote)
} else {
remotePath := fspath.JoinRootPath(remote, cipher.EncryptFileName(rpath))
wrappedFs, err = cache.Get(remotePath)
wrappedFs, err = cache.Get(ctx, remotePath)
// if that didn't produce a file, look for a directory
if err != fs.ErrorIsFile {
remotePath = fspath.JoinRootPath(remote, cipher.EncryptDirName(rpath))
wrappedFs, err = cache.Get(remotePath)
wrappedFs, err = cache.Get(ctx, remotePath)
}
}
if err != fs.ErrorIsFile && err != nil {
@@ -199,7 +199,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
SetTier: true,
GetTier: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
return f, err
}
@@ -444,7 +444,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return do(ctx, f.cipher.EncryptDirName(dir))
}
// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -469,7 +469,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return f.newObject(oResult), nil
}
// Move src to this remote using server side move operations.
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -495,7 +495,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -917,6 +917,16 @@ func (f *Fs) Disconnect(ctx context.Context) error {
return do(ctx)
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
do := f.Fs.Features().Shutdown
if do == nil {
return nil
}
return do(ctx)
}
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source
//
// This encrypts the remote name and adjusts the size
@@ -1025,6 +1035,7 @@ var (
_ fs.PublicLinker = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.ObjectInfo = (*ObjectInfo)(nil)
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)

View File

@@ -33,7 +33,7 @@ func (o testWrapper) UnWrap() fs.Object {
// Create a temporary local fs to upload things from
func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
localFs, err := fs.TemporaryLocalFs()
localFs, err := fs.TemporaryLocalFs(context.Background())
require.NoError(t, err)
cleanup = func() {
require.NoError(t, localFs.Rmdir(context.Background(), ""))
@@ -87,7 +87,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
}
// wrap the object in a crypt for upload using the nonce we
// saved from the encryptor
// saved from the encrypter
src := f.newObjectInfo(oi, nonce)
// Test ObjectInfo methods

View File

@@ -35,6 +35,7 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
@@ -72,6 +73,7 @@ const (
partialFields = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks"
listRGrouping = 50 // number of IDs to search at once when using ListR
listRInputBuffer = 1000 // size of input buffer when using ListR
defaultXDGIcon = "text-html"
)
// Globals
@@ -127,6 +129,12 @@ var (
_mimeTypeCustomTransform = map[string]string{
"application/vnd.google-apps.script+json": "application/json",
}
_mimeTypeToXDGLinkIcons = map[string]string{
"application/vnd.google-apps.document": "x-office-document",
"application/vnd.google-apps.drawing": "x-office-drawing",
"application/vnd.google-apps.presentation": "x-office-presentation",
"application/vnd.google-apps.spreadsheet": "x-office-spreadsheet",
}
fetchFormatsOnce sync.Once // make sure we fetch the export/import formats only once
_exportFormats map[string][]string // allowed export MIME type conversions
_importFormats map[string][]string // allowed import MIME type conversions
@@ -175,8 +183,7 @@ func init() {
Description: "Google Drive",
NewFs: NewFs,
CommandHelp: commandHelp,
Config: func(name string, m configmap.Mapper) {
ctx := context.TODO()
Config: func(ctx context.Context, name string, m configmap.Mapper) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -193,7 +200,7 @@ func init() {
}
if opt.ServiceAccountFile == "" {
err = oauthutil.Config("drive", name, m, driveConfig, nil)
err = oauthutil.Config(ctx, "drive", name, m, driveConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
@@ -283,8 +290,8 @@ Instructs rclone to operate on your "Shared with me" folder (where
Google Drive lets you access the files and folders others have shared
with you).
This works both with the "list" (lsd, lsl, etc) and the "copy"
commands (copy, sync, etc), and with all other commands too.`,
This works both with the "list" (lsd, lsl, etc.) and the "copy"
commands (copy, sync, etc.), and with all other commands too.`,
Advanced: true,
}, {
Name: "trashed_only",
@@ -434,9 +441,9 @@ need to use --ignore size also.`,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Allow server side operations (eg copy) to work across different drive configs.
Help: `Allow server-side operations (e.g. copy) to work across different drive configs.
This can be useful if you wish to do a server side copy between two
This can be useful if you wish to do a server-side copy between two
different Google drives. Note that this isn't enabled by default
because it isn't easy to tell if it will work between any two
configurations.`,
@@ -470,6 +477,21 @@ Note that this detection is relying on error message strings which
Google don't document so it may break in the future.
See: https://github.com/rclone/rclone/issues/3857
`,
Advanced: true,
}, {
Name: "stop_on_download_limit",
Default: false,
Help: `Make download limit errors be fatal
At the time of writing it is only possible to download 10TB of data from
Google Drive a day (this is an undocumented limit). When this limit is
reached Google Drive produces a slightly different error message. When
this flag is set it causes these errors to be fatal. These will stop
the in-progress sync.
Note that this detection is relying on error message strings which
Google don't document so it may break in the future.
`,
Advanced: true,
}, {
@@ -539,6 +561,7 @@ type Options struct {
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
DisableHTTP2 bool `config:"disable_http2"`
StopOnUploadLimit bool `config:"stop_on_upload_limit"`
StopOnDownloadLimit bool `config:"stop_on_download_limit"`
SkipShortcuts bool `config:"skip_shortcuts"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -548,6 +571,7 @@ type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
ci *fs.ConfigInfo // global config
features *fs.Features // optional features
svc *drive.Service // the connection to the drive server
v2Svc *drive_v2.Service // used to create download links for the v2 api
@@ -638,6 +662,9 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
return false, fserrors.FatalError(err)
}
return true, err
} else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" {
fs.Errorf(f, "Received download limit error: %v", err)
return false, fserrors.FatalError(err)
} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
fs.Errorf(f, "Received team drive file limit error: %v", err)
return false, fserrors.FatalError(err)
@@ -669,7 +696,7 @@ func containsString(slice []string, s string) bool {
// getFile returns drive.File for the ID passed and fields passed in
func (f *Fs) getFile(ID string, fields googleapi.Field) (info *drive.File, err error) {
err = f.pacer.CallNoRetry(func() (bool, error) {
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Get(ID).
Fields(fields).
SupportsAllDrives(true).
@@ -693,10 +720,10 @@ func (f *Fs) getRootID() (string, error) {
// If the user fn ever returns true then it early exits with found = true
//
// Search params: https://developers.google.com/drive/search-parameters
func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directoriesOnly, filesOnly, includeAll bool, fn listFn) (found bool, err error) {
func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directoriesOnly, filesOnly, trashedOnly, includeAll bool, fn listFn) (found bool, err error) {
var query []string
if !includeAll {
q := "trashed=" + strconv.FormatBool(f.opt.TrashedOnly)
q := "trashed=" + strconv.FormatBool(trashedOnly)
if f.opt.TrashedOnly {
q = fmt.Sprintf("(mimeType='%s' or %s)", driveFolderType, q)
}
@@ -921,8 +948,10 @@ func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, er
// Figure out if the user wants to use a team drive
func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name string) error {
ci := fs.GetConfig(ctx)
// Stop if we are running non-interactive config
if fs.Config.AutoConfirm {
if ci.AutoConfirm {
return nil
}
if opt.TeamDriveID == "" {
@@ -933,7 +962,7 @@ func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name
if !config.Confirm(false) {
return nil
}
f, err := newFs(name, "", m)
f, err := newFs(ctx, name, "", m)
if err != nil {
return errors.Wrap(err, "failed to make Fs to list teamdrives")
}
@@ -960,8 +989,8 @@ func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name
}
// getClient makes an http client according to the options
func getClient(opt *Options) *http.Client {
t := fshttp.NewTransportCustom(fs.Config, func(t *http.Transport) {
func getClient(ctx context.Context, opt *Options) *http.Client {
t := fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
if opt.DisableHTTP2 {
t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
}
@@ -971,7 +1000,7 @@ func getClient(opt *Options) *http.Client {
}
}
func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) {
func getServiceAccountClient(ctx context.Context, opt *Options, credentialsData []byte) (*http.Client, error) {
scopes := driveScopes(opt.Scope)
conf, err := google.JWTConfigFromJSON(credentialsData, scopes...)
if err != nil {
@@ -980,11 +1009,11 @@ func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client
if opt.Impersonate != "" {
conf.Subject = opt.Impersonate
}
ctxWithSpecialClient := oauthutil.Context(getClient(opt))
ctxWithSpecialClient := oauthutil.Context(ctx, getClient(ctx, opt))
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}
func createOAuthClient(opt *Options, name string, m configmap.Mapper) (*http.Client, error) {
func createOAuthClient(ctx context.Context, opt *Options, name string, m configmap.Mapper) (*http.Client, error) {
var oAuthClient *http.Client
var err error
@@ -997,12 +1026,12 @@ func createOAuthClient(opt *Options, name string, m configmap.Mapper) (*http.Cli
opt.ServiceAccountCredentials = string(loadedCreds)
}
if opt.ServiceAccountCredentials != "" {
oAuthClient, err = getServiceAccountClient(opt, []byte(opt.ServiceAccountCredentials))
oAuthClient, err = getServiceAccountClient(ctx, opt, []byte(opt.ServiceAccountCredentials))
if err != nil {
return nil, errors.Wrap(err, "failed to create oauth client from service account")
}
} else {
oAuthClient, _, err = oauthutil.NewClientWithBaseClient(name, m, driveConfig, getClient(opt))
oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(ctx, opt))
if err != nil {
return nil, errors.Wrap(err, "failed to create oauth client")
}
@@ -1045,7 +1074,7 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
//
// It constructs a valid Fs but doesn't attempt to figure out whether
// it is a file or a directory.
func newFs(name, path string, m configmap.Mapper) (*Fs, error) {
func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -1061,7 +1090,7 @@ func newFs(name, path string, m configmap.Mapper) (*Fs, error) {
return nil, errors.Wrap(err, "drive: chunk size")
}
oAuthClient, err := createOAuthClient(opt, name, m)
oAuthClient, err := createOAuthClient(ctx, opt, name, m)
if err != nil {
return nil, errors.Wrap(err, "drive: failed when making oauth client")
}
@@ -1071,11 +1100,13 @@ func newFs(name, path string, m configmap.Mapper) (*Fs, error) {
return nil, err
}
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst))),
ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst))),
m: m,
grouping: listRGrouping,
listRmu: new(sync.Mutex),
@@ -1089,7 +1120,7 @@ func newFs(name, path string, m configmap.Mapper) (*Fs, error) {
WriteMimeType: true,
CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
}).Fill(f)
}).Fill(ctx, f)
// Create a new authorized Drive client.
f.client = oAuthClient
@@ -1109,9 +1140,8 @@ func newFs(name, path string, m configmap.Mapper) (*Fs, error) {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
f, err := newFs(name, path, m)
func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, error) {
f, err := newFs(ctx, name, path, m)
if err != nil {
return nil, err
}
@@ -1271,11 +1301,15 @@ func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMim
if t == nil {
return nil, errors.Errorf("unsupported link type %s", exportMimeType)
}
xdgIcon := _mimeTypeToXDGLinkIcons[info.MimeType]
if xdgIcon == "" {
xdgIcon = defaultXDGIcon
}
var buf bytes.Buffer
err := t.Execute(&buf, struct {
URL, Title string
URL, Title, XDGIcon string
}{
info.WebViewLink, info.Name,
info.WebViewLink, info.Name, xdgIcon,
})
if err != nil {
return nil, errors.Wrap(err, "executing template failed")
@@ -1376,7 +1410,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
pathID = actualID(pathID)
found, err = f.list(ctx, []string{pathID}, leaf, true, false, false, func(item *drive.File) bool {
found, err = f.list(ctx, []string{pathID}, leaf, true, false, f.opt.TrashedOnly, false, func(item *drive.File) bool {
if !f.opt.SkipGdocs {
_, exportName, _, isDocument := f.findExportFormat(item)
if exportName == leaf {
@@ -1569,7 +1603,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
directoryID = actualID(directoryID)
var iErr error
_, err = f.list(ctx, []string{directoryID}, "", false, false, false, func(item *drive.File) bool {
_, err = f.list(ctx, []string{directoryID}, "", false, false, f.opt.TrashedOnly, false, func(item *drive.File) bool {
entry, err := f.itemToDirEntry(path.Join(dir, item.Name), item)
if err != nil {
iErr = err
@@ -1654,7 +1688,7 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
listRSlices{dirs, paths}.Sort()
var iErr error
foundItems := false
_, err := f.list(ctx, dirs, "", false, false, false, func(item *drive.File) bool {
_, err := f.list(ctx, dirs, "", false, false, f.opt.TrashedOnly, false, func(item *drive.File) bool {
// shared with me items have no parents when at the root
if f.opt.SharedWithMe && len(item.Parents) == 0 && len(paths) == 1 && paths[0] == "" {
item.Parents = dirs
@@ -1670,7 +1704,7 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
if len(paths) == 1 {
// don't check parents at root because
// - shared with me items have no parents at the root
// - if using a root alias, eg "root" or "appDataFolder" the ID won't match
// - if using a root alias, e.g. "root" or "appDataFolder" the ID won't match
i = 0
// items at root can have more than one parent so we need to put
// the item in just once.
@@ -1785,7 +1819,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
mu := sync.Mutex{} // protects in and overflow
wg := sync.WaitGroup{}
in := make(chan listREntry, listRInputBuffer)
out := make(chan error, fs.Config.Checkers)
out := make(chan error, f.ci.Checkers)
list := walk.NewListRHelper(callback)
overflow := []listREntry{}
listed := 0
@@ -1824,7 +1858,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
wg.Add(1)
in <- listREntry{directoryID, dir}
for i := 0; i < fs.Config.Checkers; i++ {
for i := 0; i < f.ci.Checkers; i++ {
go f.listRRunner(ctx, &wg, in, out, cb, sendJob)
}
go func() {
@@ -1857,7 +1891,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
mu.Unlock()
}()
// wait until all the workers have finished
for i := 0; i < fs.Config.Checkers; i++ {
for i := 0; i < f.ci.Checkers; i++ {
e := <-out
mu.Lock()
// if one worker returns an error early, close the input so all other workers exit
@@ -2025,10 +2059,10 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
exisitingObj, err := f.NewObject(ctx, src.Remote())
existingObj, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src, options...)
@@ -2129,7 +2163,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
for _, srcDir := range dirs[1:] {
// list the objects
infos := []*drive.File{}
_, err := f.list(ctx, []string{srcDir.ID()}, "", false, false, true, func(info *drive.File) bool {
_, err := f.list(ctx, []string{srcDir.ID()}, "", false, false, f.opt.TrashedOnly, true, func(info *drive.File) bool {
infos = append(infos, info)
return false
})
@@ -2207,7 +2241,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
}
var trashedFiles = false
if check {
found, err := f.list(ctx, []string{directoryID}, "", false, false, true, func(item *drive.File) bool {
found, err := f.list(ctx, []string{directoryID}, "", false, false, f.opt.TrashedOnly, true, func(item *drive.File) bool {
if !item.Trashed {
fs.Debugf(dir, "Rmdir: contains file: %q", item.Name)
return true
@@ -2253,7 +2287,7 @@ func (f *Fs) Precision() time.Duration {
return time.Millisecond
}
// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -2367,8 +2401,57 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
}
type cleanupResult struct {
Errors int
}
func (r cleanupResult) Error() string {
return fmt.Sprintf("%d errors during cleanup - see log", r.Errors)
}
func (f *Fs) cleanupTeamDrive(ctx context.Context, dir string, directoryID string) (r cleanupResult, err error) {
_, err = f.list(ctx, []string{directoryID}, "", false, false, true, false, func(item *drive.File) bool {
remote := path.Join(dir, item.Name)
if item.ExplicitlyTrashed { // description is wrong - can also be set for folders - no need to recurse them
err := f.delete(ctx, item.Id, false)
if err != nil {
r.Errors++
fs.Errorf(remote, "%v", err)
}
return false
}
if item.MimeType == driveFolderType {
if !isShortcutID(item.Id) {
rNew, _ := f.cleanupTeamDrive(ctx, remote, item.Id)
r.Errors += rNew.Errors
}
return false
}
return false
})
if err != nil {
err = errors.Wrap(err, "failed to list directory")
r.Errors++
fs.Errorf(dir, "%v", err)
}
if r.Errors != 0 {
return r, r
}
return r, nil
}
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) error {
if f.isTeamDrive {
directoryID, err := f.dirCache.FindDir(ctx, "", false)
if err != nil {
return err
}
directoryID = actualID(directoryID)
_, err = f.cleanupTeamDrive(ctx, "", directoryID)
return err
}
err := f.pacer.Call(func() (bool, error) {
err := f.svc.Files.EmptyTrash().Context(ctx).Do()
return f.shouldRetry(err)
@@ -2377,6 +2460,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
if err != nil {
return err
}
fs.Logf(f, "Note that emptying the trash happens in the background and can take some time.")
return nil
}
@@ -2420,7 +2504,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
usage := &fs.Usage{
Used: fs.NewUsageValue(q.UsageInDrive), // bytes in use
Trashed: fs.NewUsageValue(q.UsageInDriveTrash), // bytes in trash
Other: fs.NewUsageValue(q.Usage - q.UsageInDrive), // other usage eg gmail in drive
Other: fs.NewUsageValue(q.Usage - q.UsageInDrive), // other usage e.g. gmail in drive
}
if q.Limit > 0 {
usage.Total = fs.NewUsageValue(q.Limit) // quota of bytes that can be used
@@ -2429,7 +2513,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return usage, nil
}
// Move src to this remote using server side move operations.
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -2530,7 +2614,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -2751,7 +2835,7 @@ func (f *Fs) changeChunkSize(chunkSizeString string) (err error) {
return err
}
func (f *Fs) changeServiceAccountFile(file string) (err error) {
func (f *Fs) changeServiceAccountFile(ctx context.Context, file string) (err error) {
fs.Debugf(nil, "Changing Service Account File from %s to %s", f.opt.ServiceAccountFile, file)
if file == f.opt.ServiceAccountFile {
return nil
@@ -2773,7 +2857,7 @@ func (f *Fs) changeServiceAccountFile(file string) (err error) {
}()
f.opt.ServiceAccountFile = file
f.opt.ServiceAccountCredentials = ""
oAuthClient, err := createOAuthClient(&f.opt, f.name, f.m)
oAuthClient, err := createOAuthClient(ctx, &f.opt, f.name, f.m)
if err != nil {
return errors.Wrap(err, "drive: failed when making oauth client")
}
@@ -2850,7 +2934,7 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
}
var info *drive.File
err = dstFs.pacer.CallNoRetry(func() (bool, error) {
err = dstFs.pacer.Call(func() (bool, error) {
info, err = dstFs.svc.Files.Create(createInfo).
Fields(partialFields).
SupportsAllDrives(true).
@@ -2903,7 +2987,7 @@ func (r unTrashResult) Error() string {
func (f *Fs) unTrash(ctx context.Context, dir string, directoryID string, recurse bool) (r unTrashResult, err error) {
directoryID = actualID(directoryID)
fs.Debugf(dir, "finding trash to restore in directory %q", directoryID)
_, err = f.list(ctx, []string{directoryID}, "", false, false, true, func(item *drive.File) bool {
_, err = f.list(ctx, []string{directoryID}, "", false, false, f.opt.TrashedOnly, true, func(item *drive.File) bool {
remote := path.Join(dir, item.Name)
if item.ExplicitlyTrashed {
fs.Infof(remote, "restoring %q", item.Id)
@@ -2959,6 +3043,41 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
return f.unTrash(ctx, dir, directoryID, true)
}
// copy file with id to dest
func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
info, err := f.getFile(id, f.fileFields)
if err != nil {
return errors.Wrap(err, "couldn't find id")
}
if info.MimeType == driveFolderType {
return errors.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
}
info.Name = f.opt.Enc.ToStandardName(info.Name)
o, err := f.newObjectWithInfo(info.Name, info)
if err != nil {
return err
}
destDir, destLeaf, err := fspath.Split(dest)
if err != nil {
return err
}
if destLeaf == "" {
destLeaf = info.Name
}
if destDir == "" {
destDir = "."
}
dstFs, err := cache.Get(ctx, destDir)
if err != nil {
return err
}
_, err = operations.Copy(ctx, dstFs, nil, destLeaf, o)
if err != nil {
return errors.Wrap(err, "copy failed")
}
return nil
}
var commandHelp = []fs.CommandHelp{{
Name: "get",
Short: "Get command for fetching the drive config parameters",
@@ -3059,6 +3178,29 @@ Result:
"Errors": 0
}
`,
}, {
Name: "copyid",
Short: "Copy files by ID",
Long: `This command copies files by ID
Usage:
rclone backend copyid drive: ID path
rclone backend copyid drive: ID1 path1 ID2 path2
It copies the drive file with ID given to the path (an rclone path which
will be passed internally to rclone copyto). The ID and path pairs can be
repeated.
The path should end with a / to indicate that the file should be copied
as named into this directory. If it doesn't end with a / then the last
path component will be used as the file name.
If the destination is a drive backend then server-side copying will be
attempted if possible.
Use the -i flag to see what would be copied before copying.
`,
}}
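// As a rough illustration of the copyid help above (the file ID and paths
// below are hypothetical, not taken from the source), an invocation might
// look like:
//
//	rclone backend copyid drive: 1AbCdEfGhIjKlMnOpQ /backup/restored/
//	rclone backend copyid drive: 1AbCdEfGhIjKlMnOpQ /backup/renamed.txt
//
// The first form keeps the file's own name because the destination ends
// with a /, while the second writes it as renamed.txt, matching the
// trailing-slash behaviour described in the help text.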
// Command the backend to run a named command
@@ -3086,7 +3228,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
if serviceAccountFile, ok := opt["service_account_file"]; ok {
serviceAccountMap := make(map[string]string)
serviceAccountMap["previous"] = f.opt.ServiceAccountFile
if err = f.changeServiceAccountFile(serviceAccountFile); err != nil {
if err = f.changeServiceAccountFile(ctx, serviceAccountFile); err != nil {
return out, err
}
f.m.Set("service_account_file", serviceAccountFile)
@@ -3112,7 +3254,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
dstFs := f
target, ok := opt["target"]
if ok {
targetFs, err := cache.Get(target)
targetFs, err := cache.Get(ctx, target)
if err != nil {
return nil, errors.Wrap(err, "couldn't find target")
}
@@ -3130,6 +3272,19 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
dir = arg[0]
}
return f.unTrashDir(ctx, dir, true)
case "copyid":
if len(arg)%2 != 0 {
return nil, errors.New("need an even number of arguments")
}
for len(arg) > 0 {
id, dest := arg[0], arg[1]
arg = arg[2:]
err = f.copyID(ctx, id, dest)
if err != nil {
return nil, errors.Wrapf(err, "failed copying %q to %q", id, dest)
}
}
return nil, nil
default:
return nil, fs.ErrorCommandNotFound
}
@@ -3197,7 +3352,7 @@ func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
}
directoryID = actualID(directoryID)
found, err := f.list(ctx, []string{directoryID}, leaf, false, false, false, func(item *drive.File) bool {
found, err := f.list(ctx, []string{directoryID}, leaf, false, false, f.opt.TrashedOnly, false, func(item *drive.File) bool {
if !f.opt.SkipGdocs {
extension, exportName, exportMimeType, isDocument = f.findExportFormat(item)
if exportName == leaf {
@@ -3272,11 +3427,10 @@ func (o *baseObject) httpResponse(ctx context.Context, url, method string, optio
if url == "" {
return nil, nil, errors.New("forbidden to download - check sharing permission")
}
req, err = http.NewRequest(method, url, nil)
req, err = http.NewRequestWithContext(ctx, method, url, nil)
if err != nil {
return req, nil, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
fs.OpenOptionAddHTTPHeaders(req.Header, options)
if o.bytes == 0 {
// Don't supply range requests for 0 length objects as they always fail
@@ -3609,7 +3763,7 @@ URL={{ .URL }}{{"\r"}}
Encoding=UTF-8
Name={{ .Title }}
URL={{ .URL }}
Icon=text-html
Icon={{ .XDGIcon }}
Type=Link
`
htmlTemplate = `<html>

View File

@@ -7,6 +7,8 @@ import (
"io"
"io/ioutil"
"mime"
"os"
"path"
"path/filepath"
"strings"
"testing"
@@ -194,7 +196,7 @@ func (f *Fs) InternalTestDocumentImport(t *testing.T) {
testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
require.NoError(t, err)
testFilesFs, err := fs.NewFs(testFilesPath)
testFilesFs, err := fs.NewFs(context.Background(), testFilesPath)
require.NoError(t, err)
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
@@ -208,7 +210,7 @@ func (f *Fs) InternalTestDocumentUpdate(t *testing.T) {
testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
require.NoError(t, err)
testFilesFs, err := fs.NewFs(testFilesPath)
testFilesFs, err := fs.NewFs(context.Background(), testFilesPath)
require.NoError(t, err)
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
@@ -272,14 +274,15 @@ func (f *Fs) InternalTestDocumentLink(t *testing.T) {
}
}
const (
// from fstest/fstests/fstests.go
existingDir = "hello? sausage"
existingFile = `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`
existingSubDir = "êé"
)
// TestIntegration/FsMkdir/FsPutFiles/Internal/Shortcuts
func (f *Fs) InternalTestShortcuts(t *testing.T) {
const (
// from fstest/fstests/fstests.go
existingDir = "hello? sausage"
existingFile = `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`
existingSubDir = "êé"
)
ctx := context.Background()
srcObj, err := f.NewObject(ctx, existingFile)
require.NoError(t, err)
@@ -408,6 +411,55 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
require.NoError(t, f.Purge(ctx, "trashDir"))
}
// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyID
func (f *Fs) InternalTestCopyID(t *testing.T) {
ctx := context.Background()
obj, err := f.NewObject(ctx, existingFile)
require.NoError(t, err)
o := obj.(*Object)
dir, err := ioutil.TempDir("", "rclone-drive-copyid-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(dir)
}()
checkFile := func(name string) {
filePath := filepath.Join(dir, name)
fi, err := os.Stat(filePath)
require.NoError(t, err)
assert.Equal(t, int64(100), fi.Size())
err = os.Remove(filePath)
require.NoError(t, err)
}
t.Run("BadID", func(t *testing.T) {
err = f.copyID(ctx, "ID-NOT-FOUND", dir+"/")
require.Error(t, err)
assert.Contains(t, err.Error(), "couldn't find id")
})
t.Run("Directory", func(t *testing.T) {
rootID, err := f.dirCache.RootID(ctx, false)
require.NoError(t, err)
err = f.copyID(ctx, rootID, dir+"/")
require.Error(t, err)
assert.Contains(t, err.Error(), "can't copy directory")
})
t.Run("WithoutDestName", func(t *testing.T) {
err = f.copyID(ctx, o.id, dir+"/")
require.NoError(t, err)
checkFile(path.Base(existingFile))
})
t.Run("WithDestName", func(t *testing.T) {
err = f.copyID(ctx, o.id, dir+"/potato.txt")
require.NoError(t, err)
checkFile("potato.txt")
})
}
func (f *Fs) InternalTest(t *testing.T) {
// These tests all depend on each other so run them as nested tests
t.Run("DocumentImport", func(t *testing.T) {
@@ -424,6 +476,7 @@ func (f *Fs) InternalTest(t *testing.T) {
})
t.Run("Shortcuts", f.InternalTestShortcuts)
t.Run("UnTrash", f.InternalTestUnTrash)
t.Run("CopyID", f.InternalTestCopyID)
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -77,11 +77,10 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
return false, err
}
var req *http.Request
req, err = http.NewRequest(method, urls, body)
req, err = http.NewRequestWithContext(ctx, method, urls, body)
if err != nil {
return false, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
googleapi.Expand(req.URL, map[string]string{
"fileId": fileID,
})
@@ -114,8 +113,7 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
// Make an http.Request for the range passed in
func (rx *resumableUpload) makeRequest(ctx context.Context, start int64, body io.ReadSeeker, reqSize int64) *http.Request {
req, _ := http.NewRequest("POST", rx.URI, body)
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req, _ := http.NewRequestWithContext(ctx, "POST", rx.URI, body)
req.ContentLength = reqSize
totalSize := "*"
if rx.ContentLength >= 0 {

View File

@@ -30,6 +30,7 @@ import (
"regexp"
"strings"
"time"
"unicode/utf8"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
@@ -86,6 +87,8 @@ const (
// by default.
defaultChunkSize = 48 * fs.MebiByte
maxChunkSize = 150 * fs.MebiByte
// Max length of filename parts: https://help.dropbox.com/installs-integrations/sync-uploads/files-not-syncing
maxFileNameLength = 255
)
var (
@@ -107,6 +110,9 @@ var (
// DbHashType is the hash.Type for Dropbox
DbHashType hash.Type
// Errors
errNotSupportedInSharedMode = fserrors.NoRetryError(errors.New("not supported in shared files mode"))
)
// Register with Fs
@@ -116,11 +122,14 @@ func init() {
Name: "dropbox",
Description: "Dropbox",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
Config: func(ctx context.Context, name string, m configmap.Mapper) {
opt := oauthutil.Options{
NoOffline: true,
OAuth2Opts: []oauth2.AuthCodeOption{
oauth2.SetAuthURLParam("token_access_type", "offline"),
},
}
err := oauthutil.Config("dropbox", name, m, dropboxConfig, &opt)
err := oauthutil.Config(ctx, "dropbox", name, m, dropboxConfig, &opt)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
@@ -142,6 +151,31 @@ memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
Help: "Impersonate this user when using a business account.",
Default: "",
Advanced: true,
}, {
Name: "shared_files",
Help: `Instructs rclone to work on individual shared files.
In this mode rclone's features are extremely limited - only list (ls, lsl, etc.)
operations and read operations (e.g. downloading) are supported.
All other operations will be disabled.`,
Default: false,
Advanced: true,
}, {
Name: "shared_folders",
Help: `Instructs rclone to work on shared folders.
When this flag is used with no path, only the List operation is supported and
all available shared folders will be listed. If you specify a path, the first part
will be interpreted as the name of the shared folder. Rclone will then try to mount this
shared folder to the root namespace. On success rclone proceeds normally.
The shared folder is now pretty much a normal folder and all normal operations
are supported.
Note that we don't unmount the shared folder afterwards so the
--dropbox-shared-folders can be omitted after the first use of a particular
shared folder.`,
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -161,9 +195,11 @@ memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
// Options defines the configuration for this backend
type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
Enc encoder.MultiEncoder `config:"encoding"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
SharedFiles bool `config:"shared_files"`
SharedFolders bool `config:"shared_folders"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote dropbox server
@@ -186,7 +222,9 @@ type Fs struct {
//
// Dropbox Objects always have full metadata
type Object struct {
fs *Fs // what this object is part of
fs *Fs // what this object is part of
id string
url string
remote string // The remote path
bytes int64 // size of the object
modTime time.Time // time it was last modified
@@ -222,9 +260,11 @@ func shouldRetry(err error) (bool, error) {
return false, err
}
baseErrString := errors.Cause(err).Error()
// First check for Insufficient Space
// First check for specific errors
if strings.Contains(baseErrString, "insufficient_space") {
return false, fserrors.FatalError(err)
} else if strings.Contains(baseErrString, "malformed_path") {
return false, fserrors.NoRetryError(err)
}
// Then handle any official Retry-After header from Dropbox's SDK
switch e := err.(type) {
@@ -262,7 +302,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -287,7 +327,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
}
oAuthClient, _, err := oauthutil.NewClient(name, m, dropboxConfig)
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, dropboxConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure dropbox")
}
@@ -295,7 +335,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f := &Fs{
name: name,
opt: *opt,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
config := dropbox.Config{
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
@@ -330,10 +370,62 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.users = users.New(config)
f.features = (&fs.Features{
CaseInsensitive: true,
ReadMimeType: true,
ReadMimeType: false,
CanHaveEmptyDirectories: true,
}).Fill(f)
f.setRoot(root)
})
// do not fill features yet
if f.opt.SharedFiles {
f.setRoot(root)
if f.root == "" {
return f, nil
}
_, err := f.findSharedFile(f.root)
f.root = ""
if err == nil {
return f, fs.ErrorIsFile
}
return f, nil
}
if f.opt.SharedFolders {
f.setRoot(root)
if f.root == "" {
return f, nil // our root is empty so we probably want to list shared folders
}
dir := path.Dir(f.root)
if dir == "." {
dir = f.root
}
// root is not empty so we have to find the right shared folder if it exists
id, err := f.findSharedFolder(dir)
if err != nil {
// if we didn't find the specified shared folder we have to bail out here
return nil, err
}
// we found the specified shared folder so let's mount it
// this will add it to the user's normal root namespace and allow us
// to actually perform operations on it using the normal api endpoints.
err = f.mountSharedFolder(id)
if err != nil {
switch e := err.(type) {
case sharing.MountFolderAPIError:
if e.EndpointError == nil || (e.EndpointError != nil && e.EndpointError.Tag != sharing.MountFolderErrorAlreadyMounted) {
return nil, err
}
default:
return nil, err
}
// if the mount failed we have to abort here
}
// if the mount succeeded it's now a normal folder in the user's root namespace
// we disable shared folder mode and proceed normally
f.opt.SharedFolders = false
}
f.features.Fill(ctx, f)
// If root starts with / then use the actual root
if strings.HasPrefix(root, "/") {
@@ -355,6 +447,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
fs.Debugf(f, "Using root namespace %q", f.ns)
}
f.setRoot(root)
// See if the root is actually an object
_, err = f.getFileMetadata(f.slashRoot)
@@ -465,9 +558,150 @@ func (f *Fs) newObjectWithInfo(remote string, info *files.FileMetadata) (fs.Obje
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if f.opt.SharedFiles {
return f.findSharedFile(remote)
}
return f.newObjectWithInfo(remote, nil)
}
// listSharedFolders lists all available shared folders, mounted and not mounted
// we'll need the id later so we have to return them in their original format
func (f *Fs) listSharedFolders() (entries fs.DirEntries, err error) {
started := false
var res *sharing.ListFoldersResult
for {
if !started {
arg := sharing.ListFoldersArgs{
Limit: 100,
}
err := f.pacer.Call(func() (bool, error) {
res, err = f.sharing.ListFolders(&arg)
return shouldRetry(err)
})
if err != nil {
return nil, err
}
started = true
} else {
arg := sharing.ListFoldersContinueArg{
Cursor: res.Cursor,
}
err := f.pacer.Call(func() (bool, error) {
res, err = f.sharing.ListFoldersContinue(&arg)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "list continue")
}
}
for _, entry := range res.Entries {
leaf := f.opt.Enc.ToStandardName(entry.Name)
d := fs.NewDir(leaf, time.Now()).SetID(entry.SharedFolderId)
entries = append(entries, d)
if err != nil {
return nil, err
}
}
if res.Cursor == "" {
break
}
}
return entries, nil
}
// findSharedFolder finds the id for a given shared folder name
// somewhat annoyingly there is no endpoint to query a shared folder by its name
// so our only option is to iterate over all shared folders
func (f *Fs) findSharedFolder(name string) (id string, err error) {
entries, err := f.listSharedFolders()
if err != nil {
return "", err
}
for _, entry := range entries {
if entry.(*fs.Dir).Remote() == name {
return entry.(*fs.Dir).ID(), nil
}
}
return "", fs.ErrorDirNotFound
}
// mountSharedFolder mounts a shared folder to the root namespace
func (f *Fs) mountSharedFolder(id string) error {
arg := sharing.MountFolderArg{
SharedFolderId: id,
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.sharing.MountFolder(&arg)
return shouldRetry(err)
})
return err
}
// listReceivedFiles lists the files the user has been given access to (note this
// means individual shared files, not files contained in shared folders)
func (f *Fs) listReceivedFiles() (entries fs.DirEntries, err error) {
started := false
var res *sharing.ListFilesResult
for {
if !started {
arg := sharing.ListFilesArg{
Limit: 100,
}
err := f.pacer.Call(func() (bool, error) {
res, err = f.sharing.ListReceivedFiles(&arg)
return shouldRetry(err)
})
if err != nil {
return nil, err
}
started = true
} else {
arg := sharing.ListFilesContinueArg{
Cursor: res.Cursor,
}
err := f.pacer.Call(func() (bool, error) {
res, err = f.sharing.ListReceivedFilesContinue(&arg)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "list continue")
}
}
for _, entry := range res.Entries {
fmt.Printf("%+v\n", entry)
entryPath := entry.Name
o := &Object{
fs: f,
url: entry.PreviewUrl,
remote: entryPath,
modTime: entry.TimeInvited,
}
if err != nil {
return nil, err
}
entries = append(entries, o)
}
if res.Cursor == "" {
break
}
}
return entries, nil
}
func (f *Fs) findSharedFile(name string) (o *Object, err error) {
files, err := f.listReceivedFiles()
if err != nil {
return nil, err
}
for _, entry := range files {
if entry.(*Object).remote == name {
return entry.(*Object), nil
}
}
return nil, fs.ErrorObjectNotFound
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
@@ -478,6 +712,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.opt.SharedFiles {
return f.listReceivedFiles()
}
if f.opt.SharedFolders {
return f.listSharedFolders()
}
root := f.slashRoot
if dir != "" {
root += "/" + dir
@@ -541,7 +782,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
leaf := f.opt.Enc.ToStandardName(path.Base(entryPath))
remote := path.Join(dir, leaf)
if folderInfo != nil {
d := fs.NewDir(remote, time.Now())
d := fs.NewDir(remote, time.Now()).SetID(folderInfo.Id)
entries = append(entries, d)
} else if fileInfo != nil {
o, err := f.newObjectWithInfo(remote, fileInfo)
@@ -564,6 +805,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if f.opt.SharedFiles || f.opt.SharedFolders {
return nil, errNotSupportedInSharedMode
}
// Temporary Object under construction
o := &Object{
fs: f,
@@ -579,6 +823,9 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
if f.opt.SharedFiles || f.opt.SharedFolders {
return errNotSupportedInSharedMode
}
root := path.Join(f.slashRoot, dir)
// can't create or run metadata on root
@@ -598,6 +845,10 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
arg2 := files.CreateFolderArg{
Path: f.opt.Enc.FromStandardPath(root),
}
// Don't attempt to create filenames that are too long
if cErr := checkPathLength(arg2.Path); cErr != nil {
return cErr
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.CreateFolderV2(&arg2)
return shouldRetry(err)
@@ -656,6 +907,9 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
if f.opt.SharedFiles || f.opt.SharedFolders {
return errNotSupportedInSharedMode
}
return f.purgeCheck(ctx, dir, true)
}
@@ -664,7 +918,7 @@ func (f *Fs) Precision() time.Duration {
return time.Second
}
// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -725,7 +979,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
return f.purgeCheck(ctx, dir, false)
}
// Move src to this remote using server side move operations.
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -830,7 +1084,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -927,8 +1181,16 @@ func (o *Object) Remote() string {
return o.remote
}
// ID returns the object id
func (o *Object) ID() string {
return o.id
}
// Hash returns the dropbox special hash
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
return "", errNotSupportedInSharedMode
}
if t != DbHashType {
return "", hash.ErrUnsupported
}
@@ -946,8 +1208,9 @@ func (o *Object) Size() int64 {
// setMetadataFromEntry sets the fs data from a files.FileMetadata
//
// This isn't a complete set of metadata and has an inacurate date
// This isn't a complete set of metadata and has an inaccurate date
func (o *Object) setMetadataFromEntry(info *files.FileMetadata) error {
o.id = info.Id
o.bytes = int64(info.Size)
o.modTime = info.ClientModified
o.hash = info.ContentHash
@@ -1016,10 +1279,27 @@ func (o *Object) Storable() bool {
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.fs.opt.SharedFiles {
if len(options) != 0 {
return nil, errors.New("OpenOptions not supported for shared files")
}
arg := sharing.GetSharedLinkMetadataArg{
Url: o.url,
}
err = o.fs.pacer.Call(func() (bool, error) {
_, in, err = o.fs.sharing.GetSharedLinkFile(&arg)
return shouldRetry(err)
})
if err != nil {
return nil, err
}
return
}
fs.FixRangeOption(options, o.bytes)
headers := fs.OpenOptionHeaders(options)
arg := files.DownloadArg{
Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),
Path: o.id,
ExtraHeaders: headers,
}
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1147,12 +1427,40 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
return entry, nil
}
// checks all the parts of name to see they are below
// maxFileNameLength runes.
//
// This checks the length as runes which isn't quite right as dropbox
// seems to encode some symbols (eg ☺) as two "characters". This seems
// like utf-16 except that ☺ doesn't need two characters in utf-16.
//
// Using runes instead of what dropbox is using will work for most
// cases, and when it goes wrong we will upload something we should
// have detected as too long which is the least damaging way to fail.
func checkPathLength(name string) (err error) {
for next := ""; len(name) > 0; name = next {
if slash := strings.IndexRune(name, '/'); slash >= 0 {
name, next = name[:slash], name[slash+1:]
} else {
next = ""
}
length := utf8.RuneCountInString(name)
if length > maxFileNameLength {
return fserrors.NoRetryError(fs.ErrorFileNameTooLong)
}
}
return nil
}
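// As a small worked example (the paths are made up, not from the source):
// checkPathLength("/ok/"+strings.Repeat("a", 255)+"/ok") returns nil, while
// making that middle component 256 runes long returns fs.ErrorFileNameTooLong
// wrapped as a no-retry error, mirroring the table-driven test added
// alongside this change.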
// Update the already existing object
//
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
return errNotSupportedInSharedMode
}
remote := o.remotePath()
if ignoredFiles.MatchString(remote) {
return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
@@ -1161,6 +1469,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
commitInfo.Mode.Tag = "overwrite"
// The Dropbox API only accepts timestamps in UTC with second precision.
commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second)
// Don't attempt to create filenames that are too long
if cErr := checkPathLength(commitInfo.Path); cErr != nil {
return cErr
}
size := src.Size()
var err error
@@ -1181,6 +1493,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
return errNotSupportedInSharedMode
}
err = o.fs.pacer.Call(func() (bool, error) {
_, err = o.fs.srv.DeleteV2(&files.DeleteArg{
Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),
@@ -1201,4 +1516,5 @@ var (
_ fs.DirMover = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)

View File

@@ -0,0 +1,44 @@
package dropbox
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestInternalCheckPathLength(t *testing.T) {
rep := func(n int, r rune) (out string) {
rs := make([]rune, n)
for i := range rs {
rs[i] = r
}
return string(rs)
}
for _, test := range []struct {
in string
ok bool
}{
{in: "", ok: true},
{in: rep(maxFileNameLength, 'a'), ok: true},
{in: rep(maxFileNameLength+1, 'a'), ok: false},
{in: rep(maxFileNameLength, '£'), ok: true},
{in: rep(maxFileNameLength+1, '£'), ok: false},
{in: rep(maxFileNameLength, '☺'), ok: true},
{in: rep(maxFileNameLength+1, '☺'), ok: false},
{in: rep(maxFileNameLength, '你'), ok: true},
{in: rep(maxFileNameLength+1, '你'), ok: false},
{in: "/ok/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength, 'a') + "/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength+1, 'a') + "/ok", ok: false},
{in: "/ok/" + rep(maxFileNameLength, '£') + "/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength+1, '£') + "/ok", ok: false},
{in: "/ok/" + rep(maxFileNameLength, '☺') + "/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength+1, '☺') + "/ok", ok: false},
{in: "/ok/" + rep(maxFileNameLength, '你') + "/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength+1, '你') + "/ok", ok: false},
} {
err := checkPathLength(test.in)
assert.Equal(t, test.ok, err == nil, test.in)
}
}

View File

@@ -35,7 +35,7 @@ func init() {
fs.Register(&fs.RegInfo{
Name: "fichier",
Description: "1Fichier",
Config: func(name string, config configmap.Mapper) {
Config: func(ctx context.Context, name string, config configmap.Mapper) {
},
NewFs: NewFs,
Options: []fs.Option{{
@@ -167,7 +167,7 @@ func (f *Fs) Features() *fs.Features {
//
// On Windows avoid single character remote names as they can be mixed
// up with drive letters.
func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
func NewFs(ctx context.Context, name string, root string, config configmap.Mapper) (fs.Fs, error) {
opt := new(Options)
err := configstruct.Set(config, opt)
if err != nil {
@@ -186,16 +186,17 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
name: name,
root: root,
opt: *opt,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant), pacer.AttackConstant(attackConstant))),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant), pacer.AttackConstant(attackConstant))),
baseClient: &http.Client{},
}
f.features = (&fs.Features{
DuplicateFiles: true,
CanHaveEmptyDirectories: true,
}).Fill(f)
ReadMimeType: true,
}).Fill(ctx, f)
client := fshttp.NewClient(fs.Config)
client := fshttp.NewClient(ctx)
f.rest = rest.NewClient(client).SetRoot(apiBaseURL)
@@ -203,8 +204,6 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
f.dirCache = dircache.New(root, rootID, f)
ctx := context.Background()
// Find the current root
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
@@ -227,7 +226,7 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
}
return nil, err
}
f.features.Fill(&tempF)
f.features.Fill(ctx, &tempF)
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182
@@ -306,10 +305,10 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
exisitingObj, err := f.NewObject(ctx, src.Remote())
existingObj, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src, options...)
@@ -323,7 +322,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
if size > int64(100e9) {
if size > int64(300e9) {
return nil, errors.New("File too big, cant upload")
} else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles

View File

@@ -4,13 +4,11 @@ package fichier
import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fs.Config.LogLevel = fs.LogLevelDebug
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFichier:",
})

View File

@@ -0,0 +1,391 @@
// Package api has type definitions for filefabric
//
// Converted from the API responses with help from https://mholt.github.io/json-to-go/
package api
import (
"bytes"
"fmt"
"reflect"
"strings"
"time"
)
const (
// TimeFormat for parameters (UTC)
timeFormatParameters = `2006-01-02 15:04:05`
// "2020-08-11 10:10:04" for JSON parsing
timeFormatJSON = `"` + timeFormatParameters + `"`
)
// Time represents date and time information for the
// filefabric API
type Time time.Time
// MarshalJSON turns a Time into JSON (in UTC)
func (t *Time) MarshalJSON() (out []byte, err error) {
timeString := (*time.Time)(t).UTC().Format(timeFormatJSON)
return []byte(timeString), nil
}
var zeroTime = []byte(`"0000-00-00 00:00:00"`)
// UnmarshalJSON turns JSON into a Time (in UTC)
func (t *Time) UnmarshalJSON(data []byte) error {
// Set a Zero time.Time if we receive a zero time input
if bytes.Equal(data, zeroTime) {
*t = Time(time.Time{})
return nil
}
newT, err := time.Parse(timeFormatJSON, string(data))
if err != nil {
return err
}
*t = Time(newT)
return nil
}
// String turns a Time into a string in UTC suitable for the API
// parameters
func (t Time) String() string {
return time.Time(t).UTC().Format(timeFormatParameters)
}
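// As a minimal sketch of the behaviour above (not an output captured from
// the API): a Time value marshals to a quoted UTC timestamp such as
// "2020-08-11 10:10:04", while the special zero value "0000-00-00 00:00:00"
// returned by the API unmarshals to the zero time.Time instead of producing
// a parse error.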
// Status is returned in all status responses
type Status struct {
Code string `json:"status"`
Message string `json:"statusmessage"`
TaskID string `json:"taskid"`
// Warning string `json:"warning"` // obsolete
}
// Status satisfies the error interface
func (e *Status) Error() string {
return fmt.Sprintf("%s (%s)", e.Message, e.Code)
}
// OK returns true if the status is all good
func (e *Status) OK() bool {
return e.Code == "ok"
}
// GetCode returns the status code if any
func (e *Status) GetCode() string {
return e.Code
}
// OKError defines an interface for items which can be OK or be an error
type OKError interface {
error
OK() bool
GetCode() string
}
// Check Status satisfies the OKError interface
var _ OKError = (*Status)(nil)
// EmptyResponse is a response which just returns the error condition
type EmptyResponse struct {
Status
}
// GetTokenByAuthTokenResponse is the response to getTokenByAuthToken
type GetTokenByAuthTokenResponse struct {
Status
Token string `json:"token"`
UserID string `json:"userid"`
AllowLoginRemember string `json:"allowloginremember"`
LastLogin Time `json:"lastlogin"`
AutoLoginCode string `json:"autologincode"`
}
// ApplianceInfo is the response to getApplianceInfo
type ApplianceInfo struct {
Status
Sitetitle string `json:"sitetitle"`
OauthLoginSupport string `json:"oauthloginsupport"`
IsAppliance string `json:"isappliance"`
SoftwareVersion string `json:"softwareversion"`
SoftwareVersionLabel string `json:"softwareversionlabel"`
}
// GetFolderContentsResponse is returned from getFolderContents
type GetFolderContentsResponse struct {
Status
Total int `json:"total,string"`
Items []Item `json:"filelist"`
Folder Item `json:"folder"`
From int `json:"from,string"`
//Count int `json:"count"`
Pid string `json:"pid"`
RefreshResult Status `json:"refreshresult"`
// Curfolder Item `json:"curfolder"` - sometimes returned as "ROOT"?
Parents []Item `json:"parents"`
CustomPermissions CustomPermissions `json:"custompermissions"`
}
// ItemType determines whether it is a file or a folder
type ItemType uint8
// Types of things in Item
const (
ItemTypeFile ItemType = 0
ItemTypeFolder ItemType = 1
)
// Item is a File or a Folder
type Item struct {
ID string `json:"fi_id"`
PID string `json:"fi_pid"`
// UID string `json:"fi_uid"`
Name string `json:"fi_name"`
// S3Name string `json:"fi_s3name"`
// Extension string `json:"fi_extension"`
// Description string `json:"fi_description"`
Type ItemType `json:"fi_type,string"`
// Created Time `json:"fi_created"`
Size int64 `json:"fi_size,string"`
ContentType string `json:"fi_contenttype"`
// Tags string `json:"fi_tags"`
// MainCode string `json:"fi_maincode"`
// Public int `json:"fi_public,string"`
// Provider string `json:"fi_provider"`
// ProviderFolder string `json:"fi_providerfolder"` // folder
// Encrypted int `json:"fi_encrypted,string"`
// StructType string `json:"fi_structtype"`
// Bname string `json:"fi_bname"` // folder
// OrgID string `json:"fi_orgid"`
// Favorite int `json:"fi_favorite,string"`
// IspartOf string `json:"fi_ispartof"` // folder
Modified Time `json:"fi_modified"`
// LastAccessed Time `json:"fi_lastaccessed"`
// Hits int64 `json:"fi_hits,string"`
// IP string `json:"fi_ip"` // folder
// BigDescription string `json:"fi_bigdescription"`
LocalTime Time `json:"fi_localtime"`
// OrgfolderID string `json:"fi_orgfolderid"`
// StorageIP string `json:"fi_storageip"` // folder
// RemoteTime Time `json:"fi_remotetime"`
// ProviderOptions string `json:"fi_provideroptions"`
// Access string `json:"fi_access"`
// Hidden string `json:"fi_hidden"` // folder
// VersionOf string `json:"fi_versionof"`
Trash bool `json:"trash"`
// Isbucket string `json:"isbucket"` // filelist
SubFolders int64 `json:"subfolders"` // folder
}
// ItemFields is a | separated list of fields in Item
var ItemFields = mustFields(Item{})
// fields returns the JSON fields in use by opt as a | separated
// string.
func fields(opt interface{}) (pipeTags string, err error) {
var tags []string
def := reflect.ValueOf(opt)
defType := def.Type()
for i := 0; i < def.NumField(); i++ {
field := defType.Field(i)
tag, ok := field.Tag.Lookup("json")
if !ok {
continue
}
if comma := strings.IndexRune(tag, ','); comma >= 0 {
tag = tag[:comma]
}
if tag == "" {
continue
}
tags = append(tags, tag)
}
return strings.Join(tags, "|"), nil
}
// mustFields returns the JSON fields in use by opt as a | separated
// string. It panics on failure.
func mustFields(opt interface{}) string {
tags, err := fields(opt)
if err != nil {
panic(err)
}
return tags
}
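// As a minimal sketch of what the helpers above produce (derived from the
// json tags in Item, not an output captured from the API), ItemFields would
// evaluate to something like:
//
//	"fi_id|fi_pid|fi_name|fi_type|fi_size|fi_contenttype|fi_modified|fi_localtime|trash|subfolders"
//
// i.e. only the uncommented fields with non-empty json tags, with any
// ",string" options stripped before joining with "|".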
// CustomPermissions is returned as part of GetFolderContentsResponse
type CustomPermissions struct {
Upload string `json:"upload"`
CreateSubFolder string `json:"createsubfolder"`
Rename string `json:"rename"`
Delete string `json:"delete"`
Move string `json:"move"`
ManagePermissions string `json:"managepermissions"`
ListOnly string `json:"listonly"`
VisibleInTrash string `json:"visibleintrash"`
}
// DoCreateNewFolderResponse is the response from doCreateNewFolder
type DoCreateNewFolderResponse struct {
Status
Item Item `json:"file"`
}
// DoInitUploadResponse is the response from doInitUpload
type DoInitUploadResponse struct {
Status
ProviderID string `json:"providerid"`
UploadCode string `json:"uploadcode"`
FileType string `json:"filetype"`
DirectUploadSupport string `json:"directuploadsupport"`
ResumeAllowed string `json:"resumeallowed"`
}
// UploaderResponse is returned from /cgi-bin/uploader/uploader1.cgi
//
// Sometimes the response is returned as XML and sometimes as JSON
type UploaderResponse struct {
FileSize int64 `xml:"filesize" json:"filesize,string"`
MD5 string `xml:"md5" json:"md5"`
Success string `xml:"success" json:"success"`
}
// UploadStatus is returned from getUploadStatus
type UploadStatus struct {
Status
UploadCode string `json:"uploadcode"`
Metafile string `json:"metafile"`
Percent int `json:"percent,string"`
Uploaded int64 `json:"uploaded,string"`
Size int64 `json:"size,string"`
Filename string `json:"filename"`
Nofile string `json:"nofile"`
Completed string `json:"completed"`
Completsuccess string `json:"completsuccess"`
Completerror string `json:"completerror"`
}
// DoCompleteUploadResponse is the response to doCompleteUpload
type DoCompleteUploadResponse struct {
Status
UploadedSize int64 `json:"uploadedsize,string"`
StorageIP string `json:"storageip"`
UploadedName string `json:"uploadedname"`
// Versioned []interface{} `json:"versioned"`
// VersionedID int `json:"versionedid"`
// Comment interface{} `json:"comment"`
File Item `json:"file"`
// UsSize string `json:"us_size"`
// PaSize string `json:"pa_size"`
// SpaceInfo SpaceInfo `json:"spaceinfo"`
}
// Providers is returned as part of UploadResponse
type Providers struct {
Max string `json:"max"`
Used string `json:"used"`
ID string `json:"id"`
Private string `json:"private"`
Limit string `json:"limit"`
Percent int `json:"percent"`
}
// Total is returned as part of UploadResponse
type Total struct {
Max string `json:"max"`
Used string `json:"used"`
ID string `json:"id"`
Priused string `json:"priused"`
Primax string `json:"primax"`
Limit string `json:"limit"`
Percent int `json:"percent"`
Pripercent int `json:"pripercent"`
}
// UploadResponse is returned as part of SpaceInfo
type UploadResponse struct {
Providers []Providers `json:"providers"`
Total Total `json:"total"`
}
// SpaceInfo is returned as part of DoCompleteUploadResponse
type SpaceInfo struct {
Response UploadResponse `json:"response"`
Status string `json:"status"`
}
// DeleteResponse is returned from doDeleteFile
type DeleteResponse struct {
Status
Deleted []string `json:"deleted"`
Errors []interface{} `json:"errors"`
ID string `json:"fi_id"`
BackgroundTask int `json:"backgroundtask"`
UsSize string `json:"us_size"`
PaSize string `json:"pa_size"`
//SpaceInfo SpaceInfo `json:"spaceinfo"`
}
// FileResponse is returned from doRenameFile
type FileResponse struct {
Status
Item Item `json:"file"`
Exists string `json:"exists"`
}
// MoveFilesResponse is returned from doMoveFiles
type MoveFilesResponse struct {
Status
Filesleft string `json:"filesleft"`
Addedtobackground string `json:"addedtobackground"`
Moved string `json:"moved"`
Item Item `json:"file"`
IDs []string `json:"fi_ids"`
Length int `json:"length"`
DirID string `json:"dir_id"`
MovedObjects []Item `json:"movedobjects"`
// FolderTasks []interface{} `json:"foldertasks"`
}
// TasksResponse is the response to getUserBackgroundTasks
type TasksResponse struct {
Status
Tasks []Task `json:"tasks"`
Total string `json:"total"`
}
// BtData is part of TasksResponse
type BtData struct {
Callback string `json:"callback"`
}
// Task describes a task returned in TasksResponse
type Task struct {
BtID string `json:"bt_id"`
UsID string `json:"us_id"`
BtType string `json:"bt_type"`
BtData BtData `json:"bt_data"`
BtStatustext string `json:"bt_statustext"`
BtStatusdata string `json:"bt_statusdata"`
BtMessage string `json:"bt_message"`
BtProcent string `json:"bt_procent"`
BtAdded string `json:"bt_added"`
BtStatus string `json:"bt_status"`
BtCompleted string `json:"bt_completed"`
BtTitle string `json:"bt_title"`
BtCredentials string `json:"bt_credentials"`
BtHidden string `json:"bt_hidden"`
BtAutoremove string `json:"bt_autoremove"`
BtDevsite string `json:"bt_devsite"`
BtPriority string `json:"bt_priority"`
BtReport string `json:"bt_report"`
BtSitemarker string `json:"bt_sitemarker"`
BtExecuteafter string `json:"bt_executeafter"`
BtCompletestatus string `json:"bt_completestatus"`
BtSubtype string `json:"bt_subtype"`
BtCanceled string `json:"bt_canceled"`
Callback string `json:"callback"`
CanBeCanceled bool `json:"canbecanceled"`
CanBeRestarted bool `json:"canberestarted"`
Type string `json:"type"`
Status string `json:"status"`
Settings string `json:"settings"`
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,17 @@
// Test filefabric filesystem interface
package filefabric_test
import (
"testing"
"github.com/rclone/rclone/backend/filefabric"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFileFabric:",
NilObject: (*filefabric.Object)(nil),
})
}

View File

@@ -6,7 +6,6 @@ import (
"crypto/tls"
"io"
"net/textproto"
"os"
"path"
"runtime"
"strings"
@@ -16,16 +15,22 @@ import (
"github.com/jlaffaye/ftp"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
)
var (
currentUser = env.CurrentUser()
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -42,7 +47,7 @@ func init() {
}},
}, {
Name: "user",
Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
Help: "FTP username, leave blank for current username, " + currentUser,
}, {
Name: "port",
Help: "FTP port, leave blank to use default (21)",
@@ -53,16 +58,16 @@ func init() {
Required: true,
}, {
Name: "tls",
Help: `Use FTPS over TLS (Implicit)
When using implicit FTP over TLS the client will connect using TLS
right from the start, which in turn breaks the compatibility with
Help: `Use Implicit FTPS (FTP over TLS)
When using implicit FTP over TLS the client connects using TLS
right from the start which breaks compatibility with
non-TLS-aware servers. This is usually served over port 990 rather
than port 21. Cannot be used in combination with explicit FTP.`,
Default: false,
}, {
Name: "explicit_tls",
Help: `Use FTP over TLS (Explicit)
When using explicit FTP over TLS the client explicitly request
Help: `Use Explicit FTPS (FTP over TLS)
When using explicit FTP over TLS the client explicitly requests
security from the server in order to upgrade a plain text connection
to an encrypted one. Cannot be used in combination with implicit FTP.`,
Default: false,
@@ -81,6 +86,11 @@ to an encrypted one. Cannot be used in combination with implicit FTP.`,
Help: "Disable using EPSV even if server advertises support",
Default: false,
Advanced: true,
}, {
Name: "disable_mlsd",
Help: "Disable using MLSD even if server advertises support",
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -107,15 +117,17 @@ type Options struct {
Concurrency int `config:"concurrency"`
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
DisableMLSD bool `config:"disable_mlsd"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote FTP server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
ci *fs.ConfigInfo // global config
features *fs.Features // optional features
url string
user string
pass string
@@ -200,9 +212,9 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
}
// Open a new connection to the FTP server.
func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
func (f *Fs) ftpConnection(ctx context.Context) (*ftp.ServerConn, error) {
fs.Debugf(f, "Connecting to FTP server")
ftpConfig := []ftp.DialOption{ftp.DialWithTimeout(fs.Config.ConnectTimeout)}
ftpConfig := []ftp.DialOption{ftp.DialWithTimeout(f.ci.ConnectTimeout)}
if f.opt.TLS && f.opt.ExplicitTLS {
fs.Errorf(f, "Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
return nil, errors.New("Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
@@ -222,8 +234,11 @@ func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
}
if fs.Config.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: fs.Config.Dump&fs.DumpAuth != 0}))
if f.opt.DisableMLSD {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true))
}
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
}
c, err := ftp.Dial(f.dialAddr, ftpConfig...)
if err != nil {
@@ -240,10 +255,11 @@ func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
}
// Get an FTP connection from the pool, or open a new one
func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
func (f *Fs) getFtpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.opt.Concurrency > 0 {
f.tokens.Get()
}
accounting.LimitTPS(ctx)
f.poolMu.Lock()
if len(f.pool) > 0 {
c = f.pool[0]
@@ -253,7 +269,7 @@ func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
if c != nil {
return c, nil
}
c, err = f.ftpConnection()
c, err = f.ftpConnection(ctx)
if err != nil && f.opt.Concurrency > 0 {
f.tokens.Put()
}
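
For readers unfamiliar with the pattern behind getFtpConnection/putFtpConnection, here is a stand-alone sketch of the same idea (not rclone code, all names invented): a buffered channel caps the number of live connections, a mutex-protected slice keeps idle ones for reuse, and releasing a connection frees a slot for the next caller.

    package main

    import "sync"

    type conn struct{ id int }

    type pool struct {
        mu     sync.Mutex
        idle   []*conn
        tokens chan struct{} // capacity == maximum concurrency
        nextID int
    }

    func newPool(concurrency int) *pool {
        return &pool{tokens: make(chan struct{}, concurrency)}
    }

    func (p *pool) get() *conn {
        p.tokens <- struct{}{} // blocks once the limit is reached
        p.mu.Lock()
        defer p.mu.Unlock()
        if n := len(p.idle); n > 0 {
            c := p.idle[n-1]
            p.idle = p.idle[:n-1]
            return c
        }
        p.nextID++
        return &conn{id: p.nextID} // "dial" a fresh connection
    }

    func (p *pool) put(c *conn) {
        p.mu.Lock()
        p.idle = append(p.idle, c)
        p.mu.Unlock()
        <-p.tokens // free the slot for the next caller
    }

    func main() {
        p := newPool(2)
        c1, c2 := p.get(), p.get()
        p.put(c1)
        p.put(c2)
        _ = p.get() // reuses an idle connection instead of dialling
    }
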
@@ -296,8 +312,7 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
ctx := context.Background()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
// Parse config into Options struct
opt := new(Options)
@@ -311,7 +326,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
}
user := opt.User
if user == "" {
user = os.Getenv("USER")
user = currentUser
}
port := opt.Port
if port == "" {
@@ -324,10 +339,12 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
protocol = "ftps://"
}
u := protocol + path.Join(dialAddr+"/", root)
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
ci: ci,
url: u,
user: user,
pass: pass,
@@ -336,9 +353,9 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(f)
}).Fill(ctx, f)
// Make a connection and pool it to return errors early
c, err := f.getFtpConnection()
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "NewFs")
}
@@ -409,7 +426,7 @@ func (f *Fs) dirFromStandardPath(dir string) string {
}
// findItem finds a directory entry for the name in its parent directory
func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
fullPath := path.Join(f.root, remote)
if fullPath == "" || fullPath == "." || fullPath == "/" {
@@ -423,7 +440,7 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
dir := path.Dir(fullPath)
base := path.Base(fullPath)
c, err := f.getFtpConnection()
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "findItem")
}
@@ -445,7 +462,7 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
entry, err := f.findItem(remote)
entry, err := f.findItem(ctx, remote)
if err != nil {
return nil, err
}
@@ -467,8 +484,8 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
}
// dirExists checks the directory pointed to by remote exists or not
func (f *Fs) dirExists(remote string) (exists bool, err error) {
entry, err := f.findItem(remote)
func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) {
entry, err := f.findItem(ctx, remote)
if err != nil {
return false, errors.Wrap(err, "dirExists")
}
@@ -489,7 +506,7 @@ func (f *Fs) dirExists(remote string) (exists bool, err error) {
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
c, err := f.getFtpConnection()
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "list")
}
@@ -510,7 +527,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}()
// Wait for List for up to Timeout seconds
timer := time.NewTimer(fs.Config.Timeout)
timer := time.NewTimer(f.ci.Timeout)
select {
case listErr = <-errchan:
timer.Stop()
@@ -527,7 +544,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// doesn't exist, so check it really doesn't exist if no
// entries found.
if len(files) == 0 {
exists, err := f.dirExists(dir)
exists, err := f.dirExists(ctx, dir)
if err != nil {
return nil, errors.Wrap(err, "list")
}
@@ -580,7 +597,7 @@ func (f *Fs) Precision() time.Duration {
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// fs.Debugf(f, "Trying to put file %s", src.Remote())
err := f.mkParentDir(src.Remote())
err := f.mkParentDir(ctx, src.Remote())
if err != nil {
return nil, errors.Wrap(err, "Put mkParentDir failed")
}
@@ -598,12 +615,12 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
}
// getInfo reads the FileInfo for a path
func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err error) {
// defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err)
dir := path.Dir(remote)
base := path.Base(remote)
c, err := f.getFtpConnection()
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "getInfo")
}
@@ -630,12 +647,12 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
}
// mkdir makes the directory and parents using unrooted paths
func (f *Fs) mkdir(abspath string) error {
func (f *Fs) mkdir(ctx context.Context, abspath string) error {
abspath = path.Clean(abspath)
if abspath == "." || abspath == "/" {
return nil
}
fi, err := f.getInfo(abspath)
fi, err := f.getInfo(ctx, abspath)
if err == nil {
if fi.IsDir {
return nil
@@ -645,11 +662,11 @@ func (f *Fs) mkdir(abspath string) error {
return errors.Wrapf(err, "mkdir %q failed", abspath)
}
parent := path.Dir(abspath)
err = f.mkdir(parent)
err = f.mkdir(ctx, parent)
if err != nil {
return err
}
c, connErr := f.getFtpConnection()
c, connErr := f.getFtpConnection(ctx)
if connErr != nil {
return errors.Wrap(connErr, "mkdir")
}
@@ -669,23 +686,23 @@ func (f *Fs) mkdir(abspath string) error {
// mkParentDir makes the parent of remote if necessary and any
// directories above that
func (f *Fs) mkParentDir(remote string) error {
func (f *Fs) mkParentDir(ctx context.Context, remote string) error {
parent := path.Dir(remote)
return f.mkdir(path.Join(f.root, parent))
return f.mkdir(ctx, path.Join(f.root, parent))
}
// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
// defer fs.Trace(dir, "")("err=%v", &err)
root := path.Join(f.root, dir)
return f.mkdir(root)
return f.mkdir(ctx, root)
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
c, err := f.getFtpConnection()
c, err := f.getFtpConnection(ctx)
if err != nil {
return errors.Wrap(translateErrorFile(err), "Rmdir")
}
@@ -701,11 +718,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
err := f.mkParentDir(remote)
err := f.mkParentDir(ctx, remote)
if err != nil {
return nil, errors.Wrap(err, "Move mkParentDir failed")
}
c, err := f.getFtpConnection()
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "Move")
}
@@ -725,7 +742,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -742,7 +759,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
dstPath := path.Join(f.root, dstRemote)
// Check if destination exists
fi, err := f.getInfo(dstPath)
fi, err := f.getInfo(ctx, dstPath)
if err == nil {
if fi.IsDir {
return fs.ErrorDirExists
@@ -753,13 +770,13 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// Make sure the parent directory exists
err = f.mkdir(path.Dir(dstPath))
err = f.mkdir(ctx, path.Dir(dstPath))
if err != nil {
return errors.Wrap(err, "DirMove mkParentDir dst failed")
}
// Do the move
c, err := f.getFtpConnection()
c, err := f.getFtpConnection(ctx)
if err != nil {
return errors.Wrap(err, "DirMove")
}
@@ -891,7 +908,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
}
}
}
c, err := o.fs.getFtpConnection()
c, err := o.fs.getFtpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "open")
}
@@ -926,7 +943,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
fs.Debugf(o, "Removed after failed upload: %v", err)
}
}
c, err := o.fs.getFtpConnection()
c, err := o.fs.getFtpConnection(ctx)
if err != nil {
return errors.Wrap(err, "Update")
}
@@ -938,7 +955,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return errors.Wrap(err, "update stor")
}
o.fs.putFtpConnection(&c, nil)
o.info, err = o.fs.getInfo(path)
o.info, err = o.fs.getInfo(ctx, path)
if err != nil {
return errors.Wrap(err, "update getinfo")
}
@@ -950,14 +967,14 @@ func (o *Object) Remove(ctx context.Context) (err error) {
// defer fs.Trace(o, "")("err=%v", &err)
path := path.Join(o.fs.root, o.remote)
// Check if it's a directory or a file
info, err := o.fs.getInfo(path)
info, err := o.fs.getInfo(ctx, path)
if err != nil {
return err
}
if info.IsDir {
err = o.fs.Rmdir(ctx, o.remote)
} else {
c, err := o.fs.getFtpConnection()
c, err := o.fs.getFtpConnection(ctx)
if err != nil {
return errors.Wrap(err, "Remove")
}


@@ -76,14 +76,14 @@ func init() {
Prefix: "gcs",
Description: "Google Cloud Storage (this is not Google Drive)",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
Config: func(ctx context.Context, name string, m configmap.Mapper) {
saFile, _ := m.Get("service_account_file")
saCreds, _ := m.Get("service_account_credentials")
anonymous, _ := m.Get("anonymous")
if saFile != "" || saCreds != "" || anonymous == "true" {
return
}
err := oauthutil.Config("google cloud storage", name, m, storageConfig, nil)
err := oauthutil.Config(ctx, "google cloud storage", name, m, storageConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
@@ -370,12 +370,12 @@ func (o *Object) split() (bucket, bucketPath string) {
return o.fs.split(o.remote)
}
func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
func getServiceAccountClient(ctx context.Context, credentialsData []byte) (*http.Client, error) {
conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
if err != nil {
return nil, errors.Wrap(err, "error processing credentials")
}
ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
ctxWithSpecialClient := oauthutil.Context(ctx, fshttp.NewClient(ctx))
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}
@@ -386,8 +386,7 @@ func (f *Fs) setRoot(root string) {
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
var oAuthClient *http.Client
// Parse config into Options struct
@@ -412,14 +411,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
opt.ServiceAccountCredentials = string(loadedCreds)
}
if opt.Anonymous {
oAuthClient = &http.Client{}
oAuthClient = fshttp.NewClient(ctx)
} else if opt.ServiceAccountCredentials != "" {
oAuthClient, err = getServiceAccountClient([]byte(opt.ServiceAccountCredentials))
oAuthClient, err = getServiceAccountClient(ctx, []byte(opt.ServiceAccountCredentials))
if err != nil {
return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
}
} else {
oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
if err != nil {
ctx := context.Background()
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
@@ -433,7 +432,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
name: name,
root: root,
opt: *opt,
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(),
}
f.setRoot(root)
@@ -442,7 +441,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
}).Fill(f)
}).Fill(ctx, f)
// Create a new authorized Drive client.
f.client = oAuthClient
@@ -565,7 +564,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
remote = path.Join(bucket, remote)
}
// is this a directory marker?
if isDirectory && object.Size == 0 {
if isDirectory {
continue // skip directory marker
}
err = fn(remote, object, false)
@@ -813,7 +812,7 @@ func (f *Fs) Precision() time.Duration {
return time.Nanosecond
}
// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -841,20 +840,27 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
remote: remote,
}
var newObject *storage.Object
err = f.pacer.Call(func() (bool, error) {
copyObject := f.svc.Objects.Copy(srcBucket, srcPath, dstBucket, dstPath, nil)
if !f.opt.BucketPolicyOnly {
copyObject.DestinationPredefinedAcl(f.opt.ObjectACL)
rewriteRequest := f.svc.Objects.Rewrite(srcBucket, srcPath, dstBucket, dstPath, nil)
if !f.opt.BucketPolicyOnly {
rewriteRequest.DestinationPredefinedAcl(f.opt.ObjectACL)
}
var rewriteResponse *storage.RewriteResponse
for {
err = f.pacer.Call(func() (bool, error) {
rewriteResponse, err = rewriteRequest.Context(ctx).Do()
return shouldRetry(err)
})
if err != nil {
return nil, err
}
newObject, err = copyObject.Context(ctx).Do()
return shouldRetry(err)
})
if err != nil {
return nil, err
if rewriteResponse.Done {
break
}
rewriteRequest.RewriteToken(rewriteResponse.RewriteToken)
fs.Debugf(dstObj, "Continuing rewrite %d bytes done", rewriteResponse.TotalBytesRewritten)
}
// Set the metadata for the new object while we have it
dstObj.setMetaData(newObject)
dstObj.setMetaData(rewriteResponse.Resource)
return dstObj, nil
}
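
A condensed sketch of the new copy path, with the pacer, ACL handling and progress logging stripped out. It assumes a configured *storage.Service from google.golang.org/api/storage/v1 and is illustrative rather than a drop-in replacement; the point is that, unlike Objects.Copy, Objects.Rewrite may return before the copy has finished and hands back a token to continue with.

    func rewriteObject(ctx context.Context, svc *storage.Service, srcBucket, srcPath, dstBucket, dstPath string) (*storage.Object, error) {
        req := svc.Objects.Rewrite(srcBucket, srcPath, dstBucket, dstPath, nil)
        for {
            res, err := req.Context(ctx).Do()
            if err != nil {
                return nil, err
            }
            if res.Done {
                return res.Resource, nil // metadata of the completed copy
            }
            // Large or cross-location copies need several calls; carry on
            // from where the previous call stopped.
            req.RewriteToken(res.RewriteToken)
        }
    }
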
@@ -1022,11 +1028,10 @@ func (o *Object) Storable() bool {
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
req, err := http.NewRequest("GET", o.url, nil)
req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil)
if err != nil {
return nil, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
fs.FixRangeOption(options, o.bytes)
fs.OpenOptionAddHTTPHeaders(req.Header, options)
var res *http.Response
@@ -1085,6 +1090,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
object.ContentLanguage = value
case "content-type":
object.ContentType = value
case "x-goog-storage-class":
object.StorageClass = value
default:
const googMetaPrefix = "x-goog-meta-"
if strings.HasPrefix(lowerKey, googMetaPrefix) {
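
With the new x-goog-storage-class case above, the storage class of an upload can be chosen through rclone's generic upload-header mechanism, for example (assuming the --header-upload flag, and with NEARLINE as just one of the valid GCS classes):

    rclone copy --header-upload "X-Goog-Storage-Class: NEARLINE" /path/to/file remote:bucket

The key is lower-cased before matching, so the header name is case-insensitive.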


@@ -78,7 +78,7 @@ func init() {
Prefix: "gphotos",
Description: "Google Photos",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
Config: func(ctx context.Context, name string, m configmap.Mapper) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -95,7 +95,7 @@ func init() {
}
// Do the oauth
err = oauthutil.Config("google photos", name, m, oauthConfig, nil)
err = oauthutil.Config(ctx, "google photos", name, m, oauthConfig, nil)
if err != nil {
golog.Fatalf("Failed to configure token: %v", err)
}
@@ -132,15 +132,33 @@ you want to read the media.`,
Default: 2000,
Help: `Year limits the photos to be downloaded to those which are uploaded after the given year`,
Advanced: true,
}, {
Name: "include_archived",
Default: false,
Help: `Also view and download archived media.
By default rclone does not request archived media. Thus, when syncing,
archived media is not visible in directory listings or transferred.
Note that media in albums is always visible and synced, no matter
their archive status.
With this flag, archived media are always visible in directory
listings and transferred.
Without this flag, archived media will not be visible in directory
listings and won't be transferred.`,
Advanced: true,
}}...),
})
}
// Options defines the configuration for this backend
type Options struct {
ReadOnly bool `config:"read_only"`
ReadSize bool `config:"read_size"`
StartYear int `config:"start_year"`
ReadOnly bool `config:"read_only"`
ReadSize bool `config:"read_size"`
StartYear int `config:"start_year"`
IncludeArchived bool `config:"include_archived"`
}
// Fs represents a remote storage server
@@ -206,6 +224,10 @@ func (f *Fs) startYear() int {
return f.opt.StartYear
}
func (f *Fs) includeArchived() bool {
return f.opt.IncludeArchived
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
@@ -246,7 +268,7 @@ func errorHandler(resp *http.Response) error {
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -254,8 +276,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}
baseClient := fshttp.NewClient(fs.Config)
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
baseClient := fshttp.NewClient(ctx)
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Box")
}
@@ -272,14 +294,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
unAuth: rest.NewClient(baseClient),
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
ts: ts,
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
startTime: time.Now(),
albums: map[bool]*albums{},
uploaded: dirtree.New(),
}
f.features = (&fs.Features{
ReadMimeType: true,
}).Fill(f)
}).Fill(ctx, f)
f.srv.SetErrorHandler(errorHandler)
_, _, pattern := patterns.match(f.root, "", true)
@@ -288,7 +310,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
var leaf string
f.root, leaf = path.Split(f.root)
f.root = strings.TrimRight(f.root, "/")
_, err := f.NewObject(context.TODO(), leaf)
_, err := f.NewObject(ctx, leaf)
if err == nil {
return f, fs.ErrorIsFile
}
@@ -497,6 +519,12 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
}
filter.PageSize = listChunks
filter.PageToken = ""
if filter.AlbumID == "" { // album ID and filters cannot be set together, else error 400 INVALID_ARGUMENT
if filter.Filters == nil {
filter.Filters = &api.Filters{}
}
filter.Filters.IncludeArchivedMedia = &f.opt.IncludeArchived
}
lastID := ""
for {
var result api.MediaItems
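
The guard above exists because the Photos API rejects a search that sets both an album ID and filters. When no album is given, the search request body ends up looking roughly like this (field names follow the Google Photos mediaItems.search API; the values are examples only):

    {
      "pageSize": 100,
      "filters": {
        "includeArchivedMedia": true
      }
    }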


@@ -35,14 +35,14 @@ func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
*fstest.RemoteName = "TestGooglePhotos:"
}
f, err := fs.NewFs(*fstest.RemoteName)
f, err := fs.NewFs(ctx, *fstest.RemoteName)
if err == fs.ErrorNotFoundInConfigFile {
t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err))
}
require.NoError(t, err)
// Create local Fs pointing at testfiles
localFs, err := fs.NewFs("testfiles")
localFs, err := fs.NewFs(ctx, "testfiles")
require.NoError(t, err)
t.Run("CreateAlbum", func(t *testing.T) {
@@ -115,7 +115,7 @@ func TestIntegration(t *testing.T) {
assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
})
// Check it is there in the date/month/year heirachy
// Check it is there in the date/month/year hierarchy
// 2013-07-13 is the creation date of the folder
checkPresent := func(t *testing.T, objPath string) {
entries, err := f.List(ctx, objPath)
@@ -155,7 +155,7 @@ func TestIntegration(t *testing.T) {
})
t.Run("NewFsIsFile", func(t *testing.T) {
fNew, err := fs.NewFs(*fstest.RemoteName + remote)
fNew, err := fs.NewFs(ctx, *fstest.RemoteName+remote)
assert.Equal(t, fs.ErrorIsFile, err)
leaf := path.Base(remote)
o, err := fNew.NewObject(ctx, leaf)


@@ -24,6 +24,7 @@ type lister interface {
listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
dirTime() time.Time
startYear() int
includeArchived() bool
}
// dirPattern describes a single directory pattern


@@ -64,6 +64,11 @@ func (f *testLister) startYear() int {
return 2000
}
// mock includeArchived for testing
func (f *testLister) includeArchived() bool {
return false
}
func TestPatternMatch(t *testing.T) {
for testNumber, test := range []struct {
// input

backend/hdfs/fs.go Normal file

@@ -0,0 +1,320 @@
// +build !plan9
package hdfs
import (
"context"
"fmt"
"io"
"os"
"os/user"
"path"
"strings"
"time"
"github.com/colinmarc/hdfs/v2"
krb "github.com/jcmturner/gokrb5/v8/client"
"github.com/jcmturner/gokrb5/v8/config"
"github.com/jcmturner/gokrb5/v8/credentials"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
)
// Fs represents a HDFS server
type Fs struct {
name string
root string
features *fs.Features // optional features
opt Options // options for this backend
ci *fs.ConfigInfo // global config
client *hdfs.Client
}
// copy-paste from https://github.com/colinmarc/hdfs/blob/master/cmd/hdfs/kerberos.go
func getKerberosClient() (*krb.Client, error) {
configPath := os.Getenv("KRB5_CONFIG")
if configPath == "" {
configPath = "/etc/krb5.conf"
}
cfg, err := config.Load(configPath)
if err != nil {
return nil, err
}
// Determine the ccache location from the environment, falling back to the
// default location.
ccachePath := os.Getenv("KRB5CCNAME")
if strings.Contains(ccachePath, ":") {
if strings.HasPrefix(ccachePath, "FILE:") {
ccachePath = strings.SplitN(ccachePath, ":", 2)[1]
} else {
return nil, fmt.Errorf("unusable ccache: %s", ccachePath)
}
} else if ccachePath == "" {
u, err := user.Current()
if err != nil {
return nil, err
}
ccachePath = fmt.Sprintf("/tmp/krb5cc_%s", u.Uid)
}
ccache, err := credentials.LoadCCache(ccachePath)
if err != nil {
return nil, err
}
client, err := krb.NewFromCCache(ccache, cfg)
if err != nil {
return nil, err
}
return client, nil
}
// NewFs constructs an Fs from the path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
options := hdfs.ClientOptions{
Addresses: []string{opt.Namenode},
UseDatanodeHostname: false,
}
if opt.ServicePrincipalName != "" {
options.KerberosClient, err = getKerberosClient()
if err != nil {
return nil, fmt.Errorf("Problem with kerberos authentication: %s", err)
}
options.KerberosServicePrincipleName = opt.ServicePrincipalName
if opt.DataTransferProtection != "" {
options.DataTransferProtection = opt.DataTransferProtection
}
} else {
options.User = opt.Username
}
client, err := hdfs.NewClient(options)
if err != nil {
return nil, err
}
f := &Fs{
name: name,
root: root,
opt: *opt,
ci: fs.GetConfig(ctx),
client: client,
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
info, err := f.client.Stat(f.realpath(""))
if err == nil && !info.IsDir() {
f.root = path.Dir(f.root)
return f, fs.ErrorIsFile
}
return f, nil
}
// Name of this fs
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("hdfs://%s", f.opt.Namenode)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Hashes are not supported
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None)
}
// NewObject finds file at remote or return fs.ErrorObjectNotFound
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
realpath := f.realpath(remote)
fs.Debugf(f, "new [%s]", realpath)
info, err := f.ensureFile(realpath)
if err != nil {
return nil, err
}
return &Object{
fs: f,
remote: remote,
size: info.Size(),
modTime: info.ModTime(),
}, nil
}
// List the objects and directories in dir into entries.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
realpath := f.realpath(dir)
fs.Debugf(f, "list [%s]", realpath)
err = f.ensureDirectory(realpath)
if err != nil {
return nil, err
}
list, err := f.client.ReadDir(realpath)
if err != nil {
return nil, err
}
for _, x := range list {
stdName := f.opt.Enc.ToStandardName(x.Name())
remote := path.Join(dir, stdName)
if x.IsDir() {
entries = append(entries, fs.NewDir(remote, x.ModTime()))
} else {
entries = append(entries, &Object{
fs: f,
remote: remote,
size: x.Size(),
modTime: x.ModTime()})
}
}
return entries, nil
}
// Put the object
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o := &Object{
fs: f,
remote: src.Remote(),
}
err := o.Update(ctx, in, src, options...)
return o, err
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
}
// Mkdir makes a directory
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
fs.Debugf(f, "mkdir [%s]", f.realpath(dir))
return f.client.MkdirAll(f.realpath(dir), 0755)
}
// Rmdir deletes the directory
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
realpath := f.realpath(dir)
fs.Debugf(f, "rmdir [%s]", realpath)
err := f.ensureDirectory(realpath)
if err != nil {
return err
}
// do not remove a directory which is not empty
list, err := f.client.ReadDir(realpath)
if err != nil {
return err
}
if len(list) > 0 {
return fs.ErrorDirectoryNotEmpty
}
return f.client.Remove(realpath)
}
// Purge deletes all the files in the directory
func (f *Fs) Purge(ctx context.Context, dir string) error {
realpath := f.realpath(dir)
fs.Debugf(f, "purge [%s]", realpath)
err := f.ensureDirectory(realpath)
if err != nil {
return err
}
return f.client.RemoveAll(realpath)
}
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
info, err := f.client.StatFs()
if err != nil {
return nil, err
}
return &fs.Usage{
Total: fs.NewUsageValue(int64(info.Capacity)),
Used: fs.NewUsageValue(int64(info.Used)),
Free: fs.NewUsageValue(int64(info.Remaining)),
}, nil
}
func (f *Fs) ensureDirectory(realpath string) error {
info, err := f.client.Stat(realpath)
if e, ok := err.(*os.PathError); ok && e.Err == os.ErrNotExist {
return fs.ErrorDirNotFound
}
if err != nil {
return err
}
if !info.IsDir() {
return fs.ErrorDirNotFound
}
return nil
}
func (f *Fs) ensureFile(realpath string) (os.FileInfo, error) {
info, err := f.client.Stat(realpath)
if e, ok := err.(*os.PathError); ok && e.Err == os.ErrNotExist {
return nil, fs.ErrorObjectNotFound
}
if err != nil {
return nil, err
}
if info.IsDir() {
return nil, fs.ErrorObjectNotFound
}
return info, nil
}
func (f *Fs) realpath(dir string) string {
return f.opt.Enc.FromStandardPath(xPath(f.Root(), dir))
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
)

backend/hdfs/hdfs.go Normal file

@@ -0,0 +1,86 @@
// +build !plan9
package hdfs
import (
"path"
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/lib/encoder"
)
func init() {
fsi := &fs.RegInfo{
Name: "hdfs",
Description: "Hadoop distributed file system",
NewFs: NewFs,
Options: []fs.Option{{
Name: "namenode",
Help: "hadoop name node and port",
Required: true,
Examples: []fs.OptionExample{{
Value: "namenode:8020",
Help: "Connect to host namenode at port 8020",
}},
}, {
Name: "username",
Help: "hadoop user name",
Required: false,
Examples: []fs.OptionExample{{
Value: "root",
Help: "Connect to hdfs as root",
}},
}, {
Name: "service_principal_name",
Help: `Kerberos service principal name for the namenode
Enables KERBEROS authentication. Specifies the Service Principal Name
(<SERVICE>/<FQDN>) for the namenode.`,
Required: false,
Examples: []fs.OptionExample{{
Value: "hdfs/namenode.hadoop.docker",
Help: "Namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.",
}},
Advanced: true,
}, {
Name: "data_transfer_protection",
Help: `Kerberos data transfer protection: authentication|integrity|privacy
Specifies whether or not authentication, data signature integrity
checks, and wire encryption are required when communicating with the
datanodes. Possible values are 'authentication', 'integrity' and
'privacy'. Used only with KERBEROS enabled.`,
Required: false,
Examples: []fs.OptionExample{{
Value: "privacy",
Help: "Ensure authentication, integrity and encryption enabled.",
}},
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.Display | encoder.EncodeInvalidUtf8 | encoder.EncodeColon),
}},
}
fs.Register(fsi)
}
// Options for this backend
type Options struct {
Namenode string `config:"namenode"`
Username string `config:"username"`
ServicePrincipalName string `config:"service_principal_name"`
DataTransferProtection string `config:"data_transfer_protection"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// xPath make correct file path with leading '/'
func xPath(root string, tail string) string {
if !strings.HasPrefix(root, "/") {
root = "/" + root
}
return path.Join(root, tail)
}
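
For reference, a remote using the options registered above might look like this in rclone.conf (all values are placeholders taken from the option examples; the commented lines are only needed for a Kerberos-secured cluster, in which case username is ignored):

    [hdfs]
    type = hdfs
    namenode = namenode:8020
    username = root
    # service_principal_name = hdfs/namenode.hadoop.docker
    # data_transfer_protection = privacy
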

backend/hdfs/hdfs_test.go Normal file

@@ -0,0 +1,20 @@
// Test HDFS filesystem interface
// +build !plan9
package hdfs_test
import (
"testing"
"github.com/rclone/rclone/backend/hdfs"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestHdfs:",
NilObject: (*hdfs.Object)(nil),
})
}


@@ -0,0 +1,6 @@
// Build for hdfs for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build plan9
package hdfs

backend/hdfs/object.go Normal file

@@ -0,0 +1,177 @@
// +build !plan9
package hdfs
import (
"context"
"io"
"path"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/readers"
)
// Object describes an HDFS file
type Object struct {
fs *Fs
remote string
size int64
modTime time.Time
}
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.size
}
// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
realpath := o.fs.realpath(o.Remote())
err := o.fs.client.Chtimes(realpath, modTime, modTime)
if err != nil {
return err
}
o.modTime = modTime
return nil
}
// Storable returns whether this object is storable
func (o *Object) Storable() bool {
return true
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Remote()
}
// Hash is not supported
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
realpath := o.realpath()
fs.Debugf(o.fs, "open [%s]", realpath)
f, err := o.fs.client.Open(realpath)
if err != nil {
return nil, err
}
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
}
}
_, err = f.Seek(offset, io.SeekStart)
if err != nil {
return nil, err
}
if limit != -1 {
in = readers.NewLimitedReadCloser(f, limit)
} else {
in = f
}
return in, err
}
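
The option handling above turns a requested byte range into a seek offset plus an optional length cap. A stand-alone illustration of the same idea with an ordinary reader (no HDFS involved; io.LimitReader stands in for readers.NewLimitedReadCloser):

    package main

    import (
        "fmt"
        "io"
        "io/ioutil"
        "strings"
    )

    func main() {
        f := strings.NewReader("0123456789")
        // A request for bytes 3..6 inclusive decodes to offset 3, limit 4.
        offset, limit := int64(3), int64(4)
        if _, err := f.Seek(offset, io.SeekStart); err != nil {
            panic(err)
        }
        var in io.Reader = f
        if limit >= 0 {
            in = io.LimitReader(f, limit)
        }
        b, _ := ioutil.ReadAll(in)
        fmt.Printf("%q\n", b) // "3456"
    }
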
// Update object
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
realpath := o.fs.realpath(src.Remote())
dirname := path.Dir(realpath)
fs.Debugf(o.fs, "update [%s]", realpath)
err := o.fs.client.MkdirAll(dirname, 0755)
if err != nil {
return err
}
info, err := o.fs.client.Stat(realpath)
if err == nil {
err = o.fs.client.Remove(realpath)
if err != nil {
return err
}
}
out, err := o.fs.client.Create(realpath)
if err != nil {
return err
}
cleanup := func() {
rerr := o.fs.client.Remove(realpath)
if rerr != nil {
fs.Errorf(o.fs, "failed to remove [%v]: %v", realpath, rerr)
}
}
_, err = io.Copy(out, in)
if err != nil {
cleanup()
return err
}
err = out.Close()
if err != nil {
cleanup()
return err
}
info, err = o.fs.client.Stat(realpath)
if err != nil {
return err
}
err = o.SetModTime(ctx, src.ModTime(ctx))
if err != nil {
return err
}
o.size = info.Size()
return nil
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
realpath := o.fs.realpath(o.remote)
fs.Debugf(o.fs, "remove [%s]", realpath)
return o.fs.client.Remove(realpath)
}
func (o *Object) realpath() string {
return o.fs.opt.Enc.FromStandardPath(xPath(o.Fs().Root(), o.remote))
}
// Check the interfaces are satisfied
var (
_ fs.Object = (*Object)(nil)
)


@@ -58,7 +58,7 @@ The input format is comma separated list of key,value pairs. Standard
For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
You can set multiple headers, eg '"Cookie","name=value","Authorization","xxx"'.
You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
`,
Default: fs.CommaSepList{},
Advanced: true,
@@ -115,8 +115,9 @@ type Options struct {
type Fs struct {
name string
root string
features *fs.Features // optional features
opt Options // options for this backend
features *fs.Features // optional features
opt Options // options for this backend
ci *fs.ConfigInfo // global config
endpoint *url.URL
endpointURL string // endpoint as a string
httpClient *http.Client
@@ -145,8 +146,7 @@ func statusError(res *http.Response, err error) error {
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -172,7 +172,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}
client := fshttp.NewClient(fs.Config)
client := fshttp.NewClient(ctx)
var isFile = false
if !strings.HasSuffix(u.String(), "/") {
@@ -183,9 +183,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return http.ErrUseLastResponse
}
// check to see if points to a file
req, err := http.NewRequest("HEAD", u.String(), nil)
req, err := http.NewRequestWithContext(ctx, "HEAD", u.String(), nil)
if err == nil {
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
addHeaders(req, opt)
res, err := noRedir.Do(req)
err = statusError(res, err)
@@ -210,17 +209,19 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
ci: ci,
httpClient: client,
endpoint: u,
endpointURL: u.String(),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(f)
}).Fill(ctx, f)
if isFile {
return f, fs.ErrorIsFile
}
@@ -389,11 +390,10 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
}
// Do the request
req, err := http.NewRequest("GET", URL, nil)
req, err := http.NewRequestWithContext(ctx, "GET", URL, nil)
if err != nil {
return nil, errors.Wrap(err, "readDir failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
f.addHeaders(req)
res, err := f.httpClient.Do(req)
if err == nil {
@@ -440,14 +440,15 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
var (
entriesMu sync.Mutex // to protect entries
wg sync.WaitGroup
in = make(chan string, fs.Config.Checkers)
checkers = f.ci.Checkers
in = make(chan string, checkers)
)
add := func(entry fs.DirEntry) {
entriesMu.Lock()
entries = append(entries, entry)
entriesMu.Unlock()
}
for i := 0; i < fs.Config.Checkers; i++ {
for i := 0; i < checkers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
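
The hunk above only swaps the global fs.Config.Checkers for the per-context value, but for context this is the fan-out pattern List relies on: a fixed number of workers drain a channel of names and stat each one in parallel. A self-contained sketch of that pattern (not the rclone code; the per-entry HEAD request is faked):

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        names := []string{"a/", "b.txt", "c.txt"}
        const checkers = 4 // plays the role of ci.Checkers

        var (
            mu      sync.Mutex
            entries []string
            wg      sync.WaitGroup
            in      = make(chan string, checkers)
        )
        for i := 0; i < checkers; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                for name := range in {
                    // the real code issues a HEAD per name here; we just record it
                    mu.Lock()
                    entries = append(entries, name)
                    mu.Unlock()
                }
            }()
        }
        for _, name := range names {
            in <- name
        }
        close(in)
        wg.Wait()
        fmt.Println(len(entries), "entries listed")
    }
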
@@ -544,11 +545,10 @@ func (o *Object) stat(ctx context.Context) error {
return nil
}
url := o.url()
req, err := http.NewRequest("HEAD", url, nil)
req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
if err != nil {
return errors.Wrap(err, "stat failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
o.fs.addHeaders(req)
res, err := o.fs.httpClient.Do(req)
if err == nil && res.StatusCode == http.StatusNotFound {
@@ -585,7 +585,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return errorReadOnly
}
// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc)
// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
func (o *Object) Storable() bool {
return true
}
@@ -593,11 +593,10 @@ func (o *Object) Storable() bool {
// Open a remote http file object for reading. Seek is supported
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
url := o.url()
req, err := http.NewRequest("GET", url, nil)
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
if err != nil {
return nil, errors.Wrap(err, "Open failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
// Add optional headers
for k, v := range fs.OpenOptionHeaders(options) {


@@ -47,7 +47,7 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
ts := httptest.NewServer(handler)
// Configure the remote
config.LoadConfig()
config.LoadConfig(context.Background())
// fs.Config.LogLevel = fs.LogLevelDebug
// fs.Config.DumpHeaders = true
// fs.Config.DumpBodies = true
@@ -69,7 +69,7 @@ func prepare(t *testing.T) (fs.Fs, func()) {
m, tidy := prepareServer(t)
// Instantiate it
f, err := NewFs(remoteName, "", m)
f, err := NewFs(context.Background(), remoteName, "", m)
require.NoError(t, err)
return f, tidy
@@ -214,7 +214,7 @@ func TestIsAFileRoot(t *testing.T) {
m, tidy := prepareServer(t)
defer tidy()
f, err := NewFs(remoteName, "one%.txt", m)
f, err := NewFs(context.Background(), remoteName, "one%.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)
testListRoot(t, f, false)
@@ -224,7 +224,7 @@ func TestIsAFileSubDir(t *testing.T) {
m, tidy := prepareServer(t)
defer tidy()
f, err := NewFs(remoteName, "three/underthree.txt", m)
f, err := NewFs(context.Background(), remoteName, "three/underthree.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)
entries, err := f.List(context.Background(), "")


@@ -5,7 +5,7 @@ import (
"net/http"
"time"
"github.com/ncw/swift"
"github.com/ncw/swift/v2"
"github.com/rclone/rclone/fs"
)
@@ -24,7 +24,7 @@ func newAuth(f *Fs) *auth {
// Request constructs an http.Request for authentication
//
// returns nil for not needed
func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
func (a *auth) Request(ctx context.Context, c *swift.Connection) (r *http.Request, err error) {
const retries = 10
for try := 1; try <= retries; try++ {
err = a.f.getCredentials(context.TODO())
@@ -38,7 +38,7 @@ func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
}
// Response parses the result of an http request
func (a *auth) Response(resp *http.Response) error {
func (a *auth) Response(ctx context.Context, resp *http.Response) error {
return nil
}


@@ -4,7 +4,7 @@ package hubic
// This uses the normal swift mechanism to update the credentials and
// ignores the expires field returned by the Hubic API. This may need
// to be revisted after some actual experience.
// to be revisited after some actual experience.
import (
"context"
@@ -16,7 +16,7 @@ import (
"strings"
"time"
swiftLib "github.com/ncw/swift"
swiftLib "github.com/ncw/swift/v2"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/swift"
"github.com/rclone/rclone/fs"
@@ -56,8 +56,8 @@ func init() {
Name: "hubic",
Description: "Hubic",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
err := oauthutil.Config("hubic", name, m, oauthConfig, nil)
Config: func(ctx context.Context, name string, m configmap.Mapper) {
err := oauthutil.Config(ctx, "hubic", name, m, oauthConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
@@ -71,7 +71,7 @@ func init() {
type credentials struct {
Token string `json:"token"` // OpenStack token
Endpoint string `json:"endpoint"` // OpenStack endpoint
Expires string `json:"expires"` // Expires date - eg "2015-11-09T14:24:56+01:00"
Expires string `json:"expires"` // Expires date - e.g. "2015-11-09T14:24:56+01:00"
}
// Fs represents a remote hubic
@@ -110,11 +110,10 @@ func (f *Fs) String() string {
//
// The credentials are read into the Fs
func (f *Fs) getCredentials(ctx context.Context) (err error) {
req, err := http.NewRequest("GET", "https://api.hubic.com/1.0/account/credentials", nil)
req, err := http.NewRequestWithContext(ctx, "GET", "https://api.hubic.com/1.0/account/credentials", nil)
if err != nil {
return err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
resp, err := f.client.Do(req)
if err != nil {
return err
@@ -146,8 +145,8 @@ func (f *Fs) getCredentials(ctx context.Context) (err error) {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
client, _, err := oauthutil.NewClient(name, m, oauthConfig)
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
client, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Hubic")
}
@@ -157,13 +156,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
// Make the swift Connection
ci := fs.GetConfig(ctx)
c := &swiftLib.Connection{
Auth: newAuth(f),
ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport
Transport: fshttp.NewTransport(fs.Config),
ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * ci.Timeout, // Use the timeouts in the transport
Transport: fshttp.NewTransport(ctx),
}
err = c.Authenticate()
err = c.Authenticate(ctx)
if err != nil {
return nil, errors.Wrap(err, "error authenticating swift connection")
}
@@ -176,7 +176,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
// Make inner swift Fs from the connection
swiftFs, err := swift.NewFsWithConnection(opt, name, root, c, true)
swiftFs, err := swift.NewFsWithConnection(ctx, opt, name, root, c, true)
if err != nil && err != fs.ErrorIsFile {
return nil, err
}


@@ -153,9 +153,9 @@ type CustomerInfo struct {
AccountType string `json:"account_type"`
SubscriptionType string `json:"subscription_type"`
Usage int64 `json:"usage"`
Qouta int64 `json:"quota"`
Quota int64 `json:"quota"`
BusinessUsage int64 `json:"business_usage"`
BusinessQouta int64 `json:"business_quota"`
BusinessQuota int64 `json:"business_quota"`
WriteLocked bool `json:"write_locked"`
ReadLocked bool `json:"read_locked"`
LockedCause interface{} `json:"locked_cause"`
@@ -386,7 +386,7 @@ type Error struct {
Cause string `xml:"cause"`
}
// Error returns a string for the error and statistifes the error interface
// Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
out := fmt.Sprintf("error %d", e.StatusCode)
if e.Message != "" {


@@ -63,6 +63,10 @@ const (
v1ClientID = "nibfk8biu12ju7hpqomr8b1e40"
v1EncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
v1configVersion = 0
teliaCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
teliaCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
teliaCloudClientID = "desktop"
)
var (
@@ -83,9 +87,7 @@ func init() {
Name: "jottacloud",
Description: "Jottacloud",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
ctx := context.TODO()
Config: func(ctx context.Context, name string, m configmap.Mapper) {
refresh := false
if version, ok := m.Get("configVersion"); ok {
ver, err := strconv.Atoi(version)
@@ -107,11 +109,18 @@ func init() {
}
}
fmt.Printf("Use legacy authentification?.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n")
if config.Confirm(false) {
v1config(ctx, name, m)
} else {
fmt.Printf("Choose authentication type:\n" +
"1: Standard authentication - use this if you're a normal Jottacloud user.\n" +
"2: Legacy authentication - this is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n" +
"3: Telia Cloud authentication - use this if you are using Telia Cloud.\n")
switch config.ChooseNumber("Your choice", 1, 3) {
case 1:
v2config(ctx, name, m)
case 2:
v1config(ctx, name, m)
case 3:
teliaCloudConfig(ctx, name, m)
}
},
Options: []fs.Option{{
@@ -230,9 +239,49 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// v1config configure a jottacloud backend using legacy authentification
func teliaCloudConfig(ctx context.Context, name string, m configmap.Mapper) {
teliaCloudOauthConfig := &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: teliaCloudAuthURL,
TokenURL: teliaCloudTokenURL,
},
ClientID: teliaCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
}
err := oauthutil.Config(ctx, "jottacloud", name, m, teliaCloudOauthConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
return
}
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, teliaCloudOauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
}
srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
if err != nil {
log.Fatalf("Failed to setup mountpoint: %s", err)
}
m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint)
}
m.Set("configVersion", strconv.Itoa(configVersion))
m.Set(configClientID, teliaCloudClientID)
m.Set(configTokenURL, teliaCloudTokenURL)
}
// v1config configure a jottacloud backend using legacy authentication
func v1config(ctx context.Context, name string, m configmap.Mapper) {
srv := rest.NewClient(fshttp.NewClient(fs.Config))
srv := rest.NewClient(fshttp.NewClient(ctx))
fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
if config.Confirm(false) {
@@ -275,7 +324,7 @@ func v1config(ctx context.Context, name string, m configmap.Mapper) {
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
}
@@ -323,7 +372,7 @@ func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegis
return deviceRegistration, err
}
// doAuthV1 runs the actual token request for V1 authentification
// doAuthV1 runs the actual token request for V1 authentication
func doAuthV1(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
// prepare out token request with username and password
values := url.Values{}
@@ -365,14 +414,17 @@ func doAuthV1(ctx context.Context, srv *rest.Client, username, password string)
return token, err
}
// v2config configure a jottacloud backend using the modern JottaCli token based authentification
// v2config configure a jottacloud backend using the modern JottaCli token based authentication
func v2config(ctx context.Context, name string, m configmap.Mapper) {
srv := rest.NewClient(fshttp.NewClient(fs.Config))
srv := rest.NewClient(fshttp.NewClient(ctx))
fmt.Printf("Generate a personal login token here: https://www.jottacloud.com/web/secure\n")
fmt.Printf("Login Token> ")
loginToken := config.ReadLine()
m.Set(configClientID, "jottacli")
m.Set(configClientSecret, "")
token, err := doAuthV2(ctx, srv, loginToken, m)
if err != nil {
log.Fatalf("Failed to get oauth token: %s", err)
@@ -384,8 +436,7 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
oauthConfig.ClientID = "jottacli"
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
}
@@ -403,7 +454,7 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {
m.Set("configVersion", strconv.Itoa(configVersion))
}
// doAuthV2 runs the actual token request for V2 authentification
// doAuthV2 runs the actual token request for V2 authentication
func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m configmap.Mapper) (token oauth2.Token, err error) {
loginTokenBytes, err := base64.RawURLEncoding.DecodeString(loginTokenBase64)
if err != nil {
@@ -551,7 +602,7 @@ func (f *Fs) setEndpointURL() {
if f.opt.Mountpoint == "" {
f.opt.Mountpoint = defaultMountpoint
}
f.endpointURL = urlPathEscape(path.Join(f.user, f.opt.Device, f.opt.Mountpoint))
f.endpointURL = path.Join(f.user, f.opt.Device, f.opt.Mountpoint)
}
// readMetaDataForPath reads the metadata from the path
@@ -639,8 +690,7 @@ func grantTypeFilter(req *http.Request) {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -662,7 +712,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.New("Outdated config - please reconfigure this backend")
}
baseClient := fshttp.NewClient(fs.Config)
baseClient := fshttp.NewClient(ctx)
if ver == configVersion {
oauthConfig.ClientID = "jottacli"
@@ -698,7 +748,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
// Create OAuth Client
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
if err != nil {
return nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
}
@@ -712,14 +762,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CaseInsensitive: true,
CanHaveEmptyDirectories: true,
ReadMimeType: true,
WriteMimeType: true,
}).Fill(f)
WriteMimeType: false,
}).Fill(ctx, f)
f.srv.SetErrorHandler(errorHandler)
if opt.TrashedOnly { // we cannot support showing Trashed Files when using ListR right now
f.features.ListR = nil
@@ -728,6 +778,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath(ctx, "")
if err == fs.ErrorNotAFile {
err = nil
}
return err
})
@@ -1087,8 +1140,7 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
retry, _ := shouldRetry(resp, err)
return (retry && resp.StatusCode != 500), err
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -1096,7 +1148,7 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
return info, nil
}
// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -1126,7 +1178,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
//return f.newObjectWithInfo(remote, &result)
}
// Move src to this remote using server side move operations.
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -1157,7 +1209,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1192,18 +1244,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)
// surprise! jottacloud fucked up dirmove - the api spits out an error but
// dir gets moved regardless
if apiErr, ok := err.(*api.Error); ok {
if apiErr.StatusCode == 500 {
_, err := f.NewObject(ctx, dstRemote)
if err == fs.ErrorNotAFile {
log.Printf("FIXME: ignoring DirMove error - move succeeded anyway\n")
return nil
}
return err
}
}
if err != nil {
return errors.Wrap(err, "couldn't move directory")
}
@@ -1477,6 +1517,8 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
o.fs.tokenRenewer.Start()
defer o.fs.tokenRenewer.Stop()
size := src.Size()
md5String, err := src.Hash(ctx, hash.MD5)
if err != nil || md5String == "" {
@@ -1523,7 +1565,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
// If the file state is INCOMPLETE and CORRPUT, try to upload a then
// If the file state is INCOMPLETE and CORRUPT, try to upload a then
if response.State != "COMPLETED" {
// how much do we still have to upload?
remainingBytes := size - response.ResumePos

View File

@@ -256,7 +256,7 @@ func (f *Fs) fullPath(part string) string {
}
// NewFs constructs a new filesystem given a root path and configuration options
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
@@ -267,7 +267,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
return nil, err
}
httpClient := httpclient.New()
httpClient.Client = fshttp.NewClient(fs.Config)
httpClient.Client = fshttp.NewClient(ctx)
client := koofrclient.NewKoofrClientWithHTTPClient(opt.Endpoint, httpClient)
basicAuth := fmt.Sprintf("Basic %s",
base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))
@@ -287,7 +287,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
DuplicateFiles: false,
BucketBased: false,
CanHaveEmptyDirectories: true,
}).Fill(f)
}).Fill(ctx, f)
for _, m := range mounts {
if opt.MountID != "" {
if m.Id == opt.MountID {

View File

@@ -70,6 +70,20 @@ points, as you explicitly acknowledge that they should be skipped.`,
Default: false,
NoPrefix: true,
Advanced: true,
}, {
Name: "zero_size_links",
Help: `Assume the Stat size of links is zero (and read them instead)
On some virtual filesystems (such as LucidLink), reading a link size via a Stat call always returns 0.
However, on unix it reads as the length of the text in the link. This may cause errors like this when
syncing:
Failed to copy: corrupted on transfer: sizes differ 0 vs 13
Setting this flag causes rclone to read the link and use that as the size of the link
instead of 0 which in most cases fixes the problem.`,
Default: false,
Advanced: true,
}, {
Name: "no_unicode_normalization",
Help: `Don't apply unicode normalization to paths and filenames (Deprecated)
@@ -87,13 +101,13 @@ Normally rclone checks the size and modification time of files as they
are being uploaded and aborts with a message which starts "can't copy
- source file is being updated" if the file changes during upload.
However on some file systems this modification time check may fail (eg
However on some file systems this modification time check may fail (e.g.
[Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this
check can be disabled with this flag.
If this flag is set, rclone will use its best efforts to transfer a
file which is being updated. If the file is only having things
appended to it (eg a log) then rclone will transfer the log file with
appended to it (e.g. a log) then rclone will transfer the log file with
the size it had the first time rclone saw it.
If the file is being modified throughout (not just appended to) then
@@ -170,6 +184,7 @@ type Options struct {
FollowSymlinks bool `config:"copy_links"`
TranslateSymlinks bool `config:"links"`
SkipSymlinks bool `config:"skip_links"`
ZeroSizeLinks bool `config:"zero_size_links"`
NoUTFNorm bool `config:"no_unicode_normalization"`
NoCheckUpdated bool `config:"no_check_updated"`
NoUNC bool `config:"nounc"`
@@ -217,7 +232,7 @@ type Object struct {
var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
// NewFs constructs an Fs from the path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -245,7 +260,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
CanHaveEmptyDirectories: true,
IsLocal: true,
SlowHash: true,
}).Fill(f)
}).Fill(ctx, f)
if opt.FollowSymlinks {
f.lstat = os.Stat
}
@@ -456,8 +471,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
localPath := filepath.Join(fsDirPath, name)
fi, err = os.Stat(localPath)
if os.IsNotExist(err) {
// Skip bad symlinks
if os.IsNotExist(err) || isCircularSymlinkError(err) {
// Skip bad symlinks and circular symlinks
err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
fs.Errorf(newRemote, "Listing error: %v", err)
err = accounting.Stats(ctx).Error(err)
@@ -637,7 +652,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return os.RemoveAll(dir)
}
// Move src to this remote using server side move operations.
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -701,7 +716,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1213,7 +1228,7 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
// Set the file to be a sparse file (important on Windows)
err = file.SetSparse(out)
if err != nil {
fs.Debugf(o, "Failed to set sparse: %v", err)
fs.Errorf(o, "Failed to set sparse: %v", err)
}
}
@@ -1231,6 +1246,16 @@ func (o *Object) setMetadata(info os.FileInfo) {
o.modTime = info.ModTime()
o.mode = info.Mode()
o.fs.objectMetaMu.Unlock()
// On Windows links read as 0 size so set the correct size here
// Optionally, users can turn this feature on with the zero_size_links flag
if (runtime.GOOS == "windows" || o.fs.opt.ZeroSizeLinks) && o.translatedLink {
linkdst, err := os.Readlink(o.path)
if err != nil {
fs.Errorf(o, "Failed to read link size: %v", err)
} else {
o.size = int64(len(linkdst))
}
}
}
// Stat an Object into info

View File

@@ -6,7 +6,6 @@ import (
"os"
"path"
"path/filepath"
"runtime"
"testing"
"time"
@@ -89,9 +88,6 @@ func TestSymlink(t *testing.T) {
// Object viewed as symlink
file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
if runtime.GOOS == "windows" {
file2.Size = 0 // symlinks are 0 length under Windows
}
// Object viewed as destination
file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
@@ -121,9 +117,6 @@ func TestSymlink(t *testing.T) {
// Create a symlink
modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
if runtime.GOOS == "windows" {
file3.Size = 0 // symlinks are 0 length under Windows
}
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
if haveLChtimes {
fstest.CheckItems(t, r.Flocal, file1, file2, file3)
@@ -142,9 +135,7 @@ func TestSymlink(t *testing.T) {
o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
require.NoError(t, err)
assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
if runtime.GOOS != "windows" {
assert.Equal(t, int64(8), o.Size())
}
assert.Equal(t, int64(8), o.Size())
// Check that NewObject doesn't see the non suffixed version
_, err = r.Flocal.NewObject(ctx, "symlink2.txt")
@@ -172,6 +163,6 @@ func TestSymlinkError(t *testing.T) {
"links": "true",
"copy_links": "true",
}
_, err := NewFs("local", "/", m)
_, err := NewFs(context.Background(), "local", "/", m)
assert.Equal(t, errLinksAndCopyLinks, err)
}

backend/local/symlink.go Normal file
View File

@@ -0,0 +1,22 @@
// +build !windows,!plan9,!js
package local
import (
"os"
"syscall"
)
// isCircularSymlinkError checks if the current error code is because of a circular symlink
func isCircularSymlinkError(err error) bool {
if err != nil {
if newerr, ok := err.(*os.PathError); ok {
if errcode, ok := newerr.Err.(syscall.Errno); ok {
if errcode == syscall.ELOOP {
return true
}
}
}
}
return false
}
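
As a rough illustration of when the unix build of isCircularSymlinkError above fires: on a unix-like system, Stat on a self-referential link fails with ELOOP wrapped in an *os.PathError (a sketch with hypothetical file names):

package main

import (
	"fmt"
	"os"
	"syscall"
)

func main() {
	// Create a symlink that points at itself, then try to follow it.
	_ = os.Symlink("loop.txt", "loop.txt")
	_, err := os.Stat("loop.txt")
	if pathErr, ok := err.(*os.PathError); ok {
		if errno, ok := pathErr.Err.(syscall.Errno); ok && errno == syscall.ELOOP {
			fmt.Println("circular symlink detected:", err)
		}
	}
}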

View File

@@ -0,0 +1,17 @@
// +build windows plan9 js
package local
import (
"strings"
)
// isCircularSymlinkError checks if the current error code is because of a circular symlink
func isCircularSymlinkError(err error) bool {
if err != nil {
if strings.Contains(err.Error(), "The name of the file cannot be resolved by the system") {
return true
}
}
return false
}

View File

@@ -117,7 +117,7 @@ type ListItem struct {
Name string `json:"name"`
Home string `json:"home"`
Size int64 `json:"size"`
Mtime int64 `json:"mtime,omitempty"`
Mtime uint64 `json:"mtime,omitempty"`
Hash string `json:"hash,omitempty"`
VirusScan string `json:"virus_scan,omitempty"`
Tree string `json:"tree,omitempty"`
@@ -159,71 +159,6 @@ type FolderInfoResponse struct {
Email string `json:"email"`
}
// ShardInfoResponse ...
type ShardInfoResponse struct {
Email string `json:"email"`
Body struct {
Video []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"video"`
ViewDirect []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"view_direct"`
WeblinkView []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_view"`
WeblinkVideo []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_video"`
WeblinkGet []struct {
Count int `json:"count"`
URL string `json:"url"`
} `json:"weblink_get"`
Stock []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"stock"`
WeblinkThumbnails []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_thumbnails"`
PublicUpload []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"public_upload"`
Auth []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"auth"`
Web []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"web"`
View []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"view"`
Upload []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"upload"`
Get []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"get"`
Thumbnails []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"thumbnails"`
} `json:"body"`
Time int64 `json:"time"`
Status int `json:"status"`
}
// CleanupResponse ...
type CleanupResponse struct {
Email string `json:"email"`

View File

@@ -37,6 +37,7 @@ import (
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
"github.com/pkg/errors"
@@ -101,6 +102,7 @@ func init() {
This feature is called "speedup" or "put by hash". It is especially efficient
in case of generally available files like popular books, video or audio clips,
because files are searched by hash in all accounts of all mailru users.
It is meaningless and ineffective if source file is unique or encrypted.
Please note that rclone may need local memory and disk space to calculate
content hash in advance and decide whether full upload is required.
Also, if rclone does not know file size in advance (e.g. in case of
@@ -191,7 +193,7 @@ This option must not be used by an ordinary user. It is intended only to
facilitate remote troubleshooting of backend issues. Strict meaning of
flags is not documented and not guaranteed to persist between releases.
Quirks will be removed when the backend grows stable.
Supported quirks: atomicmkdir binlist gzip insecure retry400`,
Supported quirks: atomicmkdir binlist unknowndirs`,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -237,9 +239,6 @@ func shouldRetry(res *http.Response, err error, f *Fs, opts *rest.Opts) (bool, e
reAuthErr := f.reAuthorize(opts, err)
return reAuthErr == nil, err // return an original error
}
if res != nil && res.StatusCode == 400 && f.quirks.retry400 {
return true, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(res, retryErrorCodes), err
}
@@ -274,8 +273,9 @@ type Fs struct {
name string
root string // root path
opt Options // parsed options
ci *fs.ConfigInfo // global config
speedupGlobs []string // list of file name patterns eligible for speedup
speedupAny bool // true if all file names are aligible for speedup
speedupAny bool // true if all file names are eligible for speedup
features *fs.Features // optional features
srv *rest.Client // REST API client
cli *http.Client // underlying HTTP client (for authorize)
@@ -295,9 +295,8 @@ type Fs struct {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// fs.Debugf(nil, ">>> NewFs %q %q", name, root)
ctx := context.Background() // Note: NewFs does not pass context!
// Parse config into Options struct
opt := new(Options)
@@ -314,10 +313,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// However the f.root string should not have leading or trailing slashes
root = strings.Trim(root, "/")
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
ci: ci,
m: m,
}
@@ -326,7 +327,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
f.quirks.parseQuirks(opt.Quirks)
f.pacer = fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleepPacer), pacer.MaxSleep(maxSleepPacer), pacer.DecayConstant(decayConstPacer)))
f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleepPacer), pacer.MaxSleep(maxSleepPacer), pacer.DecayConstant(decayConstPacer)))
f.features = (&fs.Features{
CaseInsensitive: true,
@@ -334,27 +335,21 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Can copy/move across mailru configs (almost, thus true here), but
// only when they share common account (this is checked in Copy/Move).
ServerSideAcrossConfigs: true,
}).Fill(f)
}).Fill(ctx, f)
// Override few config settings and create a client
clientConfig := *fs.Config
newCtx, clientConfig := fs.AddConfig(ctx)
if opt.UserAgent != "" {
clientConfig.UserAgent = opt.UserAgent
}
clientConfig.NoGzip = !f.quirks.gzip // Send not "Accept-Encoding: gzip" like official client
f.cli = fshttp.NewClient(&clientConfig)
clientConfig.NoGzip = true // Mimic official client, skip sending "Accept-Encoding: gzip"
f.cli = fshttp.NewClient(newCtx)
f.srv = rest.NewClient(f.cli)
f.srv.SetRoot(api.APIServerURL)
f.srv.SetHeader("Accept", "*/*") // Send "Accept: */*" with every request like official client
f.srv.SetErrorHandler(errorHandler)
if f.quirks.insecure {
transport := f.cli.Transport.(*fshttp.Transport).Transport
transport.TLSClientConfig.InsecureSkipVerify = true
transport.ProxyConnectHeader = http.Header{"User-Agent": {clientConfig.UserAgent}}
}
if err = f.authorize(ctx, false); err != nil {
return nil, err
}
@@ -387,30 +382,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Internal maintenance flags (to be removed when the backend matures).
// Primarily intended to facilitate remote support and troubleshooting.
type quirks struct {
gzip bool
insecure bool
binlist bool
atomicmkdir bool
retry400 bool
unknowndirs bool
}
func (q *quirks) parseQuirks(option string) {
for _, flag := range strings.Split(option, ",") {
switch strings.ToLower(strings.TrimSpace(flag)) {
case "gzip":
// This backend mimics the official client which never sends the
// "Accept-Encoding: gzip" header. However, enabling compression
// might be good for performance.
// Use this quirk to investigate the performance impact.
// Remove this quirk if performance does not improve.
q.gzip = true
case "insecure":
// The mailru disk-o protocol is not documented. To compare HTTP
// stream against the official client one can use Telerik Fiddler,
// which introduces a self-signed certificate. This quirk forces
// the Go http layer to accept it.
// Remove this quirk when the backend reaches maturity.
q.insecure = true
case "binlist":
// The official client sometimes uses a so called "bin" protocol,
// implemented in the listBin file system method below. This method
@@ -423,18 +402,14 @@ func (q *quirks) parseQuirks(option string) {
case "atomicmkdir":
// At the moment rclone requires Mkdir to return success if the
// directory already exists. However, such programs as borgbackup
// or restic use mkdir as a locking primitive and depend on its
// atomicity. This quirk is a workaround. It can be removed
// when the above issue is investigated.
// use mkdir as a locking primitive and depend on its atomicity.
// Remove this quirk when the above issue is investigated.
q.atomicmkdir = true
case "retry400":
// This quirk will help in troubleshooting a very rare "Error 400"
// issue. It can be removed if the problem does not show up
// for a year or so. See the below issue:
// https://github.com/ivandeex/rclone/issues/14
q.retry400 = true
case "unknowndirs":
// Accepts unknown resource types as folders.
q.unknowndirs = true
default:
// Just ignore all unknown flags
// Ignore unknown flags
}
}
}
@@ -448,7 +423,7 @@ func (f *Fs) authorize(ctx context.Context, force bool) (err error) {
if err != nil || !tokenIsValid(t) {
fs.Infof(f, "Valid token not found, authorizing.")
ctx := oauthutil.Context(f.cli)
ctx := oauthutil.Context(ctx, f.cli)
t, err = oauthConfig.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password)
}
if err == nil && !tokenIsValid(t) {
@@ -471,7 +446,7 @@ func (f *Fs) authorize(ctx context.Context, force bool) (err error) {
// crashing with panic `comparing uncomparable type map[string]interface{}`
// As a workaround, mimic oauth2.NewClient() wrapping token source in
// oauth2.ReuseTokenSource
_, ts, err := oauthutil.NewClientWithBaseClient(f.name, f.m, oauthConfig, f.cli)
_, ts, err := oauthutil.NewClientWithBaseClient(ctx, f.name, f.m, oauthConfig, f.cli)
if err == nil {
f.source = oauth2.ReuseTokenSource(nil, ts)
}
@@ -550,7 +525,7 @@ func (f *Fs) relPath(absPath string) (string, error) {
return "", fmt.Errorf("path %q should be under %q", absPath, f.root)
}
// metaServer ...
// metaServer returns URL of current meta server
func (f *Fs) metaServer(ctx context.Context) (string, error) {
f.metaMu.Lock()
defer f.metaMu.Unlock()
@@ -655,28 +630,56 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
if err != nil {
return nil, -1, err
}
switch item.Kind {
case "folder":
dir := fs.NewDir(remote, time.Unix(item.Mtime, 0)).SetSize(item.Size)
dirSize := item.Count.Files + item.Count.Folders
return dir, dirSize, nil
case "file":
binHash, err := mrhash.DecodeString(item.Hash)
if err != nil {
return nil, -1, err
}
file := &Object{
fs: f,
remote: remote,
hasMetaData: true,
size: item.Size,
mrHash: binHash,
modTime: time.Unix(item.Mtime, 0),
}
return file, -1, nil
default:
return nil, -1, fmt.Errorf("Unknown resource type %q", item.Kind)
mTime := int64(item.Mtime)
if mTime < 0 {
fs.Debugf(f, "Fixing invalid timestamp %d on mailru file %q", mTime, remote)
mTime = 0
}
modTime := time.Unix(mTime, 0)
isDir, err := f.isDir(item.Kind, remote)
if err != nil {
return nil, -1, err
}
if isDir {
dir := fs.NewDir(remote, modTime).SetSize(item.Size)
return dir, item.Count.Files + item.Count.Folders, nil
}
binHash, err := mrhash.DecodeString(item.Hash)
if err != nil {
return nil, -1, err
}
file := &Object{
fs: f,
remote: remote,
hasMetaData: true,
size: item.Size,
mrHash: binHash,
modTime: modTime,
}
return file, -1, nil
}
// isDir returns true for directories, false for files
func (f *Fs) isDir(kind, path string) (bool, error) {
switch kind {
case "":
return false, errors.New("empty resource type")
case "file":
return false, nil
case "folder":
// fall thru
case "camera-upload", "mounted", "shared":
fs.Debugf(f, "[%s]: folder has type %q", path, kind)
default:
if !f.quirks.unknowndirs {
return false, fmt.Errorf("unknown resource type %q", kind)
}
fs.Errorf(f, "[%s]: folder has unknown type %q", path, kind)
}
return true, nil
}
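
The rewritten itemToDirEntry above clamps negative timestamps (possible once Mtime is decoded as uint64 and cast back to int64) and delegates folder detection to isDir, which treats "camera-upload", "mounted" and "shared" as folders and only accepts other unknown kinds when the unknowndirs quirk is set. A compact sketch of just the clamping step, using nothing beyond the standard library:

package main

import (
	"fmt"
	"time"
)

// clampMtime converts a possibly out-of-range unsigned timestamp into a safe
// time.Time, falling back to the unix epoch if the int64 cast goes negative.
func clampMtime(raw uint64) time.Time {
	mtime := int64(raw)
	if mtime < 0 {
		mtime = 0
	}
	return time.Unix(mtime, 0)
}

func main() {
	fmt.Println(clampMtime(1612345678))           // a normal timestamp
	fmt.Println(clampMtime(18446744073709551615)) // overflows int64, clamped to epoch
}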
// List the objects and directories in dir into entries.
@@ -692,7 +695,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
entries, err = f.listM1(ctx, f.absPath(dir), 0, maxInt32)
}
if err == nil && fs.Config.LogLevel >= fs.LogLevelDebug {
if err == nil && f.ci.LogLevel >= fs.LogLevelDebug {
names := []string{}
for _, entry := range entries {
names = append(names, entry.Remote())
@@ -744,7 +747,11 @@ func (f *Fs) listM1(ctx context.Context, dirPath string, offset int, limit int)
return nil, err
}
if info.Body.Kind != "folder" {
isDir, err := f.isDir(info.Body.Kind, dirPath)
if err != nil {
return nil, err
}
if !isDir {
return nil, fs.ErrorIsFile
}
@@ -952,7 +959,7 @@ func (t *treeState) NextRecord() (fs.DirEntry, error) {
return nil, r.Error()
}
if fs.Config.LogLevel >= fs.LogLevelDebug {
if t.f.ci.LogLevel >= fs.LogLevelDebug {
ctime, _ := modTime.MarshalJSON()
fs.Debugf(t.f, "binDir %d.%d %q %q (%d) %s", t.level, itemType, t.currDir, name, size, ctime)
}
@@ -1222,7 +1229,7 @@ func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) error {
}
}
// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
// This is stored with the remote path given.
// It returns the destination Object and a possible error.
// Will only be called if src.Fs().Name() == f.Name()
@@ -1317,7 +1324,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, err
}
// Move src to this remote using server side move operations.
// Move src to this remote using server-side move operations.
// This is stored with the remote path given.
// It returns the destination Object and a possible error.
// Will only be called if src.Fs().Name() == f.Name()
@@ -1404,7 +1411,7 @@ func (f *Fs) moveItemBin(ctx context.Context, srcPath, dstPath, opName string) e
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
// Will only be called if src.Fs().Name() == f.Name()
// If it isn't possible then return fs.ErrorCantDirMove
// If destination exists then return fs.ErrorDirExists
@@ -1597,23 +1604,28 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
var (
fileBuf []byte
fileHash []byte
newHash []byte
trySpeedup bool
fileBuf []byte
fileHash []byte
newHash []byte
slowHash bool
localSrc bool
)
if srcObj := fs.UnWrapObjectInfo(src); srcObj != nil {
srcFeatures := srcObj.Fs().Features()
slowHash = srcFeatures.SlowHash
localSrc = srcFeatures.IsLocal
}
// Don't disturb the source if file fits in hash.
// Skip an extra speedup request if file fits in hash.
if size > mrhash.Size {
// Request hash from source.
// Try speedup if it's globally enabled but skip extra post
// request if file is small and fits in the metadata request
trySpeedup := o.fs.opt.SpeedupEnable && size > mrhash.Size
// Try to get the hash if it's instant
if trySpeedup && !slowHash {
if srcHash, err := src.Hash(ctx, MrHashType); err == nil && srcHash != "" {
fileHash, _ = mrhash.DecodeString(srcHash)
}
// Try speedup if it's globally enabled and source hash is available.
trySpeedup = o.fs.opt.SpeedupEnable
if trySpeedup && fileHash != nil {
if fileHash != nil {
if o.putByHash(ctx, fileHash, src, "source") {
return nil
}
@@ -1622,13 +1634,22 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// Need to calculate hash, check whether file is still eligible for speedup
if trySpeedup {
trySpeedup = o.fs.eligibleForSpeedup(o.Remote(), size, options...)
trySpeedup = trySpeedup && o.fs.eligibleForSpeedup(o.Remote(), size, options...)
// Attempt to put by hash if file is local and eligible
if trySpeedup && localSrc {
if srcHash, err := src.Hash(ctx, MrHashType); err == nil && srcHash != "" {
fileHash, _ = mrhash.DecodeString(srcHash)
}
if fileHash != nil && o.putByHash(ctx, fileHash, src, "localfs") {
return nil
}
// If local file hashing has failed, it's pointless to try anymore
trySpeedup = false
}
// Attempt to put by calculating hash in memory
if trySpeedup && size <= int64(o.fs.opt.SpeedupMaxMem) {
//fs.Debugf(o, "attempt to put by hash from memory")
fileBuf, err = ioutil.ReadAll(in)
if err != nil {
return err
@@ -1643,7 +1664,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Attempt to put by hash using a spool file
if trySpeedup {
tmpFs, err := fs.TemporaryLocalFs()
tmpFs, err := fs.TemporaryLocalFs(ctx)
if err != nil {
fs.Infof(tmpFs, "Failed to create spool FS: %v", err)
} else {
@@ -1758,6 +1779,7 @@ func (f *Fs) parseSpeedupPatterns(patternString string) (err error) {
return nil
}
// putByHash is a thin wrapper around addFileMetaData
func (o *Object) putByHash(ctx context.Context, mrHash []byte, info fs.ObjectInfo, method string) bool {
oNew := new(Object)
*oNew = *o
@@ -1861,30 +1883,30 @@ func (f *Fs) uploadShard(ctx context.Context) (string, error) {
return f.shardURL, nil
}
token, err := f.accessToken()
if err != nil {
return "", err
}
opts := rest.Opts{
Method: "GET",
Path: "/api/m1/dispatcher",
Parameters: url.Values{
"client_id": {api.OAuthClientID},
"access_token": {token},
},
RootURL: api.DispatchServerURL,
Method: "GET",
Path: "/u",
}
var info api.ShardInfoResponse
var (
res *http.Response
url string
err error
)
err = f.pacer.Call(func() (bool, error) {
res, err := f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(res, err, f, &opts)
res, err = f.srv.Call(ctx, &opts)
if err == nil {
url, err = readBodyWord(res)
}
return fserrors.ShouldRetry(err), err
})
if err != nil {
closeBody(res)
return "", err
}
f.shardURL = info.Body.Upload[0].URL
f.shardURL = url
f.shardExpiry = time.Now().Add(shardExpirySec * time.Second)
fs.Debugf(f, "new upload shard: %s", f.shardURL)
@@ -2116,7 +2138,18 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, err
}
start, end, partial := getTransferRange(o.size, options...)
start, end, partialRequest := getTransferRange(o.size, options...)
headers := map[string]string{
"Accept": "*/*",
"Content-Type": "application/octet-stream",
}
if partialRequest {
rangeStr := fmt.Sprintf("bytes=%d-%d", start, end-1)
headers["Range"] = rangeStr
// headers["Content-Range"] = rangeStr
headers["Accept-Ranges"] = "bytes"
}
// TODO: set custom timeouts
opts := rest.Opts{
@@ -2127,10 +2160,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
"client_id": {api.OAuthClientID},
"token": {token},
},
ExtraHeaders: map[string]string{
"Accept": "*/*",
"Range": fmt.Sprintf("bytes=%d-%d", start, end-1),
},
ExtraHeaders: headers,
}
var res *http.Response
@@ -2151,18 +2181,37 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, err
}
var hasher gohash.Hash
if !partial {
// Server should respond with Status 206 and Content-Range header to a range
// request. Status 200 (and no Content-Range) means a full-content response.
partialResponse := res.StatusCode == 206
var (
hasher gohash.Hash
wrapStream io.ReadCloser
)
if !partialResponse {
// Cannot check hash of partial download
hasher = mrhash.New()
}
wrapStream := &endHandler{
wrapStream = &endHandler{
ctx: ctx,
stream: res.Body,
hasher: hasher,
o: o,
server: server,
}
if partialRequest && !partialResponse {
fs.Debugf(o, "Server returned full content instead of range")
if start > 0 {
// Discard the beginning of the data
_, err = io.CopyN(ioutil.Discard, wrapStream, start)
if err != nil {
closeBody(res)
return nil, err
}
}
wrapStream = readers.NewLimitedReadCloser(wrapStream, end-start)
}
return wrapStream, nil
}
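
The added logic above copes with servers that ignore the Range header: when a partial request comes back as a plain 200, the start of the stream is discarded and the remainder is capped at end-start bytes (the diff uses readers.NewLimitedReadCloser from rclone's lib/readers for the cap). A rough standalone sketch of the same fallback over a plain io.Reader:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// rangeFallback emulates a byte range [start, end) over a full-content body.
func rangeFallback(body io.Reader, start, end int64) (io.Reader, error) {
	if start > 0 {
		// Throw away everything before the requested offset.
		if _, err := io.CopyN(ioutil.Discard, body, start); err != nil {
			return nil, err
		}
	}
	// Cap the remainder at the requested length.
	return io.LimitReader(body, end-start), nil
}

func main() {
	body := strings.NewReader("0123456789")
	r, _ := rangeFallback(body, 3, 7)
	out, _ := ioutil.ReadAll(r)
	fmt.Printf("%s\n", out) // "3456"
}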
@@ -2215,7 +2264,7 @@ func (e *endHandler) handle(err error) error {
return io.EOF
}
// serverPool backs server dispacher
// serverPool backs server dispatcher
type serverPool struct {
pool pendingServerMap
mu sync.Mutex
@@ -2330,7 +2379,7 @@ func (p *serverPool) addServer(url string, now time.Time) {
expiry := now.Add(p.expirySec * time.Second)
expiryStr := []byte("-")
if fs.Config.LogLevel >= fs.LogLevelInfo {
if p.fs.ci.LogLevel >= fs.LogLevelInfo {
expiryStr, _ = expiry.MarshalJSON()
}

View File

@@ -11,7 +11,7 @@ Improvements:
* Uploads could be done in parallel
* Downloads would be more efficient done in one go
* Uploads would be more efficient with bigger chunks
* Looks like mega can support server side copy, but it isn't implemented in go-mega
* Looks like mega can support server-side copy, but it isn't implemented in go-mega
* Upload can set modtime... - set as int64_t - can set ctime and mtime?
*/
@@ -180,7 +180,7 @@ func (f *Fs) readMetaDataForPath(remote string) (info *mega.Node, err error) {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -194,6 +194,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.Wrap(err, "couldn't decrypt password")
}
}
ci := fs.GetConfig(ctx)
// cache *mega.Mega on username so we can re-use and share
// them between remotes. They are expensive to make as they
@@ -204,8 +205,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
defer megaCacheMu.Unlock()
srv := megaCache[opt.User]
if srv == nil {
srv = mega.New().SetClient(fshttp.NewClient(fs.Config))
srv.SetRetries(fs.Config.LowLevelRetries) // let mega do the low level retries
srv = mega.New().SetClient(fshttp.NewClient(ctx))
srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
srv.SetLogger(func(format string, v ...interface{}) {
fs.Infof("*go-mega*", format, v...)
})
@@ -228,12 +229,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
root: root,
opt: *opt,
srv: srv,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
DuplicateFiles: true,
CanHaveEmptyDirectories: true,
}).Fill(f)
}).Fill(ctx, f)
// Find the root node and check if it is a file or not
_, err = f.findRoot(false)
@@ -699,7 +700,7 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
dstDirNode, err = dstFs.mkdir(absRoot, dstParent)
}
if err != nil {
return errors.Wrap(err, "server side move failed to make dst parent dir")
return errors.Wrap(err, "server-side move failed to make dst parent dir")
}
if srcRemote != "" {
@@ -712,7 +713,7 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
srcDirNode, err = f.findDir(absRoot, srcParent)
}
if err != nil {
return errors.Wrap(err, "server side move failed to lookup src parent dir")
return errors.Wrap(err, "server-side move failed to lookup src parent dir")
}
// move the object into its new directory if required
@@ -723,7 +724,7 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "server side move failed")
return errors.Wrap(err, "server-side move failed")
}
}
@@ -737,7 +738,7 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "server side rename failed")
return errors.Wrap(err, "server-side rename failed")
}
}
@@ -746,7 +747,7 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
return nil
}
// Move src to this remote using server side move operations.
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -781,7 +782,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//

View File

@@ -221,8 +221,8 @@ func (f *Fs) setRoot(root string) {
f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}
// NewFs contstructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// NewFs constructs an Fs from the path, bucket:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -241,7 +241,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
}).Fill(f)
}).Fill(ctx, f)
if f.rootBucket != "" && f.rootDirectory != "" {
od := buckets.getObjectData(f.rootBucket, f.rootDirectory)
if od != nil {
@@ -462,7 +462,7 @@ func (f *Fs) Precision() time.Duration {
return time.Nanosecond
}
// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -592,7 +592,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
data: data,
hash: "",
modTime: src.ModTime(ctx),
mimeType: fs.MimeType(ctx, o),
mimeType: fs.MimeType(ctx, src),
}
buckets.updateObjectData(bucket, bucketPath, o.od)
return nil

View File

@@ -253,8 +253,10 @@ type MoveItemRequest struct {
//CreateShareLinkRequest is the request to create a sharing link
//Always Type:view and Scope:anonymous for public sharing
type CreateShareLinkRequest struct {
Type string `json:"type"` //Link type in View, Edit or Embed
Scope string `json:"scope,omitempty"` //Optional. Scope in anonymousi, organization
Type string `json:"type"` // Link type in View, Edit or Embed
Scope string `json:"scope,omitempty"` // Scope in anonymous, organization
Password string `json:"password,omitempty"` // The password of the sharing link that is set by the creator. Optional and OneDrive Personal only.
Expiry *time.Time `json:"expirationDateTime,omitempty"` // A String with format of yyyy-MM-ddTHH:mm:ssZ of DateTime indicates the expiration time of the permission.
}
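
For illustration of the request struct above, marshalling a local mirror of it (not the api package itself) shows the wire shape and how the optional fields drop out when empty:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// shareLinkRequest copies the json tags of CreateShareLinkRequest above.
type shareLinkRequest struct {
	Type     string     `json:"type"`
	Scope    string     `json:"scope,omitempty"`
	Password string     `json:"password,omitempty"`
	Expiry   *time.Time `json:"expirationDateTime,omitempty"`
}

func main() {
	expiry := time.Date(2021, 3, 1, 0, 0, 0, 0, time.UTC)
	req := shareLinkRequest{Type: "view", Scope: "anonymous", Password: "s3cret", Expiry: &expiry}
	out, _ := json.Marshal(req)
	fmt.Println(string(out))
	// {"type":"view","scope":"anonymous","password":"s3cret","expirationDateTime":"2021-03-01T00:00:00Z"}

	// Empty optional fields are omitted entirely.
	out, _ = json.Marshal(shareLinkRequest{Type: "view"})
	fmt.Println(string(out)) // {"type":"view"}
}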
//CreateShareLinkResponse is the response from CreateShareLinkRequest
@@ -281,6 +283,7 @@ type CreateShareLinkResponse struct {
type AsyncOperationStatus struct {
PercentageComplete float64 `json:"percentageComplete"` // A float value between 0 and 100 that indicates the percentage complete.
Status string `json:"status"` // A string value that maps to an enumeration of possible values about the status of the job. "notStarted | inProgress | completed | updating | failed | deletePending | deleteFailed | waiting"
ErrorCode string `json:"errorCode"` // Not officially documented :(
}
// GetID returns a normalized ID of the item

View File

@@ -11,7 +11,9 @@ import (
"io"
"log"
"net/http"
"net/url"
"path"
"regexp"
"strconv"
"strings"
"sync"
@@ -45,7 +47,6 @@ const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
graphURL = "https://graph.microsoft.com/v1.0"
configDriveID = "drive_id"
configDriveType = "drive_type"
driveTypePersonal = "personal"
@@ -53,22 +54,40 @@ const (
driveTypeSharepoint = "documentLibrary"
defaultChunkSize = 10 * fs.MebiByte
chunkSizeMultiple = 320 * fs.KibiByte
regionGlobal = "global"
regionUS = "us"
regionDE = "de"
regionCN = "cn"
)
// Globals
var (
authPath = "/common/oauth2/v2.0/authorize"
tokenPath = "/common/oauth2/v2.0/token"
// Description of how to auth for this app for a business account
oauthConfig = &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: "https://login.microsoftonline.com/common/oauth2/v2.0/authorize",
TokenURL: "https://login.microsoftonline.com/common/oauth2/v2.0/token",
},
Scopes: []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access", "Sites.Read.All"},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
graphAPIEndpoint = map[string]string{
"global": "https://graph.microsoft.com",
"us": "https://graph.microsoft.us",
"de": "https://graph.microsoft.de",
"cn": "https://microsoftgraph.chinacloudapi.cn",
}
authEndpoint = map[string]string{
"global": "https://login.microsoftonline.com",
"us": "https://login.microsoftonline.us",
"de": "https://login.microsoftonline.de",
"cn": "https://login.chinacloudapi.cn",
}
// QuickXorHashType is the hash.Type for OneDrive
QuickXorHashType hash.Type
)
@@ -80,16 +99,22 @@ func init() {
Name: "onedrive",
Description: "Microsoft OneDrive",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
ctx := context.TODO()
err := oauthutil.Config("onedrive", name, m, oauthConfig, nil)
Config: func(ctx context.Context, name string, m configmap.Mapper) {
region, _ := m.Get("region")
graphURL := graphAPIEndpoint[region] + "/v1.0"
oauthConfig.Endpoint = oauth2.Endpoint{
AuthURL: authEndpoint[region] + authPath,
TokenURL: authEndpoint[region] + tokenPath,
}
ci := fs.GetConfig(ctx)
err := oauthutil.Config(ctx, "onedrive", name, m, oauthConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
return
}
// Stop if we are running non-interactive config
if fs.Config.AutoConfirm {
if ci.AutoConfirm {
return
}
@@ -111,7 +136,7 @@ func init() {
Sites []siteResource `json:"value"`
}
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure OneDrive: %v", err)
}
@@ -120,9 +145,18 @@ func init() {
var opts rest.Opts
var finalDriveID string
var siteID string
var relativePath string
switch config.Choose("Your choice",
[]string{"onedrive", "sharepoint", "driveid", "siteid", "search"},
[]string{"OneDrive Personal or Business", "Root Sharepoint site", "Type in driveID", "Type in SiteID", "Search a Sharepoint site"},
[]string{"onedrive", "sharepoint", "url", "search", "driveid", "siteid", "path"},
[]string{
"OneDrive Personal or Business",
"Root Sharepoint site",
"Sharepoint site name or URL (e.g. mysite or https://contoso.sharepoint.com/sites/mysite)",
"Search for a Sharepoint site",
"Type in driveID (advanced)",
"Type in SiteID (advanced)",
"Sharepoint server-relative path (advanced, e.g. /teams/hr)",
},
false) {
case "onedrive":
@@ -143,6 +177,20 @@ func init() {
case "siteid":
fmt.Printf("Paste your Site ID here> ")
siteID = config.ReadLine()
case "url":
fmt.Println("Example: \"https://contoso.sharepoint.com/sites/mysite\" or \"mysite\"")
fmt.Printf("Paste your Site URL here> ")
siteURL := config.ReadLine()
re := regexp.MustCompile(`https://.*\.sharepoint.com/sites/(.*)`)
match := re.FindStringSubmatch(siteURL)
if len(match) == 2 {
relativePath = "/sites/" + match[1]
} else {
relativePath = "/sites/" + siteURL
}
case "path":
fmt.Printf("Enter server-relative URL here> ")
relativePath = config.ReadLine()
case "search":
fmt.Printf("What to search for> ")
searchTerm := config.ReadLine()
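
The "url" choice above accepts either a full site URL or a bare site name: the regex pulls the trailing site name out of a full URL, and anything that does not match is treated as a site name directly. A tiny standalone check of that behaviour:

package main

import (
	"fmt"
	"regexp"
)

func siteRelativePath(siteURL string) string {
	re := regexp.MustCompile(`https://.*\.sharepoint.com/sites/(.*)`)
	if match := re.FindStringSubmatch(siteURL); len(match) == 2 {
		return "/sites/" + match[1]
	}
	return "/sites/" + siteURL
}

func main() {
	fmt.Println(siteRelativePath("https://contoso.sharepoint.com/sites/mysite")) // /sites/mysite
	fmt.Println(siteRelativePath("mysite"))                                      // /sites/mysite
}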
@@ -169,6 +217,21 @@ func init() {
}
}
// if we use server-relative URL for finding the drive
if relativePath != "" {
opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites/root:" + relativePath,
}
site := siteResource{}
_, err := srv.CallJSON(ctx, &opts, nil, &site)
if err != nil {
log.Fatalf("Failed to query available site by relative path: %v", err)
}
siteID = site.SiteID
}
// if we have a siteID we need to ask for the drives
if siteID != "" {
opts = rest.Opts{
@@ -233,7 +296,7 @@ func init() {
fmt.Printf("Found drive '%s' of type '%s', URL: %s\nIs that okay?\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL)
// This does not work, YET :)
if !config.ConfirmWithConfig(m, "config_drive_ok", true) {
if !config.ConfirmWithConfig(ctx, m, "config_drive_ok", true) {
log.Fatalf("Cancelled by user")
}
@@ -242,6 +305,25 @@ func init() {
config.SaveConfig()
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "region",
Help: "Choose national cloud region for OneDrive.",
Default: "global",
Examples: []fs.OptionExample{
{
Value: regionGlobal,
Help: "Microsoft Cloud Global",
}, {
Value: regionUS,
Help: "Microsoft Cloud for US Government",
}, {
Value: regionDE,
Help: "Microsoft Cloud Germany",
}, {
Value: regionCN,
Help: "Azure and Office 365 operated by 21Vianet in China",
},
},
}, {
Name: "chunk_size",
Help: `Chunk size to upload files with - must be multiple of 320k (327,680 bytes).
@@ -274,12 +356,11 @@ listing, set this option.`,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Allow server side operations (eg copy) to work across different onedrive configs.
Help: `Allow server-side operations (e.g. copy) to work across different onedrive configs.
This can be useful if you wish to do a server side copy between two
different Onedrives. Note that this isn't enabled by default
because it isn't easy to tell if it will work between any two
configurations.`,
This will only work if you are copying between two OneDrive *Personal* drives AND
the files to copy are already shared between them. In other cases, rclone will
fall back to normal copy (which will be slightly slower).`,
Advanced: true,
}, {
Name: "no_versions",
@@ -296,6 +377,41 @@ modification time and removes all but the last version.
**NB** Onedrive personal can't currently delete versions so don't use
this flag there.
`,
Advanced: true,
}, {
Name: "link_scope",
Default: "anonymous",
Help: `Set the scope of the links created by the link command.`,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "anonymous",
Help: "Anyone with the link has access, without needing to sign in. This may include people outside of your organization. Anonymous link support may be disabled by an administrator.",
}, {
Value: "organization",
Help: "Anyone signed into your organization (tenant) can use the link to get access. Only available in OneDrive for Business and SharePoint.",
}},
}, {
Name: "link_type",
Default: "view",
Help: `Set the type of the links created by the link command.`,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "view",
Help: "Creates a read-only link to the item.",
}, {
Value: "edit",
Help: "Creates a read-write link to the item.",
}, {
Value: "embed",
Help: "Creates an embeddable link to the item.",
}},
}, {
Name: "link_password",
Default: "",
Help: `Set the password for links created by the link command.
At the time of writing this only works with OneDrive personal paid accounts.
`,
Advanced: true,
}, {
@@ -311,8 +427,6 @@ this flag there.
// | (vertical line) -> '' // FULLWIDTH VERTICAL LINE
// ? (question mark) -> '' // FULLWIDTH QUESTION MARK
// * (asterisk) -> '' // FULLWIDTH ASTERISK
// # (number sign) -> '' // FULLWIDTH NUMBER SIGN
// % (percent sign) -> '' // FULLWIDTH PERCENT SIGN
//
// Folder names cannot begin with a tilde ('~')
// List of replaced characters:
@@ -337,7 +451,6 @@ this flag there.
// https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/addressing-driveitems?view=odsp-graph-online#path-encoding
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeHashPercent |
encoder.EncodeLeftSpace |
encoder.EncodeLeftTilde |
encoder.EncodeRightPeriod |
@@ -350,12 +463,16 @@ this flag there.
// Options defines the configuration for this backend
type Options struct {
Region string `config:"region"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DriveID string `config:"drive_id"`
DriveType string `config:"drive_type"`
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
NoVersions bool `config:"no_versions"`
LinkScope string `config:"link_scope"`
LinkType string `config:"link_type"`
LinkPassword string `config:"link_password"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -364,6 +481,7 @@ type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
ci *fs.ConfigInfo // global config
features *fs.Features // optional features
srv *rest.Client // the connection to the one drive server
dirCache *dircache.DirCache // Map of directory path to directory id
@@ -427,6 +545,9 @@ var retryErrorCodes = []int{
509, // Bandwidth Limit Exceeded
}
var gatewayTimeoutError sync.Once
var errAsyncJobAccessDenied = errors.New("async job failed - access denied")
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
@@ -451,6 +572,10 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
fs.Debugf(nil, "Too many requests. Trying again in %d seconds.", retryAfter)
}
}
case 504: // Gateway timeout
gatewayTimeoutError.Do(func() {
fs.Errorf(nil, "%v: upload chunks may be taking too long - try reducing --onedrive-chunk-size or decreasing --transfers", err)
})
case 507: // Insufficient Storage
return false, fserrors.FatalError(err)
}
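
The 504 handling above uses a package-level sync.Once so the chunk-size hint is logged only once no matter how many retries hit the timeout. The same pattern in isolation, independent of rclone's logging:

package main

import (
	"fmt"
	"sync"
)

var gatewayTimeoutWarning sync.Once

func warnOnce(attempt int) {
	// Only the first caller ever prints the hint; later calls are no-ops.
	gatewayTimeoutWarning.Do(func() {
		fmt.Println("upload chunks may be taking too long - consider a smaller chunk size")
	})
	fmt.Println("retrying attempt", attempt)
}

func main() {
	for i := 1; i <= 3; i++ {
		warnOnce(i)
	}
}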
@@ -468,10 +593,8 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
//
// If `relPath` == '', do not append the slash (See #3664)
func (f *Fs) readMetaDataForPathRelativeToID(ctx context.Context, normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
if relPath != "" {
relPath = "/" + withTrailingColon(rest.URLPathEscape(f.opt.Enc.FromStandardPath(relPath)))
}
opts := newOptsCall(normalizedID, "GET", ":"+relPath)
opts, _ := f.newOptsCallWithIDPath(normalizedID, relPath, true, "GET", "")
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(resp, err)
@@ -486,17 +609,8 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
if f.driveType != driveTypePersonal || firstSlashIndex == -1 {
var opts rest.Opts
if len(path) == 0 {
opts = rest.Opts{
Method: "GET",
Path: "/root",
}
} else {
opts = rest.Opts{
Method: "GET",
Path: "/root:/" + rest.URLPathEscape(f.opt.Enc.FromStandardPath(path)),
}
}
opts = f.newOptsCallWithPath(ctx, path, "GET", "")
opts.Path = strings.TrimSuffix(opts.Path, ":")
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(resp, err)
@@ -590,8 +704,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -608,27 +721,35 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.New("unable to get drive_id and drive_type - if you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
}
rootURL := graphAPIEndpoint[opt.Region] + "/v1.0" + "/drives/" + opt.DriveID
oauthConfig.Endpoint = oauth2.Endpoint{
AuthURL: authEndpoint[opt.Region] + authPath,
TokenURL: authEndpoint[opt.Region] + tokenPath,
}
root = parsePath(root)
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
oAuthClient, ts, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure OneDrive")
}
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
ci: ci,
driveID: opt.DriveID,
driveType: opt.DriveType,
srv: rest.NewClient(oAuthClient).SetRoot(graphURL + "/drives/" + opt.DriveID),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CaseInsensitive: true,
ReadMimeType: true,
CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
}).Fill(f)
}).Fill(ctx, f)
f.srv.SetErrorHandler(errorHandler)
// Renew the token in the background
@@ -741,7 +862,7 @@ func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, e
// fs.Debugf(f, "CreateDir(%q, %q)\n", dirID, leaf)
var resp *http.Response
var info *api.Item
opts := newOptsCall(dirID, "POST", "/children")
opts := f.newOptsCall(dirID, "POST", "/children")
mkdir := api.CreateItemRequest{
Name: f.opt.Enc.FromStandardName(leaf),
ConflictBehavior: "fail",
@@ -773,7 +894,7 @@ type listAllFn func(*api.Item) bool
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
// Top parameter asks for bigger pages of data
// https://dev.onedrive.com/odata/optional-query-parameters.htm
opts := newOptsCall(dirID, "GET", "/children?$top=1000")
opts := f.newOptsCall(dirID, "GET", "/children?$top=1000")
OUTER:
for {
var result api.ListChildrenResponse
@@ -912,7 +1033,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// deleteObject removes an object by ID
func (f *Fs) deleteObject(ctx context.Context, id string) error {
opts := newOptsCall(id, "DELETE", "")
opts := f.newOptsCall(id, "DELETE", "")
opts.NoResponse = true
return f.pacer.Call(func() (bool, error) {
@@ -967,7 +1088,7 @@ func (f *Fs) Precision() time.Duration {
// waitForJob waits for the job with status in url to complete
func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
deadline := time.Now().Add(fs.Config.Timeout)
deadline := time.Now().Add(f.ci.Timeout)
for time.Now().Before(deadline) {
var resp *http.Response
var err error
@@ -992,10 +1113,12 @@ func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
switch status.Status {
case "failed":
case "deleteFailed":
{
return errors.Errorf("%s: async operation returned %q", o.remote, status.Status)
if strings.HasPrefix(status.ErrorCode, "AccessDenied_") {
return errAsyncJobAccessDenied
}
fallthrough
case "deleteFailed":
return errors.Errorf("%s: async operation returned %q", o.remote, status.Status)
case "completed":
err = o.readMetaData(ctx)
return errors.Wrapf(err, "async operation completed but readMetaData failed")
@@ -1003,10 +1126,10 @@ func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
time.Sleep(1 * time.Second)
}
return errors.Errorf("async operation didn't complete after %v", fs.Config.Timeout)
return errors.Errorf("async operation didn't complete after %v", f.ci.Timeout)
}
// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -1021,6 +1144,17 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
if f.driveType != srcObj.fs.driveType {
fs.Debugf(src, "Can't server-side copy - drive types differ")
return nil, fs.ErrorCantCopy
}
// For OneDrive Business, this is only supported within the same drive
if f.driveType != driveTypePersonal && srcObj.fs.driveID != f.driveID {
fs.Debugf(src, "Can't server-side copy - cross-drive but not OneDrive Personal")
return nil, fs.ErrorCantCopy
}
err := srcObj.readMetaData(ctx)
if err != nil {
return nil, err
@@ -1042,11 +1176,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// Copy the object
opts := newOptsCall(srcObj.id, "POST", "/copy")
// The query param is a workaround for OneDrive Business for #4590
opts := f.newOptsCall(srcObj.id, "POST", "/copy?@microsoft.graph.conflictBehavior=replace")
opts.ExtraHeaders = map[string]string{"Prefer": "respond-async"}
opts.NoResponse = true
id, dstDriveID, _ := parseNormalizedID(directoryID)
id, dstDriveID, _ := f.parseNormalizedID(directoryID)
replacedLeaf := f.opt.Enc.FromStandardName(leaf)
copyReq := api.CopyItemRequest{
@@ -1073,6 +1208,10 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Wait for job to finish
err = f.waitForJob(ctx, location, dstObj)
if err == errAsyncJobAccessDenied {
fs.Debugf(src, "Server-side copy failed - file not shared between drives")
return nil, fs.ErrorCantCopy
}
if err != nil {
return nil, err
}
@@ -1097,7 +1236,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
}
// Move src to this remote using server side move operations.
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -1119,8 +1258,8 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, err
}
id, dstDriveID, _ := parseNormalizedID(directoryID)
_, srcObjDriveID, _ := parseNormalizedID(srcObj.id)
id, dstDriveID, _ := f.parseNormalizedID(directoryID)
_, srcObjDriveID, _ := f.parseNormalizedID(srcObj.id)
if f.canonicalDriveID(dstDriveID) != srcObj.fs.canonicalDriveID(srcObjDriveID) {
// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
@@ -1130,7 +1269,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// Move the object
opts := newOptsCall(srcObj.id, "PATCH", "")
opts := f.newOptsCall(srcObj.id, "PATCH", "")
move := api.MoveItemRequest{
Name: f.opt.Enc.FromStandardName(leaf),
@@ -1162,7 +1301,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1181,8 +1320,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return err
}
parsedDstDirID, dstDriveID, _ := parseNormalizedID(dstDirectoryID)
_, srcDriveID, _ := parseNormalizedID(srcID)
parsedDstDirID, dstDriveID, _ := f.parseNormalizedID(dstDirectoryID)
_, srcDriveID, _ := f.parseNormalizedID(srcID)
if f.canonicalDriveID(dstDriveID) != srcFs.canonicalDriveID(srcDriveID) {
// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
@@ -1198,7 +1337,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// Do the move
opts := newOptsCall(srcID, "PATCH", "")
opts := f.newOptsCall(srcID, "PATCH", "")
move := api.MoveItemRequest{
Name: f.opt.Enc.FromStandardName(dstLeaf),
ParentReference: &api.ItemReference{
@@ -1247,6 +1386,10 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return nil, errors.Wrap(err, "about failed")
}
q := drive.Quota
// On (some?) OneDrive SharePoint sites these are all 0, so return unknown usage in that case
if q.Total == 0 && q.Used == 0 && q.Deleted == 0 && q.Remaining == 0 {
return &fs.Usage{}, nil
}
usage = &fs.Usage{
Total: fs.NewUsageValue(q.Total), // quota of bytes that can be used
Used: fs.NewUsageValue(q.Used), // bytes in use
@@ -1270,11 +1413,17 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
if err != nil {
return "", err
}
opts := newOptsCall(info.GetID(), "POST", "/createLink")
opts := f.newOptsCall(info.GetID(), "POST", "/createLink")
share := api.CreateShareLinkRequest{
Type: "view",
Scope: "anonymous",
Type: f.opt.LinkType,
Scope: f.opt.LinkScope,
Password: f.opt.LinkPassword,
}
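// Durations of 100 years or more are rclone's way of saying "no expiry", so only send an
// explicit Expiry when a shorter lifetime was requested (descriptive note, not part of the patch).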
if expire < fs.Duration(time.Hour*24*365*100) {
expiry := time.Now().Add(time.Duration(expire))
share.Expiry = &expiry
}
var resp *http.Response
@@ -1292,7 +1441,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
// CleanUp deletes all the hidden files.
func (f *Fs) CleanUp(ctx context.Context) error {
token := make(chan struct{}, fs.Config.Checkers)
token := make(chan struct{}, f.ci.Checkers)
var wg sync.WaitGroup
err := walk.Walk(ctx, f, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
err = entries.ForObjectError(func(obj fs.Object) error {
@@ -1322,7 +1471,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
// Finds and removes any old versions for o
func (o *Object) deleteVersions(ctx context.Context) error {
opts := newOptsCall(o.id, "GET", "/versions")
opts := o.fs.newOptsCall(o.id, "GET", "/versions")
var versions api.VersionsResponse
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &versions)
@@ -1349,7 +1498,7 @@ func (o *Object) deleteVersion(ctx context.Context, ID string) error {
return nil
}
fs.Infof(o, "removing version %q", ID)
opts := newOptsCall(o.id, "DELETE", "/versions/"+ID)
opts := o.fs.newOptsCall(o.id, "DELETE", "/versions/"+ID)
opts.NoResponse = true
return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.Call(ctx, &opts)
@@ -1494,21 +1643,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// setModTime sets the modification time of the local fs object
func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item, error) {
var opts rest.Opts
leaf, directoryID, _ := o.fs.dirCache.FindPath(ctx, o.remote, false)
trueDirID, drive, rootURL := parseNormalizedID(directoryID)
if drive != "" {
opts = rest.Opts{
Method: "PATCH",
RootURL: rootURL,
Path: "/" + drive + "/items/" + trueDirID + ":/" + withTrailingColon(rest.URLPathEscape(o.fs.opt.Enc.FromStandardName(leaf))),
}
} else {
opts = rest.Opts{
Method: "PATCH",
Path: "/root:/" + withTrailingColon(rest.URLPathEscape(o.srvPath())),
}
}
opts := o.fs.newOptsCallWithPath(ctx, o.remote, "PATCH", "")
update := api.SetFileSystemInfo{
FileSystemInfo: api.FileSystemInfoFacet{
CreatedDateTime: api.Timestamp(modTime),
@@ -1555,7 +1690,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
fs.FixRangeOption(options, o.size)
var resp *http.Response
opts := newOptsCall(o.id, "GET", "/content")
opts := o.fs.newOptsCall(o.id, "GET", "/content")
opts.Options = options
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1575,22 +1710,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// createUploadSession creates an upload session for the object
func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (response *api.CreateUploadResponse, err error) {
leaf, directoryID, _ := o.fs.dirCache.FindPath(ctx, o.remote, false)
id, drive, rootURL := parseNormalizedID(directoryID)
var opts rest.Opts
if drive != "" {
opts = rest.Opts{
Method: "POST",
RootURL: rootURL,
Path: fmt.Sprintf("/%s/items/%s:/%s:/createUploadSession",
drive, id, rest.URLPathEscape(o.fs.opt.Enc.FromStandardName(leaf))),
}
} else {
opts = rest.Opts{
Method: "POST",
Path: "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/createUploadSession",
}
}
opts := o.fs.newOptsCallWithPath(ctx, o.remote, "POST", "/createUploadSession")
createRequest := api.CreateUploadRequest{}
createRequest.Item.FileSystemInfo.CreatedDateTime = api.Timestamp(modTime)
createRequest.Item.FileSystemInfo.LastModifiedDateTime = api.Timestamp(modTime)
@@ -1763,27 +1883,10 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
fs.Debugf(o, "Starting singlepart upload")
var resp *http.Response
var opts rest.Opts
leaf, directoryID, _ := o.fs.dirCache.FindPath(ctx, o.remote, false)
trueDirID, drive, rootURL := parseNormalizedID(directoryID)
if drive != "" {
opts = rest.Opts{
Method: "PUT",
RootURL: rootURL,
Path: "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(o.fs.opt.Enc.FromStandardName(leaf)) + ":/content",
ContentLength: &size,
Body: in,
Options: options,
}
} else {
opts = rest.Opts{
Method: "PUT",
Path: "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/content",
ContentLength: &size,
Body: in,
Options: options,
}
}
opts := o.fs.newOptsCallWithPath(ctx, o.remote, "PUT", "/content")
opts.ContentLength = &size
opts.Body = in
opts.Options = options
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &info)
@@ -1859,8 +1962,42 @@ func (o *Object) ID() string {
return o.id
}
func newOptsCall(normalizedID string, method string, route string) (opts rest.Opts) {
id, drive, rootURL := parseNormalizedID(normalizedID)
/*
* URL Build routine area start
* 1. In this area, region-related URL rewrites are applied. As the API is a black box,
* we cannot thoroughly test this part, so please be extremely careful when changing it.
* 2. If possible, please don't introduce region-related code elsewhere; patch these helper functions instead.
* 3. To avoid region-related issues, please don't build rest.Opts from scratch by hand.
* Instead, use these helper functions and customize the URL afterwards if needed.
*
* Currently, 21Vianet's API differs in the following places:
* - https://{Endpoint}/drives/{driveID}/items/{leaf}:/{route}
* - this API doesn't work (it gives an invalid request)
* - it can be replaced with the following APIs:
* - https://{Endpoint}/drives/{driveID}/items/children('{leaf}')/{route}
* - however, this API does NOT support a multi-level leaf like a/b/c
* - https://{Endpoint}/drives/{driveID}/items/children('@a1')/{route}?@a1=URLEncode("'{leaf}'")
* - this API does support a multi-level leaf like a/b/c
* - https://{Endpoint}/drives/{driveID}/root/children('@a1')/{route}?@a1=URLEncode({path})
* - same as above
*/
// parseNormalizedID parses a normalized ID (may be in the form `driveID#itemID` or just `itemID`)
// and returns itemID, driveID, rootURL.
// Such a normalized ID can come from (*Item).GetID()
func (f *Fs) parseNormalizedID(ID string) (string, string, string) {
rootURL := graphAPIEndpoint[f.opt.Region] + "/v1.0/drives"
if strings.Index(ID, "#") >= 0 {
s := strings.Split(ID, "#")
return s[1], s[0], rootURL
}
return ID, "", ""
}
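As a rough standalone illustration of the driveID#itemID convention handled above (the IDs below are invented, and this sketch only mirrors the string handling, not the per-region endpoint lookup):

package main

import (
    "fmt"
    "strings"
)

// splitNormalizedID mirrors the convention used by parseNormalizedID: an ID of the
// form "driveID#itemID" splits into its two parts, otherwise the drive ID is empty.
func splitNormalizedID(id string) (itemID, driveID string) {
    if i := strings.Index(id, "#"); i >= 0 {
        return id[i+1:], id[:i]
    }
    return id, ""
}

func main() {
    fmt.Println(splitNormalizedID("b!exampleDrive#01EXAMPLEITEM")) // -> 01EXAMPLEITEM b!exampleDrive
    fmt.Println(splitNormalizedID("01EXAMPLEITEM"))                // -> 01EXAMPLEITEM (no drive ID)
}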
// newOptsCall builds the rest.Opts structure from *a normalized ID (driveID#fileID, or simply fileID)*
// using the URL template https://{Endpoint}/drives/{driveID}/items/{itemID}/{route}
func (f *Fs) newOptsCall(normalizedID string, method string, route string) (opts rest.Opts) {
id, drive, rootURL := f.parseNormalizedID(normalizedID)
if drive != "" {
return rest.Opts{
@@ -1875,17 +2012,91 @@ func newOptsCall(normalizedID string, method string, route string) (opts rest.Op
}
}
// parseNormalizedID parses a normalized ID (may be in the form `driveID#itemID` or just `itemID`)
// and returns itemID, driveID, rootURL.
// Such a normalized ID can come from (*Item).GetID()
func parseNormalizedID(ID string) (string, string, string) {
if strings.Index(ID, "#") >= 0 {
s := strings.Split(ID, "#")
return s[1], s[0], graphURL + "/drives"
}
return ID, "", ""
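// escapeSingleQuote doubles any single quotes in str (e.g. it's -> it''s), which is the
// escaping expected inside the OData-style children('...') literals built below.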
func escapeSingleQuote(str string) string {
return strings.ReplaceAll(str, "'", "''")
}
// newOptsCallWithIDPath builds the rest.Opts structure from *a normalized ID (driveID#fileID, or simply fileID) and a leaf*
// using the URL template https://{Endpoint}/drives/{driveID}/items/{leaf}:/{route} (for international OneDrive)
// or https://{Endpoint}/drives/{driveID}/items/children('{leaf}')/{route}
// and https://{Endpoint}/drives/{driveID}/items/children('@a1')/{route}?@a1=URLEncode("'{leaf}'") (for 21Vianet)
// If isPath is false, this function only works when the leaf is "" or a child name (i.e. it doesn't accept a multi-level leaf).
// If isPath is true, a multi-level leaf like a/b/c can be passed.
func (f *Fs) newOptsCallWithIDPath(normalizedID string, leaf string, isPath bool, method string, route string) (opts rest.Opts, ok bool) {
encoder := f.opt.Enc.FromStandardName
if isPath {
encoder = f.opt.Enc.FromStandardPath
}
trueDirID, drive, rootURL := f.parseNormalizedID(normalizedID)
if drive == "" {
trueDirID = normalizedID
}
entity := "/items/" + trueDirID + ":/" + withTrailingColon(rest.URLPathEscape(encoder(leaf))) + route
if f.opt.Region == regionCN {
if isPath {
entity = "/items/" + trueDirID + "/children('@a1')" + route + "?@a1=" + url.QueryEscape("'"+encoder(escapeSingleQuote(leaf))+"'")
} else {
entity = "/items/" + trueDirID + "/children('" + rest.URLPathEscape(encoder(escapeSingleQuote(leaf))) + "')" + route
}
}
if drive == "" {
ok = false
opts = rest.Opts{
Method: method,
Path: entity,
}
return
}
ok = true
opts = rest.Opts{
Method: method,
RootURL: rootURL,
Path: "/" + drive + entity,
}
return
}
// newOptsCallWithRootPath builds the rest.Opts structure from an *absolute path starting from the root*
// using the URL template https://{Endpoint}/drives/{driveID}/root:/{path}:/{route}
// or https://{Endpoint}/drives/{driveID}/root/children('@a1')/{route}?@a1=URLEncode({path})
func (f *Fs) newOptsCallWithRootPath(path string, method string, route string) (opts rest.Opts) {
path = strings.TrimSuffix(path, "/")
newURL := "/root:/" + withTrailingColon(rest.URLPathEscape(f.opt.Enc.FromStandardPath(path))) + route
if f.opt.Region == regionCN {
newURL = "/root/children('@a1')" + route + "?@a1=" + url.QueryEscape("'"+escapeSingleQuote(f.opt.Enc.FromStandardPath(path))+"'")
}
return rest.Opts{
Method: method,
Path: newURL,
}
}
// newOptsCallWithPath builds the rest.Opts intelligently.
// It first tries to resolve the path using the dircache, which enables support for "Shared with me" files.
// If the parent is present in the cache, the ID + Path variant is used, otherwise it falls back to the RootPath variant.
func (f *Fs) newOptsCallWithPath(ctx context.Context, path string, method string, route string) (opts rest.Opts) {
if path == "" {
url := "/root" + route
return rest.Opts{
Method: method,
Path: url,
}
}
// find dircache
leaf, directoryID, _ := f.dirCache.FindPath(ctx, path, false)
// try to use IDPath variant first
if opts, ok := f.newOptsCallWithIDPath(directoryID, leaf, false, method, route); ok {
return opts
}
// fall back to the RootPath variant
return f.newOptsCallWithRootPath(path, method, route)
}
/*
* URL Build routine area end
*/
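As a rough, self-contained sketch of the two URL shapes the helpers above produce (the host, drive ID, parent ID and leaf are invented, and url.PathEscape stands in for rest.URLPathEscape):

package main

import (
    "fmt"
    "net/url"
)

func main() {
    const (
        endpoint = "https://graph.example.com/v1.0/drives" // placeholder Graph endpoint
        driveID  = "b!exampleDrive"                        // invented
        parentID = "01EXAMPLEPARENT"                       // invented
        leaf     = "report 2021.txt"                       // invented
    )

    // Global OneDrive: the child is addressed as a colon-delimited path segment.
    global := endpoint + "/" + driveID + "/items/" + parentID +
        ":/" + url.PathEscape(leaf) + ":" + "/content"

    // 21Vianet (regionCN): the colon form is rejected, so the child is addressed via
    // children('@a1') with the quoted leaf passed as an aliased query parameter.
    cn := endpoint + "/" + driveID + "/items/" + parentID +
        "/children('@a1')" + "/content" + "?@a1=" + url.QueryEscape("'"+leaf+"'")

    fmt.Println(global)
    fmt.Println(cn)
}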
// Returns the canonical form of the driveID
func (f *Fs) canonicalDriveID(driveID string) (canonicalDriveID string) {
if driveID == "" {


@@ -5,6 +5,7 @@ import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
@@ -19,6 +20,20 @@ func TestIntegration(t *testing.T) {
})
}
// TestIntegrationCn runs integration tests against the remote
func TestIntegrationCn(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: "TestOneDriveCn:",
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
CeilChunkSize: fstests.NextMultipleOf(chunkSizeMultiple),
},
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}


@@ -164,8 +164,7 @@ func (f *Fs) DirCacheFlush() {
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -188,8 +187,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
srv: rest.NewClient(fshttp.NewClient(ctx)).SetErrorHandler(errorHandler),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.dirCache = dircache.New(root, "0", f)
@@ -217,7 +216,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.features = (&fs.Features{
CaseInsensitive: true,
CanHaveEmptyDirectories: true,
}).Fill(f)
}).Fill(ctx, f)
// Find the current root
err = f.dirCache.FindRoot(ctx, false)
@@ -338,7 +337,7 @@ func (f *Fs) Precision() time.Duration {
return time.Second
}
// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -402,7 +401,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil
}
// Move src to this remote using server side move operations.
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -460,7 +459,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -646,7 +645,6 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
400, // Bad request (seen in "Next token is expired")
401, // Unauthorized (seen in "Token has expired")
408, // Request Timeout
423, // Locked - get this on folders sometimes
@@ -723,7 +721,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
for _, folder := range folderList.Folders {
// fs.Debugf(nil, "Folder: %s (%s)", folder.Name, folder.FolderID)
if leaf == folder.Name {
if strings.EqualFold(leaf, folder.Name) {
// found
return folder.FolderID, true, nil
}


@@ -96,7 +96,7 @@ func (i *Item) ModTime() (t time.Time) {
return t
}
// ItemResult is returned from the /listfolder, /createfolder, /deletefolder, /deletefile etc methods
// ItemResult is returned from the /listfolder, /createfolder, /deletefolder, /deletefile, etc. methods
type ItemResult struct {
Error
Metadata Item `json:"metadata"`
@@ -104,8 +104,9 @@ type ItemResult struct {
// Hashes contains the supported hashes
type Hashes struct {
SHA1 string `json:"sha1"`
MD5 string `json:"md5"`
SHA1 string `json:"sha1"`
MD5 string `json:"md5"`
SHA256 string `json:"sha256"`
}
// UploadFileResponse is the response from /uploadfile


@@ -72,7 +72,7 @@ func init() {
Name: "pcloud",
Description: "Pcloud",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
Config: func(ctx context.Context, name string, m configmap.Mapper) {
optc := new(Options)
err := configstruct.Set(m, optc)
if err != nil {
@@ -98,7 +98,7 @@ func init() {
CheckAuth: checkAuth,
StateBlankOK: true, // pCloud seems to drop the state parameter now - see #4210
}
err = oauthutil.Config("pcloud", name, m, oauthConfig, &opt)
err = oauthutil.Config(ctx, "pcloud", name, m, oauthConfig, &opt)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
@@ -219,7 +219,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
// Check if it is an api.Error
if apiErr, ok := err.(*api.Error); ok {
// See https://docs.pcloud.com/errors/ for error treatment
// Errors are classified as 1xxx, 2xxx etc
// Errors are classified as 1xxx, 2xxx, etc.
switch apiErr.Result / 1000 {
case 4: // 4xxx: rate limiting
doRetry = true
@@ -280,8 +280,7 @@ func errorHandler(resp *http.Response) error {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -289,7 +288,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}
root = parsePath(root)
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
oAuthClient, ts, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Pcloud")
}
@@ -300,12 +299,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
root: root,
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot("https://" + opt.Hostname),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CaseInsensitive: false,
CanHaveEmptyDirectories: true,
}).Fill(f)
}).Fill(ctx, f)
f.srv.SetErrorHandler(errorHandler)
// Renew the token in the background
@@ -622,7 +621,7 @@ func (f *Fs) Precision() time.Duration {
return time.Second
}
// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -705,7 +704,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
})
}
// Move src to this remote using server side move operations.
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -755,7 +754,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -885,6 +884,13 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
// EU region supports SHA1 and SHA256 (but rclone doesn't
// support SHA256 yet).
//
// https://forum.rclone.org/t/pcloud-to-local-no-hashes-in-common/19440
if f.opt.Hostname == "eapi.pcloud.com" {
return hash.Set(hash.SHA1)
}
return hash.Set(hash.MD5 | hash.SHA1)
}
@@ -1111,7 +1117,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Method: "PUT",
Path: "/uploadfile",
Body: in,
ContentType: fs.MimeType(ctx, o),
ContentType: fs.MimeType(ctx, src),
ContentLength: &size,
Parameters: url.Values{},
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
@@ -1125,7 +1131,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Special treatment for a 0 length upload. This doesn't work
// with PUT even with Content-Length set (by setting
// opts.Body=0), so upload it as a multpart form POST with
// opts.Body=0), so upload it as a multipart form POST with
// Content-Length set.
if size == 0 {
formReader, contentType, overhead, err := rest.MultipartUpload(in, opts.Parameters, "content", leaf)


@@ -78,8 +78,8 @@ func init() {
Name: "premiumizeme",
Description: "premiumize.me",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
err := oauthutil.Config("premiumizeme", name, m, oauthConfig, nil)
Config: func(ctx context.Context, name string, m configmap.Mapper) {
err := oauthutil.Config(ctx, "premiumizeme", name, m, oauthConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
@@ -234,8 +234,7 @@ func (f *Fs) baseParams() url.Values {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -248,12 +247,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
var client *http.Client
var ts *oauthutil.TokenSource
if opt.APIKey == "" {
client, ts, err = oauthutil.NewClient(name, m, oauthConfig)
client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure premiumize.me")
}
} else {
client = fshttp.NewClient(fs.Config)
client = fshttp.NewClient(ctx)
}
f := &Fs{
@@ -261,13 +260,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
root: root,
opt: *opt,
srv: rest.NewClient(client).SetRoot(rootURL),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CaseInsensitive: true,
CanHaveEmptyDirectories: true,
ReadMimeType: true,
}).Fill(f)
}).Fill(ctx, f)
f.srv.SetErrorHandler(errorHandler)
// Renew the token in the background
@@ -303,7 +302,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
return nil, err
}
f.features.Fill(&tempF)
f.features.Fill(ctx, &tempF)
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182
@@ -346,7 +345,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
if item.Name == leaf {
if strings.EqualFold(item.Name, leaf) {
pathIDOut = item.ID
return true
}
@@ -682,7 +681,7 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
return nil
}
// Move src to this remote using server side move operations.
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -718,7 +717,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//


@@ -68,7 +68,7 @@ func parsePath(path string) (root string) {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (f fs.Fs, err error) {
// defer log.Trace(name, "root=%v", root)("f=%+v, err=%v", &f, &err)
// Parse config into Options struct
opt := new(Options)
@@ -77,8 +77,8 @@ func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
return nil, err
}
root = parsePath(root)
httpClient := fshttp.NewClient(fs.Config)
oAuthClient, _, err := oauthutil.NewClientWithBaseClient(name, m, putioConfig, httpClient)
httpClient := fshttp.NewClient(ctx)
oAuthClient, _, err := oauthutil.NewClientWithBaseClient(ctx, name, m, putioConfig, httpClient)
if err != nil {
return nil, errors.Wrap(err, "failed to configure putio")
}
@@ -86,7 +86,7 @@ func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
name: name,
root: root,
opt: *opt,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
client: putio.NewClient(oAuthClient),
httpClient: httpClient,
oAuthClient: oAuthClient,
@@ -95,9 +95,8 @@ func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
DuplicateFiles: true,
ReadMimeType: true,
CanHaveEmptyDirectories: true,
}).Fill(p)
}).Fill(ctx, p)
p.dirCache = dircache.New(root, "0", p)
ctx := context.Background()
// Find the current root
err = p.dirCache.FindRoot(ctx, false)
if err != nil {
@@ -236,10 +235,10 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
// defer log.Trace(f, "src=%+v", src)("o=%+v, err=%v", &o, &err)
exisitingObj, err := f.NewObject(ctx, src.Remote())
existingObj, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src, options...)
@@ -283,11 +282,10 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID string, modTime time.Time, options []fs.OpenOption) (location string, err error) {
// defer log.Trace(f, "name=%v, size=%v, parentID=%v, modTime=%v", name, size, parentID, modTime.String())("location=%v, err=%v", location, &err)
err = f.pacer.Call(func() (bool, error) {
req, err := http.NewRequest("POST", "https://upload.put.io/files/", nil)
req, err := http.NewRequestWithContext(ctx, "POST", "https://upload.put.io/files/", nil)
if err != nil {
return false, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.Header.Set("tus-resumable", "1.0.0")
req.Header.Set("upload-length", strconv.FormatInt(size, 10))
b64name := base64.StdEncoding.EncodeToString([]byte(f.opt.Enc.FromStandardName(name)))
@@ -429,21 +427,19 @@ func (f *Fs) transferChunk(ctx context.Context, location string, start int64, ch
}
func (f *Fs) makeUploadHeadRequest(ctx context.Context, location string) (*http.Request, error) {
req, err := http.NewRequest("HEAD", location, nil)
req, err := http.NewRequestWithContext(ctx, "HEAD", location, nil)
if err != nil {
return nil, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.Header.Set("tus-resumable", "1.0.0")
return req, nil
}
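A minimal standalone sketch of the pattern these helpers now use with go1.13 as the minimum version: the context is attached when the request is constructed rather than via req.WithContext afterwards (the URL below is invented):

package main

import (
    "context"
    "fmt"
    "net/http"
    "time"
)

func main() {
    // Cancelling this context aborts the request, including any in-flight body transfer.
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    req, err := http.NewRequestWithContext(ctx, http.MethodHead, "https://upload.example.com/files/123", nil)
    if err != nil {
        fmt.Println("building request failed:", err)
        return
    }
    req.Header.Set("tus-resumable", "1.0.0")

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        fmt.Println("request failed:", err)
        return
    }
    defer resp.Body.Close()
    fmt.Println("status:", resp.Status)
}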
func (f *Fs) makeUploadPatchRequest(ctx context.Context, location string, in io.Reader, offset, length int64) (*http.Request, error) {
req, err := http.NewRequest("PATCH", location, in)
req, err := http.NewRequestWithContext(ctx, "PATCH", location, in)
if err != nil {
return nil, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.Header.Set("tus-resumable", "1.0.0")
req.Header.Set("upload-offset", strconv.FormatInt(offset, 10))
req.Header.Set("content-length", strconv.FormatInt(length, 10))
@@ -525,7 +521,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
return f.purgeCheck(ctx, dir, false)
}
// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -564,7 +560,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Objec
return f.NewObject(ctx, remote)
}
// Move src to this remote using server side move operations.
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -604,7 +600,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (o fs.Objec
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//


@@ -115,7 +115,7 @@ func (o *Object) MimeType(ctx context.Context) string {
// setMetadataFromEntry sets the fs data from a putio.File
//
// This isn't a complete set of metadata and has an inacurate date
// This isn't a complete set of metadata and has an inaccurate date
func (o *Object) setMetadataFromEntry(info putio.File) error {
o.file = &info
o.modtime = info.UpdatedAt.Time
@@ -229,11 +229,10 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var resp *http.Response
headers := fs.OpenOptionHeaders(options)
err = o.fs.pacer.Call(func() (bool, error) {
req, err := http.NewRequest(http.MethodGet, storageURL, nil)
req, err := http.NewRequestWithContext(ctx, http.MethodGet, storageURL, nil)
if err != nil {
return shouldRetry(err)
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.Header.Set("User-Agent", o.fs.client.UserAgent)
// merge headers with extra headers


@@ -1,6 +1,7 @@
package putio
import (
"context"
"log"
"regexp"
"time"
@@ -59,11 +60,11 @@ func init() {
Name: "putio",
Description: "Put.io",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
Config: func(ctx context.Context, name string, m configmap.Mapper) {
opt := oauthutil.Options{
NoOffline: true,
}
err := oauthutil.Config("putio", name, m, putioConfig, &opt)
err := oauthutil.Config(ctx, "putio", name, m, putioConfig, &opt)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}


@@ -93,7 +93,7 @@ as multipart uploads using this chunk size.
Note that "--qingstor-upload-concurrency" chunks of this size are buffered
in memory per transfer.
If you are transferring large files over high speed links and you have
If you are transferring large files over high-speed links and you have
enough memory, then increasing this will speed up the transfers.`,
Default: minChunkSize,
Advanced: true,
@@ -104,10 +104,10 @@ enough memory, then increasing this will speed up the transfers.`,
This is the number of chunks of the same file that are uploaded
concurrently.
NB if you set this to > 1 then the checksums of multpart uploads
NB if you set this to > 1 then the checksums of multipart uploads
become corrupted (the uploads themselves are not corrupted though).
If you are uploading small numbers of large file over high speed link
If you are uploading small numbers of large files over high-speed links
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
Default: 1,
@@ -207,7 +207,7 @@ func (o *Object) split() (bucket, bucketPath string) {
func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
/*
Pattern to match an endpoint,
eg: "http(s)://qingstor.com:443" --> "http(s)", "qingstor.com", 443
e.g.: "http(s)://qingstor.com:443" --> "http(s)", "qingstor.com", 443
"http(s)//qingstor.com" --> "http(s)", "qingstor.com", ""
"qingstor.com" --> "", "qingstor.com", ""
*/
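A rough stand-in (not the backend's actual implementation) showing one way the documented endpoint forms could be split into protocol, host and port:

package main

import (
    "fmt"
    "regexp"
)

// endpointRe accepts "https://host:443", "http://host" and a bare "host".
var endpointRe = regexp.MustCompile(`^(?:(https?)://)?([^:/]+)(?::(\d+))?$`)

func parseEndpoint(s string) (protocol, host, port string, ok bool) {
    m := endpointRe.FindStringSubmatch(s)
    if m == nil {
        return "", "", "", false
    }
    return m[1], m[2], m[3], true
}

func main() {
    fmt.Println(parseEndpoint("https://qingstor.com:443")) // https qingstor.com 443 true
    fmt.Println(parseEndpoint("http://qingstor.com"))      // http qingstor.com (no port) true
    fmt.Println(parseEndpoint("qingstor.com"))             // (no protocol) qingstor.com (no port) true
}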
@@ -228,7 +228,7 @@ func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
}
// qsConnection makes a connection to qingstor
func qsServiceConnection(opt *Options) (*qs.Service, error) {
func qsServiceConnection(ctx context.Context, opt *Options) (*qs.Service, error) {
accessKeyID := opt.AccessKeyID
secretAccessKey := opt.SecretAccessKey
@@ -277,7 +277,7 @@ func qsServiceConnection(opt *Options) (*qs.Service, error) {
cf.Host = host
cf.Port = port
// unsupported in v3.1: cf.ConnectionRetries = opt.ConnectionRetries
cf.Connection = fshttp.NewClient(fs.Config)
cf.Connection = fshttp.NewClient(ctx)
return qs.Init(cf)
}
@@ -319,7 +319,7 @@ func (f *Fs) setRoot(root string) {
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -334,7 +334,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, errors.Wrap(err, "qingstor: upload cutoff")
}
svc, err := qsServiceConnection(opt)
svc, err := qsServiceConnection(ctx, opt)
if err != nil {
return nil, err
}
@@ -357,7 +357,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
BucketBased: true,
BucketBasedRootOK: true,
SlowModTime: true,
}).Fill(f)
}).Fill(ctx, f)
if f.rootBucket != "" && f.rootDirectory != "" {
// Check to see if the object exists
@@ -428,7 +428,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
return fsObj, fsObj.Update(ctx, in, src, options...)
}
// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -872,11 +872,12 @@ func (f *Fs) cleanUpBucket(ctx context.Context, bucket string) (err error) {
if err != nil {
return err
}
maxLimit := int(listLimitSize)
// maxLimit := int(listLimitSize)
var marker *string
for {
req := qs.ListMultipartUploadsInput{
Limit: &maxLimit,
// The default is 200 but this errors if more than 200 is put in so leave at the default
// Limit: &maxLimit,
KeyMarker: marker,
}
var resp *qs.ListMultipartUploadsOutput
@@ -927,7 +928,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
}
for _, entry := range entries {
cleanErr := f.cleanUpBucket(ctx, f.opt.Enc.FromStandardName(entry.Remote()))
if err != nil {
if cleanErr != nil {
fs.Errorf(f, "Failed to cleanup bucket: %q", cleanErr)
err = cleanErr
}


@@ -5,6 +5,7 @@ import (
"bytes"
"context"
"crypto/md5"
"crypto/tls"
"encoding/base64"
"encoding/hex"
"encoding/xml"
@@ -32,7 +33,7 @@ import (
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/ncw/swift"
"github.com/ncw/swift/v2"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -58,7 +59,7 @@ import (
func init() {
fs.Register(&fs.RegInfo{
Name: "s3",
Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)",
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, and Tencent COS",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
@@ -94,6 +95,9 @@ func init() {
}, {
Value: "StackPath",
Help: "StackPath Object Storage",
}, {
Value: "TencentCOS",
Help: "Tencent Cloud Object Storage (COS)",
}, {
Value: "Wasabi",
Help: "Wasabi Object Storage",
@@ -119,21 +123,24 @@ func init() {
Name: "secret_access_key",
Help: "AWS Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
}, {
// References:
// 1. https://docs.aws.amazon.com/general/latest/gr/rande.html
// 2. https://docs.aws.amazon.com/general/latest/gr/s3.html
Name: "region",
Help: "Region to connect to.",
Provider: "AWS",
Examples: []fs.OptionExample{{
Value: "us-east-1",
Help: "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia or Pacific Northwest.\nLeave location constraint empty.",
Help: "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia, or Pacific Northwest.\nLeave location constraint empty.",
}, {
Value: "us-east-2",
Help: "US East (Ohio) Region\nNeeds location constraint us-east-2.",
}, {
Value: "us-west-2",
Help: "US West (Oregon) Region\nNeeds location constraint us-west-2.",
}, {
Value: "us-west-1",
Help: "US West (Northern California) Region\nNeeds location constraint us-west-1.",
}, {
Value: "us-west-2",
Help: "US West (Oregon) Region\nNeeds location constraint us-west-2.",
}, {
Value: "ca-central-1",
Help: "Canada (Central) Region\nNeeds location constraint ca-central-1.",
@@ -143,9 +150,15 @@ func init() {
}, {
Value: "eu-west-2",
Help: "EU (London) Region\nNeeds location constraint eu-west-2.",
}, {
Value: "eu-west-3",
Help: "EU (Paris) Region\nNeeds location constraint eu-west-3.",
}, {
Value: "eu-north-1",
Help: "EU (Stockholm) Region\nNeeds location constraint eu-north-1.",
}, {
Value: "eu-south-1",
Help: "EU (Milan) Region\nNeeds location constraint eu-south-1.",
}, {
Value: "eu-central-1",
Help: "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.",
@@ -161,15 +174,36 @@ func init() {
}, {
Value: "ap-northeast-2",
Help: "Asia Pacific (Seoul)\nNeeds location constraint ap-northeast-2.",
}, {
Value: "ap-northeast-3",
Help: "Asia Pacific (Osaka-Local)\nNeeds location constraint ap-northeast-3.",
}, {
Value: "ap-south-1",
Help: "Asia Pacific (Mumbai)\nNeeds location constraint ap-south-1.",
}, {
Value: "ap-east-1",
Help: "Asia Patific (Hong Kong) Region\nNeeds location constraint ap-east-1.",
Help: "Asia Pacific (Hong Kong) Region\nNeeds location constraint ap-east-1.",
}, {
Value: "sa-east-1",
Help: "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
}, {
Value: "me-south-1",
Help: "Middle East (Bahrain) Region\nNeeds location constraint me-south-1.",
}, {
Value: "af-south-1",
Help: "Africa (Cape Town) Region\nNeeds location constraint af-south-1.",
}, {
Value: "cn-north-1",
Help: "China (Beijing) Region\nNeeds location constraint cn-north-1.",
}, {
Value: "cn-northwest-1",
Help: "China (Ningxia) Region\nNeeds location constraint cn-northwest-1.",
}, {
Value: "us-gov-east-1",
Help: "AWS GovCloud (US-East) Region\nNeeds location constraint us-gov-east-1.",
}, {
Value: "us-gov-west-1",
Help: "AWS GovCloud (US) Region\nNeeds location constraint us-gov-west-1.",
}},
}, {
Name: "region",
@@ -185,13 +219,13 @@ func init() {
}, {
Name: "region",
Help: "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.",
Provider: "!AWS,Alibaba,Scaleway",
Provider: "!AWS,Alibaba,Scaleway,TencentCOS",
Examples: []fs.OptionExample{{
Value: "",
Help: "Use this if unsure. Will use v4 signatures and an empty region.",
}, {
Value: "other-v2-signature",
Help: "Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.",
Help: "Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH.",
}},
}, {
Name: "endpoint",
@@ -476,10 +510,73 @@ func init() {
Value: "s3.eu-central-1.stackpathstorage.com",
Help: "EU Endpoint",
}},
}, {
// cos endpoints: https://intl.cloud.tencent.com/document/product/436/6224
Name: "endpoint",
Help: "Endpoint for Tencent COS API.",
Provider: "TencentCOS",
Examples: []fs.OptionExample{{
Value: "cos.ap-beijing.myqcloud.com",
Help: "Beijing Region.",
}, {
Value: "cos.ap-nanjing.myqcloud.com",
Help: "Nanjing Region.",
}, {
Value: "cos.ap-shanghai.myqcloud.com",
Help: "Shanghai Region.",
}, {
Value: "cos.ap-guangzhou.myqcloud.com",
Help: "Guangzhou Region.",
}, {
Value: "cos.ap-chengdu.myqcloud.com",
Help: "Chengdu Region.",
}, {
Value: "cos.ap-chongqing.myqcloud.com",
Help: "Chongqing Region.",
}, {
Value: "cos.ap-hongkong.myqcloud.com",
Help: "Hong Kong (China) Region.",
}, {
Value: "cos.ap-singapore.myqcloud.com",
Help: "Singapore Region.",
}, {
Value: "cos.ap-mumbai.myqcloud.com",
Help: "Mumbai Region.",
}, {
Value: "cos.ap-seoul.myqcloud.com",
Help: "Seoul Region.",
}, {
Value: "cos.ap-bangkok.myqcloud.com",
Help: "Bangkok Region.",
}, {
Value: "cos.ap-tokyo.myqcloud.com",
Help: "Tokyo Region.",
}, {
Value: "cos.na-siliconvalley.myqcloud.com",
Help: "Silicon Valley Region.",
}, {
Value: "cos.na-ashburn.myqcloud.com",
Help: "Virginia Region.",
}, {
Value: "cos.na-toronto.myqcloud.com",
Help: "Toronto Region.",
}, {
Value: "cos.eu-frankfurt.myqcloud.com",
Help: "Frankfurt Region.",
}, {
Value: "cos.eu-moscow.myqcloud.com",
Help: "Moscow Region.",
}, {
Value: "cos.accelerate.myqcloud.com",
Help: "Use Tencent COS Accelerate Endpoint.",
}},
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\nRequired when using an S3 clone.",
Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath",
Provider: "!AWS,IBMCOS,TencentCOS,Alibaba,Scaleway,StackPath",
Examples: []fs.OptionExample{{
Value: "objects-us-east-1.dream.io",
Help: "Dream Objects endpoint",
@@ -515,16 +612,16 @@ func init() {
Provider: "AWS",
Examples: []fs.OptionExample{{
Value: "",
Help: "Empty for US Region, Northern Virginia or Pacific Northwest.",
Help: "Empty for US Region, Northern Virginia, or Pacific Northwest.",
}, {
Value: "us-east-2",
Help: "US East (Ohio) Region.",
}, {
Value: "us-west-2",
Help: "US West (Oregon) Region.",
}, {
Value: "us-west-1",
Help: "US West (Northern California) Region.",
}, {
Value: "us-west-2",
Help: "US West (Oregon) Region.",
}, {
Value: "ca-central-1",
Help: "Canada (Central) Region.",
@@ -534,9 +631,15 @@ func init() {
}, {
Value: "eu-west-2",
Help: "EU (London) Region.",
}, {
Value: "eu-west-3",
Help: "EU (Paris) Region.",
}, {
Value: "eu-north-1",
Help: "EU (Stockholm) Region.",
}, {
Value: "eu-south-1",
Help: "EU (Milan) Region.",
}, {
Value: "EU",
Help: "EU Region.",
@@ -551,16 +654,37 @@ func init() {
Help: "Asia Pacific (Tokyo) Region.",
}, {
Value: "ap-northeast-2",
Help: "Asia Pacific (Seoul)",
Help: "Asia Pacific (Seoul) Region.",
}, {
Value: "ap-northeast-3",
Help: "Asia Pacific (Osaka-Local) Region.",
}, {
Value: "ap-south-1",
Help: "Asia Pacific (Mumbai)",
Help: "Asia Pacific (Mumbai) Region.",
}, {
Value: "ap-east-1",
Help: "Asia Pacific (Hong Kong)",
Help: "Asia Pacific (Hong Kong) Region.",
}, {
Value: "sa-east-1",
Help: "South America (Sao Paulo) Region.",
}, {
Value: "me-south-1",
Help: "Middle East (Bahrain) Region.",
}, {
Value: "af-south-1",
Help: "Africa (Cape Town) Region.",
}, {
Value: "cn-north-1",
Help: "China (Beijing) Region",
}, {
Value: "cn-northwest-1",
Help: "China (Ningxia) Region.",
}, {
Value: "us-gov-east-1",
Help: "AWS GovCloud (US-East) Region.",
}, {
Value: "us-gov-west-1",
Help: "AWS GovCloud (US) Region.",
}},
}, {
Name: "location_constraint",
@@ -666,7 +790,7 @@ func init() {
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.",
Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath",
Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath,TencentCOS",
}, {
Name: "acl",
Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -675,12 +799,16 @@ This ACL is used for creating objects and if bucket_acl isn't set, for creating
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
Note that this ACL is applied when server side copying objects as S3
Note that this ACL is applied when server-side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.`,
Examples: []fs.OptionExample{{
Value: "default",
Help: "Owner gets Full_CONTROL. No one else has access rights (default).",
Provider: "TencentCOS",
}, {
Value: "private",
Help: "Owner gets FULL_CONTROL. No one else has access rights (default).",
Provider: "!IBMCOS",
Provider: "!IBMCOS,TencentCOS",
}, {
Value: "public-read",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
@@ -740,6 +868,12 @@ isn't set then "acl" is used instead.`,
Value: "authenticated-read",
Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
}},
}, {
Name: "requester_pays",
Help: "Enables requester pays option when interacting with S3 bucket.",
Provider: "AWS",
Default: false,
Advanced: true,
}, {
Name: "server_side_encryption",
Help: "The server-side encryption algorithm used when storing this object in S3.",
@@ -787,8 +921,11 @@ isn't set then "acl" is used instead.`,
Help: "None",
}},
}, {
Name: "sse_customer_key_md5",
Help: "If using SSE-C you must provide the secret encryption key MD5 checksum.",
Name: "sse_customer_key_md5",
Help: `If using SSE-C you may provide the secret encryption key MD5 checksum (optional).
If you leave it blank, this is calculated automatically from the sse_customer_key provided.
`,
Provider: "AWS,Ceph,Minio",
Advanced: true,
Examples: []fs.OptionExample{{
@@ -842,6 +979,24 @@ isn't set then "acl" is used instead.`,
Value: "STANDARD_IA",
Help: "Infrequent access storage mode.",
}},
}, {
// Mapping from here: https://intl.cloud.tencent.com/document/product/436/30925
Name: "storage_class",
Help: "The storage class to use when storing new objects in Tencent COS.",
Provider: "TencentCOS",
Examples: []fs.OptionExample{{
Value: "",
Help: "Default",
}, {
Value: "STANDARD",
Help: "Standard storage class",
}, {
Value: "ARCHIVE",
Help: "Archive storage mode.",
}, {
Value: "STANDARD_IA",
Help: "Infrequent access storage mode.",
}},
}, {
// Mapping from here: https://www.scaleway.com/en/docs/object-storage-glacier/#-Scaleway-Storage-Classes
Name: "storage_class",
@@ -870,14 +1025,14 @@ The minimum is 0 and the maximum is 5GB.`,
Help: `Chunk size to use for uploading.
When uploading files larger than upload_cutoff or files with unknown
size (eg from "rclone rcat" or uploaded with "rclone mount" or google
size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google
photos or google docs) they will be uploaded as multipart uploads
using this chunk size.
Note that "--s3-upload-concurrency" chunks of this size are buffered
in memory per transfer.
If you are transferring large files over high speed links and you have
If you are transferring large files over high-speed links and you have
enough memory, then increasing this will speed up the transfers.
Rclone will automatically increase the chunk size when uploading a
@@ -886,7 +1041,7 @@ large file of known size to stay below the 10,000 chunks limit.
Files of unknown size are uploaded with the configured
chunk_size. Since the default chunk size is 5MB and there can be at
most 10,000 chunks, this means that by default the maximum size of
file you can stream upload is 48GB. If you wish to stream upload
a file you can stream upload is 48GB. If you wish to stream upload
larger files then you will need to increase chunk_size.`,
Default: minChunkSize,
Advanced: true,
@@ -909,7 +1064,7 @@ large file of a known size to stay below this number of chunks limit.
Name: "copy_cutoff",
Help: `Cutoff for switching to multipart copy
Any files larger than this that need to be server side copied will be
Any files larger than this that need to be server-side copied will be
copied in chunks of this size.
The minimum is 0 and the maximum is 5GB.`,
@@ -961,7 +1116,7 @@ If empty it will default to the environment variable "AWS_PROFILE" or
This is the number of chunks of the same file that are uploaded
concurrently.
If you are uploading small numbers of large file over high speed link
If you are uploading small numbers of large files over high-speed links
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
Default: 4,
@@ -975,7 +1130,7 @@ if false then rclone will use virtual path style. See [the AWS S3
docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
for more info.
Some providers (eg AWS, Aliyun OSS or Netease COS) require this set to
Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to
false - rclone will do this automatically based on the provider
setting.`,
Default: true,
@@ -987,7 +1142,7 @@ setting.`,
If this is false (the default) then rclone will use v4 authentication.
If it is set then rclone will use v2 authentication.
Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.`,
Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH.`,
Default: false,
Advanced: true,
}, {
@@ -1022,10 +1177,47 @@ In Ceph, this can be increased with the "rgw list buckets max chunk" option.
Advanced: true,
}, {
Name: "no_check_bucket",
Help: `If set don't attempt to check the bucket exists or create it
Help: `If set, don't attempt to check the bucket exists or create it
This can be useful when trying to minimise the number of transactions
rclone does if you know the bucket exists already.
It can also be needed if the user you are using does not have bucket
creation permissions. Before v1.52.0 this would have passed silently
due to a bug.
`,
Default: false,
Advanced: true,
}, {
Name: "no_head",
Help: `If set, don't HEAD uploaded objects to check integrity
This can be useful when trying to minimise the number of transactions
rclone does.
Setting it means that if rclone receives a 200 OK message after
uploading an object with PUT then it will assume that it got uploaded
properly.
In particular it will assume:
- the metadata, including modtime, storage class and content type was as uploaded
- the size was as uploaded
It reads the following items from the response for a single part PUT:
- the MD5SUM
- The uploaded date
For multipart uploads these items aren't read.
If a source object of unknown length is uploaded then rclone **will** do a
HEAD request.
Setting this flag increases the chance for undetected upload failures,
in particular an incorrect size, so it isn't recommended for normal
operation. In practice the chance of an undetected upload failure is
very small even with this flag.
`,
Default: false,
Advanced: true,
@@ -1058,13 +1250,26 @@ This option controls how often unused buffers will be removed from the pool.`,
Default: memoryPoolUseMmap,
Advanced: true,
Help: `Whether to use mmap buffers in internal memory pool.`,
}, {
Name: "disable_http2",
Default: false,
Advanced: true,
Help: `Disable usage of http2 for S3 backends
There is currently an unsolved issue with the s3 (specifically minio) backend
and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be
disabled here. When the issue is solved this flag will be removed.
See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631
`,
},
}})
}
// Constants
const (
metaMtime = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
metaMtime = "Mtime" // the meta key to store mtime in - e.g. X-Amz-Meta-Mtime
metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
// The maximum size of object we can COPY - this should be 5GiB but is < 5GB for b2 compatibility
// See https://forum.rclone.org/t/copying-files-within-a-b2-bucket/16680/76
@@ -1091,6 +1296,7 @@ type Options struct {
LocationConstraint string `config:"location_constraint"`
ACL string `config:"acl"`
BucketACL string `config:"bucket_acl"`
RequesterPays bool `config:"requester_pays"`
ServerSideEncryption string `config:"server_side_encryption"`
SSEKMSKeyID string `config:"sse_kms_key_id"`
SSECustomerAlgorithm string `config:"sse_customer_algorithm"`
@@ -1112,9 +1318,11 @@ type Options struct {
LeavePartsOnError bool `config:"leave_parts_on_error"`
ListChunk int64 `config:"list_chunk"`
NoCheckBucket bool `config:"no_check_bucket"`
NoHead bool `config:"no_head"`
Enc encoder.MultiEncoder `config:"encoding"`
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
DisableHTTP2 bool `config:"disable_http2"`
}
// Fs represents a remote s3 server
@@ -1122,6 +1330,8 @@ type Fs struct {
name string // the name of the remote
root string // root of the bucket - ignore all objects above this
opt Options // parsed options
ci *fs.ConfigInfo // global config
ctx context.Context // global context for reading config
features *fs.Features // optional features
c *s3.S3 // the connection to the s3 server
ses *session.Session // the s3 session
@@ -1131,6 +1341,7 @@ type Fs struct {
pacer *fs.Pacer // To pace the API calls
srv *http.Client // a plain http client
pool *pool.Pool // memory pool
etagIsNotMD5 bool // if set ETags are not MD5s
}
// Object describes a s3 object
@@ -1141,12 +1352,12 @@ type Object struct {
// that in you need to call readMetaData
fs *Fs // what this object is part of
remote string // The remote path
etag string // md5sum of the object
md5 string // md5sum of the object
bytes int64 // size of the object
lastModified time.Time // Last modified
meta map[string]*string // The object metadata if known - may be nil
mimeType string // MimeType of object - may be ""
storageClass string // eg GLACIER
storageClass string // e.g. GLACIER
}
// ------------------------------------------------------------
@@ -1180,6 +1391,7 @@ func (f *Fs) Features() *fs.Features {
// retryErrorCodes is a slice of error codes that we will retry
// See: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
var retryErrorCodes = []int{
429, // Too Many Requests
500, // Internal Server Error - "We encountered an internal error. Please try again."
503, // Service Unavailable/Slow Down - "Reduce your request rate"
}
@@ -1236,8 +1448,21 @@ func (o *Object) split() (bucket, bucketPath string) {
return o.fs.split(o.remote)
}
// getClient makes an http client according to the options
func getClient(ctx context.Context, opt *Options) *http.Client {
// TODO: Do we need cookies too?
t := fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
if opt.DisableHTTP2 {
t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
}
})
return &http.Client{
Transport: t,
}
}
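For reference, the opt-out used above can be reproduced with the standard library alone; setting TLSNextProto to a non-nil empty map is the documented way to stop a Transport negotiating HTTP/2 (standalone sketch, not the backend's code):

package main

import (
    "crypto/tls"
    "net/http"
)

// newHTTP1OnlyClient returns a client whose transport never upgrades to HTTP/2:
// a non-nil, empty TLSNextProto map suppresses the h2 negotiation during the TLS handshake.
func newHTTP1OnlyClient() *http.Client {
    t := &http.Transport{
        TLSNextProto: map[string]func(string, *tls.Conn) http.RoundTripper{},
    }
    return &http.Client{Transport: t}
}

func main() {
    client := newHTTP1OnlyClient()
    _ = client // use like any other *http.Client
}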
// s3Connection makes a connection to s3
func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
func s3Connection(ctx context.Context, opt *Options) (*s3.S3, *session.Session, error) {
// Make the auth
v := credentials.Value{
AccessKeyID: opt.AccessKeyID,
@@ -1246,6 +1471,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
}
lowTimeoutClient := &http.Client{Timeout: 1 * time.Second} // low timeout to ec2 metadata service
def := defaults.Get()
def.Config.HTTPClient = lowTimeoutClient
@@ -1305,7 +1531,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
if opt.Region == "" {
opt.Region = "us-east-1"
}
if opt.Provider == "AWS" || opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.Provider == "Scaleway" || opt.UseAccelerateEndpoint {
if opt.Provider == "AWS" || opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.Provider == "Scaleway" || opt.Provider == "TencentCOS" || opt.UseAccelerateEndpoint {
opt.ForcePathStyle = false
}
if opt.Provider == "Scaleway" && opt.MaxUploadParts > 1000 {
@@ -1314,7 +1540,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
awsConfig := aws.NewConfig().
WithMaxRetries(0). // Rely on rclone's retry logic
WithCredentials(cred).
WithHTTPClient(fshttp.NewClient(fs.Config)).
WithHTTPClient(getClient(ctx, opt)).
WithS3ForcePathStyle(opt.ForcePathStyle).
WithS3UseAccelerate(opt.UseAccelerateEndpoint).
WithS3UsEast1RegionalEndpoint(endpoints.RegionalS3UsEast1Endpoint)
@@ -1395,7 +1621,7 @@ func (f *Fs) setRoot(root string) {
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -1416,27 +1642,44 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if opt.BucketACL == "" {
opt.BucketACL = opt.ACL
}
c, ses, err := s3Connection(opt)
if opt.SSECustomerKey != "" && opt.SSECustomerKeyMD5 == "" {
// calculate CustomerKeyMD5 if not supplied
md5sumBinary := md5.Sum([]byte(opt.SSECustomerKey))
opt.SSECustomerKeyMD5 = base64.StdEncoding.EncodeToString(md5sumBinary[:])
}
c, ses, err := s3Connection(ctx, opt)
if err != nil {
return nil, err
}
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
opt: *opt,
ci: ci,
ctx: ctx,
c: c,
ses: ses,
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
pacer: fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(),
srv: fshttp.NewClient(fs.Config),
srv: getClient(ctx, opt),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
opt.UploadConcurrency*fs.Config.Transfers,
opt.UploadConcurrency*ci.Transfers,
opt.MemoryPoolUseMmap,
),
}
if opt.ServerSideEncryption == "aws:kms" || opt.SSECustomerAlgorithm != "" {
// From: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html
//
// Objects encrypted by SSE-S3 or plaintext have ETags that are an MD5
// digest of their data.
//
// Objects encrypted by SSE-C or SSE-KMS have ETags that are not an
// MD5 digest of their object data.
f.etagIsNotMD5 = true
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMimeType: true,
@@ -1446,27 +1689,23 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
SetTier: true,
GetTier: true,
SlowModTime: true,
}).Fill(f)
}).Fill(ctx, f)
if f.rootBucket != "" && f.rootDirectory != "" {
// Check to see if the object exists
encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
req := s3.HeadObjectInput{
Bucket: &f.rootBucket,
Key: &encodedDirectory,
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.c.HeadObject(&req)
return f.shouldRetry(err)
})
if err == nil {
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
// Check to see if the (bucket,directory) is actually an existing file
oldRoot := f.root
newRoot, leaf := path.Split(oldRoot)
f.setRoot(newRoot)
_, err := f.NewObject(ctx, leaf)
if err != nil {
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
// File doesn't exist or is a directory so return old f
f.setRoot(oldRoot)
return f, nil
}
f.setRoot(newRoot)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
return nil, err
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
// f.listMultipartUploads()
return f, nil
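One detail in the NewFs changes above: when SSECustomerKey is set without SSECustomerKeyMD5, the MD5 is derived automatically as the base64-encoded MD5 of the raw key bytes. A standalone sketch of that calculation follows; the key below is a made-up 32-byte example, not a real secret, and this is not rclone code.

// ----- begin sketch (not part of the diff) -----
package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

func main() {
	key := "32-bytes-long-sse-c-example-key!" // hypothetical 32-byte AES-256 key
	sum := md5.Sum([]byte(key))
	// Base64 of the key's MD5 - the value S3 expects in
	// x-amz-server-side-encryption-customer-key-MD5.
	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
}
// ----- end sketch -----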
@@ -1488,7 +1727,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *s3.Obje
} else {
o.lastModified = *info.LastModified
}
o.etag = aws.StringValue(info.ETag)
o.setMD5FromEtag(aws.StringValue(info.ETag))
o.bytes = aws.Int64Value(info.Size)
o.storageClass = aws.StringValue(info.StorageClass)
} else {
@@ -1540,7 +1779,7 @@ func (f *Fs) updateRegionForBucket(bucket string) error {
// Make a new session with the new region
oldRegion := f.opt.Region
f.opt.Region = region
c, ses, err := s3Connection(&f.opt)
c, ses, err := s3Connection(f.ctx, &f.opt)
if err != nil {
return errors.Wrap(err, "creating new session failed")
}
@@ -1587,7 +1826,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
//
// So we enable only on providers we know supports it properly, all others can retry when a
// XML Syntax error is detected.
var urlEncodeListings = (f.opt.Provider == "AWS" || f.opt.Provider == "Wasabi" || f.opt.Provider == "Alibaba" || f.opt.Provider == "Minio")
var urlEncodeListings = (f.opt.Provider == "AWS" || f.opt.Provider == "Wasabi" || f.opt.Provider == "Alibaba" || f.opt.Provider == "Minio" || f.opt.Provider == "TencentCOS")
for {
// FIXME need to implement ALL loop
req := s3.ListObjectsInput{
@@ -1600,6 +1839,9 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
if urlEncodeListings {
req.EncodingType = aws.String(s3.EncodingTypeUrl)
}
if f.opt.RequesterPays {
req.RequestPayer = aws.String(s3.RequestPayerRequester)
}
var resp *s3.ListObjectsOutput
var err error
err = f.pacer.Call(func() (bool, error) {
@@ -1965,7 +2207,7 @@ func pathEscape(s string) string {
return strings.Replace(rest.URLPathEscape(s), "+", "%2B", -1)
}
// copy does a server side copy
// copy does a server-side copy
//
// It adds the boiler plate to the req passed in and calls the s3
// method
@@ -1975,9 +2217,24 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
req.Key = &dstPath
source := pathEscape(path.Join(srcBucket, srcPath))
req.CopySource = &source
if f.opt.RequesterPays {
req.RequestPayer = aws.String(s3.RequestPayerRequester)
}
if f.opt.ServerSideEncryption != "" {
req.ServerSideEncryption = &f.opt.ServerSideEncryption
}
if f.opt.SSECustomerAlgorithm != "" {
req.SSECustomerAlgorithm = &f.opt.SSECustomerAlgorithm
req.CopySourceSSECustomerAlgorithm = &f.opt.SSECustomerAlgorithm
}
if f.opt.SSECustomerKey != "" {
req.SSECustomerKey = &f.opt.SSECustomerKey
req.CopySourceSSECustomerKey = &f.opt.SSECustomerKey
}
if f.opt.SSECustomerKeyMD5 != "" {
req.SSECustomerKeyMD5 = &f.opt.SSECustomerKeyMD5
req.CopySourceSSECustomerKeyMD5 = &f.opt.SSECustomerKeyMD5
}
if f.opt.SSEKMSKeyID != "" {
req.SSEKMSKeyId = &f.opt.SSEKMSKeyID
}
@@ -2099,7 +2356,7 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
})
}
// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -2143,7 +2400,7 @@ func (f *Fs) getMemoryPool(size int64) *pool.Pool {
return pool.New(
time.Duration(f.opt.MemoryPoolFlushTime),
int(size),
f.opt.UploadConcurrency*fs.Config.Transfers,
f.opt.UploadConcurrency*f.ci.Transfers,
f.opt.MemoryPoolUseMmap,
)
}
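As a rough worked example of the sizing above: the pool holds at most upload_concurrency × transfers chunk buffers, so assuming the stock defaults of --transfers 4, --s3-upload-concurrency 4 and 5 MiB chunks (worth re-checking for your version), that is 4 × 4 = 16 buffers, or about 80 MiB of reusable upload memory; raising --s3-chunk-size scales that figure proportionally.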
@@ -2190,7 +2447,7 @@ All the objects shown will be marked for restore, then
rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard
It returns a list of status dictionaries with Remote and Status
keys. The Status will be OK if it was successfull or an error message
keys. The Status will be OK if it was successful or an error message
if not.
[
@@ -2355,7 +2612,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
// listMultipartUploads lists all outstanding multipart uploads for (bucket, key)
//
// Note that rather lazily we treat key as a prefix so it matches
// directories and objects. This could suprise the user if they ask
// directories and objects. This could surprise the user if they ask
// for "dir" and it returns "dirKey"
func (f *Fs) listMultipartUploads(ctx context.Context, bucket, key string) (uploads []*s3.MultipartUpload, err error) {
var (
@@ -2489,30 +2746,38 @@ func (o *Object) Remote() string {
var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
// Set the MD5 from the etag
func (o *Object) setMD5FromEtag(etag string) {
if o.fs.etagIsNotMD5 {
o.md5 = ""
return
}
if etag == "" {
o.md5 = ""
return
}
hash := strings.Trim(strings.ToLower(etag), `"`)
// Check the etag is a valid md5sum
if !matchMd5.MatchString(hash) {
o.md5 = ""
return
}
o.md5 = hash
}
// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
hash := strings.Trim(strings.ToLower(o.etag), `"`)
// Check the etag is a valid md5sum
if !matchMd5.MatchString(hash) {
// If we haven't got an MD5, then check the metadata
if o.md5 == "" {
err := o.readMetaData(ctx)
if err != nil {
return "", err
}
if md5sum, ok := o.meta[metaMD5Hash]; ok {
md5sumBytes, err := base64.StdEncoding.DecodeString(*md5sum)
if err != nil {
return "", err
}
hash = hex.EncodeToString(md5sumBytes)
} else {
hash = ""
}
}
return hash, nil
return o.md5, nil
}
// Size returns the size of an object in bytes
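setMD5FromEtag and Hash above both hinge on one test: an S3 ETag only doubles as an MD5 when it is a bare 32-character hex string, which multipart uploads and SSE-encrypted objects never produce. A small self-contained sketch of that check (not rclone code, example ETags only):

// ----- begin sketch (not part of the diff) -----
package main

import (
	"fmt"
	"regexp"
	"strings"
)

var plainMD5 = regexp.MustCompile(`^[0-9a-f]{32}$`)

// md5FromEtag mirrors the check above: strip the surrounding quotes, lowercase,
// and accept the value only if it looks like a bare MD5.
func md5FromEtag(etag string) string {
	h := strings.Trim(strings.ToLower(etag), `"`)
	if !plainMD5.MatchString(h) {
		return ""
	}
	return h
}

func main() {
	fmt.Println(md5FromEtag(`"9e107d9d372bb6826bd81d3542a419d6"`))          // usable as MD5
	fmt.Println(md5FromEtag(`"9e107d9d372bb6826bd81d3542a419d6-12"`) == "") // multipart ETag: true
}
// ----- end sketch -----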
@@ -2526,6 +2791,18 @@ func (o *Object) headObject(ctx context.Context) (resp *s3.HeadObjectOutput, err
Bucket: &bucket,
Key: &bucketPath,
}
if o.fs.opt.RequesterPays {
req.RequestPayer = aws.String(s3.RequestPayerRequester)
}
if o.fs.opt.SSECustomerAlgorithm != "" {
req.SSECustomerAlgorithm = &o.fs.opt.SSECustomerAlgorithm
}
if o.fs.opt.SSECustomerKey != "" {
req.SSECustomerKey = &o.fs.opt.SSECustomerKey
}
if o.fs.opt.SSECustomerKeyMD5 != "" {
req.SSECustomerKeyMD5 = &o.fs.opt.SSECustomerKeyMD5
}
err = o.fs.pacer.Call(func() (bool, error) {
var err error
resp, err = o.fs.c.HeadObjectWithContext(ctx, &req)
@@ -2560,12 +2837,23 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
if resp.ContentLength != nil {
size = *resp.ContentLength
}
o.etag = aws.StringValue(resp.ETag)
o.setMD5FromEtag(aws.StringValue(resp.ETag))
o.bytes = size
o.meta = resp.Metadata
if o.meta == nil {
o.meta = map[string]*string{}
}
// Read MD5 from metadata if present
if md5sumBase64, ok := o.meta[metaMD5Hash]; ok {
md5sumBytes, err := base64.StdEncoding.DecodeString(*md5sumBase64)
if err != nil {
fs.Debugf(o, "Failed to read md5sum from metadata %q: %v", *md5sumBase64, err)
} else if len(md5sumBytes) != 16 {
fs.Debugf(o, "Failed to read md5sum from metadata %q: wrong length", *md5sumBase64)
} else {
o.md5 = hex.EncodeToString(md5sumBytes)
}
}
o.storageClass = aws.StringValue(resp.StorageClass)
if resp.LastModified == nil {
fs.Logf(o, "Failed to read last modified from HEAD: %v", err)
@@ -2582,7 +2870,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
if fs.Config.UseServerModTime {
if o.fs.ci.UseServerModTime {
return o.lastModified
}
err := o.readMetaData(ctx)
@@ -2624,6 +2912,9 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
Metadata: o.meta,
MetadataDirective: aws.String(s3.MetadataDirectiveReplace), // replace metadata with that passed in
}
if o.fs.opt.RequesterPays {
req.RequestPayer = aws.String(s3.RequestPayerRequester)
}
return o.fs.copy(ctx, &req, bucket, bucketPath, bucket, bucketPath, o)
}
@@ -2639,6 +2930,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
Bucket: &bucket,
Key: &bucketPath,
}
if o.fs.opt.RequesterPays {
req.RequestPayer = aws.String(s3.RequestPayerRequester)
}
if o.fs.opt.SSECustomerAlgorithm != "" {
req.SSECustomerAlgorithm = &o.fs.opt.SSECustomerAlgorithm
}
@@ -2888,8 +3182,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// read the md5sum if available
// - for non multpart
// - for non multipart
// - so we can add a ContentMD5
// - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
// - for multipart provided checksums aren't disabled
// - so we can add the md5sum in the metadata as metaMD5Hash
var md5sum string
@@ -2899,7 +3194,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
hashBytes, err := hex.DecodeString(hash)
if err == nil {
md5sum = base64.StdEncoding.EncodeToString(hashBytes)
if multipart {
if (multipart || o.fs.etagIsNotMD5) && !o.fs.opt.DisableChecksum {
// Set the md5sum as metadata on the object if
// - a multipart upload
// - the Etag is not an MD5, e.g. when using SSE/SSE-C
// provided checksums aren't disabled
metadata[metaMD5Hash] = &md5sum
}
}
@@ -2918,6 +3217,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if md5sum != "" {
req.ContentMD5 = &md5sum
}
if o.fs.opt.RequesterPays {
req.RequestPayer = aws.String(s3.RequestPayerRequester)
}
if o.fs.opt.ServerSideEncryption != "" {
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
}
@@ -2966,6 +3268,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
}
var resp *http.Response // response from PUT
if multipart {
err = o.uploadMultipart(ctx, &req, size, in)
if err != nil {
@@ -2995,18 +3298,18 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// create the vanilla http request
httpReq, err := http.NewRequest("PUT", url, in)
httpReq, err := http.NewRequestWithContext(ctx, "PUT", url, in)
if err != nil {
return errors.Wrap(err, "s3 upload: new request")
}
httpReq = httpReq.WithContext(ctx) // go1.13 can use NewRequestWithContext
// set the headers we signed and the length
httpReq.Header = headers
httpReq.ContentLength = size
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err := o.fs.srv.Do(httpReq)
var err error
resp, err = o.fs.srv.Do(httpReq)
if err != nil {
return o.fs.shouldRetry(err)
}
@@ -3025,6 +3328,26 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
}
// User requested we don't HEAD the object after uploading it
// so make up the object as best we can assuming it got
// uploaded properly. If size < 0 then we need to do the HEAD.
if o.fs.opt.NoHead && size >= 0 {
o.md5 = md5sum
o.bytes = size
o.lastModified = time.Now()
o.meta = req.Metadata
o.mimeType = aws.StringValue(req.ContentType)
o.storageClass = aws.StringValue(req.StorageClass)
// If we have done a single part PUT request then we can read these
if resp != nil {
if date, err := http.ParseTime(resp.Header.Get("Date")); err == nil {
o.lastModified = date
}
o.setMD5FromEtag(resp.Header.Get("Etag"))
}
return nil
}
// Read the metadata from the newly created object
o.meta = nil // wipe old metadata
err = o.readMetaData(ctx)
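With the NoHead option (--s3-no-head) the object's details are reconstructed from what the PUT already returned instead of issuing a HEAD; the Date header is read with net/http's ParseTime, which accepts the three standard HTTP date formats. A tiny standalone illustration, using an example timestamp only:

// ----- begin sketch (not part of the diff) -----
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// http.ParseTime tries RFC 1123, RFC 850 and ANSI C asctime formats in turn,
	// which is why it is a safe way to read a response's "Date" header.
	t, err := http.ParseTime("Mon, 02 Jan 2006 15:04:05 GMT")
	fmt.Println(t.UTC(), err)
}
// ----- end sketch -----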
@@ -3038,6 +3361,9 @@ func (o *Object) Remove(ctx context.Context) error {
Bucket: &bucket,
Key: &bucketPath,
}
if o.fs.opt.RequesterPays {
req.RequestPayer = aws.String(s3.RequestPayerRequester)
}
err := o.fs.pacer.Call(func() (bool, error) {
_, err := o.fs.c.DeleteObjectWithContext(ctx, &req)
return o.fs.shouldRetry(err)


@@ -53,6 +53,7 @@ func sign(AccessKey, SecretKey string, req *http.Request) {
var md5 string
var contentType string
var headersToSign []string
tmpHeadersToSign := make(map[string][]string)
for k, v := range req.Header {
k = strings.ToLower(k)
switch k {
@@ -62,15 +63,24 @@ func sign(AccessKey, SecretKey string, req *http.Request) {
contentType = v[0]
default:
if strings.HasPrefix(k, "x-amz-") {
vall := strings.Join(v, ",")
headersToSign = append(headersToSign, k+":"+vall)
tmpHeadersToSign[k] = v
}
}
}
var keys []string
for k := range tmpHeadersToSign {
keys = append(keys, k)
}
// https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html
sort.Strings(keys)
for _, key := range keys {
vall := strings.Join(tmpHeadersToSign[key], ",")
headersToSign = append(headersToSign, key+":"+vall)
}
// Make headers of interest into canonical string
var joinedHeadersToSign string
if len(headersToSign) > 0 {
sort.StringSlice(headersToSign).Sort()
joinedHeadersToSign = strings.Join(headersToSign, "\n") + "\n"
}
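The signing change above sorts the x-amz-* headers by their lowercased names before joining them, rather than sorting the already-joined "name:value" strings, which matches the header ordering described in the AWS REST authentication documentation linked in the code. A self-contained sketch of that canonicalisation step, using made-up example headers (not the rclone function itself):

// ----- begin sketch (not part of the diff) -----
package main

import (
	"fmt"
	"sort"
	"strings"
)

// canonicalAmzHeaders keeps only x-amz-* headers, lowercases their names,
// sorts by name and joins multiple values with commas - the same ordering
// the patched sign function produces.
func canonicalAmzHeaders(header map[string][]string) string {
	lower := make(map[string][]string)
	var keys []string
	for k, v := range header {
		lk := strings.ToLower(k)
		if strings.HasPrefix(lk, "x-amz-") {
			lower[lk] = v
			keys = append(keys, lk)
		}
	}
	sort.Strings(keys)
	var b strings.Builder
	for _, k := range keys {
		b.WriteString(k + ":" + strings.Join(lower[k], ",") + "\n")
	}
	return b.String()
}

func main() {
	fmt.Print(canonicalAmzHeaders(map[string][]string{
		"X-Amz-Meta-Beta":  {"2"},
		"X-Amz-Meta-Alpha": {"1", "one"},
		"Content-Type":     {"text/plain"},
	}))
}
// ----- end sketch -----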


@@ -46,7 +46,7 @@ type Library struct {
Encrypted bool `json:"encrypted"`
Owner string `json:"owner"`
ID string `json:"id"`
Size int `json:"size"`
Size int64 `json:"size"`
Name string `json:"name"`
Modified int64 `json:"mtime"`
}


@@ -1,6 +1,7 @@
package seafile
import (
"context"
"fmt"
"net/url"
"sync"
@@ -27,7 +28,7 @@ func init() {
}
// getPacer returns the unique pacer for that remote URL
func getPacer(remote string) *fs.Pacer {
func getPacer(ctx context.Context, remote string) *fs.Pacer {
pacerMutex.Lock()
defer pacerMutex.Unlock()
@@ -37,6 +38,7 @@ func getPacer(remote string) *fs.Pacer {
}
pacers[remote] = fs.NewPacer(
ctx,
pacer.NewDefault(
pacer.MinSleep(minSleep),
pacer.MaxSleep(maxSleep),


@@ -147,7 +147,7 @@ type Fs struct {
// ------------------------------------------------------------
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -197,15 +197,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
opt: *opt,
endpoint: u,
endpointURL: u.String(),
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(u.String()),
pacer: getPacer(opt.URL),
srv: rest.NewClient(fshttp.NewClient(ctx)).SetRoot(u.String()),
pacer: getPacer(ctx, opt.URL),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
BucketBased: opt.LibraryName == "",
}).Fill(f)
}).Fill(ctx, f)
ctx := context.Background()
serverInfo, err := f.getServerInfo(ctx)
if err != nil {
return nil, err
@@ -297,7 +296,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
// Config callback for 2FA
func Config(name string, m configmap.Mapper) {
func Config(ctx context.Context, name string, m configmap.Mapper) {
ci := fs.GetConfig(ctx)
serverURL, ok := m.Get(configURL)
if !ok || serverURL == "" {
// If there's no server URL, it means we're trying an operation at the backend level, like a "rclone authorize seafile"
@@ -306,7 +306,7 @@ func Config(name string, m configmap.Mapper) {
}
// Stop if we are running non-interactive config
if fs.Config.AutoConfirm {
if ci.AutoConfirm {
return
}
@@ -343,7 +343,7 @@ func Config(name string, m configmap.Mapper) {
if !strings.HasPrefix(url, "/") {
url += "/"
}
srv := rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(url)
srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(url)
// We loop asking for a 2FA code
for {
@@ -663,7 +663,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) e
// ==================== Optional Interface fs.Copier ====================
// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -714,7 +714,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// ==================== Optional Interface fs.Mover ====================
// Move src to this remote using server side move operations.
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -804,7 +804,7 @@ func (f *Fs) adjustDestination(ctx context.Context, libraryID, srcFilename, dstP
// ==================== Optional Interface fs.DirMover ====================
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1004,7 +1004,7 @@ func (f *Fs) listLibraries(ctx context.Context) (entries fs.DirEntries, err erro
for _, library := range libraries {
d := fs.NewDir(library.Name, time.Unix(library.Modified, 0))
d.SetSize(int64(library.Size))
d.SetSize(library.Size)
entries = append(entries, d)
}


@@ -11,7 +11,6 @@ import (
"io"
"io/ioutil"
"os"
"os/user"
"path"
"regexp"
"strconv"
@@ -22,6 +21,7 @@ import (
"github.com/pkg/errors"
"github.com/pkg/sftp"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
@@ -33,6 +33,7 @@ import (
"github.com/rclone/rclone/lib/readers"
sshagent "github.com/xanzy/ssh-agent"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/knownhosts"
)
const (
@@ -43,7 +44,7 @@ const (
)
var (
currentUser = readCurrentUser()
currentUser = env.CurrentUser()
)
func init() {
@@ -82,6 +83,21 @@ func init() {
Only PEM encrypted key files (old OpenSSH format) are supported. Encrypted keys
in the new OpenSSH format can't be used.`,
IsPassword: true,
}, {
Name: "pubkey_file",
Help: `Optional path to public key file.
Set this if you have a signed certificate you want to use for authentication.` + env.ShellExpandHelp,
}, {
Name: "known_hosts_file",
Help: `Optional path to known_hosts file.
Set this value to enable server host key validation.` + env.ShellExpandHelp,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "~/.ssh/known_hosts",
Help: "Use OpenSSH's known_hosts file",
}},
}, {
Name: "key_use_agent",
Help: `When set forces the usage of the ssh-agent.
@@ -176,6 +192,20 @@ Home directory can be found in a shared folder called "home"
The subsystem option is ignored when server_command is defined.`,
Advanced: true,
}, {
Name: "use_fstat",
Default: false,
Help: `If set use fstat instead of stat
Some servers limit the amount of open files and calling Stat after opening
the file will throw an error from the server. Setting this flag will call
Fstat instead of Stat which is called on an already open file handle.
It has been found that this helps with IBM Sterling SFTP servers which have
"extractability" level set to 1 which means only 1 file can be opened at
any given time.
`,
Advanced: true,
}},
}
fs.Register(fsi)
@@ -190,6 +220,8 @@ type Options struct {
KeyPem string `config:"key_pem"`
KeyFile string `config:"key_file"`
KeyFilePass string `config:"key_file_pass"`
PubKeyFile string `config:"pubkey_file"`
KnownHostsFile string `config:"known_hosts_file"`
KeyUseAgent bool `config:"key_use_agent"`
UseInsecureCipher bool `config:"use_insecure_cipher"`
DisableHashCheck bool `config:"disable_hashcheck"`
@@ -201,6 +233,7 @@ type Options struct {
SkipLinks bool `config:"skip_links"`
Subsystem string `config:"subsystem"`
ServerCommand string `config:"server_command"`
UseFstat bool `config:"use_fstat"`
}
// Fs stores the interface to the remote SFTP files
@@ -209,6 +242,7 @@ type Fs struct {
root string
absRoot string
opt Options // parsed options
ci *fs.ConfigInfo // global config
m configmap.Mapper // config
features *fs.Features // optional features
config *ssh.ClientConfig
@@ -218,6 +252,7 @@ type Fs struct {
poolMu sync.Mutex
pool []*conn
pacer *fs.Pacer // pacer for operations
savedpswd string
}
// Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
@@ -231,25 +266,11 @@ type Object struct {
sha1sum *string // Cached SHA1 checksum
}
// readCurrentUser finds the current user name or "" if not found
func readCurrentUser() (userName string) {
usr, err := user.Current()
if err == nil {
return usr.Username
}
// Fall back to reading $USER then $LOGNAME
userName = os.Getenv("USER")
if userName != "" {
return userName
}
return os.Getenv("LOGNAME")
}
// dial starts a client connection to the given SSH server. It is a
// convenience function that connects to the given network address,
// initiates the SSH handshake, and then sets up a Client.
func (f *Fs) dial(network, addr string, sshConfig *ssh.ClientConfig) (*ssh.Client, error) {
dialer := fshttp.NewDialer(fs.Config)
func (f *Fs) dial(ctx context.Context, network, addr string, sshConfig *ssh.ClientConfig) (*ssh.Client, error) {
dialer := fshttp.NewDialer(ctx)
conn, err := dialer.Dial(network, addr)
if err != nil {
return nil, err
@@ -295,12 +316,12 @@ func (c *conn) closed() error {
}
// Open a new connection to the SFTP server.
func (f *Fs) sftpConnection() (c *conn, err error) {
func (f *Fs) sftpConnection(ctx context.Context) (c *conn, err error) {
// Rate limit rate of new connections
c = &conn{
err: make(chan error, 1),
}
c.sshClient, err = f.dial("tcp", f.opt.Host+":"+f.opt.Port, f.config)
c.sshClient, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port, f.config)
if err != nil {
return nil, errors.Wrap(err, "couldn't connect SSH")
}
@@ -338,12 +359,15 @@ func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.C
return nil, err
}
}
opts = opts[:len(opts):len(opts)] // make sure we don't overwrite the callers opts
opts = append(opts, sftp.UseFstat(f.opt.UseFstat))
return sftp.NewClientPipe(pr, pw, opts...)
}
// Get an SFTP connection from the pool, or open a new one
func (f *Fs) getSftpConnection() (c *conn, err error) {
func (f *Fs) getSftpConnection(ctx context.Context) (c *conn, err error) {
accounting.LimitTPS(ctx)
f.poolMu.Lock()
for len(f.pool) > 0 {
c = f.pool[0]
@@ -360,7 +384,7 @@ func (f *Fs) getSftpConnection() (c *conn, err error) {
return c, nil
}
err = f.pacer.Call(func() (bool, error) {
c, err = f.sftpConnection()
c, err = f.sftpConnection(ctx)
if err != nil {
return true, err
}
@@ -407,10 +431,32 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
f.poolMu.Unlock()
}
// Drain the pool of any connections
func (f *Fs) drainPool(ctx context.Context) (err error) {
f.poolMu.Lock()
defer f.poolMu.Unlock()
for i, c := range f.pool {
if cErr := c.closed(); cErr == nil {
cErr = c.close()
if cErr != nil {
err = cErr
}
}
f.pool[i] = nil
}
f.pool = nil
return err
}
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// This will hold the Fs object. We need to create it here
// so we can refer to it in the SSH callback, but it's populated
// in NewFsWithConnection
f := &Fs{
ci: fs.GetConfig(ctx),
}
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -423,12 +469,21 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if opt.Port == "" {
opt.Port = "22"
}
sshConfig := &ssh.ClientConfig{
User: opt.User,
Auth: []ssh.AuthMethod{},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
Timeout: fs.Config.ConnectTimeout,
ClientVersion: "SSH-2.0-" + fs.Config.UserAgent,
Timeout: f.ci.ConnectTimeout,
ClientVersion: "SSH-2.0-" + f.ci.UserAgent,
}
if opt.KnownHostsFile != "" {
hostcallback, err := knownhosts.New(opt.KnownHostsFile)
if err != nil {
return nil, errors.Wrap(err, "couldn't parse known_hosts_file")
}
sshConfig.HostKeyCallback = hostcallback
}
if opt.UseInsecureCipher {
@@ -438,6 +493,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
keyFile := env.ShellExpand(opt.KeyFile)
pubkeyFile := env.ShellExpand(opt.PubKeyFile)
//keyPem := env.ShellExpand(opt.KeyPem)
// Add ssh agent-auth if no password or file or key PEM specified
if (opt.Pass == "" && keyFile == "" && !opt.AskPassword && opt.KeyPem == "") || opt.KeyUseAgent {
@@ -507,7 +563,38 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, errors.Wrap(err, "failed to parse private key file")
}
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signer))
// If a public key has been specified then use that
if pubkeyFile != "" {
certfile, err := ioutil.ReadFile(pubkeyFile)
if err != nil {
return nil, errors.Wrap(err, "unable to read cert file")
}
pk, _, _, _, err := ssh.ParseAuthorizedKey(certfile)
if err != nil {
return nil, errors.Wrap(err, "unable to parse cert file")
}
// And the signer for this, which includes the private key signer
// This is what we'll pass to the ssh client.
// Normally the ssh client will use the public key built
// into the private key, but we need to tell it to use the user
// specified public key cert. This signer is specific to the
// cert and will include the private key signer. Now ssh
// knows everything it needs.
cert, ok := pk.(*ssh.Certificate)
if !ok {
return nil, errors.New("public key file is not a certificate file: " + pubkeyFile)
}
pubsigner, err := ssh.NewCertSigner(cert, signer)
if err != nil {
return nil, errors.Wrap(err, "error generating cert signer")
}
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(pubsigner))
} else {
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signer))
}
}
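The new known_hosts_file and pubkey_file options lean on two pieces of golang.org/x/crypto/ssh shown above: knownhosts.New for host key verification and ssh.NewCertSigner for pairing the private key signer with a signed certificate. A minimal sketch of wiring both into an ssh.ClientConfig follows; the file paths and user are made up and this is not the rclone code path itself.

// ----- begin sketch (not part of the diff) -----
package main

import (
	"errors"
	"io/ioutil"
	"log"

	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/knownhosts"
)

// certAuthMethod loads a private key and a signed certificate and returns an
// auth method whose signer presents the certificate to the server.
func certAuthMethod(keyFile, certFile string) (ssh.AuthMethod, error) {
	keyBytes, err := ioutil.ReadFile(keyFile)
	if err != nil {
		return nil, err
	}
	signer, err := ssh.ParsePrivateKey(keyBytes)
	if err != nil {
		return nil, err
	}
	certBytes, err := ioutil.ReadFile(certFile)
	if err != nil {
		return nil, err
	}
	pk, _, _, _, err := ssh.ParseAuthorizedKey(certBytes)
	if err != nil {
		return nil, err
	}
	cert, ok := pk.(*ssh.Certificate)
	if !ok {
		return nil, errors.New("not a certificate file: " + certFile)
	}
	certSigner, err := ssh.NewCertSigner(cert, signer)
	if err != nil {
		return nil, err
	}
	return ssh.PublicKeys(certSigner), nil
}

func main() {
	hostKeys, err := knownhosts.New("/home/example/.ssh/known_hosts") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	auth, err := certAuthMethod("id_rsa", "id_rsa-cert.pub") // hypothetical paths
	if err != nil {
		log.Fatal(err)
	}
	_ = &ssh.ClientConfig{
		User:            "example",
		Auth:            []ssh.AuthMethod{auth},
		HostKeyCallback: hostKeys,
	}
}
// ----- end sketch -----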
// Auth from password if specified
@@ -516,39 +603,77 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, err
}
sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
sshConfig.Auth = append(sshConfig.Auth,
ssh.Password(clearpass),
ssh.KeyboardInteractive(func(user, instruction string, questions []string, echos []bool) ([]string, error) {
return f.keyboardInteractiveReponse(user, instruction, questions, echos, clearpass)
}),
)
}
// Ask for password if none was defined and we're allowed to
// Config for password if none was defined and we're allowed to
// We don't ask now; we ask if the ssh connection succeeds
if opt.Pass == "" && opt.AskPassword {
_, _ = fmt.Fprint(os.Stderr, "Enter SFTP password: ")
clearpass := config.ReadPassword()
sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
sshConfig.Auth = append(sshConfig.Auth,
ssh.PasswordCallback(f.getPass),
ssh.KeyboardInteractive(func(user, instruction string, questions []string, echos []bool) ([]string, error) {
pass, _ := f.getPass()
return f.keyboardInteractiveReponse(user, instruction, questions, echos, pass)
}),
)
}
return NewFsWithConnection(ctx, name, root, m, opt, sshConfig)
return NewFsWithConnection(ctx, f, name, root, m, opt, sshConfig)
}
// Do the keyboard interactive challenge
//
// Just send the password back for all questions
func (f *Fs) keyboardInteractiveReponse(user, instruction string, questions []string, echos []bool, pass string) ([]string, error) {
fs.Debugf(f, "keyboard interactive auth requested")
answers := make([]string, len(questions))
for i := range answers {
answers[i] = pass
}
return answers, nil
}
// If we're in password mode and ssh connection succeeds then this
// callback is called. First time around we ask the user, and then
// save it so on reconnection we give back the previous string.
// This removes the ability to let the user correct a mistaken entry,
// but means that reconnects are transparent.
// We'll re-use config.Pass for this, 'cos we know it's not been
// specified.
func (f *Fs) getPass() (string, error) {
for f.savedpswd == "" {
_, _ = fmt.Fprint(os.Stderr, "Enter SFTP password: ")
f.savedpswd = config.ReadPassword()
}
return f.savedpswd, nil
}
// NewFsWithConnection creates a new Fs object from the name and root and an ssh.ClientConfig. It connects to
// the host specified in the ssh.ClientConfig
func NewFsWithConnection(ctx context.Context, name string, root string, m configmap.Mapper, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) {
f := &Fs{
name: name,
root: root,
absRoot: root,
opt: *opt,
m: m,
config: sshConfig,
url: "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root,
mkdirLock: newStringLock(),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m configmap.Mapper, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) {
// Populate the Filesystem Object
f.name = name
f.root = root
f.absRoot = root
f.opt = *opt
f.m = m
f.config = sshConfig
f.url = "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root
f.mkdirLock = newStringLock()
f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
f.savedpswd = ""
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
SlowHash: true,
}).Fill(f)
}).Fill(ctx, f)
// Make a connection and pool it to return errors early
c, err := f.getSftpConnection()
c, err := f.getSftpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "NewFs")
}
@@ -616,7 +741,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
fs: f,
remote: remote,
}
err := o.stat()
err := o.stat(ctx)
if err != nil {
return nil, err
}
@@ -625,11 +750,11 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// dirExists returns true,nil if the directory exists, false, nil if
// it doesn't or false, err
func (f *Fs) dirExists(dir string) (bool, error) {
func (f *Fs) dirExists(ctx context.Context, dir string) (bool, error) {
if dir == "" {
dir = "."
}
c, err := f.getSftpConnection()
c, err := f.getSftpConnection(ctx)
if err != nil {
return false, errors.Wrap(err, "dirExists")
}
@@ -658,7 +783,7 @@ func (f *Fs) dirExists(dir string) (bool, error) {
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
root := path.Join(f.absRoot, dir)
ok, err := f.dirExists(root)
ok, err := f.dirExists(ctx, root)
if err != nil {
return nil, errors.Wrap(err, "List failed")
}
@@ -669,7 +794,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if sftpDir == "" {
sftpDir = "."
}
c, err := f.getSftpConnection()
c, err := f.getSftpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "List")
}
@@ -688,7 +813,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
continue
}
oldInfo := info
info, err = f.stat(remote)
info, err = f.stat(ctx, remote)
if err != nil {
if !os.IsNotExist(err) {
fs.Errorf(remote, "stat of non-regular file failed: %v", err)
@@ -713,7 +838,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Put data from <in> into a new remote sftp file object described by <src.Remote()> and <src.ModTime(ctx)>
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
err := f.mkParentDir(src.Remote())
err := f.mkParentDir(ctx, src.Remote())
if err != nil {
return nil, errors.Wrap(err, "Put mkParentDir failed")
}
@@ -736,19 +861,19 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// mkParentDir makes the parent of remote if necessary and any
// directories above that
func (f *Fs) mkParentDir(remote string) error {
func (f *Fs) mkParentDir(ctx context.Context, remote string) error {
parent := path.Dir(remote)
return f.mkdir(path.Join(f.absRoot, parent))
return f.mkdir(ctx, path.Join(f.absRoot, parent))
}
// mkdir makes the directory and parents using native paths
func (f *Fs) mkdir(dirPath string) error {
func (f *Fs) mkdir(ctx context.Context, dirPath string) error {
f.mkdirLock.Lock(dirPath)
defer f.mkdirLock.Unlock(dirPath)
if dirPath == "." || dirPath == "/" {
return nil
}
ok, err := f.dirExists(dirPath)
ok, err := f.dirExists(ctx, dirPath)
if err != nil {
return errors.Wrap(err, "mkdir dirExists failed")
}
@@ -756,11 +881,11 @@ func (f *Fs) mkdir(dirPath string) error {
return nil
}
parent := path.Dir(dirPath)
err = f.mkdir(parent)
err = f.mkdir(ctx, parent)
if err != nil {
return err
}
c, err := f.getSftpConnection()
c, err := f.getSftpConnection(ctx)
if err != nil {
return errors.Wrap(err, "mkdir")
}
@@ -775,7 +900,7 @@ func (f *Fs) mkdir(dirPath string) error {
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
root := path.Join(f.absRoot, dir)
return f.mkdir(root)
return f.mkdir(ctx, root)
}
// Rmdir removes the root directory of the Fs object
@@ -791,7 +916,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
}
// Remove the directory
root := path.Join(f.absRoot, dir)
c, err := f.getSftpConnection()
c, err := f.getSftpConnection(ctx)
if err != nil {
return errors.Wrap(err, "Rmdir")
}
@@ -807,11 +932,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
err := f.mkParentDir(remote)
err := f.mkParentDir(ctx, remote)
if err != nil {
return nil, errors.Wrap(err, "Move mkParentDir failed")
}
c, err := f.getSftpConnection()
c, err := f.getSftpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "Move")
}
@@ -831,7 +956,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -848,7 +973,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
dstPath := path.Join(f.absRoot, dstRemote)
// Check if destination exists
ok, err := f.dirExists(dstPath)
ok, err := f.dirExists(ctx, dstPath)
if err != nil {
return errors.Wrap(err, "DirMove dirExists dst failed")
}
@@ -857,13 +982,13 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// Make sure the parent directory exists
err = f.mkdir(path.Dir(dstPath))
err = f.mkdir(ctx, path.Dir(dstPath))
if err != nil {
return errors.Wrap(err, "DirMove mkParentDir dst failed")
}
// Do the move
c, err := f.getSftpConnection()
c, err := f.getSftpConnection(ctx)
if err != nil {
return errors.Wrap(err, "DirMove")
}
@@ -879,8 +1004,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// run runs cmd on the remote end returning standard output
func (f *Fs) run(cmd string) ([]byte, error) {
c, err := f.getSftpConnection()
func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
c, err := f.getSftpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "run: get SFTP connection")
}
@@ -888,7 +1013,7 @@ func (f *Fs) run(cmd string) ([]byte, error) {
session, err := c.sshClient.NewSession()
if err != nil {
return nil, errors.Wrap(err, "run: get SFTP sessiion")
return nil, errors.Wrap(err, "run: get SFTP session")
}
defer func() {
_ = session.Close()
@@ -908,6 +1033,7 @@ func (f *Fs) run(cmd string) ([]byte, error) {
// Hashes returns the supported hash types of the filesystem
func (f *Fs) Hashes() hash.Set {
ctx := context.TODO()
if f.opt.DisableHashCheck {
return hash.Set(hash.None)
}
@@ -926,7 +1052,7 @@ func (f *Fs) Hashes() hash.Set {
}
*changed = true
for _, command := range commands {
output, err := f.run(command)
output, err := f.run(ctx, command)
if err != nil {
continue
}
@@ -971,7 +1097,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
if len(escapedPath) == 0 {
escapedPath = "/"
}
stdout, err := f.run("df -k " + escapedPath)
stdout, err := f.run(ctx, "df -k "+escapedPath)
if err != nil {
return nil, errors.Wrap(err, "your remote may not support About")
}
@@ -990,6 +1116,12 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return usage, nil
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
return f.drainPool(ctx)
}
// Fs is the filesystem this remote sftp file object is located within
func (o *Object) Fs() fs.Info {
return o.fs
@@ -1034,7 +1166,7 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
c, err := o.fs.getSftpConnection()
c, err := o.fs.getSftpConnection(ctx)
if err != nil {
return "", errors.Wrap(err, "Hash get SFTP connection")
}
@@ -1087,7 +1219,7 @@ func shellEscape(str string) string {
func parseHash(bytes []byte) string {
// For strings with backslash *sum writes a leading \
// https://unix.stackexchange.com/q/313733/94054
return strings.Split(strings.TrimLeft(string(bytes), "\\"), " ")[0] // Split at hash / filename separator
return strings.ToLower(strings.Split(strings.TrimLeft(string(bytes), "\\"), " ")[0]) // Split at hash / filename separator / all convert to lowercase
}
// Parses the byte array output from the SSH session
@@ -1142,8 +1274,8 @@ func (o *Object) setMetadata(info os.FileInfo) {
}
// statRemote stats the file or directory at the remote given
func (f *Fs) stat(remote string) (info os.FileInfo, err error) {
c, err := f.getSftpConnection()
func (f *Fs) stat(ctx context.Context, remote string) (info os.FileInfo, err error) {
c, err := f.getSftpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "stat")
}
@@ -1154,8 +1286,8 @@ func (f *Fs) stat(remote string) (info os.FileInfo, err error) {
}
// stat updates the info in the Object
func (o *Object) stat() error {
info, err := o.fs.stat(o.remote)
func (o *Object) stat(ctx context.Context) error {
info, err := o.fs.stat(ctx, o.remote)
if err != nil {
if os.IsNotExist(err) {
return fs.ErrorObjectNotFound
@@ -1174,7 +1306,7 @@ func (o *Object) stat() error {
// it also updates the info field
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if o.fs.opt.SetModTime {
c, err := o.fs.getSftpConnection()
c, err := o.fs.getSftpConnection(ctx)
if err != nil {
return errors.Wrap(err, "SetModTime")
}
@@ -1184,14 +1316,14 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return errors.Wrap(err, "SetModTime failed")
}
}
err := o.stat()
err := o.stat(ctx)
if err != nil {
return errors.Wrap(err, "SetModTime stat failed")
}
return nil
}
// Storable returns whether the remote sftp file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc)
// Storable returns whether the remote sftp file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
func (o *Object) Storable() bool {
return o.mode.IsRegular()
}
@@ -1257,7 +1389,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
}
}
c, err := o.fs.getSftpConnection()
c, err := o.fs.getSftpConnection(ctx)
if err != nil {
return nil, errors.Wrap(err, "Open")
}
@@ -1281,7 +1413,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Clear the hash cache since we are about to update the object
o.md5sum = nil
o.sha1sum = nil
c, err := o.fs.getSftpConnection()
c, err := o.fs.getSftpConnection(ctx)
if err != nil {
return errors.Wrap(err, "Update")
}
@@ -1292,7 +1424,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// remove the file if upload failed
remove := func() {
c, removeErr := o.fs.getSftpConnection()
c, removeErr := o.fs.getSftpConnection(ctx)
if removeErr != nil {
fs.Debugf(src, "Failed to open new SSH connection for delete: %v", removeErr)
return
@@ -1324,7 +1456,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Remove a remote sftp file object
func (o *Object) Remove(ctx context.Context) error {
c, err := o.fs.getSftpConnection()
c, err := o.fs.getSftpConnection(ctx)
if err != nil {
return errors.Wrap(err, "Remove")
}
@@ -1340,5 +1472,6 @@ var (
_ fs.Mover = &Fs{}
_ fs.DirMover = &Fs{}
_ fs.Abouter = &Fs{}
_ fs.Shutdowner = &Fs{}
_ fs.Object = &Object{}
)


@@ -95,7 +95,7 @@ type UploadSpecification struct {
ChunkURI string `json:"ChunkUri"` // Specifies the URI the client must send the file data to
FinishURI string `json:"FinishUri"` // If provided, specifies the final call the client must perform to finish the upload process
ProgressData string `json:"ProgressData"` // Allows the client to check progress of standard uploads
IsResume bool `json:"IsResume"` // Specifies a Resumable upload is supproted.
IsResume bool `json:"IsResume"` // Specifies a Resumable upload is supported.
ResumeIndex int64 `json:"ResumeIndex"` // Specifies the initial index for resuming, if IsResume is true.
ResumeOffset int64 `json:"ResumeOffset"` // Specifies the initial file offset by bytes, if IsResume is true
ResumeFileHash string `json:"ResumeFileHash"` // Specifies the MD5 hash of the first ResumeOffset bytes of the partial file found at the server


@@ -136,7 +136,7 @@ func init() {
Name: "sharefile",
Description: "Citrix Sharefile",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
Config: func(ctx context.Context, name string, m configmap.Mapper) {
oauthConfig := newOauthConfig("")
checkAuth := func(oauthConfig *oauth2.Config, auth *oauthutil.AuthResult) error {
if auth == nil || auth.Form == nil {
@@ -155,7 +155,7 @@ func init() {
opt := oauthutil.Options{
CheckAuth: checkAuth,
}
err := oauthutil.Config("sharefile", name, m, oauthConfig, &opt)
err := oauthutil.Config(ctx, "sharefile", name, m, oauthConfig, &opt)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
@@ -237,6 +237,7 @@ type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
ci *fs.ConfigInfo // global config
features *fs.Features // optional features
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
@@ -410,8 +411,7 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -437,23 +437,25 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
oauthConfig := newOauthConfig(opt.Endpoint + tokenPath)
var client *http.Client
var ts *oauthutil.TokenSource
client, ts, err = oauthutil.NewClient(name, m, oauthConfig)
client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure sharefile")
}
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
ci: ci,
srv: rest.NewClient(client).SetRoot(opt.Endpoint + apiPath),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CaseInsensitive: true,
CanHaveEmptyDirectories: true,
ReadMimeType: false,
}).Fill(f)
}).Fill(ctx, f)
f.srv.SetErrorHandler(errorHandler)
f.fillBufferTokens()
@@ -518,7 +520,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
return nil, err
}
f.features.Fill(&tempF)
f.features.Fill(ctx, &tempF)
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182
@@ -532,8 +534,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Fill up (or reset) the buffer tokens
func (f *Fs) fillBufferTokens() {
f.bufferTokens = make(chan []byte, fs.Config.Transfers)
for i := 0; i < fs.Config.Transfers; i++ {
f.bufferTokens = make(chan []byte, f.ci.Transfers)
for i := 0; i < f.ci.Transfers; i++ {
f.bufferTokens <- nil
}
}
@@ -964,7 +966,7 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
return item, nil
}
// Move src to this remote using server side move operations.
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -1006,7 +1008,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1034,7 +1036,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return nil
}
// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -1090,7 +1092,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
} else if err != nil {
return nil, errors.Wrap(err, "copy: failed to examine destination dir")
} else {
// otherwise need to copy via a temporary directlry
// otherwise need to copy via a temporary directory
}
}
@@ -1339,7 +1341,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Overwrite: true,
CreatedDate: modTime,
ModifiedDate: modTime,
Tool: fs.Config.UserAgent,
Tool: o.fs.ci.UserAgent,
}
if isLargeFile {
@@ -1349,7 +1351,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} else {
// otherwise use threaded which is more efficient
req.Method = "threaded"
req.ThreadCount = &fs.Config.Transfers
req.ThreadCount = &o.fs.ci.Transfers
req.Filesize = &size
}
}


@@ -32,7 +32,7 @@ type largeUpload struct {
wrap accounting.WrapFn // account parts being transferred
size int64 // total size
parts int64 // calculated number of parts, if known
info *api.UploadSpecification // where to post chunks etc
info *api.UploadSpecification // where to post chunks, etc.
threads int // number of threads to use in upload
streamed bool // set if using streamed upload
}
@@ -58,7 +58,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
return nil, errors.Errorf("can't use method %q with newLargeUpload", info.Method)
}
threads := fs.Config.Transfers
threads := f.ci.Transfers
if threads > info.MaxNumberOfThreads {
threads = info.MaxNumberOfThreads
}


@@ -76,7 +76,7 @@ func init() {
Name: "sugarsync",
Description: "Sugarsync",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
Config: func(ctx context.Context, name string, m configmap.Mapper) {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
@@ -85,7 +85,7 @@ func init() {
if opt.RefreshToken != "" {
fmt.Printf("Already have a token - refresh?\n")
if !config.ConfirmWithConfig(m, "config_refresh_token", true) {
if !config.ConfirmWithConfig(ctx, m, "config_refresh_token", true) {
return
}
}
@@ -106,7 +106,7 @@ func init() {
Method: "POST",
Path: "/app-authorization",
}
srv := rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(rootURL) // FIXME
srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(rootURL) // FIXME
// FIXME
//err = f.pacer.Call(func() (bool, error) {
@@ -264,7 +264,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Fi
}
found, err := f.listAll(ctx, directoryID, func(item *api.File) bool {
if item.Name == leaf {
if strings.EqualFold(item.Name, leaf) {
info = item
return true
}
@@ -350,7 +350,7 @@ func (f *Fs) getAuth(req *http.Request) (err error) {
// if have auth, check it is in date
if f.opt.Authorization == "" || f.opt.User == "" || f.authExpiry.IsZero() || time.Until(f.authExpiry) < expiryLeeway {
// Get the auth token
f.srv.SetSigner(nil) // temporariliy remove the signer so we don't infinitely recurse
f.srv.SetSigner(nil) // temporarily remove the signer so we don't infinitely recurse
err = f.getAuthToken(ctx)
f.srv.SetSigner(f.getAuth) // replace signer
if err != nil {
@@ -395,9 +395,7 @@ func parseExpiry(expiryString string) time.Time {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
@@ -405,20 +403,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
root = parsePath(root)
client := fshttp.NewClient(fs.Config)
client := fshttp.NewClient(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(client).SetRoot(rootURL),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
m: m,
authExpiry: parseExpiry(opt.AuthorizationExpiry),
}
f.features = (&fs.Features{
CaseInsensitive: true,
CanHaveEmptyDirectories: true,
}).Fill(f)
}).Fill(ctx, f)
f.srv.SetSigner(f.getAuth) // use signing hook to get the auth
f.srv.SetErrorHandler(errorHandler)
@@ -533,7 +531,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
//fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
// Find the leaf in pathID
found, err = f.listAll(ctx, pathID, nil, func(item *api.Collection) bool {
if item.Name == leaf {
if strings.EqualFold(item.Name, leaf) {
pathIDOut = item.Ref
return true
}
@@ -576,7 +574,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
}
newID = resp.Header.Get("Location")
if newID == "" {
// look up ID if not returned (eg for syncFolder)
// look up ID if not returned (e.g. for syncFolder)
var found bool
newID, found, err = f.FindLeaf(ctx, pathID, leaf)
if err != nil {
@@ -837,7 +835,7 @@ func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -923,7 +921,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
}
// moveFile moves a file server side
// moveFile moves a file server-side
func (f *Fs) moveFile(ctx context.Context, id, leaf, directoryID string) (info *api.File, err error) {
opts := rest.Opts{
Method: "PUT",
@@ -951,7 +949,7 @@ func (f *Fs) moveFile(ctx context.Context, id, leaf, directoryID string) (info *
return info, nil
}
// moveDir moves a folder server side
// moveDir moves a folder server-side
func (f *Fs) moveDir(ctx context.Context, id, leaf, directoryID string) (err error) {
// Move the object
opts := rest.Opts{
@@ -970,7 +968,7 @@ func (f *Fs) moveDir(ctx context.Context, id, leaf, directoryID string) (err err
})
}
// Move src to this remote using server side move operations.
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
@@ -1006,7 +1004,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//


@@ -1,10 +1,11 @@
package swift
import (
"context"
"net/http"
"time"
"github.com/ncw/swift"
"github.com/ncw/swift/v2"
)
// auth is an authenticator for swift. It overrides the StorageUrl
@@ -28,19 +29,19 @@ func newAuth(parentAuth swift.Authenticator, storageURL string, authToken string
}
// Request creates an http.Request for the auth - return nil if not needed
func (a *auth) Request(c *swift.Connection) (*http.Request, error) {
func (a *auth) Request(ctx context.Context, c *swift.Connection) (*http.Request, error) {
if a.parentAuth == nil {
return nil, nil
}
return a.parentAuth.Request(c)
return a.parentAuth.Request(ctx, c)
}
// Response parses the http.Response
func (a *auth) Response(resp *http.Response) error {
func (a *auth) Response(ctx context.Context, resp *http.Response) error {
if a.parentAuth == nil {
return nil
}
return a.parentAuth.Response(resp)
return a.parentAuth.Response(ctx, resp)
}
// The public storage URL - set Internal to true to read


@@ -13,7 +13,7 @@ import (
"strings"
"time"
"github.com/ncw/swift"
"github.com/ncw/swift/v2"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -24,6 +24,7 @@ import (
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
@@ -51,7 +52,7 @@ default for this is 5GB which is its maximum value.`,
Name: "no_chunk",
Help: `Don't chunk files during streaming upload.
When doing streaming uploads (eg using rcat or mount) setting this
When doing streaming uploads (e.g. using rcat or mount) setting this
flag will cause the swift backend to not upload chunked files.
This will limit the maximum upload size to 5GB. However non chunked
@@ -167,6 +168,11 @@ func init() {
Help: "Admin",
Value: "admin",
}},
}, {
Name: "leave_parts_on_error",
Help: `If true avoid calling abort upload on a failure. It should be set to true for resuming uploads across different sessions.`,
Default: false,
Advanced: true,
}, {
Name: "storage_policy",
Help: `The storage policy to use when creating a new container
@@ -208,6 +214,7 @@ type Options struct {
ApplicationCredentialID string `config:"application_credential_id"`
ApplicationCredentialName string `config:"application_credential_name"`
ApplicationCredentialSecret string `config:"application_credential_secret"`
LeavePartsOnError bool `config:"leave_parts_on_error"`
StoragePolicy string `config:"storage_policy"`
EndpointType string `config:"endpoint_type"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
@@ -221,6 +228,7 @@ type Fs struct {
root string // the path we are working on if any
features *fs.Features // optional features
opt Options // options for this backend
ci *fs.ConfigInfo // global config
c *swift.Connection // the connection to the swift server
rootContainer string // container part of root (if any)
rootDirectory string // directory part of root (if any)
@@ -272,7 +280,7 @@ func (f *Fs) Features() *fs.Features {
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
401, // Unauthorized (eg "Token has expired")
401, // Unauthorized (e.g. "Token has expired")
408, // Request Timeout
409, // Conflict - various states that could be resolved on a retry
429, // Rate exceeded.
@@ -340,7 +348,8 @@ func (o *Object) split() (container, containerPath string) {
}
// swiftConnection makes a connection to swift
func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
func swiftConnection(ctx context.Context, opt *Options, name string) (*swift.Connection, error) {
ci := fs.GetConfig(ctx)
c := &swift.Connection{
// Keep these in the same order as the Config for ease of checking
UserName: opt.User,
@@ -359,9 +368,9 @@ func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
ApplicationCredentialName: opt.ApplicationCredentialName,
ApplicationCredentialSecret: opt.ApplicationCredentialSecret,
EndpointType: swift.EndpointType(opt.EndpointType),
ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport
Transport: fshttp.NewTransport(fs.Config),
ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * ci.Timeout, // Use the timeouts in the transport
Transport: fshttp.NewTransport(ctx),
}
if opt.EnvAuth {
err := c.ApplyEnvironment()
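
This hunk shows the other recurring pattern in the commit: the global fs.Config is replaced by a per-context config read with fs.GetConfig(ctx), and the HTTP transport is likewise built from the context. Condensed into a sketch (not literal commit code):

    func newSwiftConnection(ctx context.Context, opt *Options) *swift.Connection {
        ci := fs.GetConfig(ctx) // per-context *fs.ConfigInfo instead of the old fs.Config global
        return &swift.Connection{
            UserName:       opt.User,
            ConnectTimeout: 10 * ci.ConnectTimeout, // the real timeouts live in the transport
            Timeout:        10 * ci.Timeout,
            Transport:      fshttp.NewTransport(ctx),
        }
    }
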
@@ -382,7 +391,7 @@ func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
if c.AuthUrl == "" {
return nil, errors.New("auth not found")
}
err := c.Authenticate() // fills in c.StorageUrl and c.AuthToken
err := c.Authenticate(ctx) // fills in c.StorageUrl and c.AuthToken
if err != nil {
return nil, err
}
@@ -432,13 +441,15 @@ func (f *Fs) setRoot(root string) {
//
// if noCheckContainer is set then the Fs won't check the container
// exists before creating it.
func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, noCheckContainer bool) (fs.Fs, error) {
func NewFsWithConnection(ctx context.Context, opt *Options, name, root string, c *swift.Connection, noCheckContainer bool) (fs.Fs, error) {
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
opt: *opt,
ci: ci,
c: c,
noCheckContainer: noCheckContainer,
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
pacer: fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(),
}
f.setRoot(root)
@@ -448,7 +459,7 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
BucketBased: true,
BucketBasedRootOK: true,
SlowModTime: true,
}).Fill(f)
}).Fill(ctx, f)
if f.rootContainer != "" && f.rootDirectory != "" {
// Check to see if the object exists - ignoring directory markers
var info swift.Object
@@ -456,7 +467,7 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
info, rxHeaders, err = f.c.Object(f.rootContainer, encodedDirectory)
info, rxHeaders, err = f.c.Object(ctx, f.rootContainer, encodedDirectory)
return shouldRetryHeaders(rxHeaders, err)
})
if err == nil && info.ContentType != directoryMarkerContentType {
@@ -473,7 +484,7 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -485,17 +496,17 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.Wrap(err, "swift: chunk size")
}
c, err := swiftConnection(opt, name)
c, err := swiftConnection(ctx, opt, name)
if err != nil {
return nil, err
}
return NewFsWithConnection(opt, name, root, c, false)
return NewFsWithConnection(ctx, opt, name, root, c, false)
}
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *swift.Object) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -505,7 +516,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, er
// making sure we read the full metadata for all 0 byte files.
// We don't read the metadata for directory marker objects.
if info != nil && info.Bytes == 0 && info.ContentType != "application/directory" {
err := o.readMetaData() // reads info and headers, returning an error
err := o.readMetaData(ctx) // reads info and headers, returning an error
if err == fs.ErrorObjectNotFound {
// We have a dangling large object here so just return the original metadata
fs.Errorf(o, "dangling large object with no contents")
@@ -522,7 +533,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, er
return nil, err
}
} else {
err := o.readMetaData() // reads info and headers, returning an error
err := o.readMetaData(ctx) // reads info and headers, returning an error
if err != nil {
return nil, err
}
@@ -533,7 +544,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, er
// NewObject finds the Object at remote. If it can't be found it
// returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
return f.newObjectWithInfo(ctx, remote, nil)
}
// listFn is called from list and listContainerRoot to handle an object.
@@ -545,7 +556,7 @@ type listFn func(remote string, object *swift.Object, isDirectory bool) error
// container to the start.
//
// Set recurse to read sub directories
func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer bool, recurse bool, includeDirMarkers bool, fn listFn) error {
func (f *Fs) listContainerRoot(ctx context.Context, container, directory, prefix string, addContainer bool, recurse bool, includeDirMarkers bool, fn listFn) error {
if prefix != "" && !strings.HasSuffix(prefix, "/") {
prefix += "/"
}
@@ -560,11 +571,11 @@ func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer
if !recurse {
opts.Delimiter = '/'
}
return f.c.ObjectsWalk(container, &opts, func(opts *swift.ObjectsOpts) (interface{}, error) {
return f.c.ObjectsWalk(ctx, container, &opts, func(ctx context.Context, opts *swift.ObjectsOpts) (interface{}, error) {
var objects []swift.Object
var err error
err = f.pacer.Call(func() (bool, error) {
objects, err = f.c.Objects(container, opts)
objects, err = f.c.Objects(ctx, container, opts)
return shouldRetry(err)
})
if err == nil {
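
In v2 the ObjectsWalk callback also receives the context, so each page fetched during the walk honours cancellation. A minimal sketch of the walk pattern used here (fmt printing stands in for the real entry handling; assumes an authenticated *swift.Connection):

    func listPrefix(ctx context.Context, c *swift.Connection, container string) error {
        opts := swift.ObjectsOpts{Prefix: "dir/", Delimiter: '/'}
        return c.ObjectsWalk(ctx, container, &opts,
            func(ctx context.Context, opts *swift.ObjectsOpts) (interface{}, error) {
                objects, err := c.Objects(ctx, container, opts) // fetch one page
                for _, object := range objects {
                    fmt.Println(object.Name)
                }
                return objects, err // returning the page lets the walk advance or stop on error
            })
    }
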
@@ -602,8 +613,8 @@ func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer
type addEntryFn func(fs.DirEntry) error
// list the objects into the function supplied
func (f *Fs) list(container, directory, prefix string, addContainer bool, recurse bool, includeDirMarkers bool, fn addEntryFn) error {
err := f.listContainerRoot(container, directory, prefix, addContainer, recurse, includeDirMarkers, func(remote string, object *swift.Object, isDirectory bool) (err error) {
func (f *Fs) list(ctx context.Context, container, directory, prefix string, addContainer bool, recurse bool, includeDirMarkers bool, fn addEntryFn) error {
err := f.listContainerRoot(ctx, container, directory, prefix, addContainer, recurse, includeDirMarkers, func(remote string, object *swift.Object, isDirectory bool) (err error) {
if isDirectory {
remote = strings.TrimRight(remote, "/")
d := fs.NewDir(remote, time.Time{}).SetSize(object.Bytes)
@@ -611,7 +622,7 @@ func (f *Fs) list(container, directory, prefix string, addContainer bool, recurs
} else {
// newObjectWithInfo does a full metadata read on 0 size objects which might be dynamic large objects
var o fs.Object
o, err = f.newObjectWithInfo(remote, object)
o, err = f.newObjectWithInfo(ctx, remote, object)
if err != nil {
return err
}
@@ -628,12 +639,12 @@ func (f *Fs) list(container, directory, prefix string, addContainer bool, recurs
}
// listDir lists a single directory
func (f *Fs) listDir(container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
if container == "" {
return nil, fs.ErrorListBucketRequired
}
// List the objects
err = f.list(container, directory, prefix, addContainer, false, false, func(entry fs.DirEntry) error {
err = f.list(ctx, container, directory, prefix, addContainer, false, false, func(entry fs.DirEntry) error {
entries = append(entries, entry)
return nil
})
@@ -649,7 +660,7 @@ func (f *Fs) listDir(container, directory, prefix string, addContainer bool) (en
func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err error) {
var containers []swift.Container
err = f.pacer.Call(func() (bool, error) {
containers, err = f.c.ContainersAll(nil)
containers, err = f.c.ContainersAll(ctx, nil)
return shouldRetry(err)
})
if err != nil {
@@ -680,7 +691,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
return f.listContainers(ctx)
}
return f.listDir(container, directory, f.rootDirectory, f.rootContainer == "")
return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
}
// ListR lists the objects and directories of the Fs starting
@@ -703,7 +714,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
container, directory := f.split(dir)
list := walk.NewListRHelper(callback)
listR := func(container, directory, prefix string, addContainer bool) error {
return f.list(container, directory, prefix, addContainer, true, false, func(entry fs.DirEntry) error {
return f.list(ctx, container, directory, prefix, addContainer, true, false, func(entry fs.DirEntry) error {
return list.Add(entry)
})
}
@@ -741,7 +752,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var containers []swift.Container
var err error
err = f.pacer.Call(func() (bool, error) {
containers, err = f.c.ContainersAll(nil)
containers, err = f.c.ContainersAll(ctx, nil)
return shouldRetry(err)
})
if err != nil {
@@ -793,7 +804,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
if !f.noCheckContainer {
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
_, rxHeaders, err = f.c.Container(container)
_, rxHeaders, err = f.c.Container(ctx, container)
return shouldRetryHeaders(rxHeaders, err)
})
}
@@ -803,7 +814,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
headers["X-Storage-Policy"] = f.opt.StoragePolicy
}
err = f.pacer.Call(func() (bool, error) {
err = f.c.ContainerCreate(container, headers)
err = f.c.ContainerCreate(ctx, container, headers)
return shouldRetry(err)
})
if err == nil {
@@ -824,7 +835,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
}
err := f.cache.Remove(container, func() error {
err := f.pacer.Call(func() (bool, error) {
err := f.c.ContainerDelete(container)
err := f.c.ContainerDelete(ctx, container)
return shouldRetry(err)
})
if err == nil {
@@ -849,12 +860,12 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return fs.ErrorListBucketRequired
}
// Delete all the files including the directory markers
toBeDeleted := make(chan fs.Object, fs.Config.Transfers)
toBeDeleted := make(chan fs.Object, f.ci.Transfers)
delErr := make(chan error, 1)
go func() {
delErr <- operations.DeleteFiles(ctx, toBeDeleted)
}()
err := f.list(container, directory, f.rootDirectory, false, true, true, func(entry fs.DirEntry) error {
err := f.list(ctx, container, directory, f.rootDirectory, false, true, true, func(entry fs.DirEntry) error {
if o, ok := entry.(*Object); ok {
toBeDeleted <- o
}
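
The Purge hunk above only shows the producer side of the delete pipeline. Filling in the rest as a sketch (the close and error collection here are assumed, and listAll stands in for f.list): objects are fed into a channel buffered to --transfers while operations.DeleteFiles drains it concurrently.

    func purgeSketch(ctx context.Context, f *Fs, listAll func(context.Context, func(fs.Object)) error) error {
        toBeDeleted := make(chan fs.Object, f.ci.Transfers) // buffered to --transfers
        delErr := make(chan error, 1)
        go func() {
            // DeleteFiles deletes with the configured concurrency until the channel closes
            delErr <- operations.DeleteFiles(ctx, toBeDeleted)
        }()
        listErr := listAll(ctx, func(o fs.Object) { toBeDeleted <- o })
        close(toBeDeleted) // no more work: lets DeleteFiles return
        if err := <-delErr; err != nil {
            return err
        }
        return listErr
    }
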
@@ -871,7 +882,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.Rmdir(ctx, dir)
}
// Copy src to this remote using server side copy operations.
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
@@ -894,7 +905,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
srcContainer, srcPath := srcObj.split()
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
rxHeaders, err = f.c.ObjectCopy(srcContainer, srcPath, dstContainer, dstPath, nil)
rxHeaders, err = f.c.ObjectCopy(ctx, srcContainer, srcPath, dstContainer, dstPath, nil)
return shouldRetryHeaders(rxHeaders, err)
})
if err != nil {
@@ -933,11 +944,11 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
isDynamicLargeObject, err := o.isDynamicLargeObject()
isDynamicLargeObject, err := o.isDynamicLargeObject(ctx)
if err != nil {
return "", err
}
isStaticLargeObject, err := o.isStaticLargeObject()
isStaticLargeObject, err := o.isStaticLargeObject(ctx)
if err != nil {
return "", err
}
@@ -950,8 +961,8 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
// hasHeader checks for the header passed in returning false if the
// object isn't found.
func (o *Object) hasHeader(header string) (bool, error) {
err := o.readMetaData()
func (o *Object) hasHeader(ctx context.Context, header string) (bool, error) {
err := o.readMetaData(ctx)
if err != nil {
if err == fs.ErrorObjectNotFound {
return false, nil
@@ -963,17 +974,29 @@ func (o *Object) hasHeader(header string) (bool, error) {
}
// isDynamicLargeObject checks for X-Object-Manifest header
func (o *Object) isDynamicLargeObject() (bool, error) {
return o.hasHeader("X-Object-Manifest")
func (o *Object) isDynamicLargeObject(ctx context.Context) (bool, error) {
return o.hasHeader(ctx, "X-Object-Manifest")
}
// isStaticLargeObjectFile checks for the X-Static-Large-Object header
func (o *Object) isStaticLargeObject() (bool, error) {
return o.hasHeader("X-Static-Large-Object")
func (o *Object) isStaticLargeObject(ctx context.Context) (bool, error) {
return o.hasHeader(ctx, "X-Static-Large-Object")
}
func (o *Object) isInContainerVersioning(container string) (bool, error) {
_, headers, err := o.fs.c.Container(container)
func (o *Object) isLargeObject(ctx context.Context) (result bool, err error) {
result, err = o.hasHeader(ctx, "X-Static-Large-Object")
if result {
return
}
result, err = o.hasHeader(ctx, "X-Object-Manifest")
if result {
return
}
return false, nil
}
func (o *Object) isInContainerVersioning(ctx context.Context, container string) (bool, error) {
_, headers, err := o.fs.c.Container(ctx, container)
if err != nil {
return false, err
}
@@ -1009,7 +1032,7 @@ func (o *Object) decodeMetaData(info *swift.Object) (err error) {
// it also sets the info
//
// it returns fs.ErrorObjectNotFound if the object isn't found
func (o *Object) readMetaData() (err error) {
func (o *Object) readMetaData(ctx context.Context) (err error) {
if o.headers != nil {
return nil
}
@@ -1017,7 +1040,7 @@ func (o *Object) readMetaData() (err error) {
var h swift.Headers
container, containerPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
info, h, err = o.fs.c.Object(container, containerPath)
info, h, err = o.fs.c.Object(ctx, container, containerPath)
return shouldRetryHeaders(h, err)
})
if err != nil {
@@ -1040,10 +1063,10 @@ func (o *Object) readMetaData() (err error) {
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
if fs.Config.UseServerModTime {
if o.fs.ci.UseServerModTime {
return o.lastModified
}
err := o.readMetaData()
err := o.readMetaData(ctx)
if err != nil {
fs.Debugf(o, "Failed to read metadata: %s", err)
return o.lastModified
@@ -1058,7 +1081,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
err := o.readMetaData()
err := o.readMetaData(ctx)
if err != nil {
return err
}
@@ -1076,7 +1099,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
}
container, containerPath := o.split()
return o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ObjectUpdate(container, containerPath, newHeaders)
err = o.fs.c.ObjectUpdate(ctx, container, containerPath, newHeaders)
return shouldRetry(err)
})
}
@@ -1097,7 +1120,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
container, containerPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
in, rxHeaders, err = o.fs.c.ObjectOpen(container, containerPath, !isRanging, headers)
in, rxHeaders, err = o.fs.c.ObjectOpen(ctx, container, containerPath, !isRanging, headers)
return shouldRetryHeaders(rxHeaders, err)
})
return
@@ -1111,50 +1134,41 @@ func min(x, y int64) int64 {
return y
}
// removeSegments removes any old segments from o
//
// if except is passed in then segments with that prefix won't be deleted
func (o *Object) removeSegments(except string) error {
segmentsContainer, _, err := o.getSegmentsDlo()
func (o *Object) getSegmentsLargeObject(ctx context.Context) (map[string][]string, error) {
container, objectName := o.split()
segmentContainer, segmentObjects, err := o.fs.c.LargeObjectGetSegments(ctx, container, objectName)
if err != nil {
return err
fs.Debugf(o, "Failed to get list segments of object: %v", err)
return nil, err
}
except = path.Join(o.remote, except)
// fs.Debugf(o, "segmentsContainer %q prefix %q", segmentsContainer, prefix)
err = o.fs.listContainerRoot(segmentsContainer, o.remote, "", false, true, true, func(remote string, object *swift.Object, isDirectory bool) error {
if isDirectory {
return nil
var containerSegments = make(map[string][]string)
for _, segment := range segmentObjects {
if _, ok := containerSegments[segmentContainer]; !ok {
containerSegments[segmentContainer] = make([]string, 0, len(segmentObjects))
}
if except != "" && strings.HasPrefix(remote, except) {
// fs.Debugf(o, "Ignoring current segment file %q in container %q", remote, segmentsContainer)
return nil
}
fs.Debugf(o, "Removing segment file %q in container %q", remote, segmentsContainer)
var err error
return o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ObjectDelete(segmentsContainer, remote)
return shouldRetry(err)
})
})
if err != nil {
return err
segments, _ := containerSegments[segmentContainer]
segments = append(segments, segment.Name)
containerSegments[segmentContainer] = segments
}
// remove the segments container if empty, ignore errors
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ContainerDelete(segmentsContainer)
if err == swift.ContainerNotFound || err == swift.ContainerNotEmpty {
return false, err
return containerSegments, nil
}
func (o *Object) removeSegmentsLargeObject(ctx context.Context, containerSegments map[string][]string) error {
if containerSegments == nil || len(containerSegments) <= 0 {
return nil
}
for container, segments := range containerSegments {
_, err := o.fs.c.BulkDelete(ctx, container, segments)
if err != nil {
fs.Debugf(o, "Failed to delete bulk segments %v", err)
return err
}
return shouldRetry(err)
})
if err == nil {
fs.Debugf(o, "Removed empty container %q", segmentsContainer)
}
return nil
}
func (o *Object) getSegmentsDlo() (segmentsContainer string, prefix string, err error) {
if err = o.readMetaData(); err != nil {
func (o *Object) getSegmentsDlo(ctx context.Context) (segmentsContainer string, prefix string, err error) {
if err = o.readMetaData(ctx); err != nil {
return
}
dirManifest := o.headers["X-Object-Manifest"]
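
The removeSegments/getSegmentsLargeObject hunk above replaces the old manual listing of the "_segments" container with the library's own view of a large object's segments, and deletion now goes through BulkDelete. The flow, condensed into a sketch with error handling trimmed:

    func removeSegmentsSketch(ctx context.Context, c *swift.Connection, container, objectName string) error {
        // Ask the library which container holds the segments and what they are called
        segContainer, segObjects, err := c.LargeObjectGetSegments(ctx, container, objectName)
        if err != nil {
            return err
        }
        names := make([]string, 0, len(segObjects))
        for _, seg := range segObjects {
            names = append(names, seg.Name)
        }
        // Delete all segments in one bulk request rather than one call per segment
        _, err = c.BulkDelete(ctx, segContainer, names)
        return err
    }
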
@@ -1178,7 +1192,7 @@ func urlEncode(str string) string {
var buf bytes.Buffer
for i := 0; i < len(str); i++ {
c := str[i]
if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '/' || c == '.' {
if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '/' || c == '.' || c == '_' || c == '-' {
_ = buf.WriteByte(c)
} else {
_, _ = buf.WriteString(fmt.Sprintf("%%%02X", c))
@@ -1189,14 +1203,14 @@ func urlEncode(str string) string {
// updateChunks updates the existing object using chunks to a separate
// container. It returns a string which prefixes current segments.
func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64, contentType string) (string, error) {
func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.Headers, size int64, contentType string) (string, error) {
container, containerPath := o.split()
segmentsContainer := container + "_segments"
// Create the segmentsContainer if it doesn't exist
var err error
err = o.fs.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
_, rxHeaders, err = o.fs.c.Container(segmentsContainer)
_, rxHeaders, err = o.fs.c.Container(ctx, segmentsContainer)
return shouldRetryHeaders(rxHeaders, err)
})
if err == swift.ContainerNotFound {
@@ -1205,7 +1219,7 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
headers["X-Storage-Policy"] = o.fs.opt.StoragePolicy
}
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ContainerCreate(segmentsContainer, headers)
err = o.fs.c.ContainerCreate(ctx, segmentsContainer, headers)
return shouldRetry(err)
})
}
@@ -1218,10 +1232,20 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
uniquePrefix := fmt.Sprintf("%s/%d", swift.TimeToFloatString(time.Now()), size)
segmentsPath := path.Join(containerPath, uniquePrefix)
in := bufio.NewReader(in0)
segmentInfos := make([]string, 0, ((size / int64(o.fs.opt.ChunkSize)) + 1))
segmentInfos := make([]string, 0, (size/int64(o.fs.opt.ChunkSize))+1)
defer atexit.OnError(&err, func() {
if o.fs.opt.LeavePartsOnError {
return
}
fs.Debugf(o, "Delete segments when err raise %v", err)
if segmentInfos == nil || len(segmentInfos) == 0 {
return
}
deleteChunks(ctx, o, segmentsContainer, segmentInfos)
})()
for {
// can we read at least one byte?
if _, err := in.Peek(1); err != nil {
if _, err = in.Peek(1); err != nil {
if left > 0 {
return "", err // read less than expected
}
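
The defer atexit.OnError(...)() added above is the cleanup hook for chunked uploads: the callback runs if the surrounding function ends with a non-nil err (and, being registered with the atexit machinery, it is also intended to fire if rclone is interrupted), unless leave_parts_on_error asks for the segments to be kept. The shape of the pattern, isolated into a sketch (uploadWithCleanup is hypothetical; deleteChunks is the helper from this diff):

    func uploadWithCleanup(ctx context.Context, o *Object, segmentsContainer string) (err error) {
        var segmentInfos []string
        defer atexit.OnError(&err, func() {
            if o.fs.opt.LeavePartsOnError {
                return // the user asked to keep partial uploads for a later resume
            }
            deleteChunks(ctx, o, segmentsContainer, segmentInfos)
        })()
        // ... the upload loop appends each successfully uploaded segment path to
        // segmentInfos, so the callback knows exactly what to remove on failure ...
        return err
    }
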
@@ -1239,15 +1263,13 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
fs.Debugf(o, "Uploading segment file %q into %q", segmentPath, segmentsContainer)
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
var rxHeaders swift.Headers
rxHeaders, err = o.fs.c.ObjectPut(segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
rxHeaders, err = o.fs.c.ObjectPut(ctx, segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
if err == nil {
segmentInfos = append(segmentInfos, segmentPath)
}
return shouldRetryHeaders(rxHeaders, err)
})
if err != nil {
deleteChunks(o, segmentsContainer, segmentInfos)
segmentInfos = nil
return "", err
}
i++
@@ -1258,24 +1280,26 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
emptyReader := bytes.NewReader(nil)
err = o.fs.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
rxHeaders, err = o.fs.c.ObjectPut(container, containerPath, emptyReader, true, "", contentType, headers)
rxHeaders, err = o.fs.c.ObjectPut(ctx, container, containerPath, emptyReader, true, "", contentType, headers)
return shouldRetryHeaders(rxHeaders, err)
})
if err != nil {
deleteChunks(o, segmentsContainer, segmentInfos)
if err == nil {
//reset data
segmentInfos = nil
}
return uniquePrefix + "/", err
}
func deleteChunks(o *Object, segmentsContainer string, segmentInfos []string) {
if segmentInfos != nil && len(segmentInfos) > 0 {
for _, v := range segmentInfos {
fs.Debugf(o, "Delete segment file %q on %q", v, segmentsContainer)
e := o.fs.c.ObjectDelete(segmentsContainer, v)
if e != nil {
fs.Errorf(o, "Error occurred in delete segment file %q on %q, error: %q", v, segmentsContainer, e)
}
func deleteChunks(ctx context.Context, o *Object, segmentsContainer string, segmentInfos []string) {
if segmentInfos == nil || len(segmentInfos) == 0 {
return
}
for _, v := range segmentInfos {
fs.Debugf(o, "Delete segment file %q on %q", v, segmentsContainer)
e := o.fs.c.ObjectDelete(ctx, segmentsContainer, v)
if e != nil {
fs.Errorf(o, "Error occurred in delete segment file %q on %q, error: %q", v, segmentsContainer, e)
}
}
}
@@ -1296,20 +1320,26 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
modTime := src.ModTime(ctx)
// Note whether this is a dynamic large object before starting
isDynamicLargeObject, err := o.isDynamicLargeObject()
isLargeObject, err := o.isLargeObject(ctx)
if err != nil {
return err
}
// Capture the segments before upload
var segmentsContainer map[string][]string
if isLargeObject {
segmentsContainer, _ = o.getSegmentsLargeObject(ctx)
}
// Set the mtime
m := swift.Metadata{}
m.SetModTime(modTime)
contentType := fs.MimeType(ctx, src)
headers := m.ObjectHeaders()
fs.OpenOptionAddHeaders(options, headers)
uniquePrefix := ""
if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) {
uniquePrefix, err = o.updateChunks(in, headers, size, contentType)
_, err = o.updateChunks(ctx, in, headers, size, contentType)
if err != nil {
return err
}
@@ -1325,7 +1355,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
var rxHeaders swift.Headers
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
rxHeaders, err = o.fs.c.ObjectPut(container, containerPath, in, true, "", contentType, headers)
rxHeaders, err = o.fs.c.ObjectPut(ctx, container, containerPath, in, true, "", contentType, headers)
return shouldRetryHeaders(rxHeaders, err)
})
if err != nil {
@@ -1343,47 +1373,59 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
o.size = int64(inCount.BytesRead())
}
}
// If file was a dynamic large object then remove old/all segments
if isDynamicLargeObject {
err = o.removeSegments(uniquePrefix)
isInContainerVersioning, _ := o.isInContainerVersioning(ctx, container)
// If the file was a large object and the container does not have versioning enabled then remove old/all segments
if isLargeObject && len(segmentsContainer) > 0 && !isInContainerVersioning {
err := o.removeSegmentsLargeObject(ctx, segmentsContainer)
if err != nil {
fs.Logf(o, "Failed to remove old segments - carrying on with upload: %v", err)
}
}
// Read the metadata from the newly created object if necessary
return o.readMetaData()
return o.readMetaData(ctx)
}
// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
container, containerPath := o.split()
// Check whether the object is a large object
isLargeObject, err := o.isLargeObject(ctx)
if err != nil {
return err
}
// Check whether the container has versioning enabled, in which case segments must be kept on delete
isInContainerVersioning := false
if isLargeObject {
isInContainerVersioning, err = o.isInContainerVersioning(ctx, container)
if err != nil {
return err
}
}
// Capture the segments before deleting if this is a large object
var containerSegments map[string][]string
if isLargeObject {
containerSegments, err = o.getSegmentsLargeObject(ctx)
if err != nil {
return err
}
}
// Remove file/manifest first
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ObjectDelete(container, containerPath)
err = o.fs.c.ObjectDelete(ctx, container, containerPath)
return shouldRetry(err)
})
if err != nil {
return err
}
isDynamicLargeObject, err := o.isDynamicLargeObject()
if err != nil {
return err
if !isLargeObject || isInContainerVersioning {
return nil
}
// ...then segments if required
if isDynamicLargeObject {
isInContainerVersioning, err := o.isInContainerVersioning(container)
if err != nil {
return err
}
if !isInContainerVersioning {
err = o.removeSegments("")
if err != nil {
return err
}
}
if isLargeObject {
return o.removeSegmentsLargeObject(ctx, containerSegments)
}
return nil
}

View File

@@ -4,7 +4,7 @@ import (
"testing"
"time"
"github.com/ncw/swift"
"github.com/ncw/swift/v2"
"github.com/rclone/rclone/fs/fserrors"
"github.com/stretchr/testify/assert"
)

View File

@@ -4,15 +4,19 @@ package swift
import (
"bytes"
"context"
"errors"
"io"
"io/ioutil"
"testing"
"github.com/ncw/swift/v2"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -74,6 +78,80 @@ func (f *Fs) testNoChunk(t *testing.T) {
// Additional tests that aren't in the framework
func (f *Fs) InternalTest(t *testing.T) {
t.Run("NoChunk", f.testNoChunk)
t.Run("WithChunk", f.testWithChunk)
t.Run("WithChunkFail", f.testWithChunkFail)
}
func (f *Fs) testWithChunk(t *testing.T) {
preConfChunkSize := f.opt.ChunkSize
preConfChunk := f.opt.NoChunk
f.opt.NoChunk = false
f.opt.ChunkSize = 1024 * fs.Byte
defer func() {
//restore old config after test
f.opt.ChunkSize = preConfChunkSize
f.opt.NoChunk = preConfChunk
}()
file := fstest.Item{
ModTime: fstest.Time("2020-12-31T04:05:06.499999999Z"),
Path: "piped data chunk.txt",
Size: -1, // use unknown size during upload
}
const contentSize = 2048
contents := random.String(contentSize)
buf := bytes.NewBufferString(contents)
uploadHash := hash.NewMultiHasher()
in := io.TeeReader(buf, uploadHash)
file.Size = -1
obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
ctx := context.TODO()
obj, err := f.Features().PutStream(ctx, in, obji)
require.NoError(t, err)
require.NotEmpty(t, obj)
}
func (f *Fs) testWithChunkFail(t *testing.T) {
preConfChunkSize := f.opt.ChunkSize
preConfChunk := f.opt.NoChunk
f.opt.NoChunk = false
f.opt.ChunkSize = 1024 * fs.Byte
segmentContainer := f.root + "_segments"
defer func() {
//restore config
f.opt.ChunkSize = preConfChunkSize
f.opt.NoChunk = preConfChunk
}()
path := "piped data chunk with error.txt"
file := fstest.Item{
ModTime: fstest.Time("2021-01-04T03:46:00.499999999Z"),
Path: path,
Size: -1, // use unknown size during upload
}
const contentSize = 4096
const errPosition = 3072
contents := random.String(contentSize)
buf := bytes.NewBufferString(contents[:errPosition])
errMessage := "potato"
er := &readers.ErrorReader{Err: errors.New(errMessage)}
in := ioutil.NopCloser(io.MultiReader(buf, er))
file.Size = contentSize
obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
ctx := context.TODO()
_, err := f.Features().PutStream(ctx, in, obji)
// error is potato
require.NotNil(t, err)
require.Equal(t, errMessage, err.Error())
_, _, err = f.c.Object(ctx, f.rootContainer, path)
assert.Equal(t, swift.ObjectNotFound, err)
prefix := path
objs, err := f.c.Objects(ctx, segmentContainer, &swift.ObjectsOpts{
Prefix: prefix,
})
require.NoError(t, err)
require.Empty(t, objs)
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -1,4 +1,4 @@
// +build go1.13,!plan9
// +build !plan9
// Package tardigrade provides an interface to Tardigrade decentralized object storage.
package tardigrade
@@ -42,7 +42,7 @@ func init() {
Name: "tardigrade",
Description: "Tardigrade Decentralized Cloud Storage",
NewFs: NewFs,
Config: func(name string, configMapper configmap.Mapper) {
Config: func(ctx context.Context, name string, configMapper configmap.Mapper) {
provider, _ := configMapper.Get(fs.ConfigProvider)
config.FileDeleteKey(name, fs.ConfigProvider)
@@ -67,12 +67,12 @@ func init() {
log.Fatalf("Couldn't create access grant: %v", err)
}
serialziedAccess, err := access.Serialize()
serializedAccess, err := access.Serialize()
if err != nil {
log.Fatalf("Couldn't serialize access grant: %v", err)
}
configMapper.Set("satellite_address", satellite)
configMapper.Set("access_grant", serialziedAccess)
configMapper.Set("access_grant", serializedAccess)
} else if provider == existingProvider {
config.FileDeleteKey(name, "satellite_address")
config.FileDeleteKey(name, "api_key")
@@ -165,9 +165,7 @@ var (
)
// NewFs creates a filesystem backed by Tardigrade.
func NewFs(name, root string, m configmap.Mapper) (_ fs.Fs, err error) {
ctx := context.Background()
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs, err error) {
// Setup filesystem and connection to Tardigrade
root = norm.NFC.String(root)
root = strings.Trim(root, "/")
@@ -219,7 +217,7 @@ func NewFs(name, root string, m configmap.Mapper) (_ fs.Fs, err error) {
f.features = (&fs.Features{
BucketBased: true,
BucketBasedRootOK: true,
}).Fill(f)
}).Fill(ctx, f)
project, err := f.connect(ctx)
if err != nil {

View File

@@ -1,4 +1,4 @@
// +build go1.13,!plan9
// +build !plan9
package tardigrade

Some files were not shown because too many files have changed in this diff.