mirror of https://github.com/rclone/rclone.git synced 2026-01-02 08:33:50 +00:00

Compare commits


376 Commits

Author SHA1 Message Date
Nick Craig-Wood
ad38432cc3 config: experiment with a text UI for config 2019-09-25 10:17:07 +01:00
Nick Craig-Wood
956fc5062b vendor: add github.com/manifoldco/promptui and dependencies 2019-09-25 10:12:15 +01:00
Nick Craig-Wood
28cc2009d4 Add Anthony Rusdi to contributors 2019-09-21 14:39:03 +01:00
Nick Craig-Wood
dd4fe9ff60 Add David to contributors 2019-09-21 14:39:03 +01:00
Anthony Rusdi
899f285319 s3: fix signature v2_auth headers
When used with v2_auth = true, PresignRequest doesn't return
signed headers, so remote destination authentication would fail.
This commit copies HTTPRequest.Header back into the headers.

Tested with RiakCS v2.1.0.

Signed-off-by: Anthony Rusdi <33247310+antrusd@users.noreply.github.com>
2019-09-21 14:38:51 +01:00
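The copy-back this commit describes might look like the following in aws-sdk-go. A minimal sketch with a placeholder bucket and key, not rclone's actual code:

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	req, _ := s3.New(sess).PutObjectRequest(&s3.PutObjectInput{
		Bucket: aws.String("bucket"), // placeholder
		Key:    aws.String("key"),    // placeholder
	})
	url, signedHeaders, err := req.PresignRequest(15 * time.Minute)
	if err != nil {
		panic(err)
	}
	// With v2 signing PresignRequest doesn't return signed headers, so
	// copy them back from the underlying HTTP request - the gist of the fix.
	if len(signedHeaders) == 0 {
		signedHeaders = http.Header{}
		for k, v := range req.HTTPRequest.Header {
			signedHeaders[k] = v
		}
	}
	fmt.Println(url, signedHeaders)
}
```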
David
4788545b05 box: add options to get access token via JWT auth 2019-09-20 17:15:16 +01:00
David
1934426789 jwtutil: functionality to get an access token via JWT authentication 2019-09-20 17:15:16 +01:00
David
643192b347 vendor: add pkcs8 helpers for decrypting encrypted private keys 2019-09-20 17:15:16 +01:00
Nick Craig-Wood
1031bcfc5a build: remove azure pipelines build 2019-09-20 16:08:18 +01:00
Nick Craig-Wood
ce00c0a0d9 build: build rclone with github actions 2019-09-20 16:08:18 +01:00
Nick Craig-Wood
1164eed2af lib/pacer: make tests more reliable 2019-09-20 16:07:55 +01:00
Nick Craig-Wood
557edecd40 log: add Stack() function for debugging who calls what 2019-09-20 11:53:08 +01:00
Nick Craig-Wood
b242b0a078 lib/cache,rc/jobs: make tests more reliable 2019-09-20 11:53:08 +01:00
Nick Craig-Wood
08b86cc94b mount: skip tests on <= 2 CPUs to avoid lockup in #3154 2019-09-20 11:53:08 +01:00
Nick Craig-Wood
56544bb2fd accounting: fix file handle leak on errors - fixes #3547
In 53a1a0e3ef we introduced a problem where if there was an
error on the file being transferred then the file was re-opened and
the old one wasn't closed.

This was partially fixed in bfbddab46b; however, that didn't
address closing the old file.

This is now fixed by
- marking the file as open again in UpdateReader
- moving the stopping of the accounting machinery into a new method, Done
2019-09-19 16:20:07 +01:00
Matei David
70e043e641 Fixed typo in Docker image doc. 2019-09-19 16:17:57 +01:00
Dan Walters
c49a71f438 dlna: move root descriptor xml template to the static assets
Reduce binary size.
2019-09-17 12:52:32 +01:00
Dan Walters
5f07bbf8ce dlna: fake out implementation of X_MS_MediaReceiverRegistrar
Using the same responses as minidlna.

Fixes #3502.
2019-09-17 12:52:02 +01:00
Dan Walters
2f10472df3 dlna: count the number of children in the response to BrowseMetadata 2019-09-17 12:28:20 +01:00
Matei David
ab89e93968 Add Matei David to contributors 2019-09-17 10:12:32 +01:00
Matei David
070a8bfcd8 Dockerfile fixes
- ref: https://forum.rclone.org/t/run-docker-container-in-userspace/11734/7
- enable userspace operation
- enable Docker userspace mount exposed to the host
- add more Docker image usage documentation
2019-09-17 10:12:32 +01:00
Ivan Andreev
8fe87c8157 mailru: skip extra http request if data fits in hash 2019-09-17 10:04:51 +01:00
Ivan Andreev
8fb44a822d mailru: fix rare nil pointer panic 2019-09-17 10:04:51 +01:00
Nick Craig-Wood
3cff258577 sftp: fix --sftp-ask-password trying to contact the ssh agent
See: https://forum.rclone.org/t/rclone-command-line/11766
2019-09-16 11:16:27 +01:00
Nick Craig-Wood
66347aff2a fstest: calculate hashes for uploaded test files to fix minio integration tests
Before this change we didn't calculate any hashes for test files
created in the Run framework.

This means that files were uploaded to S3 without a `Content-MD5`
header.  This in turn caused minio to disengage `--compat` mode which
in turn caused the `TestSyncAfterChangingModtimeOnlyWithNoUpdateModTime`
test to fail in `fs/sync`.

After this change we supply all hashes supported by the destination Fs
on the upload object.

This means that the `Content-MD5` is set and minio engages `--compat`
mode to fix the problem.  Using `--compat` on the command line also
fixes the problem.

This much better replicates how objects are actually uploaded with
operations.Copy so should improve the integration tests.
2019-09-16 10:59:01 +01:00
Nick Craig-Wood
b8b12a4000 Start v1.49.3-DEV development 2019-09-15 18:10:08 +01:00
Dan Walters
8c038326b9 dlna: correct output for ContentDirectoryService#Browse with BrowseMetadata
We were marshalling the "cds object" instead of the "upnp object".

Fixes #3253  (I think)
2019-09-15 16:30:39 +01:00
pataquets
fd4b25932c Contrib: Add sample WebDAV server Docker Compose manifest. 2019-09-15 16:06:54 +01:00
pataquets
4374fd1df1 Contrib: Add sample DLNA server Docker Compose manifest. 2019-09-15 16:06:54 +01:00
Nick Craig-Wood
b6065561cf test_all: add ignores for tests which will never pass
- s3 backends which don't support SetTier
- mega which makes a duplicate for TestDirRename
2019-09-15 13:16:15 +01:00
Nick Craig-Wood
ef7bfd3f03 fs: Make prefix free backend config read prefix free env var also
Before this change you could only configure the local backend flags
which don't have the local prefix (eg `--copy-links`) with
`RCLONE_LOCAL_COPY_LINKS`.

This change makes `RCLONE_COPY_LINKS` valid too, which is much more
logical for users.

Fixes #3534
2019-09-14 18:26:07 +01:00
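The lookup order this implies - try the backend-prefixed variable first, then fall back to the prefix-free one - can be sketched as below; envValue is a hypothetical helper, not rclone's code:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// envValue is a hypothetical helper: look up a backend option first under
// its prefixed name (eg RCLONE_LOCAL_COPY_LINKS), then under the
// prefix-free name (eg RCLONE_COPY_LINKS).
func envValue(backend, option string) (string, bool) {
	opt := strings.ToUpper(strings.ReplaceAll(option, "-", "_"))
	for _, key := range []string{
		"RCLONE_" + strings.ToUpper(backend) + "_" + opt,
		"RCLONE_" + opt,
	} {
		if v, ok := os.LookupEnv(key); ok {
			return v, true
		}
	}
	return "", false
}

func main() {
	v, ok := envValue("local", "copy-links")
	fmt.Println(v, ok)
}
```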
Nick Craig-Wood
ae2edc3b5b help: add short options to backend documentation also 2019-09-14 18:24:05 +01:00
Nick Craig-Wood
0baafb158f mount: allow files of unknown size to be read properly
Before this change, files of unknown size (eg Google Docs) would
appear in file listings with 0 size and would only allow 0 bytes to be
read.

This change sets the direct_io flag in the FUSE return which bypasses
the cache for these files.  This means that they can be read properly.

This is compatible with some, but not all applications.
2019-09-14 13:22:33 +01:00
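With bazil.org/fuse (which rclone mount uses on Unix) setting that flag looks roughly like this. A minimal sketch; File and Handle are hypothetical stand-ins for the VFS types:

```go
package mountsketch

import (
	"context"

	"bazil.org/fuse"
	fusefs "bazil.org/fuse/fs"
)

// File is a hypothetical node whose size may be unknown (-1).
type File struct {
	size int64
}

// Handle is a hypothetical open file handle.
type Handle struct {
	file *File
}

var _ fusefs.NodeOpener = (*File)(nil)

func (f *File) Attr(ctx context.Context, a *fuse.Attr) error {
	if f.size >= 0 {
		a.Size = uint64(f.size)
	}
	return nil
}

func (f *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fusefs.Handle, error) {
	if f.size < 0 {
		// Unknown size (eg a Google Doc): bypass the kernel page cache so
		// reads aren't clamped to the advertised zero size.
		resp.Flags |= fuse.OpenDirectIO
	}
	return &Handle{file: f}, nil
}
```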
Nick Craig-Wood
ba121eddf0 vfs: make objects of unknown size readable through the VFS
These objects (eg Google Docs) appear with 0 length in the VFS.

Before this change, these only read 0 bytes.

After this change, even though the size appears to be 0, the objects
can be read to the end.  If the objects are read to the end then the
size on the handle will be updated.
2019-09-14 13:09:07 +01:00
Nick Craig-Wood
2e80e035c9 fstest/mockobject: add UnknownSize() method to make Size() return -1 2019-09-14 13:07:01 +01:00
Nick Craig-Wood
ea9b6087cf fstest/mockfs: allow fs.Objects to be added to the root 2019-09-14 13:05:36 +01:00
Nick Craig-Wood
6959c997e2 config: remove error: can't use --size-only and --ignore-size together.
As sometimes they are useful together, for example when the remote
changes the sizes of the uploaded files.

See: https://forum.rclone.org/t/files-upload-will-be-auto-compress-how-do-i-sync-a-file-to-remote/10578
2019-09-14 10:13:44 +01:00
Nick Craig-Wood
25786cafd3 s3: fix SetModTime on GLACIER/ARCHIVE objects and implement set/get tier
- Read the storage class for each object
- Implement SetTier/GetTier
- Check the storage class on the **object** before using SetModTime

This updates the fix in 1a2fb52 so that SetModTime works when you are
using objects which have been migrated to GLACIER but you aren't using
GLACIER as a storage class.

Fixes #3522
2019-09-14 09:18:55 +01:00
Nick Craig-Wood
23dc313fa5 azureblob: add missing type assertions for GetTier/SetTier 2019-09-14 09:18:55 +01:00
Nick Craig-Wood
1a16849df0 http: fix race introduced in 7982aaf151 2019-09-14 08:48:13 +01:00
Nick Craig-Wood
3b68340eac http: add --http-no-head to stop rclone doing HEAD in listings #3523 2019-09-14 00:17:39 +01:00
Nick Craig-Wood
7982aaf151 http: HEAD directory entries in parallel to speedup #3523 2019-09-14 00:16:44 +01:00
Nick Craig-Wood
7b29ed8ec1 Add Lars Lehtonen to contributors 2019-09-13 23:50:54 +01:00
Lars Lehtonen
c93e0ff8ee rest: fix missing error check 2019-09-13 23:50:39 +01:00
Nick Craig-Wood
3b91fb6a2f Add David Baumgold to contributors 2019-09-13 18:39:36 +01:00
David Baumgold
7d8c15c030 docs: GitHub has a capital H 2019-09-13 18:39:23 +01:00
Nick Craig-Wood
bfbddab46b fs/accounting: Fix "file already closed" on transfer retries
This was caused by the recent reworking of the accounting interface.
The Transfer object was recycling the Accounting object without
resetting the stream.

See: https://forum.rclone.org/t/error-file-already-closed/11469/
See: https://forum.rclone.org/t/rclone-b2-sync-post-error-method-not-supported/11718/
2019-09-13 18:35:02 +01:00
Nick Craig-Wood
e09a4ff019 cmd: Make --progress work in git bash on Windows - fixes #3531
This detects the presence of a VT100 terminal by using the TERM
environment variable and switches to using VT100 codes directly under
windows if it is found.

This makes --progress work correctly with git bash.
2019-09-13 15:24:47 +01:00
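The detection itself is tiny; a sketch of the idea, not rclone's exact code:

```go
package main

import (
	"fmt"
	"os"
	"runtime"
)

// useVT100 reports whether to emit VT100 escape codes directly. On
// Windows, a non-empty TERM (as set by git bash/mintty) is taken as
// evidence of a VT100-capable terminal.
func useVT100() bool {
	if runtime.GOOS != "windows" {
		return true
	}
	return os.Getenv("TERM") != ""
}

func main() {
	if useVT100() {
		fmt.Print("\x1b[2K\rtransferring... 42%") // clear line, redraw in place
	} else {
		fmt.Println("transferring... 42%")
	}
}
```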
Nick Craig-Wood
48e23d8c85 build: fix circleci build to use billziss/xgo-cgofuse container
Use built rclone to do upload
2019-09-12 21:59:50 +01:00
Aleksandar Jankovic
934440a9df accounting: fix total duration calculation
Fixes: #3498
2019-09-12 10:29:40 +01:00
Nick Craig-Wood
29b4f211ab gcs: add context to SDK calls #3257 2019-09-09 23:27:07 +01:00
Nick Craig-Wood
bd863f8868 drive: add context to SDK calls #3257 2019-09-09 23:27:07 +01:00
Nick Craig-Wood
66c23723e3 Add context to all http.NewRequest #3257
When we drop support for go1.12 we can use http.NewRequestWithContext
2019-09-09 23:27:07 +01:00
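The pre-go1.13 pattern this refers to attaches the context after building the request; once go1.12 support is dropped it collapses to the one-call form. A minimal sketch:

```go
package main

import (
	"context"
	"net/http"
	"time"
)

// newRequest builds a request and attaches ctx with WithContext - the
// go1.12-compatible equivalent of http.NewRequestWithContext (go1.13+).
func newRequest(ctx context.Context, method, url string) (*http.Request, error) {
	req, err := http.NewRequest(method, url, nil)
	if err != nil {
		return nil, err
	}
	return req.WithContext(ctx), nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	req, err := newRequest(ctx, "GET", "https://example.com")
	if err != nil {
		panic(err)
	}
	resp, err := http.DefaultClient.Do(req) // aborted if ctx expires
	if err == nil {
		resp.Body.Close()
	}
}
```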
Nick Craig-Wood
58a531a203 rest: add context propagation to rest library #3257
This fixes up the calling and propagates the contexts for the backends
which use lib/rest.
2019-09-09 23:27:07 +01:00
Ivan Andreev
ba1daea072 mailru: backend for mail.ru 2019-09-09 21:56:16 +01:00
Ivan Andreev
bdcd0b4c64 Add mailru hash (mrhash) 2019-09-09 21:34:15 +01:00
Nick Craig-Wood
94eb9a4014 Start v1.49.2-DEV development 2019-09-08 17:36:21 +01:00
Nick Craig-Wood
e028c006fc test_all: write index.json and add branch, commit and Go version to report 2019-09-08 11:35:56 +01:00
Nick Craig-Wood
3f3f038b73 build: make sure we add version info to test_all build 2019-09-08 11:35:36 +01:00
calisro
2298834e83 docs: spell out google photos download limitations 2019-09-07 11:47:53 +01:00
Nick Craig-Wood
07dfb3aa11 bin: convert python scripts to python3 2019-09-06 22:55:28 +01:00
Danil Semelenov
1382dba3c8 cmd: make autocomplete compatible with bash's posix mode #3489 2019-09-06 13:11:08 +01:00
Nick Craig-Wood
f1347139fa config: check config names more carefully and report errors - fixes #3506
Before this change it was possible to make a remote with an invalid
name in the config file, either manually or with `rclone config
create` (but not with `rclone config`).

When this remote was used, because it was invalid, rclone would
presume this remote name was a local directory for a very surprising
user experience!

This change checks remote names more carefully and returns errors
- when the user tries to use an invalid remote name on the command line
- when an invalid remote name is used in `rclone config create/update/password`
- when the user tries to enter an invalid remote name in `rclone config`

This does not prevent the user entering a remote name with invalid
characters in the config manually, but such a remote will fail
immediately when it is used on the command line.
2019-09-06 12:07:09 +01:00
Nick Craig-Wood
27a730ef8f docs: Update changelog and RELEASE.md from v1.49.1 release 2019-09-06 12:07:09 +01:00
Ivan Andreev
d0c6e5cf5a vfs: skip TestCaseSensitivity on case insensitive backends 2019-09-06 10:44:59 +01:00
Nick Craig-Wood
cf9b973fe4 accounting: fix locking in Transfer to avoid deadlock with --progress
Before this change, using -P occasionally deadlocked on the Transfer
mutex when Transfer.Done() was called with a non nil error and the
StatsInfo mutex since they mutually call each other.

This was fixed by making sure that the Transfer mutex is always
released before calling any StatsInfo methods.

This improves on: 6f87267b34

Fixes #3505
2019-09-06 10:00:44 +01:00
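The shape of the fix - never hold the Transfer mutex while calling into StatsInfo, because StatsInfo can call back into Transfer - in a minimal sketch with stub types:

```go
package main

import "sync"

// StatsInfo and Transfer are stubs illustrating only the lock ordering.
type StatsInfo struct {
	mu        sync.Mutex
	transfers int
}

func (s *StatsInfo) DoneTransferring() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.transfers--
}

type Transfer struct {
	mu  sync.Mutex
	err error
}

// Done records err, releasing t.mu before calling into StatsInfo so the
// two mutexes are never held at the same time.
func (t *Transfer) Done(stats *StatsInfo, err error) {
	t.mu.Lock()
	t.err = err
	t.mu.Unlock() // release first: StatsInfo methods may call back into t
	stats.DoneTransferring()
}

func main() {
	t := &Transfer{}
	t.Done(&StatsInfo{transfers: 1}, nil)
}
```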
Nick Craig-Wood
ffa1dac10b build: apply gofmt from go1.13 to change case of number literals 2019-09-05 13:59:06 +01:00
Nick Craig-Wood
7b0966880e Add Ivan Andreev to contributors 2019-09-04 21:32:16 +01:00
Ivan Andreev
1c4e33d4ad vfs: add flag --vfs-case-insensitive for windows/macOS mounts
rclone mount when run on Windows & macOS will now default to `--vfs-case-insensitive`.
This means that
2019-09-04 21:30:48 +01:00
Nick Craig-Wood
530ba66d35 operations: fix -u/--update with google photos / files of unknown size
Before this change if -u/--update was in effect we compared the size
of the files to see if the transfer should go ahead.  This was
comparing -1 with an actual size so the transfer always proceeded.

After this change we use the existing `sizeDiffers` function which
does the correct comparison with -1 for files of unknown length.

See: https://forum.rclone.org/t/sync-with-google-photos-to-local-drive-will-result-in-recoping/11605
2019-09-04 17:31:17 +01:00
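The key property is that -1 (unknown) must mean "cannot be said to differ". A sketch of the comparison; rclone's real sizeDiffers also consults flags such as --ignore-size:

```go
package main

import "fmt"

// sizeDiffers reports whether two sizes are known to differ. A size of -1
// means unknown (eg Google Photos), in which case we can't say they differ.
func sizeDiffers(srcSize, dstSize int64) bool {
	if srcSize < 0 || dstSize < 0 {
		return false
	}
	return srcSize != dstSize
}

func main() {
	fmt.Println(sizeDiffers(-1, 12345)) // false: source size unknown
	fmt.Println(sizeDiffers(100, 200))  // true
}
```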
Danil Semelenov
b3db38ae31 Disable __rclone_custom_func if posix mode is on
A workaround for #3489. Code in `__rclone_custom_func` relies on process substitutions `<(...)` to preserve changes to variables within `while` bodies, which is not supported in posix mode.
2019-09-04 14:48:10 +01:00
Danil Semelenov
c0d1869204 Fix 'compopt: command not found' on autocomplete on macOS
As reported in #3489.
2019-09-04 14:47:26 +01:00
Nick Craig-Wood
89b6d89077 build: drop support for go1.9 2019-09-04 10:23:48 +01:00
Nick Craig-Wood
ef7b001626 build: update to use go1.13 for the build 2019-09-04 10:23:48 +01:00
Nick Craig-Wood
f97a3e853e Add Alfonso Montero to contributors 2019-09-03 17:26:13 +01:00
Denis
b71ac141cc copyurl: add --auto-filename flag for using file name from url in destination path (#3451) 2019-09-03 17:25:19 +01:00
Nick Craig-Wood
5932acfee3 rc: fix docs for config/create /update /password 2019-09-03 08:34:15 +01:00
Alfonso Montero
e2ce687f93 README.md: Add Docker pulls badge 2019-09-02 15:14:45 +01:00
Nick Craig-Wood
a3fb460c6b docs: add info on how to build and use the docker images 2019-09-02 14:30:11 +01:00
Nick Craig-Wood
8d296d0e1d Start v1.49.1-DEV development 2019-09-02 13:10:47 +01:00
Nick Craig-Wood
20a57aaccb gcs: fix need for elevated permissions on SetModTime - fixes #3493
Before this change we used PATCH on the object to update the metadata.

Apparently this requires the "full_control" scope which Google were
unhappy with in their oauth review.

This changes it to update the metadata by copying the object ontop of
itself (which is the way s3 works).  This can be done with normal
permissions.
2019-09-02 09:26:33 +01:00
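Copying the object onto itself with the Google storage v1 API might look like this; a sketch assuming an authenticated *storage.Service and an "mtime" metadata key:

```go
package gcssketch

import storage "google.golang.org/api/storage/v1"

// setModTime rewrites the object's metadata by copying the object onto
// itself. Unlike a metadata PATCH, which needed the full_control scope,
// this works with ordinary read/write permissions.
func setModTime(svc *storage.Service, bucket, object, mtime string) error {
	_, err := svc.Objects.Copy(bucket, object, bucket, object, &storage.Object{
		Metadata: map[string]string{"mtime": mtime}, // assumed metadata key
	}).Do()
	return err
}
```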
Nick Craig-Wood
50a4ed8fc4 operations: fix accounting for server side copies
See: https://forum.rclone.org/t/b2-server-side-copy-doesnt-show-cumulative-progress/11154
2019-09-02 09:26:33 +01:00
Cnly
e2b5ed6c7a docs: fix template argument for mktemp in install.sh 2019-09-02 13:01:45 +08:00
Alfonso Montero
16e7da2cb5 Add Docker workflow support #3460
* Use a multi-stage build to reduce final image size.
* Run 'quicktest' make target before building.
* Built binary won't run on Alpine unless statically linked.
2019-08-29 11:04:57 +01:00
yparitcher
52df19ad34 allow usage of -short in the testing framework 2019-08-29 09:53:23 +01:00
Nick Craig-Wood
693112d57e config: Fix generated passwords being stored as empty password - Fixes #3492 2019-08-28 12:11:03 +01:00
Nick Craig-Wood
0edbc9578d googlephotos,onedrive: fix crash on error response - fixes #3491
This fixes a crash on the google photos backend when an error is
returned from the rest.Call function.

This turned out to be a misunderstanding of the rest docs, so
- improved the rest.Call docs
- fixed the misunderstanding in the google photos backend
- fixed a similar misunderstanding in the onedrive backend
2019-08-28 12:11:03 +01:00
Chaitanya
7211c2dca7 rcd: Added missing parameter for web-gui info logs. 2019-08-27 17:21:21 +01:00
Nick Craig-Wood
af192d2507 vendor: update all dependencies 2019-08-26 18:00:17 +01:00
Nick Craig-Wood
d1a39dcc4b Start v1.49.0-DEV development 2019-08-26 17:44:09 +01:00
Nick Craig-Wood
a6387e1f81 Version v1.49.0 2019-08-26 15:25:20 +01:00
Nick Craig-Wood
a992a910ef rest: use readers.NoCloser to stop body being closed
Before this change, if you passed a io.ReadCloser to opt.Body then the
transaction would close it.  This happens as part of http.NewRequest
which documents that the io.Reader passed in will be upgraded to a
Closer if possible and closed as part of the Do call.

After this change, we wrap any io.ReadClosers to stop them being
upgraded.  This means that they will never get closed and that the
caller should always close them.

This fixes a panic in the googlephotos integration tests.
2019-08-26 12:23:31 +01:00
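The wrapper presumably hides the Close method so the upgrade can't happen; a minimal sketch of that pattern:

```go
package readers

import "io"

// noCloser hides any Close method on the underlying reader so that
// http.NewRequest cannot upgrade it to an io.ReadCloser and close it.
type noCloser struct {
	in io.Reader
}

// NoCloser returns r wrapped so it no longer satisfies io.Closer. If r
// doesn't implement io.Closer it is returned unchanged.
func NoCloser(r io.Reader) io.Reader {
	if _, canClose := r.(io.Closer); !canClose {
		return r
	}
	return noCloser{in: r}
}

func (nc noCloser) Read(p []byte) (int, error) {
	return nc.in.Read(p)
}
```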
Nick Craig-Wood
ce3340621f lib/readers: add NoCloser to stop upgrades from io.Reader to io.ReadCloser 2019-08-26 12:23:31 +01:00
Nick Craig-Wood
73e010aff9 docs: make the config walkthroughs consistent for each backend 2019-08-26 10:47:17 +01:00
Nick Craig-Wood
a3faf98aa0 docs: add docs about GUI 2019-08-25 20:32:41 +01:00
Nick Craig-Wood
ed85092edb docs: remove social media tracking javascript and replace with links 2019-08-25 11:09:20 +01:00
Nick Craig-Wood
193c30d570 Review random string/password generation
- factor password generation into lib/random.Password
- call from appropriate places
- choose appropriate use of random.String vs random.Password
2019-08-25 11:09:19 +01:00
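A password generator in the spirit of lib/random.Password - cryptographically strong bits, base64 encoded - might look like this; a sketch, not necessarily rclone's exact code:

```go
package random

import (
	"crypto/rand"
	"encoding/base64"
)

// Password returns at least bits bits of entropy from crypto/rand,
// base64 encoded for use as a password.
func Password(bits int) (string, error) {
	pw := make([]byte, (bits+7)/8)
	if _, err := rand.Read(pw); err != nil {
		return "", err
	}
	return base64.RawURLEncoding.EncodeToString(pw), nil
}
```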
Nick Craig-Wood
beb8d5c134 docs: update analytics 2019-08-25 11:09:19 +01:00
Nick Craig-Wood
93810a739d docs: update fontawesome free to 5.10.2 and fixup broken images 2019-08-25 11:09:19 +01:00
Nick Craig-Wood
5d4d5d2b07 docs: update logo on website 2019-08-25 11:09:19 +01:00
Nick Craig-Wood
f02fc5d5b5 Add Andreas Chlupka to contributors 2019-08-25 11:09:19 +01:00
Andreas Chlupka
eab999f631 graphics: update rclone logos to new design
Committed-By: Nick Craig-Wood <nick@craig-wood.com>
2019-08-24 09:31:33 +01:00
Nick Craig-Wood
bd61eb89bc serve http/webdav/restic/rc: rename --prefix flag to --baseurl #3398
The name baseurl is widely accepted for this feature so I decided to
rename it before it made it into a stable release.
2019-08-24 09:10:50 +01:00
Nick Craig-Wood
077b45322d vfs: fix --vfs-cache-mode minimal,writes ignoring cached files
Before this change, with --vfs-cache-mode minimal,writes if files were
opened they would always be read from the remote, regardless of
whether they were in the cache or not.

This change checks to see if the file is in the cache when opening a
file with --vfs-cache-mode >= minimal and if so then it uses it from
the cache.

This makes --vfs-cache-mode writes in particular much more
efficient. No longer is a file uploaded (with write mode) then
immediately downloaded (with read only mode).

Fixes #3330
2019-08-23 13:58:15 +01:00
Nick Craig-Wood
67fae720d7 serve dlna: add more builtin mime types to cover standard audio/video
Add a minimal number of mime types to augment go's built in types
for environments which don't have access to a mime.types file (eg
Termux on android)

Fixes #3475
2019-08-23 13:30:48 +01:00
Nick Craig-Wood
39ae7c7ac0 serve dlna: fix missing mime types on Android causing missing videos
Before this fix serve dlna was only using the built in database of
mime types to look up the mime types of files.  On Android (and
possibly other systems) this is very small.

The symptoms of this problem was serve dlna only listing images and
not videos.

After this fix we use the backend's idea of the mime type if possible
which will be more accurate.

Fixes #3475
2019-08-23 13:30:48 +01:00
Nick Craig-Wood
f67798d73e Add Cenk Alti to contributors 2019-08-23 12:11:51 +01:00
Cenk Alti
a1ca65bd80 putio: add new backend 2019-08-23 12:11:36 +01:00
Cenk Alti
566aa0fca7 vendor: add github.com/putdotio/go-putio for putio client 2019-08-23 12:11:36 +01:00
Cenk Alti
8159658e67 hash: add CRC-32 support 2019-08-23 12:11:36 +01:00
Nick Craig-Wood
6f16588123 s3,b2,googlecloudstorage,swift,qingstor,azureblob: fixes after code review #3421
- change the interface of listBuckets() removing dir parameter and adding context
- add makeBucket() and use in place of Mkdir("")
    - this fixes some corner cases in Copy/Update
- mark all the listed buckets OK in ListR

Thanks to @yparitcher for the review.
2019-08-22 23:06:59 +01:00
Nick Craig-Wood
e339c9ff8f lib/bucket: shorten locking window where possible 2019-08-22 23:06:59 +01:00
Michał Matczuk
3247e69cf5 fs/rc/jobs: ExecuteJob propagate the error returned by function
Without this patch the resulting error is first converted to string and then recreated.
This makes it impossible to use the defined error types to figure out the cause of the error,
and may result in invalid HTTP status codes.

This patch adds a test TestExecuteJobErrorPropagation to validate that the errors are
properly propagated.
2019-08-22 16:10:48 +01:00
Nick Craig-Wood
341d880027 mount: remove nonseekable flag from write files - fixes #3461
Before this change rclone marked files opened for write without the VFS
cache with the non-seekable flag.

This caused problems with rclone mount layered with mergerfs.

This change removes the hint and lets rclone do all the checking for
seekability.
2019-08-22 13:13:59 +01:00
Nick Craig-Wood
941dde6940 fstest: clear the fs cache between test runs
The fs cache makes test runs no longer independent and this can cause
a problem with some tests.

Clearing the fs cache between test runs fixes the problem.

This was spotted by @cenkalti as part of merging #3469
2019-08-22 11:57:35 +01:00
Nick Craig-Wood
40cc8180f0 lib/dircache: add a way to dump the DirCache for debugging 2019-08-22 11:57:35 +01:00
Chaitanya
159f2e29a8 rcd: prefix patch for rcd and web-gui 2019-08-22 08:36:10 +01:00
Chaitanya
efd826ad4b rcd: auto-login for web-gui
rcd: automatically use authentication if none is provided for the web-gui
2019-08-22 08:36:10 +01:00
Michal Matczuk
5d6593de4f rc/jobs: Add SetInitialJobID function that allows for setting the jobID 2019-08-21 11:01:39 +01:00
Nick Craig-Wood
82c6c77e07 Add Patrick Wang to contributors 2019-08-20 17:46:13 +01:00
Patrick Wang
badc8b3293 mount: Fix typo in argument checking 2019-08-20 17:46:04 +01:00
Nick Craig-Wood
27a9d0f570 serve dlna: only select interfaces which can multicast for SSDP
Before this change we used all UP interfaces - now we need the
interfaces to be UP and MULTICAST capable.

See: https://forum.rclone.org/t/error-using-rclone-serve-dlna-on-termux/11083
2019-08-20 16:24:56 +01:00
Nick Craig-Wood
6ca00c21a4 mount: update docs to show mounting from root OK for bucket based #3421 2019-08-17 10:30:41 +01:00
Nick Craig-Wood
b619430bcf qingstor: make all operations work from the root #3421 2019-08-17 10:30:41 +01:00
Nick Craig-Wood
8a0775ce3c azureblob: make all operations work from the root #3421 2019-08-17 10:30:41 +01:00
Nick Craig-Wood
d8e9b1a67c gcs: make all operations work from the root #3421 2019-08-17 10:30:41 +01:00
Nick Craig-Wood
e0e0e0c7bd b2: make all operations work from the root #3421 2019-08-17 10:30:41 +01:00
Nick Craig-Wood
eaaf2ded94 s3: make all operations work from the root #3421 2019-08-17 10:30:41 +01:00
Nick Craig-Wood
eaeef4811f swift: make all operations work from the root #3421 2019-08-17 10:30:41 +01:00
Nick Craig-Wood
d266a171c2 lib/bucket: utilities for dealing with bucket based backends #3421 2019-08-17 10:30:41 +01:00
Nick Craig-Wood
df8bdf0dcb fstests: add tests for operations from the root of the Fs #3421 2019-08-17 10:30:41 +01:00
Nick Craig-Wood
743dabf159 fstest: add precision to CompareItems so it works on non-local remotes 2019-08-17 10:30:38 +01:00
Nick Craig-Wood
9f549f848d fs: add feature flag BucketBasedRootOK #3421
This is for bucket based remotes which can be used from the root.
Eventually all bucket based remotes will support this.
2019-08-17 09:54:19 +01:00
Nick Craig-Wood
af3c47d282 fstest: remove -subdir flag as it no longer tests anything useful #3421 2019-08-17 09:54:19 +01:00
yparitcher
ba0e1ea6ae Docs: add emptydir support to table 2019-08-17 09:45:20 +01:00
yparitcher
82b3bfec3c fix empty dir test for object based remotes 2019-08-17 09:45:20 +01:00
buengese
898782ac35 help/showBackend: fixed advanced option category when there are no standard options 2019-08-15 11:46:56 +00:00
buengese
4e43fa746a jottacloud: update config docs 2019-08-15 11:46:56 +00:00
buengese
acc9dadcdc jottacloud: refactor configuration and minor cleanup 2019-08-15 11:46:56 +00:00
Michał Matczuk
712f7e38f7 backend/local: run the fadvise syscall on a dedicated goroutine
Before, we issued an additional syscall periodically on a hot path.
This patch offloads the fadvise syscall to a dedicated goroutine.
2019-08-14 21:01:39 +01:00
Nick Craig-Wood
24161d12ab fs: make sure config is persisted to the config file when using config.Mapper 2019-08-14 20:54:08 +01:00
Nick Craig-Wood
fa539b9d9b sftp: save the md5/sha1 command in use to the config file 2019-08-14 20:54:08 +01:00
Nick Craig-Wood
3ea82032e7 sftp: support md5/sha1 with rsync.net #3254
rsync.net uses the FreeBSD equivalents of sha1sum and md5sum, so adapt
to that.
2019-08-14 20:54:08 +01:00
Nick Craig-Wood
71e172a139 serve/sftp: support empty "md5sum" and "sha1sum" commands
This is to enable the new command detection to work with the sftp
backend.
2019-08-14 20:54:08 +01:00
Nick Craig-Wood
6929f5d6e6 build: make azure pipelines stop if installs fail 2019-08-14 17:47:55 +01:00
Nick Craig-Wood
c2050172aa qingstor: upgrade to v3 SDK and fix listing loop 2019-08-14 16:15:34 +01:00
Nick Craig-Wood
a72ef7ca0e vendor: update github.com/yunify/qingstor-sdk-go to v3 2019-08-14 16:15:34 +01:00
Nick Craig-Wood
b84cc0cae7 vendor: run go tidy and go vendor 2019-08-14 16:15:34 +01:00
Nick Craig-Wood
93228dfcc9 operations: debug successful hashes as well as failures #3419 2019-08-14 15:07:38 +01:00
Nick Craig-Wood
eb087a3b04 operations: disable multi thread copy for local to local copies #3419
...unless --multi-thread-streams has been set explicitly
2019-08-14 15:07:38 +01:00
Nick Craig-Wood
ec8e0a6c58 fstest/mockobject: add SetFs method so it can have a valid Fs() #3419 2019-08-14 15:07:38 +01:00
Nick Craig-Wood
f0e0d6cc3c fs: add IsLocal feature to identify local backend #3419 2019-08-14 15:07:38 +01:00
Nick Craig-Wood
752d43d6fa fs: Implement UnWrapObject and UnWrapFs 2019-08-14 15:07:38 +01:00
Nick Craig-Wood
7c146e2618 operations: check transfer hashes when using --size-only mode #3419
Before this change we didn't calculate or check hashes of transferred
files if --size-only mode was explicitly set.

This problem was introduced in 20da3e6352 which was released with v1.37

After this change hashes are checked for all transfers unless
--ignore-checksum is set.
2019-08-14 15:07:38 +01:00
Nick Craig-Wood
f9ceade9b4 operations: don't calculate checksums when using --ignore-checksum #3419
Before this change we calculated the checksums when using
--ignore-checksum but ignored them at the end.

Now we don't calculate the checksums at all which is more efficient.
2019-08-14 15:07:38 +01:00
Nick Craig-Wood
ae9c0e56c8 operations: run hashing operations in parallel #3419
Before this change for a post copy Hash check we would run the hashes sequentially.

Now we run the hashes in parallel for a useful speedup.

Note that this refactors the hash check in Copy to use the standard
hash checking routine.
2019-08-14 15:07:38 +01:00
Nick Craig-Wood
402aaca7fe local: don't calculate any hashes by default #3419
Before this change, if the caller didn't provide a hint, we would
calculate all hashes for reads and writes.

The new whirlpool hash is particularly expensive and that has become noticeable.

Now we don't calculate any hashes on upload or download unless hints are provided.

This means that some operations may run slower and these will need to be discovered!

It does not affect anything calling operations.Copy which already puts
the correct hints in.
2019-08-14 15:07:38 +01:00
Nick Craig-Wood
106cf1852d Add ginvine to contributors 2019-08-14 13:40:15 +01:00
Nick Craig-Wood
50b8f15b5d Add another email for Laura Hausmann to contributors 2019-08-14 13:40:07 +01:00
ginvine
1e7bc359be drive: Add error for purge with --drive-trashed-only - fixes #3407
Purge should not be used with the --drive-trashed-only flag as it leads to
unexpected behavior. After this commit, if the TrashedOnly option is set to
true, an error message is returned.

See also: https://forum.rclone.org/t/drive-trashed-only-weird-occurrence/11066/14
2019-08-14 13:34:52 +01:00
Nick Craig-Wood
23a0332185 config: don't offer hidden values for editing in the config - fixes #3416 2019-08-14 08:40:22 +01:00
buengese
6812844b3d march: Fix checking sub-directories when using --no-traverse 2019-08-13 19:30:56 +01:00
buengese
3a04d0d1a9 march: rework testcases to better reflect real use 2019-08-13 19:30:56 +01:00
buengese
6f4b86e569 jottacloud: use new api for retrieving internal username - fixes #3434 2019-08-13 17:18:14 +00:00
Laura Hausmann
9aa889bfa2 fichier: fix character encoding for file names, fixes rclone#3298 2019-08-13 16:56:59 +01:00
Nick Craig-Wood
8247c8a6af rc: add anchor tags to the docs so links are consistent 2019-08-13 11:57:01 +01:00
Nick Craig-Wood
535f5f3c99 rc: fix --loopback with rc/list and others
Before this change `rclone rc --loopback` would give the error "bad
JSON".

This was because the output of the `rc/list` command was not serialized
through JSON.

This serializes it through JSON, which fixes that command (and probably
others).
2019-08-13 11:51:16 +01:00
Nick Craig-Wood
7f7946564d error: make "bad record MAC" a retriable error - Fixes #3338
The error "tls: bad record MAC" is very likely to be caused by
hardware issues.  It indicates that a packet got corrupted somewhere.

As a workaround, this change treats it as a retriable error, which
allows the chunk to be retried and the transfer to continue.
2019-08-12 20:37:10 +01:00
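Marking the error retriable amounts to matching on the error text, since it surfaces from crypto/tls as a plain error. A sketch of the idea; rclone's real retry classification lives elsewhere:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// shouldRetry treats "tls: bad record MAC" - almost always a corrupted
// packet from flaky hardware - as retriable rather than fatal.
func shouldRetry(err error) bool {
	return err != nil && strings.Contains(err.Error(), "tls: bad record MAC")
}

func main() {
	err := errors.New("local error: tls: bad record MAC")
	fmt.Println(shouldRetry(err)) // true: retry the chunk
}
```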
Chaitanya
bbb8d43716 rc: (docs) Add the new parameters --rc-web-gui, --rc-allow-origin, --rc-web-fetch-url and --rc-web-gui-update to the documentation. 2019-08-12 19:04:12 +01:00
Nick Craig-Wood
5e0a30509c http: add --http-headers flag for setting arbitrary headers 2019-08-12 18:04:24 +01:00
Nick Craig-Wood
cd7ca2a320 googlephotos: implement optional features UserInfo and Disconnect
As part of rclone's UX review it was required that rclone had a means
of disconnecting from google photos and showing which user is
connected.
2019-08-12 13:49:23 +01:00
Nick Craig-Wood
a808e98fe1 config: add reconnect, userinfo and disconnect subcommands.
- reconnect runs through the oauth flow again.
- userinfo shows the connected user info if available
- disconnect revokes the token
2019-08-12 13:49:23 +01:00
Nick Craig-Wood
3ebcb555f4 fs: add optional features UserInfo and Disconnect 2019-08-12 13:49:23 +01:00
Nick Craig-Wood
a1263e70cf premiumizeme: new backend for premiumize.me - Fixes #3063 2019-08-10 19:17:51 +01:00
Nick Craig-Wood
f47e5220a2 Add Abhinav Sharma to contributors 2019-08-10 17:31:25 +01:00
Abhinav Sharma
4db742dc77 oauthutil: note that the same version is recommended for remote auth 2019-08-10 17:31:08 +01:00
Nick Craig-Wood
3ecbd603ab rc: move job expire flags to rc to fix initialization problem
See: https://forum.rclone.org/t/rc-rc-job-expire-interval-bug/11188

rclone was ignoring the --rc-job-expire-duration and --rc-job-interval
flags.  This turned out to be an initialization order problem and was
fixed by moving those flags out of global config into rc config.
2019-08-10 17:12:22 +01:00
Nick Craig-Wood
0693deea1c rc: fix unmarshalable http.AuthFn in options and put in test for marshalability 2019-08-10 16:22:17 +01:00
Nick Craig-Wood
99eaa76dc8 Add Macavirus to contributors 2019-08-10 14:13:24 +01:00
Macavirus
ba3b0a175e docs: Add rsync.net stub link to SFTP page 2019-08-10 14:13:15 +01:00
Macavirus
01c0c0b009 docs: Add C14 Cold Storage to homepage and SFTP backend 2019-08-10 14:13:15 +01:00
Nick Craig-Wood
7d85ccb11e fs/cache: test for fix cached values pointing to files #3424 2019-08-10 08:39:56 +01:00
buengese
0c1eaf1bcb cache: correctly handle fs.ErrorIsFile in GetFn - fixes #3424 2019-08-09 21:45:46 +00:00
Chaitanya
873e87fc38 rc: WebGUI should check for a new update only when rc-web-gui-update is specified or the GUI is not already downloaded.

rc: change permission to 0755 instead of 755 to prevent unexpected behaviour.
2019-08-09 15:14:52 +01:00
Chaitanya
33677ff367 rc: Added command line parameter to control cross origin resource sharing (CORS) in the rcd (security improvement)
Also fixes the import statements and a problem with the tests.
2019-08-09 15:14:52 +01:00
Nick Craig-Wood
5195075677 Add Michał Matczuk to contributors 2019-08-08 23:42:03 +01:00
Michał Matczuk
f396550934 backend/local: Avoid polluting page cache when uploading local files to remote backends
This patch makes rclone keep linux page cache usage under control when
uploading local files to remote backends. When opening a file it issues
FADV_SEQUENTIAL to configure read ahead strategy. While reading
the file it issues FADV_DONTNEED every 128kB to free page cache from
already consumed pages.

```
fadvise64(5, 0, 0, POSIX_FADV_SEQUENTIAL) = 0
read(5, "\324\375\251\376\213\361\240\224>\t5E\301\331X\274^\203oA\353\303.2'\206z\177N\27fB"..., 32768) = 32768
read(5, "\361\311\vW!\354_\317hf\276t\307\30L\351\272T\342C\243\370\240\213\355\210\v\221\201\177[\333"..., 32768) = 32768
read(5, ":\371\337Gn\355C\322\334 \253f\373\277\301;\215\n\240\347\305\6N\257\313\4\365\276ANq!"..., 32768) = 32768
read(5, "\312\243\360P\263\242\267H\304\240Y\310\367sT\321\256\6[b\310\224\361\344$Ms\234\5\314\306i"..., 32768) = 32768
fadvise64(5, 0, 131072, POSIX_FADV_DONTNEED) = 0
read(5, "m\251\7a\306\226\366-\v~\"\216\353\342~0\fht\315DK0\236.\\\201!A#\177\320"..., 32768) = 32768
read(5, "\7\324\207,\205\360\376\307\276\254\250\232\21G\323n\255\354\234\257P\322y\3502\37\246\21\334^42"..., 32768) = 32768
read(5, "e{*\225\223R\320\212EG:^\302\377\242\337\10\222J\16A\305\0\353\354\326P\336\357A|-"..., 32768) = 32768
read(5, "n\23XA4*R\352\234\257\364\355Y\204t9T\363\33\357\333\3674\246\221T\360\226\326G\354\374"..., 32768) = 32768
fadvise64(5, 131072, 131072, POSIX_FADV_DONTNEED) = 0
read(5, "SX\331\251}\24\353\37\310#\307|h%\372\34\310\3070YX\250s\2269\242\236\371\302z\357_"..., 32768) = 32768
read(5, "\177\3500\236Y\245\376NIY\177\360p!\337L]\2726\206@\240\246pG\213\254N\274\226\303\357"..., 32768) = 32768
read(5, "\242$*\364\217U\264]\221Y\245\342r\t\253\25Hr\363\263\364\336\322\t\325\325\f\37z\324\201\351"..., 32768) = 32768
read(5, "\2305\242\366\370\203tM\226<\230\25\316(9\25x\2\376\212\346Q\223 \353\225\323\264jf|\216"..., 32768) = 32768
fadvise64(5, 262144, 131072, POSIX_FADV_DONTNEED) = 0
```

Page cache consumption per file can be checked with tools like [pcstat](https://github.com/tobert/pcstat).

This patch does not have a performance impact. Please find below results
of an experiment comparing local copy of 1GB file with and without this
patch.

With the patch:

```
(mmt/fadvise)$ pcstat 1GB.bin.1
+-----------+----------------+------------+-----------+---------+
| Name      | Size (bytes)   | Pages      | Cached    | Percent |
|-----------+----------------+------------+-----------+---------|
| 1GB.bin.1 | 1073741824     | 262144     | 0         | 000.000 |
+-----------+----------------+------------+-----------+---------+
(mmt/fadvise)$ taskset -c 0 /usr/bin/time -v ./rclone copy 1GB.bin.1 /var/empty/rclone
        Command being timed: "./rclone copy 1GB.bin.1 /var/empty/rclone"
        User time (seconds): 13.19
        System time (seconds): 1.12
        Percent of CPU this job got: 96%
        Elapsed (wall clock) time (h:mm:ss or m:ss): 0:14.81
        Average shared text size (kbytes): 0
        Average unshared data size (kbytes): 0
        Average stack size (kbytes): 0
        Average total size (kbytes): 0
        Maximum resident set size (kbytes): 27660
        Average resident set size (kbytes): 0
        Major (requiring I/O) page faults: 0
        Minor (reclaiming a frame) page faults: 2212
        Voluntary context switches: 5755
        Involuntary context switches: 9782
        Swaps: 0
        File system inputs: 4155264
        File system outputs: 2097152
        Socket messages sent: 0
        Socket messages received: 0
        Signals delivered: 0
        Page size (bytes): 4096
        Exit status: 0
(mmt/fadvise)$ pcstat 1GB.bin.1
+-----------+----------------+------------+-----------+---------+
| Name      | Size (bytes)   | Pages      | Cached    | Percent |
|-----------+----------------+------------+-----------+---------|
| 1GB.bin.1 | 1073741824     | 262144     | 0         | 000.000 |
+-----------+----------------+------------+-----------+---------+
```

Without the patch:

```
(master)$ taskset -c 0 /usr/bin/time -v ./rclone copy 1GB.bin.1 /var/empty/rclone
        Command being timed: "./rclone copy 1GB.bin.1 /var/empty/rclone"
        User time (seconds): 14.46
        System time (seconds): 0.81
        Percent of CPU this job got: 93%
        Elapsed (wall clock) time (h:mm:ss or m:ss): 0:16.41
        Average shared text size (kbytes): 0
        Average unshared data size (kbytes): 0
        Average stack size (kbytes): 0
        Average total size (kbytes): 0
        Maximum resident set size (kbytes): 27600
        Average resident set size (kbytes): 0
        Major (requiring I/O) page faults: 0
        Minor (reclaiming a frame) page faults: 2228
        Voluntary context switches: 7190
        Involuntary context switches: 1980
        Swaps: 0
        File system inputs: 2097152
        File system outputs: 2097152
        Socket messages sent: 0
        Socket messages received: 0
        Signals delivered: 0
        Page size (bytes): 4096
        Exit status: 0
(master)$ pcstat 1GB.bin.1
+-----------+----------------+------------+-----------+---------+
| Name      | Size (bytes)   | Pages      | Cached    | Percent |
|-----------+----------------+------------+-----------+---------|
| 1GB.bin.1 | 1073741824     | 262144     | 262144    | 100.000 |
+-----------+----------------+------------+-----------+---------+
```
2019-08-08 23:41:52 +01:00
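The two fadvise calls visible in the strace above map onto golang.org/x/sys/unix like this; a Linux-only sketch of the mechanism, not the patch itself:

```go
//go:build linux

package localsketch

import (
	"os"

	"golang.org/x/sys/unix"
)

const window = 128 * 1024 // drop pages every 128kB, as in the patch

// openSequential opens path and advises the kernel that it will be read
// once, front to back (POSIX_FADV_SEQUENTIAL).
func openSequential(path string) (*os.File, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	_ = unix.Fadvise(int(f.Fd()), 0, 0, unix.FADV_SEQUENTIAL)
	return f, nil
}

// dropConsumed tells the kernel it may evict the pages just read
// (POSIX_FADV_DONTNEED). The patch issues this from a dedicated
// goroutine to keep the syscall off the hot read path.
func dropConsumed(f *os.File, off int64) {
	_ = unix.Fadvise(int(f.Fd()), off, window, unix.FADV_DONTNEED)
}
```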
Nick Craig-Wood
6f87267b34 accounting: fix locking in Transfer to avoid deadlock with --progress
Before this change, using -P occasionally deadlocked on the transfer
mutex and the stats mutex since they call each other via the progress
printing.

This is fixed by shortening the locking windows and converting the
mutex to a RW mutex.
2019-08-08 15:46:46 +01:00
Nick Craig-Wood
9d1fb2f4e7 Revert "cmd: shorten the locking window when using --progress to avoid deadlock"
This reverts commit fdef567da6.

The problem turned out to be elsewhere.
2019-08-08 15:19:41 +01:00
Nick Craig-Wood
99b3154abd Revert "filter: Add BoundedRecursion method"
This reverts commit 047f00a411.

It turns out that BoundedRecursion is the wrong thing to measure.
2019-08-08 14:15:50 +01:00
Nick Craig-Wood
6c38bddf3e walk: fix listing with filters listing whole remote
Prior to this fix, a request such as

    rclone lsf -R --include "/dir/**" remote:

Would use ListR which is very inefficient as it lists the whole remote
for one directory.

This changes it to use recursive walking if the filters imply any
directory filtering.  So `--include *.jpg` and `--exclude *.jpg` will
still use ListR whereas `--include "/dir/**"` will not.
2019-08-08 14:15:50 +01:00
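The decision reduces to one predicate on the filter (UsesDirectoryFilters, added in the next commit). A sketch with a hypothetical stand-in type:

```go
package walksketch

// Filter is a hypothetical stand-in for rclone's filter.Filter.
type Filter struct {
	dirRules []string // rules like "/dir/**" that constrain directories
}

// UsesDirectoryFilters mirrors the method added in a00a0471a8.
func (f *Filter) UsesDirectoryFilters() bool {
	return len(f.dirRules) > 0
}

// useListR: recursively listing the whole remote (ListR) only wins when
// no directory filtering applies; otherwise walk the tree and let the
// filter prune whole directories.
func useListR(f *Filter, remoteSupportsListR bool) bool {
	return remoteSupportsListR && !f.UsesDirectoryFilters()
}
```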
Nick Craig-Wood
a00a0471a8 filter: Add UsesDirectoryFilters method 2019-08-08 14:15:50 +01:00
Nick Craig-Wood
9e81fc343e swift: fix upload when using no_chunk to return the correct size
When using the VFS with swift and --swift-no-chunk, PutStream was
returning objects with size -1 which was causing corrupted transfer
messages.

This was fixed by counting the bytes transferred in a streamed file
and updating the metadata with that.
2019-08-08 12:41:46 +01:00
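Counting bytes in a streamed upload is the classic wrapping-reader pattern; a minimal sketch (the backend would then write BytesRead back into the object metadata):

```go
package swiftsketch

import "io"

// countingReader counts bytes as they stream through, so an upload of
// unknown size (-1) can report its true size once it finishes.
type countingReader struct {
	in io.Reader
	n  int64
}

func (c *countingReader) Read(p []byte) (int, error) {
	n, err := c.in.Read(p)
	c.n += int64(n)
	return n, err
}

// BytesRead returns how many bytes have streamed through so far.
func (c *countingReader) BytesRead() int64 {
	return c.n
}
```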
Nick Craig-Wood
fdef567da6 cmd: shorten the locking window when using --progress to avoid deadlock
Before this change, using -P occasionally deadlocked on the progress
mutex and the stats mutex since they call each other.

This is fixed by shortening the locking window in the progress routine
so as not to include the stats calculation.
2019-08-08 12:37:50 +01:00
Nick Craig-Wood
d377842395 vfs: make write without cache more efficient
This updates the out of sequence write code to be more efficient using
a conditional lock with a timeout.
2019-08-08 12:37:50 +01:00
Nick Craig-Wood
c014b2e66b rcat: fix slowdown on systems with multiple hashes
Before this fix rclone calculated all the hashes on transfer.  This
was particularly slow for the local backend.

After the fix we just calculate one hash which is enough for data
integrity.
2019-08-08 12:37:50 +01:00
Nick Craig-Wood
62b769a0a7 serve sftp: fix spurious debugs on server close 2019-08-08 12:37:50 +01:00
Nick Craig-Wood
84b5da089e serve sftp: fix detection of whether server is authorized 2019-08-08 12:37:50 +01:00
Nick Craig-Wood
d0c65b4c5e copyurl: fix copying files that return HTTP errors 2019-08-07 22:29:44 +01:00
Nick Craig-Wood
e502be475a azureblob/b2/dropbox/gcs/koofr/qingstor/s3: fix 0 length files
In 0386d22cc9 we introduced a test for reading 0 length files the
way mount does.

This test failed on these backends which we fix up here.
2019-08-06 15:18:08 +01:00
negative0
27a075e9fc rcd: Removed the shorthand for webgui. Shorthand is reserved for rsync compatibility. 2019-08-06 12:50:31 +01:00
Nick Craig-Wood
5065c422b4 lib/random: unify random string generation into random.String
This was factored out of fstest as we were including the testing
environment in the main binary because of it.

This was causing opening the browser to fail because of 8243ff8bc8.
2019-08-06 12:44:08 +01:00
Nick Craig-Wood
72d5b11d1b serve restic: rename test file to avoid it being linked into main binary 2019-08-06 12:42:52 +01:00
Nick Craig-Wood
526a3347ac rcd: Fix permissions problems on cache directory with web gui download 2019-08-06 12:06:57 +01:00
Nick Craig-Wood
23910ba53b servetest: add tests for --auth-proxy 2019-08-06 11:43:42 +01:00
Nick Craig-Wood
ee7101e6af serve: factor out common testing parts for ftp, sftp and webdav tests 2019-08-06 11:43:42 +01:00
Nick Craig-Wood
36c1b37dd9 serve webdav: support --auth-proxy 2019-08-06 11:43:42 +01:00
Nick Craig-Wood
72782bdda6 serve ftp: implement --auth-proxy 2019-08-06 11:43:42 +01:00
Nick Craig-Wood
b94eef16c1 serve ftp: refactor to bring into line with other serve commands 2019-08-06 11:43:42 +01:00
Nick Craig-Wood
d75fbe4852 serve sftp: implement auth proxy 2019-08-06 11:43:42 +01:00
Nick Craig-Wood
e6ab237fcd serve: add auth proxy infrastructure 2019-08-06 11:43:42 +01:00
Nick Craig-Wood
a7eec91d69 vfs: add Fs() method to return underlying fs.Fs 2019-08-06 11:43:42 +01:00
Nick Craig-Wood
b3e94b018c cache: factor fs cache into lib/cache 2019-08-06 11:43:42 +01:00
Nick Craig-Wood
ca0e9ea55d build: add Azure Pipelines build status to README 2019-08-06 10:46:36 +01:00
Nick Craig-Wood
53e3c2e263 build: add azure pipelines build 2019-08-06 10:31:32 +01:00
Nick Craig-Wood
02eb747d71 serve http/webdav/restic: implement --prefix - fixes #3398
--prefix enables the servers to serve from a non root prefix.  This
enables easier proxying.
2019-08-06 10:30:48 +01:00
Chaitanya Bankanhal
d51a970932 rcd: Change URL after webgui move to rclone organization 2019-08-05 16:22:40 +01:00
Nick Craig-Wood
a9438cf364 build: add .gitattributes to mark generated files
This makes sure that GitHub ignores the auto generated documentation
files for language detection and diffs.

See: https://github.com/github/linguist#overrides for more info
2019-08-04 15:20:15 +01:00
Nick Craig-Wood
5ef3c988eb bin: add script to test all commits compile for git bisect 2019-08-04 13:29:59 +01:00
Nick Craig-Wood
78150e82a2 docs: update bugs and limitations document 2019-08-04 12:33:39 +01:00
Nick Craig-Wood
6f0cc51eeb Add Chaitanya Bankanhal to contributors 2019-08-04 12:33:39 +01:00
Chaitanya Bankanhal
84e2806c4b rc: Rclone-WebUI integration with rclone
This adds experimental support for web GUI integration so that rclone can fetch and run a web based GUI using the --rc-web-gui and related flags.

It downloads and caches a webui zip file which it then unpacks and opens in the browser.
2019-08-04 12:32:37 +01:00
Nick Craig-Wood
0386d22cc9 vfs: add test for 0 length files read in the way mount does 2019-08-03 18:25:44 +01:00
Nick Craig-Wood
0be14120e4 swift: use FixRangeOption to fix 0 length files via the VFS 2019-08-03 18:25:44 +01:00
Nick Craig-Wood
95af1f9ccf fs: fix FixRangeOption so it works with 0 length files 2019-08-03 18:25:44 +01:00
Nick Craig-Wood
629b7eacd8 b2: fix integration tests after accounting changes
In 53a1a0e3ef we started returning non nil from NewObject when
an object isn't found.  This breaks the integration tests and the API
expected of a backend.

This fixes it.
2019-08-03 13:30:31 +01:00
yparitcher
d3149acc32 b2: link sharing 2019-08-03 13:30:31 +01:00
Aleksandar Jankovic
6a3e301303 accounting: add call to clear stats
- Make calls more consistent by changing path to kebab case.
- Add stacktrace information to job panics
2019-08-02 16:56:19 +01:00
Nick Craig-Wood
5be968c0ca drive: update API for teamdrive use - fixes #3348 2019-08-02 16:06:23 +01:00
Nick Craig-Wood
f1a687c540 Add justina777 to contributors 2019-08-02 15:57:25 +01:00
justina777
94ee43fe54 log: add object and objectType to json logs 2019-08-02 15:57:09 +01:00
Nick Craig-Wood
c2635e39cc build: fix appveyor secure variables after project move 2019-07-28 22:46:26 +01:00
Nick Craig-Wood
8c511ec9cd docs: fix star count on website 2019-07-28 20:58:21 +01:00
Nick Craig-Wood
ac0dce78d0 cmd: fix up stats printing on macOS after accounting change 2019-07-28 20:38:20 +01:00
Nick Craig-Wood
f347514f62 build: fix up CI and CI badges after repo move 2019-07-28 20:07:04 +01:00
Nick Craig-Wood
57d5de6fba build: fix up package paths after repo move
git grep -l github.com/ncw/rclone | xargs -d'\n' perl -i~ -lpe 's|github.com/ncw/rclone|github.com/rclone/rclone|g'
goimports -w `find . -name \*.go`
2019-07-28 18:47:38 +01:00
Aleksandar Jankovic
4ba6532915 accounting: make stats response consistent
core/stats can return two different schemas in the 'transferring'
field: one an object with fields, the other a plain string. This is
confusing, unnecessary, and makes defining a response schema more
difficult. It also returns `lastError` as a value which can be
rendered differently depending on the source of the error.

This change standardizes the 'transferring' field to always return an
object, but with reduced fields if they are not available. The former
string item is converted to a {name: remote_name} object.

'lastError' is forced to be a string as in some cases it can be encoded
as an object.
2019-07-28 14:48:19 +01:00
Aleksandar Jankovic
ff235e4e56 docs: update documentation for stats 2019-07-28 14:48:19 +01:00
Aleksandar Jankovic
68e641f6cf accounting: add limits and listing to stats groups 2019-07-28 14:48:19 +01:00
Aleksandar Jankovic
53a1a0e3ef accounting: add reference to completed transfers
Add core/transferred call that lists completed transfers and their
status.
2019-07-28 14:48:19 +01:00
Aleksandar Jankovic
8243ff8bc8 accounting: isolate stats to groups
Introduce stats groups that isolate accounting for logically
different transferring operations. That way multiple accounting
operations can be done in parallel without interfering with each
other's stats.

Using groups is optional. There is a dedicated global stats group that
is used by default if no group is specified. This is the operating mode
for CLI usage, which is just a fire and forget operation.

When running rclone as an rc http server, each request creates its own
group. There is also an option to specify your own group.
2019-07-28 14:48:19 +01:00
Aleksandar Jankovic
be0464f5f1 accounting: change stats interface
This is done to make ownership of the accounting object clear and to
prepare for removing the global stats object.

The stats elapsed time calculation has been altered to account for the
actual transfer time instead of the stats creation time.
2019-07-28 14:48:19 +01:00
Nick Craig-Wood
2d561b51db Add EliEron to contributors 2019-07-28 12:33:21 +01:00
Nick Craig-Wood
9241a93c2d Add justinalin to contributors 2019-07-28 12:33:21 +01:00
EliEron
fb32f77bac Docs: Fix typos in filtering demonstrations 2019-07-28 12:32:53 +01:00
justinalin
520fb03bfd log: add --use-json-log for JSON logging 2019-07-28 12:05:50 +01:00
justinalin
a3449bda30 vendor: add github.com/sirupsen/logrus 2019-07-28 12:05:50 +01:00
yparitcher
ccc416e62b b2: Fix link sharing #3314 2019-07-28 11:47:31 +01:00
jaKa
a35aa1360e Support setting modification times on Koofr backend.
A configuration time option disables this when using Dropbox (which does
not allow setting mtime on copy) or Amazon Drive (which allows it neither
on upload nor on copy).
2019-07-24 21:11:58 +01:00
jaKa
3df9dbf887 vendor: updated github.com/koofr/go-koofrclient for set mtime support. 2019-07-24 21:11:58 +01:00
Nick Craig-Wood
9af0a704af Add Paul Millar to contributors 2019-07-24 20:34:39 +01:00
Nick Craig-Wood
691e5ae5f0 Add Yi FU to contributors 2019-07-24 20:34:39 +01:00
Nick Craig-Wood
5a44bafa4e fstest: add fs.ErrorCantShareDirectories for backends which can only share files 2019-07-24 20:34:29 +01:00
Nick Craig-Wood
8fdce31700 config: Fix hiding of options from the configurator 2019-07-24 20:34:29 +01:00
Nick Craig-Wood
493dfb68fd opendrive: refactor to use existing lib/rest facilities for uploads
This also checks the return of the call to make sure the number of
bytes written was as expected.
2019-07-24 20:34:29 +01:00
Nick Craig-Wood
71587344c6 lib/rest: allow Form upload with no file to upload 2019-07-24 20:34:29 +01:00
yparitcher
8e8b78d7e5 Implement --compare-dest & --copy-dest Fixes #3278 2019-07-22 19:42:29 +01:00
Nick Craig-Wood
266600dba7 build: reduce parallelism in cross compile to reduce memory and fix Travis
Before this change Travis builds were running out of memory when cross
compiling all the OSes.
2019-07-22 17:10:26 +01:00
Paul Millar
e4f6ccbff2 webdav: add docs for using bearer_token_command with oidc-agent 2019-07-22 16:01:55 +01:00
Nick Craig-Wood
1f1ab179a6 webdav: refresh token when it expires with --webdav-bearer-token-command
Fixes #2380
2019-07-22 16:01:55 +01:00
Nick Craig-Wood
c642531a1e webdav: add --webdav-bearer-token-command - fixes #2380
This can be used with oidc-agent to get a bearer token
2019-07-22 15:59:54 +01:00
buengese
19ae053168 rcserver: remove _async key from input parameters after parsing so later operations won't get confused - fixes #3346 2019-07-20 19:35:10 +02:00
buengese
def790986c fichier: make FolderID int and adjust related code - fixes #3359 2019-07-20 02:49:08 +02:00
Yi FU
0a1169e659 ssh: opt-in support for diffie-hellman-group-exchange-sha256 diffie-hellman-group-exchange-sha1 - fixes #1810 2019-07-13 12:21:56 +02:00
Nick Craig-Wood
5433021e8b drive: fix server side copy of big files
Before this change rclone was sending a MimeType in the requests for
server side Move and Copy.

The conjecture is that if you attempt to set the MimeType to something
different in a Copy then Google Drive has to do an actual copy of the
file data.  This takes a very long time (since it is large) and fails
after a 90s timeout.

After the change we no longer set the MimeType in Move or Copy and the
copies happen instantly and correctly.

Many thanks to @darthShadow for discovering that this was causing the
problem.

Fixes #3070
Fixes #3033
Fixes #3300
Fixes #3155
2019-07-05 10:49:19 +01:00
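With the drive v3 Go client, "no longer set the MimeType" just means leaving that field zero in the copy request. A sketch assuming an authenticated *drive.Service:

```go
package drivesketch

import drive "google.golang.org/api/drive/v3"

// serverSideCopy copies a file without setting MimeType. Asking Drive to
// change the MIME type during a copy appears to force a rewrite of the
// file data, which times out for big files; leaving it unset keeps the
// copy instant.
func serverSideCopy(srv *drive.Service, fileID, newName, parentID string) (*drive.File, error) {
	return srv.Files.Copy(fileID, &drive.File{
		Name:    newName,
		Parents: []string{parentID},
	}).Do()
}
```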
Nick Craig-Wood
c9f77719e4 b2: enable server side copy to copy between buckets - fixes #3303 2019-07-05 10:07:05 +01:00
Nick Craig-Wood
3cd63a00be googlephotos: fix configuration walkthrough 2019-07-04 15:19:59 +01:00
Nick Craig-Wood
d7016866e0 googlephotos: fix creation of duplicated albums
Also make sure we don't list the albums twice
2019-07-04 13:45:52 +01:00
yparitcher
d72e4105fb b2: Fix link sharing #3314 2019-07-04 11:53:59 +01:00
yparitcher
b4266da4eb sync: fix SyncSuffix tests #3272 2019-07-03 17:36:22 +01:00
yparitcher
3f5767b94e b2: Implement link sharing #2178 2019-07-03 14:10:25 +01:00
Nick Craig-Wood
1510e12659 build: move race test to go modules build
In an attempt to even out the build times.
2019-07-03 12:07:29 +01:00
Nick Craig-Wood
ede03258bc build: use go modules proxy when building modules 2019-07-03 12:07:29 +01:00
Nick Craig-Wood
7fcbb47b1c build: split other OS build into a separate builder
This is in order to make longest build (the Linux build) quicker
2019-07-03 12:07:29 +01:00
Nick Craig-Wood
9cafeeb4b6 dirtree: make tests more reliable 2019-07-02 16:29:40 +01:00
Nick Craig-Wood
a1cfe61ffd googlephotos: Backend for accessing Google Photos #369 2019-07-02 15:26:55 +01:00
Nick Craig-Wood
5eebbaaac4 test_all: add tests parameter to limit which tests to run for a backend 2019-07-02 15:26:55 +01:00
Nick Craig-Wood
bc70bff125 fs/dirtree: factor DirTree out of fs/walk and add tests 2019-07-02 15:26:55 +01:00
Nick Craig-Wood
cf15b88efa build: make explicit which matrix items we will deploy 2019-07-02 14:13:46 +01:00
Nick Craig-Wood
dcaee0016a build: add a builder for building with go modules 2019-07-02 12:33:03 +01:00
Nick Craig-Wood
387b496d1e operations: fix tests TestMoveFileBackupDir and TestCopyFileBackupDir again
Commit 734f504d5f wasn't tested properly and had a typo which
caused it not to build :-(
2019-07-02 12:22:29 +01:00
Nick Craig-Wood
734f504d5f operations: fix tests TestMoveFileBackupDir and TestCopyFileBackupDir
..so they don't run on backends which can't move or copy.
2019-07-02 10:46:49 +01:00
Dan Dascalescu
7153909390 docs: fix typo in google drive docs 2019-07-02 10:10:08 +01:00
Russell Davis
ea35e807db docs: clarify --update and --use-server-mod-time
It's likely a mistake to use `--use-server-modtime` if you're not also using `--update`. It might even make sense to emit a warning in the code when doing this, but for now, I made it more clear in the docs.

I also clarified how `--use-server-modtime` can be useful in the `--update` section.
2019-07-02 10:09:03 +01:00
Nick Craig-Wood
5df5a3b78e vendor: tidy go.mod and go.sum - fixes #3317 2019-07-02 09:47:00 +01:00
Nick Craig-Wood
37c1144b46 Add Russell Davis to contributors 2019-07-02 07:57:18 +01:00
Nick Craig-Wood
8d116ba0c9 Add Matti Niemenmaa to contributors 2019-07-02 07:57:18 +01:00
Russell Davis
6a3c3d9b89 Update docs on S3 policy to include ListAllMyBuckets permission
This permission is required for `rclone lsd`.
2019-07-02 07:56:54 +01:00
Matti Niemenmaa
a6dca4c13f s3: Add INTELLIGENT_TIERING storage class
For Intelligent-Tiering:
https://aws.amazon.com/s3/storage-classes/#Unknown_or_changing_access
2019-07-01 18:17:48 +01:00
Aleksandar Jankovic
cc0800a72e march: return errors when listing dirs
Partially fixes #3172
2019-07-01 15:32:46 +01:00
Nick Craig-Wood
1be1fc073e Add AbelThar to contributors 2019-07-01 12:09:35 +01:00
AbelThar
70c6b01f54 fs: Higher units for ETA - fixes #3221 2019-07-01 12:09:19 +01:00
Aleksandar Jankovic
7b2b396d37 context: fix errgroup interaction with context
Fixes #3307
2019-07-01 11:51:51 +01:00
Nick Craig-Wood
af2596f98b Add yparitcher to contributors 2019-07-01 11:13:53 +01:00
Nick Craig-Wood
61fb326a80 Add full name for Laura Hausmann 2019-07-01 10:53:47 +01:00
yparitcher
de14378734 Implement --suffix without --backup-dir for current dir
Fixes #2801
2019-07-01 10:46:26 +01:00
yparitcher
eea1b6de32 Abstract --Backup-dir checks so can be applied across Sync, Copy, Move 2019-07-01 10:46:26 +01:00
Nick Craig-Wood
6bae3595a8 Add Laura to contributors 2019-06-30 20:07:35 +01:00
Laura
dde4dd0198 fichier: 1fichier support - fixes #2908
This was started by Fionera, finished off by Laura with fixes and more
docs from Nick.

Co-authored-by: Fionera <fionera@fionera.de>
Co-authored-by: Nick Craig-Wood <nick@craig-wood.com>
2019-06-30 18:35:01 +01:00
Laura
2d0e9885bd vendor: add jzelinskie/whirlpool 2019-06-30 18:35:01 +01:00
Nick Craig-Wood
9ed81ac451 vfs: fix tests for backends which can't upload 0 length files 2019-06-30 18:35:01 +01:00
Nick Craig-Wood
3245c0ae0d fstests: add integration test for uploading empty files
This tests a remote can upload empty files.  If the remote can't
upload empty files it should return fs.ErrorCantUploadEmptyFiles.
2019-06-30 18:35:01 +01:00
Laura
6ff7b2eaab fs: add fs.ErrorCantUploadEmptyFiles
Any backends which can't upload 0 length files should return this
error.
2019-06-30 18:11:45 +01:00
Laura
38ebdf54be sync/operations: don't use zero length files in tests
We now have a backend (fichier) which doesn't support 0 length
files. Therefore all 0 length files in the tests have been replaced
with length 1.

In a future commit we will implement a test for 0 length files.
2019-06-30 18:11:45 +01:00
Fionera
6cd7c3b774 lib/rest: Calculate correct Content-Length on MultiPart Uploads
This is used in the pcloud backend and in the upcoming 1fichier backend.
2019-06-30 17:57:22 +01:00
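One way to precompute the Content-Length of a streamed multipart body is to measure the boundary and header overhead up front; this is a hedged sketch of the idea (multipartLength is a hypothetical helper, not the lib/rest API):

```go
package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
)

// multipartLength returns the total Content-Length of a multipart body
// containing one file part, without buffering the file itself: everything
// except the file bytes is rendered into a buffer and measured.
func multipartLength(fieldName, fileName string, fileSize int64) (int64, string, error) {
	var overhead bytes.Buffer
	w := multipart.NewWriter(&overhead)
	if _, err := w.CreateFormFile(fieldName, fileName); err != nil {
		return 0, "", err
	}
	if err := w.Close(); err != nil { // writes the closing boundary
		return 0, "", err
	}
	// overhead now holds the part header plus the closing boundary; the
	// file bytes slot in between when the body is streamed for real.
	return int64(overhead.Len()) + fileSize, w.FormDataContentType(), nil
}

func main() {
	n, ct, _ := multipartLength("file", "a.bin", 1024)
	fmt.Println(n, ct)
}
```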
Nick Craig-Wood
07e2c3a50f b2: fix nil pointer error introduced by context propagation patch
For some reason f78cd1e043 introduced an unrelated change -
perhaps a merge error.  Removing this change fixes the nil pointer
problem.
2019-06-28 22:38:41 +01:00
Jon Fautley
cd762f04b8 sftp: Completely ignore all modtime checks if SetModTime=false 2019-06-28 10:33:14 +01:00
Sandeep
6907242cae azureblob: Updated config help details to remove connection string references (#3306) 2019-06-27 18:53:33 -07:00
Nick Craig-Wood
d61ba7ef78 vendor: update all dependencies 2019-06-27 13:52:32 +01:00
Nick Craig-Wood
b221d79273 Add nguyenhuuluan434 to contributors 2019-06-27 13:28:52 +01:00
nguyenhuuluan434
940d88b695 refactor code 2019-06-27 13:28:35 +01:00
nguyenhuuluan434
ca324b5084 capture segment info during upload to the swift backend and
delete the segments if there is an error during object upload.
2019-06-27 13:28:35 +01:00
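The cleanup pattern described above, as a hedged sketch (uploadSegment and deleteSegment are hypothetical stand-ins for the swift calls, not the backend API):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// Hypothetical stand-ins for the swift segment calls (illustration only).
func uploadSegment(n int, r io.Reader) (string, error) { return fmt.Sprintf("seg-%04d", n), nil }
func deleteSegment(name string) error                  { return nil }

// uploadLargeObject records each segment as it is written and deletes
// them all again if a later segment fails, so no orphans are left behind.
func uploadLargeObject(parts []io.Reader) (err error) {
	var uploaded []string
	defer func() {
		if err != nil {
			for _, seg := range uploaded {
				_ = deleteSegment(seg) // best-effort cleanup on failure
			}
		}
	}()
	for i, part := range parts {
		name, segErr := uploadSegment(i, part)
		if segErr != nil {
			return segErr
		}
		uploaded = append(uploaded, name)
	}
	return nil
}

func main() {
	fmt.Println(uploadLargeObject([]io.Reader{strings.NewReader("data")}))
}
```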
Nick Craig-Wood
9f4589a997 gcs: reduce oauth scope requested as suggested by Google
As part of getting the rclone oauth consent screen approved by Google,
it came up that the scope in use by the gcs backend was too large.

This change reduces it to the minimum scope which still allows rclone
to work correctly.

Old scope: https://www.googleapis.com/auth/devstorage.full_control
New scope: https://www.googleapis.com/auth/devstorage.read_write
2019-06-27 12:05:49 +01:00
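In golang.org/x/oauth2 terms the change amounts to requesting the narrower scope (the scope URLs are quoted from the commit above; the surrounding config is illustrative):

```go
package main

import (
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		// Previously: https://www.googleapis.com/auth/devstorage.full_control
		Scopes: []string{"https://www.googleapis.com/auth/devstorage.read_write"},
	}
	fmt.Println(conf.Scopes)
}
```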
Sandeep
fc44eb4093 Azure Storage Emulator support (#3285)
* azureblob - Add support for Azure Storage Emulator to test things locally.

Testing - Verified changes by testing manually.

* docs: update azureblob docs to reflect support of storage emulator
2019-06-26 20:46:22 -07:00
Nick Craig-Wood
a1840f6fc7 sftp: add missing interface check and fix About #3257
This bug was introduced as part of adding context to the backends and
slipped through the net because the About call did not have an
interface assertion in the sftp backend.

I checked there were no other missing interface assertions on all the
optional methods on all the backends.
2019-06-26 16:56:33 +01:00
Gary Kim
0cb7130dd2 ncdu: Display/Copy to Clipboard Current Path 2019-06-26 16:49:53 +01:00
Gary Kim
2655bea86f vendor: add github.com/atotto/clipboard 2019-06-26 16:49:53 +01:00
Gary Kim
08bf8faa2f vendor: update github.com/jlaffaye/ftp 2019-06-26 16:42:12 +01:00
Nick Craig-Wood
4e64ee38e2 mount: default --daemon-timeout to 15 minutes on macOS and FreeBSD
See: https://forum.rclone.org/t/macos-fuse-mount-contents-disappear-after-writes-while-using-vfs-cache/10566/
2019-06-25 15:30:42 +01:00
Nick Craig-Wood
276f8cccf6 rc: return current settings if core/bwlimit called without parameters 2019-06-24 13:22:24 +01:00
Nick Craig-Wood
0ae844d1f8 config: reset environment variables in config file test to fix build 2019-06-22 17:49:23 +01:00
Nick Craig-Wood
4ee6de5c3e docs: add a new page with global flags and link to it from the command docs
In f544234 we removed the global flags from each command as they were
making each page very big and causing 1000s of lines of duplication in
the man page.

This change adds a new flags page with all the global flags on and
links each command page to it.

Fixes #3273
2019-06-20 16:45:44 +01:00
Nick Craig-Wood
71a19a1972 Add Maran to contributors 2019-06-19 19:36:20 +01:00
Maran
ba72e62b41 fs/config: Add method to reload configfile from disk
Fixes #3268
2019-06-19 14:47:54 +01:00
Aleksandar Jankovic
5935cb0a29 jobs: add ability to stop async jobs
Depends on #3257
2019-06-19 14:17:41 +01:00
Aleksandar Jankovic
f78cd1e043 Add context propagation to rclone
- Change rclone/fs interfaces to accept context.Context
- Update interface implementations to use context.Context
- Change top level usage to propagate context to lower level functions

Context propagation is needed for stopping transfers and passing other
request-scoped values.
2019-06-19 11:59:46 +01:00
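The shape of the change can be seen in the backend diffs further down: each interface method gains a leading context.Context parameter, e.g. (a sketch with a stand-in type, not the fs package itself):

```go
package main

import "context"

// DirEntries stands in for fs.DirEntries here.
type DirEntries []string

// Before the change a listing could not be cancelled:
//
//	List(dir string) (DirEntries, error)
//
// After it, the context flows from the top-level command into the backend:
type Fs interface {
	List(ctx context.Context, dir string) (DirEntries, error)
}

func main() {}
```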
Aleksandar Jankovic
a2c317b46e chore: update .gitignore 2019-06-19 11:59:46 +01:00
Nick Craig-Wood
6a2a075c14 fs/cache: fix locking
This was causing `fatal error: sync: unlock of unlocked mutex` if a
panic occurred in fs.NewFs.
2019-06-19 10:50:59 +01:00
Nick Craig-Wood
628530362a local: add --local-case-sensitive and --local-case-insensitive
This is to force the remote to declare itself as case sensitive or
insensitive where the defaults for the operating system are wrong.

See: https://forum.rclone.org/t/duplicate-object-found-in-source-ignoring-dedupe-not-finding-anything/10465
2019-06-17 17:09:48 +01:00
Nick Craig-Wood
4549305fec Start v1.48.0-DEV development 2019-06-15 18:32:17 +01:00
Nick Craig-Wood
245fed513a Version v1.48.0 2019-06-15 13:55:41 +01:00
Nick Craig-Wood
52332a4b24 moveto: fix detection of same file name to include the root
Fixes problem introduced in d2be792d5e
2019-06-15 13:55:41 +01:00
Nick Craig-Wood
3087c5d559 webdav: retry on 423 Locked errors #3263 2019-06-15 10:58:13 +01:00
Nick Craig-Wood
75606dcc27 sync: fix tests on union remote 2019-06-15 10:53:36 +01:00
Nick Craig-Wood
f3719fe269 fs/cache: unlock mutex in cache.Get to allow recursive calls
This fixes the test lockup in the union tests
2019-06-15 10:42:53 +01:00
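A minimal sketch of the locking change, assuming a simple map-backed cache (not the fs/cache source): the mutex is released while the create callback runs, so the callback may call Get again.

```go
package main

import (
	"fmt"
	"sync"
)

type Cache struct {
	mu    sync.Mutex
	items map[string]interface{}
}

func (c *Cache) Get(key string, create func() interface{}) interface{} {
	c.mu.Lock()
	if v, ok := c.items[key]; ok {
		c.mu.Unlock()
		return v
	}
	// Unlock so create() can call Get recursively without deadlocking.
	// (The real code also has to cope with two callers racing to create.)
	c.mu.Unlock()
	v := create()
	c.mu.Lock()
	c.items[key] = v
	c.mu.Unlock()
	return v
}

func main() {
	c := &Cache{items: map[string]interface{}{}}
	fmt.Println(c.Get("union:", func() interface{} { return "fs" }))
}
```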
Gary Kim
d2be792d5e moveto: fix case-insensitive same remote move 2019-06-15 10:06:01 +01:00
Wojciech Smigielski
2793d4b4cc remove duplicate code 2019-06-15 10:02:25 +01:00
Wojciech Smigielski
30ac9d920a enable creating encrypted config through external script invocation - fixes #3127 2019-06-15 10:02:25 +01:00
Gary Kim
6e8e620e71 serve webdav: fix serveDir not being updated with changes from webdav
Fixes an issue where changes such as renaming done using webdav
would not be reflected in the html directory listing
2019-06-15 10:00:46 +01:00
Gary Kim
5597d6d871 serve webdav: add tests for serve http functionality 2019-06-15 10:00:46 +01:00
Gary Kim
622e0d19ce serve webdav: combine serve webdav and serve http 2019-06-15 10:00:46 +01:00
Nick Craig-Wood
ce400a8fdc Add rsync.net as a provider #3254 2019-06-15 09:56:17 +01:00
Nick Craig-Wood
49c05cb89b Add notes on --b2-version not working with crypt #1627
See also: https://forum.rclone.org/t/how-to-restore-a-previous-version-of-a-folde/10456/2
2019-06-15 09:56:17 +01:00
Nick Craig-Wood
d533de0f5c docs: clarify that s3 can't access Glacier Vault 2019-06-15 09:56:17 +01:00
Nick Craig-Wood
1a4fe4bc6c Add Aleksandar Jankovic to contributors 2019-06-15 09:56:17 +01:00
Aleksandar Jankovic
93207ead9c rc/jobs: make job expiry timeouts configurable 2019-06-15 09:55:32 +01:00
Nick Craig-Wood
22368b997c b2: implement SetModTime #3210
SetModTime() is implemented by copying an object onto itself and
updating the metadata in the process.
2019-06-13 17:31:33 +01:00
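A hedged sketch of the copy-onto-itself technique (serverSideCopy is a hypothetical stand-in for B2's copy call with replaced metadata, not the backend API):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

type Object struct{ id string }

// serverSideCopy stands in for a B2 copy-file request that replaces the
// file info during the copy (illustration only).
func serverSideCopy(ctx context.Context, src, dst *Object, info map[string]string) error {
	fmt.Println("copy", src.id, "->", dst.id, "with", info)
	return nil
}

// SetModTime: B2 has no in-place metadata edit, so the object is copied
// onto itself with the new modification time in its file info.
func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
	info := map[string]string{
		"src_last_modified_millis": fmt.Sprintf("%d", t.UnixNano()/int64(time.Millisecond)),
	}
	return serverSideCopy(ctx, o, o, info)
}

func main() {
	_ = (&Object{id: "file.txt"}).SetModTime(context.Background(), time.Now())
}
```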
Nick Craig-Wood
a5bed67016 b2: implement server side copy - fixes #3210 2019-06-13 17:31:33 +01:00
Gary Kim
44f6491731 bin: update make_changelog.py to support semver 2019-06-13 14:51:42 +01:00
Nick Craig-Wood
12c2a750f5 build: fix build lockup by increasing GOMAXPROCS - Fixes #3154
It was discovered after lots of experimentation that the cmd/mount
tests have a tendency to lock up if GOMAXPROCS=1 or 2.  Since the
Travis builders only have 2 vCPUs by default, this happens on the
build server very often.

This workaround increases GOMAXPROCS to make the mount test lockup
less likely.

Ideally this should be fixed in the mount tests at some point.
2019-06-13 13:47:43 +01:00
Nick Craig-Wood
92bbae5cca Add Florian Apolloner to contributors 2019-06-13 13:47:43 +01:00
Florian Apolloner
939b19c3b7 cmd: add support for private repositories in serve restic - fixes #3247 2019-06-12 13:39:38 +01:00
Nick Craig-Wood
64fb4effa7 docs: add FAQ entry about rclone using too much memory
See: #2200 #1391 #3196
2019-06-12 11:08:19 +01:00
Nick Craig-Wood
4d195d5a52 gcs: Fix upload errors when uploading pre 1970 files
Before this change rclone attempted to set the "updated" field in
uploaded objects to the modification time.

However when this modification time was before 1970, google cloud storage
would return the rather cryptic error:

    googleapi: Error 400: Invalid value for UnsignedLong: -42000, invalid

However API docs: https://cloud.google.com/storage/docs/json_api/v1/objects#resource
state the "updated" field is read only and tests confirm that.  Even
though the field is read only, it looks like Google parses it.

This change therefore removes the attempt to set the "updated" field
(which was doing nothing anyway) and fixes the problem uploading pre
1970 files.

See #3196 and https://forum.rclone.org/t/invalid-value-for-unsignedlong-file-missing-date-modified/3466
2019-06-12 10:51:49 +01:00
albertony
976a020a2f Use rclone.conf from the rclone executable directory if it already exists 2019-06-12 10:08:00 +01:00
Nick Craig-Wood
550ab441c5 rc: Skip auth for OPTIONS request
Before this change using --user and --pass was impossible on the rc
from a browser as the browser needed to make the OPTIONS request first
before sending Authorization: headers, but the OPTIONS request
required an Authorization: header.

After this change we allow OPTIONS requests to go through without
checking the Authorization: header.
2019-06-10 19:33:45 +01:00
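A minimal sketch of the behaviour described above (an assumed illustration, not the rc server code): OPTIONS preflights pass through unauthenticated, everything else still needs basic auth.

```go
package main

import "net/http"

func authWrap(user, pass string, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Browsers send the CORS preflight without credentials, so let
		// OPTIONS through; all other methods must authenticate.
		if r.Method != http.MethodOptions {
			u, p, ok := r.BasicAuth()
			if !ok || u != user || p != pass {
				w.Header().Set("WWW-Authenticate", `Basic realm="rclone"`)
				http.Error(w, "unauthorised", http.StatusUnauthorized)
				return
			}
		}
		next.ServeHTTP(w, r)
	})
}

func main() {
	_ = http.ListenAndServe(":8080", authWrap("user", "pass", http.DefaultServeMux))
}
```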
Nick Craig-Wood
e24cadc7a1 box: Fix ineffectual assignment (ineffassign) 2019-06-10 19:33:10 +01:00
Nick Craig-Wood
903ede52cd config: make config create/update encrypt passwords where necessary
Before this change when using "rclone config create" it wasn't
possible to add passwords in one go, it was necessary to call "rclone
config password" to add the passwords afterwards as "rclone config
create" didn't obscure passwords.

After this change "rclone config create" and "rclone config update"
will obscure passwords as necessary as will the corresponding API
calls config/create and config/update.

This makes "rclone config password" and its API config/password
obsolete, however they will be left for backwards compatibility.
2019-06-10 18:08:55 +01:00
Nick Craig-Wood
f681d32996 rc: Fix serving bucket based objects with --rc-serve
Before this change serving bucket based objects
`[remote:bucket]/path/to/object` would fail with 404 not found.

This was because the leading `/` in `/path/to/object` was being passed
to NewObject.
2019-06-10 11:59:06 +01:00
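The essence of the fix, sketched with a hypothetical helper (objectPath is illustrative): strip the leading slash from the served URL path before handing it to NewObject.

```go
package main

import (
	"fmt"
	"strings"
)

// objectPath converts a served URL path such as "/path/to/object" into the
// remote path NewObject expects; the leading "/" was the cause of the 404s.
func objectPath(urlPath string) string {
	return strings.TrimLeft(urlPath, "/")
}

func main() {
	fmt.Println(objectPath("/path/to/object")) // path/to/object
}
```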
Gary Kim
2c72e7f0a2 docs: add implicit FTP over TLS documentation 2019-06-09 16:06:39 +01:00
Gary Kim
db8cd1a993 ftp: Add no_check_certificate option for FTPS 2019-06-09 16:06:39 +01:00
Gary Kim
2890b69c48 ftp: Add FTP over TLS support 2019-06-09 16:06:39 +01:00
Gary Kim
66b3795eb8 vendor: update github.com/jlaffaye/ftp 2019-06-09 16:06:39 +01:00
Nick Craig-Wood
45f41c2c4a Add forgems to contributors 2019-06-09 16:02:20 +01:00
Garry McNulty
34f03ce590 operations: ignore negative sizes when calculating total (#3135) 2019-06-09 16:00:41 +01:00
Garry McNulty
e2fde62cd9 drive: add --drive-size-as-quota to show storage quota usage for file size - fixes #3135 2019-06-09 16:00:41 +01:00
forgems
4b27c6719b fs: Allow sync of a file and a directory with the same name
When sorting fs.DirEntries we sort by DirEntry type, and when
synchronizing files we let directories come before objects, so if the
destination fs doesn't support duplicate names we only lose the
duplicated object instead of the whole directory.

This enables synchronisation to work with a file and a directory of the same name
which is reasonably common on bucket based remotes.
2019-06-09 15:57:05 +01:00
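A hedged sketch of the ordering rule (stand-in types, not the fs source): with equal names a directory sorts ahead of an object, so a destination that cannot hold both only drops the object.

```go
package main

import (
	"fmt"
	"sort"
)

type entry struct {
	name  string
	isDir bool
}

// sortEntries orders by name and, for equal names, puts the directory
// before the object of the same name.
func sortEntries(entries []entry) {
	sort.SliceStable(entries, func(i, j int) bool {
		if entries[i].name != entries[j].name {
			return entries[i].name < entries[j].name
		}
		return entries[i].isDir && !entries[j].isDir
	})
}

func main() {
	es := []entry{{"a", false}, {"a", true}}
	sortEntries(es)
	fmt.Println(es) // [{a true} {a false}]
}
```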
Nick Craig-Wood
fb6966b5fe docs: Fix warnings after hugo upgrade to v0.55 2019-06-08 11:55:51 +01:00
Nick Craig-Wood
454dfd3c9e rc: Add operations/fsinfo: Return information about the remote
This returns information about the remote including Name, Root,
Hashes and optional Features.
2019-06-08 09:19:07 +01:00
Nick Craig-Wood
e1cf551ded fs: add Features.Enabled to return map of enabled features by name 2019-06-08 08:46:53 +01:00
Nick Craig-Wood
bd10344d65 rc: add --loopback flag to run commands directly without a server 2019-06-08 08:45:55 +01:00
Nick Craig-Wood
1aa65d60e1 lsjson: add IsBucket field for bucket based remote listing of the root 2019-06-07 17:28:15 +01:00
Nick Craig-Wood
aa81957586 drive: add --drive-server-side-across-configs
In #2728 and 55b9a4e we decided to allow server side operations
between google drives with different configurations.

This works in some cases (eg between teamdrives) but does not work in
the general case, and this caused breakage in quite a number of
people's workflows.

This change makes the feature conditional on the
--drive-server-side-across-configs flag which defaults to off.

See: https://forum.rclone.org/t/gdrive-to-gdrive-error-404-file-not-found/9621/10

Fixes #3119
2019-06-07 12:12:49 +01:00
Nick Craig-Wood
b7800e96d7 vendor: update golang.org/x/net/webdav - fixes #3002
This makes duplicacy work with rclone serve webdav
2019-06-07 11:54:57 +01:00
Cnly
fb1bbecb41 fstests: add FsRootCollapse test - #3164 2019-06-06 15:48:46 +01:00
Cnly
e4c2468244 onedrive: More accurately check if root is found - fixes #3164 2019-06-06 15:48:46 +01:00
1472 changed files with 175471 additions and 33677 deletions

appveyor.yml

@@ -2,7 +2,7 @@ version: "{build}"
 os: Windows Server 2012 R2
-clone_folder: c:\gopath\src\github.com\ncw\rclone
+clone_folder: c:\gopath\src\github.com\rclone\rclone
 cache:
   - '%LocalAppData%\go-build'
@@ -16,7 +16,7 @@ environment:
   PATHCC32: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%NOCCPATH%
   PATH: '%PATHCC64%'
   RCLONE_CONFIG_PASS:
-    secure: HbzxSy9zQ8NYWN9NNPf6ALQO9Q0mwRNqwehsLcOEHy0=
+    secure: sq9CPBbwaeKJv+yd24U44neORYPQVy6jsjnQptC+5yk=
 install:
   - choco install winfsp -y
@@ -40,10 +40,11 @@ build_script:
 test_script:
   - make GOTAGS=cmount quicktest
+  - make GOTAGS=cmount racequicktest
 artifacts:
   - path: rclone.exe
   - path: build/*-v*.zip
 deploy_script:
-  - IF "%APPVEYOR_REPO_NAME%" == "ncw/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload
+  - IF "%APPVEYOR_REPO_NAME%" == "rclone/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload

.circleci/config.yml

@@ -6,7 +6,7 @@ jobs:
   build:
     machine: true
-    working_directory: ~/.go_workspace/src/github.com/ncw/rclone
+    working_directory: ~/.go_workspace/src/github.com/rclone/rclone
     steps:
       - checkout
@@ -14,37 +14,30 @@ jobs:
       - run:
           name: Cross-compile rclone
           command: |
-            docker pull rclone/xgo-cgofuse
+            docker pull billziss/xgo-cgofuse
             go get -v github.com/karalabe/xgo
             xgo \
-                --image=rclone/xgo-cgofuse \
-                --targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
+                -image=billziss/xgo-cgofuse \
+                -targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
                 -tags cmount \
+                -dest build \
                 .
             xgo \
-                --targets=android/*,ios/* \
+                -image=billziss/xgo-cgofuse \
+                -targets=android/*,ios/* \
+                -dest build \
                 .
-      - run:
-          name: Prepare artifacts
-          command: |
-            mkdir -p /tmp/rclone.dist
-            cp -R rclone-* /tmp/rclone.dist
-            mkdir build
-            cp -R rclone-* build/
       - run:
           name: Build rclone
           command: |
-            go version
-            go build
+            docker pull golang
+            docker run --rm -v "$PWD":/usr/src/rclone -w /usr/src/rclone golang go build -mod=vendor -v
       - run:
           name: Upload artifacts
           command: |
-            if [[ $CIRCLE_PULL_REQUEST != "" ]]; then
-              make circleci_upload
-            fi
+            make circleci_upload
       - store_artifacts:
-          path: /tmp/rclone.dist
+          path: build

.gitattributes vendored Normal file (7 lines changed)

@@ -0,0 +1,7 @@
# Ignore generated files in GitHub language statistics and diffs
/MANUAL.* linguist-generated=true
/rclone.1 linguist-generated=true
# Don't fiddle with the line endings of test data
**/testdata/** -text
**/test/** -text

.github/ISSUE_TEMPLATE.md

@@ -10,7 +10,7 @@ instead of filing an issue for a quick response.
 If you are reporting a bug or asking for a new feature then please use one of the templates here:
-https://github.com/ncw/rclone/issues/new
+https://github.com/rclone/rclone/issues/new
 otherwise fill in the form below.

.github/PULL_REQUEST_TEMPLATE.md

@@ -22,8 +22,8 @@ Link issues and relevant forum posts here.
 #### Checklist
-- [ ] I have read the [contribution guidelines](https://github.com/ncw/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
+- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
 - [ ] I have added tests for all changes in this PR if appropriate.
 - [ ] I have added documentation for the changes if appropriate.
-- [ ] All commit messages are in [house style](https://github.com/ncw/rclone/blob/master/CONTRIBUTING.md#commit-messages).
+- [ ] All commit messages are in [house style](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#commit-messages).
 - [ ] I'm done, this Pull Request is ready for review :-)

.github/workflows/build.yml vendored Normal file (206 lines changed)

@@ -0,0 +1,206 @@
---
# Github Actions build for rclone
# -*- compile-command: "yamllint -f parsable build.yml" -*-
name: build
# Trigger the workflow on push or pull request
on:
push:
branches:
- '*'
tags:
- '*'
pull_request:
jobs:
build:
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'modules_race', 'go1.10', 'go1.11', 'go1.12']
include:
- job_name: linux
os: ubuntu-latest
go: '1.13.x'
modules: off
gotags: cmount
build_flags: '-include "^linux/"'
check: true
quicktest: true
deploy: true
- job_name: mac
os: macOS-latest
go: '1.13.x'
modules: off
gotags: '' # cmount doesn't work on osx travis for some reason
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
racequicktest: true
deploy: true
- job_name: windows_amd64
os: windows-latest
go: '1.13.x'
modules: off
gotags: cmount
build_flags: '-include "^windows/amd64" -cgo'
quicktest: true
racequicktest: true
deploy: true
- job_name: windows_386
os: windows-latest
go: '1.13.x'
modules: off
gotags: cmount
goarch: '386'
cgo: '1'
build_flags: '-include "^windows/386" -cgo'
quicktest: true
deploy: true
- job_name: other_os
os: ubuntu-latest
go: '1.13.x'
modules: off
build_flags: '-exclude "^(windows/|darwin/amd64|linux/)"'
compile_all: true
deploy: true
- job_name: modules_race
os: ubuntu-latest
go: '1.13.x'
modules: on
quicktest: true
racequicktest: true
- job_name: go1.10
os: ubuntu-latest
go: '1.10.x'
modules: off
quicktest: true
- job_name: go1.11
os: ubuntu-latest
go: '1.11.x'
modules: off
quicktest: true
- job_name: go1.12
os: ubuntu-latest
go: '1.12.x'
modules: off
quicktest: true
name: ${{ matrix.job_name }}
runs-on: ${{ matrix.os }}
steps:
- name: Checkout
uses: actions/checkout@master
with:
path: ./src/github.com/${{ github.repository }}
- name: Install Go
uses: actions/setup-go@v1
with:
go-version: ${{ matrix.go }}
- name: Set environment variables
shell: bash
run: |
echo '::set-env name=GOPATH::${{ runner.workspace }}'
echo '::add-path::${{ runner.workspace }}/bin'
echo '::set-env name=GO111MODULE::${{ matrix.modules }}'
echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi
- name: Install Libraries on Linux
shell: bash
run: |
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
sudo apt-get install fuse libfuse-dev rpm pkg-config
if: matrix.os == 'ubuntu-latest'
- name: Install Libraries on macOS
shell: bash
run: |
brew update
brew tap caskroom/cask
brew cask install osxfuse
if: matrix.os == 'macOS-latest'
- name: Install Libraries on Windows
shell: powershell
run: |
$ProgressPreference = 'SilentlyContinue'
choco install -y winfsp zip
Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
if ($env:GOARCH -eq "386") {
choco install -y mingw --forcex86 --force
Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
}
# Copy mingw32-make.exe to make.exe so the same command line
# can be used on Windows as on macOS and Linux
$path = (get-command mingw32-make.exe).Path
Copy-Item -Path $path -Destination (Join-Path (Split-Path -Path $path) 'make.exe')
if: matrix.os == 'windows-latest'
- name: Print Go version and environment
shell: bash
run: |
printf "Using go at: $(which go)\n"
printf "Go version: $(go version)\n"
printf "\n\nGo environment:\n\n"
go env
printf "\n\nRclone environment:\n\n"
make vars
printf "\n\nSystem environment:\n\n"
env
- name: Run tests
shell: bash
run: |
make
make quicktest
if: matrix.quicktest
- name: Race test
shell: bash
run: |
make racequicktest
if: matrix.racequicktest
- name: Code quality test
shell: bash
run: |
make build_dep
make check
if: matrix.check
- name: Compile all architectures test
shell: bash
run: |
make
make compile_all
if: matrix.compile_all
- name: Deploy built binaries
shell: bash
run: |
make release_dep
make travis_beta
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
BETA_SUBDIR: 'github_actions' # FIXME remove when removing travis/appveyor
# working-directory: '$(modulePath)'
if: matrix.deploy && github.head_ref == ''

.gitignore vendored (3 lines changed)

@@ -5,3 +5,6 @@ build
 docs/public
 rclone.iml
 .idea
+.history
+*.test
+*.log

.travis.yml

@@ -4,7 +4,7 @@ sudo: required
 dist: xenial
 os:
   - linux
-go_import_path: github.com/ncw/rclone
+go_import_path: github.com/rclone/rclone
 before_install:
   - git fetch --unshallow --tags
   - |
@@ -31,9 +31,9 @@ install:
 env:
   global:
     - GOTAGS=cmount
+    - GOMAXPROCS=8 # workaround for cmd/mount tests locking up - see #3154
     - GO111MODULE=off
     - GITHUB_USER=ncw
-    - GOTRACEBACK=all
     - secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
     - secure: Uaiveq+/rvQjO03GzvQZV2J6pZfedoFuhdXrLVhhHSeP4ZBca0olw7xaqkabUyP3LkVYXMDSX8EbyeuQT1jfEe5wp5sBdfaDtuYW6heFyjiHIIIbVyBfGXon6db4ETBjOaX/Xt8uktrgNge6qFlj+kpnmpFGxf0jmDLw1zgg7tk=
 addons:
@@ -50,9 +50,6 @@ matrix:
   allow_failures:
     - go: tip
   include:
-    - go: 1.9.x
-      script:
-        - make quicktest
     - go: 1.10.x
       script:
         - make quicktest
@@ -60,18 +57,41 @@ matrix:
       script:
         - make quicktest
     - go: 1.12.x
+      script:
+        - make quicktest
+    - go: 1.13.x
+      name: Linux
       env:
         - GOTAGS=cmount
+        - BUILD_FLAGS='-include "^linux/"'
+        - DEPLOY=true
       script:
        - make build_dep
         - make check
         - make quicktest
+    - go: 1.13.x
+      name: Go Modules / Race
+      env:
+        - GO111MODULE=on
+        - GOPROXY=https://proxy.golang.org
+      script:
+        - make quicktest
         - make racequicktest
+    - go: 1.13.x
+      name: Other OS
+      env:
+        - DEPLOY=true
+        - BUILD_FLAGS='-exclude "^(windows|darwin|linux)/"'
+      script:
+        - make
         - make compile_all
-    - os: osx
-      go: 1.12.x
+    - go: 1.13.x
+      name: macOS
+      os: osx
      env:
         - GOTAGS=  # cmount doesn't work on osx travis for some reason
+        - BUILD_FLAGS='-include "^darwin/" -cgo'
+        - DEPLOY=true
       cache:
         directories:
           - $HOME/Library/Caches/go-build
@@ -80,10 +100,12 @@ matrix:
         - make quicktest
         - make racequicktest
 #  - os: windows
-#    go: 1.12.x
+#    name: Windows
+#    go: 1.13.x
 #    env:
 #      - GOTAGS=cmount
 #      - CPATH='C:\Program Files (x86)\WinFsp\inc\fuse'
+#      - BUILD_FLAGS='-include "^windows/amd64" -cgo'  # 386 doesn't build yet
 #    #filter_secrets: false  # works around a problem with secrets under windows
 #    cache:
 #      directories:
@@ -101,7 +123,6 @@ deploy:
   script: make travis_beta
   skip_cleanup: true
   on:
-    repo: ncw/rclone
+    repo: rclone/rclone
     all_branches: true
-    go: 1.12.x
-    condition: $TRAVIS_PULL_REQUEST == false && $TRAVIS_OS_NAME != "windows"
+    condition: $TRAVIS_PULL_REQUEST == false && $DEPLOY == true

CONTRIBUTING.md

@@ -29,12 +29,12 @@ You'll need a Go environment set up with GOPATH set. See [the Go
 getting started docs](https://golang.org/doc/install) for more info.
 
 First in your web browser press the fork button on [rclone's GitHub
-page](https://github.com/ncw/rclone).
+page](https://github.com/rclone/rclone).
 
 Now in your terminal
 
-    go get -u github.com/ncw/rclone
-    cd $GOPATH/src/github.com/ncw/rclone
+    go get -u github.com/rclone/rclone
+    cd $GOPATH/src/github.com/rclone/rclone
     git remote rename origin upstream
     git remote add origin git@github.com:YOURUSER/rclone.git
@@ -118,7 +118,7 @@ but they can be run against any of the remotes.
     cd fs/sync
     go test -v -remote TestDrive:
-    go test -v -remote TestDrive: -subdir
+    go test -v -remote TestDrive: -fast-list
 
     cd fs/operations
     go test -v -remote TestDrive:
@@ -127,7 +127,7 @@ If you want to use the integration test framework to run these tests
 all together with an HTML report and test retries then from the
 project root:
 
-    go install github.com/ncw/rclone/fstest/test_all
+    go install github.com/rclone/rclone/fstest/test_all
     test_all -backend drive
 
 If you want to run all the integration tests against all the remotes,
@@ -362,9 +362,7 @@ Or if you want to run the integration tests manually:
   * `go test -v -remote TestRemote:`
   * `cd fs/sync`
   * `go test -v -remote TestRemote:`
-  * If you are making a bucket based remote, then check with this also
-  * `go test -v -remote TestRemote: -subdir`
-  * And if your remote defines `ListR` this also
+  * If your remote defines `ListR` check with this also
   * `go test -v -remote TestRemote: -fast-list`
 
 See the [testing](#testing) section for more information on integration tests.

Dockerfile Normal file (22 lines changed)

@@ -0,0 +1,22 @@
FROM golang AS builder
COPY . /go/src/github.com/rclone/rclone/
WORKDIR /go/src/github.com/rclone/rclone/
RUN make quicktest
RUN \
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \
make
RUN ./rclone version
# Begin final image
FROM alpine:latest
RUN apk --no-cache add ca-certificates fuse
COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
ENTRYPOINT [ "rclone" ]
WORKDIR /data
ENV XDG_CONFIG_HOME=/config

MAINTAINERS.md

@@ -51,7 +51,7 @@ The milestones have these meanings:
   * Help wanted - blue sky stuff that might get moved up, or someone could help with
   * Known bugs - bugs waiting on external factors or we aren't going to fix for the moment
 
-Tickets [with no milestone](https://github.com/ncw/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.
+Tickets [with no milestone](https://github.com/rclone/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.
 
 ## Closing Tickets ##

MANUAL.html generated (3932 lines changed; diff suppressed because it is too large)

MANUAL.md generated (4342 lines changed; diff suppressed because it is too large)

MANUAL.txt generated (4430 lines changed; diff suppressed because it is too large)

Makefile

@@ -1,5 +1,5 @@
 SHELL = bash
-BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(shell git rev-parse --abbrev-ref HEAD))
+BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(BUILD_SOURCEBRANCHNAME),$(lastword $(subst /, ,$(GITHUB_REF))),$(shell git rev-parse --abbrev-ref HEAD))
 LAST_TAG := $(shell git describe --tags --abbrev=0)
 ifeq ($(BRANCH),$(LAST_TAG))
 	BRANCH := master
@@ -17,7 +17,10 @@ ifneq ($(TAG),$(LAST_TAG))
 endif
 GO_VERSION := $(shell go version)
 GO_FILES := $(shell go list ./... | grep -v /vendor/ )
-BETA_PATH := $(BRANCH_PATH)$(TAG)
+ifdef BETA_SUBDIR
+	BETA_SUBDIR := /$(BETA_SUBDIR)
+endif
+BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR)
 BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
 BETA_UPLOAD_ROOT := memstore:beta-rclone-org
 BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
@@ -27,12 +30,15 @@ BUILDTAGS=-tags "$(GOTAGS)"
 LINTTAGS=--build-tags "$(GOTAGS)"
 endif
 
-.PHONY: rclone vars version
+.PHONY: rclone test_all vars version
 
 rclone:
-	touch fs/version.go
-	go install -v --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
-	cp -av `go env GOPATH`/bin/rclone .
+	go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
+	mkdir -p `go env GOPATH`/bin/
+	cp -av rclone`go env GOEXE` `go env GOPATH`/bin/
+
+test_all:
+	go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
 
 vars:
 	@echo SHELL="'$(SHELL)'"
@@ -47,17 +53,16 @@ version:
 	@echo '$(TAG)'
 
 # Full suite of integration tests
-test:	rclone
-	go install --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/ncw/rclone/fstest/test_all
+test:	rclone test_all
 	-test_all 2>&1 | tee test_all.log
 	@echo "Written logs in test_all.log"
 
 # Quick test
 quicktest:
-	RCLONE_CONFIG="/notfound" go test -v $(BUILDTAGS) $(GO_FILES)
+	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) $(GO_FILES)
 
 racequicktest:
-	RCLONE_CONFIG="/notfound" go test -v $(BUILDTAGS) -cpu=2 -race $(GO_FILES)
+	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race $(GO_FILES)
 
 # Do source code quality checks
 check:	rclone
@@ -95,7 +100,7 @@ MANUAL.txt:	MANUAL.md
 	pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt
 
 commanddocs: rclone
-	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/commands/
+	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/
 
 backenddocs: rclone bin/make_backend_docs.py
 	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
@@ -151,7 +156,7 @@ log_since_last_release:
 	git log $(LAST_TAG)..
 
 compile_all:
-	go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG)
+	go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG)
 
 appveyor_upload:
 	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
@@ -167,25 +172,15 @@ ifndef BRANCH_PATH
 endif
 	@echo Beta release ready at $(BETA_URL)/testbuilds
 
-BUILD_FLAGS := -exclude "^(windows|darwin)/"
-ifeq ($(TRAVIS_OS_NAME),osx)
-	BUILD_FLAGS := -include "^darwin/" -cgo
-endif
-ifeq ($(TRAVIS_OS_NAME),windows)
-#	BUILD_FLAGS := -include "^windows/" -cgo
-#	386 doesn't build yet
-	BUILD_FLAGS := -include "^windows/amd64" -cgo
-endif
-
 travis_beta:
-ifeq ($(TRAVIS_OS_NAME),linux)
+ifeq (linux,$(filter linux,$(subst Linux,linux,$(TRAVIS_OS_NAME) $(AGENT_OS))))
 	go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar.gz'
 endif
 	git log $(LAST_TAG).. > /tmp/git-log.txt
-	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) -parallel 8 $(BUILDTAGS) $(TAG)
+	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
 	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
 ifndef BRANCH_PATH
-	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
+	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
endif
 	@echo Beta release ready at $(BETA_URL)

README.md

@@ -1,4 +1,4 @@
-[![Logo](https://rclone.org/img/rclone-120x120.png)](https://rclone.org/)
+[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/)
 
 [Website](https://rclone.org) |
 [Documentation](https://rclone.org/docs/) |
@@ -6,13 +6,15 @@
 [Contributing](CONTRIBUTING.md) |
 [Changelog](https://rclone.org/changelog/) |
 [Installation](https://rclone.org/install/) |
-[Forum](https://forum.rclone.org/) |
+[Forum](https://forum.rclone.org/)
 
-[![Build Status](https://travis-ci.org/ncw/rclone.svg?branch=master)](https://travis-ci.org/ncw/rclone)
-[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/ncw/rclone?branch=master&passingText=windows%20-%20ok&svg=true)](https://ci.appveyor.com/project/ncw/rclone)
-[![CircleCI](https://circleci.com/gh/ncw/rclone/tree/master.svg?style=svg)](https://circleci.com/gh/ncw/rclone/tree/master)
-[![Go Report Card](https://goreportcard.com/badge/github.com/ncw/rclone)](https://goreportcard.com/report/github.com/ncw/rclone)
-[![GoDoc](https://godoc.org/github.com/ncw/rclone?status.svg)](https://godoc.org/github.com/ncw/rclone)
+[![Build Status](https://travis-ci.org/rclone/rclone.svg?branch=master)](https://travis-ci.org/rclone/rclone)
+[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/rclone/rclone?branch=master&passingText=windows%20-%20ok&svg=true)](https://ci.appveyor.com/project/rclone/rclone)
+[![Build Status](https://dev.azure.com/rclone/rclone/_apis/build/status/rclone.rclone?branchName=master)](https://dev.azure.com/rclone/rclone/_build/latest?definitionId=2&branchName=master)
+[![CircleCI](https://circleci.com/gh/rclone/rclone/tree/master.svg?style=svg)](https://circleci.com/gh/rclone/rclone/tree/master)
+[![Go Report Card](https://goreportcard.com/badge/github.com/rclone/rclone)](https://goreportcard.com/report/github.com/rclone/rclone)
+[![GoDoc](https://godoc.org/github.com/rclone/rclone?status.svg)](https://godoc.org/github.com/rclone/rclone)
+[![Docker Pulls](https://img.shields.io/docker/pulls/rclone/rclone)](https://hub.docker.com/r/rclone/rclone)
 
 # Rclone
@@ -20,6 +22,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
 ## Storage providers
 
+  * 1Fichier [:page_facing_up:](https://rclone.org/ficher/)
   * Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
   * Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
   * Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
@@ -32,11 +35,13 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
   * FTP [:page_facing_up:](https://rclone.org/ftp/)
   * Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
   * Google Drive [:page_facing_up:](https://rclone.org/drive/)
+  * Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
   * HTTP [:page_facing_up:](https://rclone.org/http/)
   * Hubic [:page_facing_up:](https://rclone.org/hubic/)
   * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
   * IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
   * Koofr [:page_facing_up:](https://rclone.org/koofr/)
+  * Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
   * Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
   * Mega [:page_facing_up:](https://rclone.org/mega/)
   * Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
@@ -49,7 +54,8 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
   * Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
   * ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
   * pCloud [:page_facing_up:](https://rclone.org/pcloud/)
-  * put.io [:page_facing_up:](https://rclone.org/webdav/#put-io)
+  * premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
+  * put.io [:page_facing_up:](https://rclone.org/putio/)
   * QingStor [:page_facing_up:](https://rclone.org/qingstor/)
   * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
   * Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)

RELEASE.md

@@ -1,8 +1,14 @@
-Extra required software for making a release
+# Release
+
+This file describes how to make the various kinds of releases
+
+## Extra required software for making a release
 
   * [github-release](https://github.com/aktau/github-release) for uploading packages
   * pandoc for making the html and man pages
 
-Making a release
+## Making a release
 
   * git status - make sure everything is checked in
   * Check travis & appveyor builds are green
   * make check
@@ -26,8 +32,8 @@ Making a release
   * # announce with forum post, twitter post, G+ post
 
 Early in the next release cycle update the vendored dependencies
   * Review any pinned packages in go.mod and remove if possible
-  * GO111MODULE=on go get -u github.com/spf13/cobra@master
   * make update
   * git status
   * git add new files
@@ -48,24 +54,57 @@ Can be fixed with
   * GO111MODULE=on go mod vendor
 
-Making a point release.  If rclone needs a point release due to some
-horrendous bug, then
+## Making a point release
+
+If rclone needs a point release due to some horrendous bug then a
+point release is necessary.
+
+First make the release branch.  If this is a second point release then
+this will be done already.
+
+  * BASE_TAG=v1.XX # eg v1.49
+  * NEW_TAG=${BASE_TAG}.Y # eg v1.49.1
+  * echo $BASE_TAG $NEW_TAG # v1.49 v1.49.1
+  * git branch ${BASE_TAG} ${BASE_TAG}-fixes
+
+Now
 
-  * git branch v1.XX v1.XX-fixes
+  * git co ${BASE_TAG}-fixes
   * git cherry-pick any fixes
   * Test (see above)
-  * make NEW_TAG=v1.XX.1 tag
+  * make NEW_TAG=${NEW_TAG} tag
   * edit docs/content/changelog.md
-  * make TAG=v1.43.1 doc
-  * git commit -a -v -m "Version v1.XX.1"
-  * git tag -d -v1.XX.1
-  * git tag -s -m "Version v1.XX.1" v1.XX.1
-  * git push --tags -u origin v1.XX-fixes
-  * make BRANCH_PATH= TAG=v1.43.1 fetch_binaries
-  * make TAG=v1.43.1 tarball
-  * make TAG=v1.43.1 sign_upload
-  * make TAG=v1.43.1 check_sign
-  * make TAG=v1.43.1 upload
-  * make TAG=v1.43.1 upload_website
-  * make TAG=v1.43.1 upload_github
-  * NB this overwrites the current beta so after the release, rebuild the last travis build
+  * make TAG=${NEW_TAG} doc
+  * git commit -a -v -m "Version ${NEW_TAG}"
+  * git tag -d ${NEW_TAG}
+  * git tag -s -m "Version ${NEW_TAG}" ${NEW_TAG}
+  * git push --tags -u origin ${BASE_TAG}-fixes
+  * Wait for builds to complete
+  * make BRANCH_PATH= TAG=${NEW_TAG} fetch_binaries
+  * make TAG=${NEW_TAG} tarball
+  * make TAG=${NEW_TAG} sign_upload
+  * make TAG=${NEW_TAG} check_sign
+  * make TAG=${NEW_TAG} upload
+  * make TAG=${NEW_TAG} upload_website
+  * make TAG=${NEW_TAG} upload_github
+  * NB this overwrites the current beta so we need to do this
+  * git co master
+  * make LAST_TAG=${NEW_TAG} startdev
+  * # cherry pick the changes to the changelog
+  * git checkout ${BASE_TAG}-fixes docs/content/changelog.md
+  * git commit --amend
+  * git push
   * Announce!
+
+## Making a manual build of docker
+
+The rclone docker image should autobuild on docker hub. If it doesn't
+or needs to be updated then rebuild like this.
+
+```
+docker build -t rclone/rclone:1.49.1 -t rclone/rclone:1.49 -t rclone/rclone:1 -t rclone/rclone:latest .
+docker push rclone/rclone:1.49.1
+docker push rclone/rclone:1.49
+docker push rclone/rclone:1
+docker push rclone/rclone:latest
+```

backend/alias/alias.go

@@ -4,10 +4,10 @@ import (
 	"errors"
 	"strings"
 
-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/config/configmap"
-	"github.com/ncw/rclone/fs/config/configstruct"
-	"github.com/ncw/rclone/fs/fspath"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config/configmap"
+	"github.com/rclone/rclone/fs/config/configstruct"
+	"github.com/rclone/rclone/fs/fspath"
 )
 
 // Register with Fs

backend/alias/alias_internal_test.go

@@ -1,15 +1,16 @@
 package alias
 
 import (
+	"context"
 	"fmt"
 	"path"
 	"path/filepath"
 	"sort"
 	"testing"
 
-	_ "github.com/ncw/rclone/backend/local" // pull in test backend
-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/config"
+	_ "github.com/rclone/rclone/backend/local" // pull in test backend
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config"
 	"github.com/stretchr/testify/require"
 )
 
@@ -69,7 +70,7 @@ func TestNewFS(t *testing.T) {
 			prepare(t, remoteRoot)
 			f, err := fs.NewFs(fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
 			require.NoError(t, err, what)
-			gotEntries, err := f.List(test.fsList)
+			gotEntries, err := f.List(context.Background(), test.fsList)
 			require.NoError(t, err, what)
 			sort.Sort(gotEntries)

backend/all/all.go

@@ -2,31 +2,36 @@ package all
 
 import (
 	// Active file systems
-	_ "github.com/ncw/rclone/backend/alias"
-	_ "github.com/ncw/rclone/backend/amazonclouddrive"
-	_ "github.com/ncw/rclone/backend/azureblob"
-	_ "github.com/ncw/rclone/backend/b2"
-	_ "github.com/ncw/rclone/backend/box"
-	_ "github.com/ncw/rclone/backend/cache"
-	_ "github.com/ncw/rclone/backend/crypt"
-	_ "github.com/ncw/rclone/backend/drive"
-	_ "github.com/ncw/rclone/backend/dropbox"
-	_ "github.com/ncw/rclone/backend/ftp"
-	_ "github.com/ncw/rclone/backend/googlecloudstorage"
-	_ "github.com/ncw/rclone/backend/http"
-	_ "github.com/ncw/rclone/backend/hubic"
-	_ "github.com/ncw/rclone/backend/jottacloud"
-	_ "github.com/ncw/rclone/backend/koofr"
-	_ "github.com/ncw/rclone/backend/local"
-	_ "github.com/ncw/rclone/backend/mega"
-	_ "github.com/ncw/rclone/backend/onedrive"
-	_ "github.com/ncw/rclone/backend/opendrive"
-	_ "github.com/ncw/rclone/backend/pcloud"
-	_ "github.com/ncw/rclone/backend/qingstor"
-	_ "github.com/ncw/rclone/backend/s3"
-	_ "github.com/ncw/rclone/backend/sftp"
-	_ "github.com/ncw/rclone/backend/swift"
-	_ "github.com/ncw/rclone/backend/union"
-	_ "github.com/ncw/rclone/backend/webdav"
-	_ "github.com/ncw/rclone/backend/yandex"
+	_ "github.com/rclone/rclone/backend/alias"
+	_ "github.com/rclone/rclone/backend/amazonclouddrive"
+	_ "github.com/rclone/rclone/backend/azureblob"
+	_ "github.com/rclone/rclone/backend/b2"
+	_ "github.com/rclone/rclone/backend/box"
+	_ "github.com/rclone/rclone/backend/cache"
+	_ "github.com/rclone/rclone/backend/crypt"
+	_ "github.com/rclone/rclone/backend/drive"
+	_ "github.com/rclone/rclone/backend/dropbox"
+	_ "github.com/rclone/rclone/backend/fichier"
+	_ "github.com/rclone/rclone/backend/ftp"
+	_ "github.com/rclone/rclone/backend/googlecloudstorage"
+	_ "github.com/rclone/rclone/backend/googlephotos"
+	_ "github.com/rclone/rclone/backend/http"
+	_ "github.com/rclone/rclone/backend/hubic"
+	_ "github.com/rclone/rclone/backend/jottacloud"
+	_ "github.com/rclone/rclone/backend/koofr"
+	_ "github.com/rclone/rclone/backend/local"
+	_ "github.com/rclone/rclone/backend/mailru"
+	_ "github.com/rclone/rclone/backend/mega"
+	_ "github.com/rclone/rclone/backend/onedrive"
+	_ "github.com/rclone/rclone/backend/opendrive"
+	_ "github.com/rclone/rclone/backend/pcloud"
+	_ "github.com/rclone/rclone/backend/premiumizeme"
+	_ "github.com/rclone/rclone/backend/putio"
+	_ "github.com/rclone/rclone/backend/qingstor"
+	_ "github.com/rclone/rclone/backend/s3"
+	_ "github.com/rclone/rclone/backend/sftp"
+	_ "github.com/rclone/rclone/backend/swift"
+	_ "github.com/rclone/rclone/backend/union"
+	_ "github.com/rclone/rclone/backend/webdav"
+	_ "github.com/rclone/rclone/backend/yandex"
 )

backend/amazonclouddrive/amazonclouddrive.go

@@ -12,6 +12,7 @@ we ignore assets completely!
*/ */
import ( import (
"context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
@@ -22,17 +23,17 @@ import (
"time" "time"
acd "github.com/ncw/go-acd" acd "github.com/ncw/go-acd"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/dircache"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"golang.org/x/oauth2" "golang.org/x/oauth2"
) )
@@ -246,6 +247,7 @@ func filterRequest(req *http.Request) {
// NewFs constructs an Fs from the path, container:path // NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
// Parse config into Options struct // Parse config into Options struct
opt := new(Options) opt := new(Options)
err := configstruct.Set(m, opt) err := configstruct.Set(m, opt)
@@ -307,7 +309,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.dirCache = dircache.New(root, f.trueRootID, f) f.dirCache = dircache.New(root, f.trueRootID, f)
// Find the current root // Find the current root
err = f.dirCache.FindRoot(false) err = f.dirCache.FindRoot(ctx, false)
if err != nil { if err != nil {
// Assume it is a file // Assume it is a file
newRoot, remote := dircache.SplitPath(root) newRoot, remote := dircache.SplitPath(root)
@@ -315,12 +317,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
tempF.dirCache = dircache.New(newRoot, f.trueRootID, &tempF) tempF.dirCache = dircache.New(newRoot, f.trueRootID, &tempF)
tempF.root = newRoot tempF.root = newRoot
// Make new Fs which is the parent // Make new Fs which is the parent
err = tempF.dirCache.FindRoot(false) err = tempF.dirCache.FindRoot(ctx, false)
if err != nil { if err != nil {
// No root so return old f // No root so return old f
return f, nil return f, nil
} }
_, err := tempF.newObjectWithInfo(remote, nil) _, err := tempF.newObjectWithInfo(ctx, remote, nil)
if err != nil { if err != nil {
if err == fs.ErrorObjectNotFound { if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f // File doesn't exist so return old f
@@ -330,7 +332,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
} }
// XXX: update the old f here instead of returning tempF, since // XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver. // `features` were already filled with functions having *f as a receiver.
// See https://github.com/ncw/rclone/issues/2182 // See https://github.com/rclone/rclone/issues/2182
f.dirCache = tempF.dirCache f.dirCache = tempF.dirCache
f.root = tempF.root f.root = tempF.root
// return an error with an fs which points to the parent // return an error with an fs which points to the parent
@@ -352,7 +354,7 @@ func (f *Fs) getRootInfo() (rootInfo *acd.Folder, err error) {
// Return an Object from a path // Return an Object from a path
// //
// If it can't be found it returns the error fs.ErrorObjectNotFound. // If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *acd.Node) (fs.Object, error) { func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *acd.Node) (fs.Object, error) {
o := &Object{ o := &Object{
fs: f, fs: f,
remote: remote, remote: remote,
@@ -361,7 +363,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *acd.Node) (fs.Object, error)
// Set info but not meta // Set info but not meta
o.info = info o.info = info
} else { } else {
err := o.readMetaData() // reads info and meta, returning an error err := o.readMetaData(ctx) // reads info and meta, returning an error
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -371,12 +373,12 @@ func (f *Fs) newObjectWithInfo(remote string, info *acd.Node) (fs.Object, error)
// NewObject finds the Object at remote. If it can't be found // NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound. // it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) { func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil) return f.newObjectWithInfo(ctx, remote, nil)
} }
 // FindLeaf finds a directory of name leaf in the folder with ID pathID
-func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
+func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
 	//fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
 	folder := acd.FolderFromId(pathID, f.c.Nodes)
 	var resp *http.Response
@@ -403,7 +405,7 @@ func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err er
 }

 // CreateDir makes a directory with pathID as parent and name leaf
-func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
+func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
 	//fmt.Printf("CreateDir(%q, %q)\n", pathID, leaf)
 	folder := acd.FolderFromId(pathID, f.c.Nodes)
 	var resp *http.Response
@@ -501,12 +503,12 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
-	err = f.dirCache.FindRoot(false)
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	err = f.dirCache.FindRoot(ctx, false)
 	if err != nil {
 		return nil, err
 	}
-	directoryID, err := f.dirCache.FindDir(dir, false)
+	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
 	if err != nil {
 		return nil, err
 	}
@@ -524,7 +526,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 			d := fs.NewDir(remote, when).SetID(*node.Id)
 			entries = append(entries, d)
 		case fileKind:
-			o, err := f.newObjectWithInfo(remote, node)
+			o, err := f.newObjectWithInfo(ctx, remote, node)
 			if err != nil {
 				iErr = err
 				return true
@@ -568,7 +570,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 // At the end of large uploads. The speculation is that the timeout
 // is waiting for the sha1 hashing to complete and the file may well
 // be properly uploaded.
-func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
+func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
 	// Return if no error - all is well
 	if inErr == nil {
 		return false, inInfo, inErr
@@ -608,7 +610,7 @@ func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, i
 	fs.Debugf(src, "Error detected after finished upload - waiting to see if object was uploaded correctly: %v (%q)", inErr, httpStatus)
 	remote := src.Remote()
 	for i := 1; i <= retries; i++ {
-		o, err := f.NewObject(remote)
+		o, err := f.NewObject(ctx, remote)
 		if err == fs.ErrorObjectNotFound {
 			fs.Debugf(src, "Object not found - waiting (%d/%d)", i, retries)
 		} else if err != nil {
@@ -634,7 +636,7 @@ func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, i
 // Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	remote := src.Remote()
 	size := src.Size()
 	// Temporary Object under construction
@@ -643,17 +645,17 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
 		remote: remote,
 	}
 	// Check if object already exists
-	err := o.readMetaData()
+	err := o.readMetaData(ctx)
 	switch err {
 	case nil:
-		return o, o.Update(in, src, options...)
+		return o, o.Update(ctx, in, src, options...)
 	case fs.ErrorObjectNotFound:
 		// Not found so create it
 	default:
 		return nil, err
 	}
 	// If not create it
-	leaf, directoryID, err := f.dirCache.FindRootAndPath(remote, true)
+	leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
 	if err != nil {
 		return nil, err
 	}
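Put first stats the object and updates it in place if it exists; otherwise the dircache resolves (and, since create is true, makes) the parent directory. A hedged sketch of what that call returns, with an illustrative path:

	// Resolve "a/b/c.txt": returns the leaf name and the backend's ID for
	// "a/b", creating intermediate directories because create is true.
	leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, "a/b/c.txt", true)
	if err != nil {
		return nil, err
	}
	// leaf == "c.txt"; directoryID identifies the parent folder on the remote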
@@ -669,7 +671,7 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
 		info, resp, err = folder.Put(in, leaf)
 		f.tokenRenewer.Stop()
 		var ok bool
-		ok, info, err = f.checkUpload(resp, in, src, info, err, time.Since(start))
+		ok, info, err = f.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
 		if ok {
 			return false, nil
 		}
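The upload above runs inside the pacer, whose callback contract drives every retry loop in these diffs: return (true, err) to back off and try again, (false, err) to stop. A minimal sketch, with doRequest standing in for any low-level API call (a hypothetical name):

	err := f.pacer.Call(func() (bool, error) {
		err := doRequest(ctx) // hypothetical low-level API call
		if fserrors.ShouldRetry(err) {
			return true, err // pacer sleeps with backoff, then calls again
		}
		return false, err // done: success or a permanent error
	})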
@@ -683,13 +685,13 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
 }

 // Mkdir creates the container if it doesn't exist
-func (f *Fs) Mkdir(dir string) error {
-	err := f.dirCache.FindRoot(true)
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
+	err := f.dirCache.FindRoot(ctx, true)
 	if err != nil {
 		return err
 	}
 	if dir != "" {
-		_, err = f.dirCache.FindDir(dir, true)
+		_, err = f.dirCache.FindDir(ctx, dir, true)
 	}
 	return err
 }
@@ -703,7 +705,7 @@ func (f *Fs) Mkdir(dir string) error {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	// go test -v -run '^Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsMove)$'
 	srcObj, ok := src.(*Object)
 	if !ok {
@@ -712,15 +714,15 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 	}

 	// create the destination directory if necessary
-	err := f.dirCache.FindRoot(true)
+	err := f.dirCache.FindRoot(ctx, true)
 	if err != nil {
 		return nil, err
 	}
-	srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(srcObj.remote, false)
+	srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
 	if err != nil {
 		return nil, err
 	}
-	dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(remote, true)
+	dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, remote, true)
 	if err != nil {
 		return nil, err
 	}
@@ -736,12 +738,12 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 		srcErr, dstErr error
 	)
 	for i := 1; i <= fs.Config.LowLevelRetries; i++ {
-		_, srcErr = srcObj.fs.NewObject(srcObj.remote) // try reading the object
+		_, srcErr = srcObj.fs.NewObject(ctx, srcObj.remote) // try reading the object
 		if srcErr != nil && srcErr != fs.ErrorObjectNotFound {
 			// exit if error on source
 			return nil, srcErr
 		}
-		dstObj, dstErr = f.NewObject(remote)
+		dstObj, dstErr = f.NewObject(ctx, remote)
 		if dstErr != nil && dstErr != fs.ErrorObjectNotFound {
 			// exit if error on dst
 			return nil, dstErr
 		}
@@ -770,7 +772,7 @@ func (f *Fs) DirCacheFlush() {
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
 	srcFs, ok := src.(*Fs)
 	if !ok {
 		fs.Debugf(src, "DirMove error: not same remote type")
@@ -786,14 +788,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
 	}

 	// find the root src directory
-	err = srcFs.dirCache.FindRoot(false)
+	err = srcFs.dirCache.FindRoot(ctx, false)
 	if err != nil {
 		return err
 	}

 	// find the root dst directory
 	if dstRemote != "" {
-		err = f.dirCache.FindRoot(true)
+		err = f.dirCache.FindRoot(ctx, true)
 		if err != nil {
 			return err
 		}
@@ -808,14 +810,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
 	if dstRemote == "" {
 		findPath = f.root
 	}
-	dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(findPath, true)
+	dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, findPath, true)
 	if err != nil {
 		return err
 	}

 	// Check destination does not exist
 	if dstRemote != "" {
-		_, err = f.dirCache.FindDir(dstRemote, false)
+		_, err = f.dirCache.FindDir(ctx, dstRemote, false)
 		if err == fs.ErrorDirNotFound {
 			// OK
 		} else if err != nil {
@@ -831,7 +833,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
 	if srcRemote == "" {
 		srcDirectoryID, err = srcFs.dirCache.RootParentID()
 	} else {
-		_, srcDirectoryID, err = srcFs.dirCache.FindPath(findPath, false)
+		_, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, findPath, false)
 	}
 	if err != nil {
 		return err
@@ -839,7 +841,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
 	srcLeaf, _ := dircache.SplitPath(srcPath)

 	// Find ID of src
-	srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
+	srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
 	if err != nil {
 		return err
 	}
@@ -872,17 +874,17 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
 // purgeCheck remotes the root directory, if check is set then it
 // refuses to do so if it has anything in
-func (f *Fs) purgeCheck(dir string, check bool) error {
+func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 	root := path.Join(f.root, dir)
 	if root == "" {
 		return errors.New("can't purge root directory")
 	}
 	dc := f.dirCache
-	err := dc.FindRoot(false)
+	err := dc.FindRoot(ctx, false)
 	if err != nil {
 		return err
 	}
-	rootID, err := dc.FindDir(dir, false)
+	rootID, err := dc.FindDir(ctx, dir, false)
 	if err != nil {
 		return err
 	}
@@ -931,8 +933,8 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
 // Rmdir deletes the root folder
 //
 // Returns an error if it isn't empty
-func (f *Fs) Rmdir(dir string) error {
-	return f.purgeCheck(dir, true)
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+	return f.purgeCheck(ctx, dir, true)
 }

 // Precision return the precision of this Fs
@@ -954,7 +956,7 @@ func (f *Fs) Hashes() hash.Set {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-//func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+//func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 //	srcObj, ok := src.(*Object)
 //	if !ok {
 //		fs.Debugf(src, "Can't copy - not same remote type")
@@ -965,7 +967,7 @@ func (f *Fs) Hashes() hash.Set {
 //	if err != nil {
 //		return nil, err
 //	}
-//	return f.NewObject(remote), nil
+//	return f.NewObject(ctx, remote), nil
 //}

 // Purge deletes all the files and the container
@@ -973,8 +975,8 @@ func (f *Fs) Hashes() hash.Set {
 // Optional interface: Only implement this if you have a way of
 // deleting all the files quicker than just running Remove() on the
 // result of List()
-func (f *Fs) Purge() error {
-	return f.purgeCheck("", false)
+func (f *Fs) Purge(ctx context.Context) error {
+	return f.purgeCheck(ctx, "", false)
 }

 // ------------------------------------------------------------
@@ -998,7 +1000,7 @@ func (o *Object) Remote() string {
 }

 // Hash returns the Md5sum of an object returning a lowercase hex string
-func (o *Object) Hash(t hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 	if t != hash.MD5 {
 		return "", hash.ErrUnsupported
 	}
@@ -1021,11 +1023,11 @@ func (o *Object) Size() int64 {
 // it also sets the info
 //
 // If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (o *Object) readMetaData() (err error) {
+func (o *Object) readMetaData(ctx context.Context) (err error) {
 	if o.info != nil {
 		return nil
 	}
-	leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(o.remote, false)
+	leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, o.remote, false)
 	if err != nil {
 		if err == fs.ErrorDirNotFound {
 			return fs.ErrorObjectNotFound
@@ -1054,8 +1056,8 @@ func (o *Object) readMetaData() (err error) {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *Object) ModTime() time.Time {
-	err := o.readMetaData()
+func (o *Object) ModTime(ctx context.Context) time.Time {
+	err := o.readMetaData(ctx)
 	if err != nil {
 		fs.Debugf(o, "Failed to read metadata: %v", err)
 		return time.Now()
@@ -1069,7 +1071,7 @@ func (o *Object) ModTime() time.Time {
 }

 // SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(modTime time.Time) error {
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	// FIXME not implemented
 	return fs.ErrorCantSetModTime
 }
@@ -1080,7 +1082,7 @@ func (o *Object) Storable() bool {
 }

 // Open an object for read
-func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	bigObject := o.Size() >= int64(o.fs.opt.TempLinkThreshold)
 	if bigObject {
 		fs.Debugf(o, "Downloading large object via tempLink")
@@ -1102,7 +1104,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 // Update the object with the contents of the io.Reader, modTime and size
 //
 // The new object may have been created if an error is returned
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
 	file := acd.File{Node: o.info}
 	var info *acd.File
 	var resp *http.Response
@@ -1113,7 +1115,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 		info, resp, err = file.Overwrite(in)
 		o.fs.tokenRenewer.Stop()
 		var ok bool
-		ok, info, err = o.fs.checkUpload(resp, in, src, info, err, time.Since(start))
+		ok, info, err = o.fs.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
 		if ok {
 			return false, nil
 		}
@@ -1138,7 +1140,7 @@ func (f *Fs) removeNode(info *acd.Node) error {
 }

 // Remove an object
-func (o *Object) Remove() error {
+func (o *Object) Remove(ctx context.Context) error {
 	return o.fs.removeNode(o.info)
 }
@@ -1260,7 +1262,7 @@ OnConflict:
 }

 // MimeType of an Object if known, "" otherwise
-func (o *Object) MimeType() string {
+func (o *Object) MimeType(ctx context.Context) string {
 	if o.info.ContentProperties != nil && o.info.ContentProperties.ContentType != nil {
 		return *o.info.ContentProperties.ContentType
 	}
@@ -1273,7 +1275,7 @@ func (o *Object) MimeType() string {
 // Automatically restarts itself in case of unexpected behaviour of the remote.
 //
 // Close the returned channel to stop being notified.
-func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
+func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
 	checkpoint := f.opt.Checkpoint
 	go func() {


@@ -7,9 +7,9 @@ package amazonclouddrive_test
 import (
 	"testing"

-	"github.com/ncw/rclone/backend/amazonclouddrive"
-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fstest/fstests"
+	"github.com/rclone/rclone/backend/amazonclouddrive"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fstest/fstests"
 )

 // TestIntegration runs integration tests against the remote


@@ -16,7 +16,6 @@ import (
 	"net/http"
 	"net/url"
 	"path"
-	"regexp"
 	"strconv"
 	"strings"
 	"sync"
@@ -24,16 +23,17 @@ import (
 	"github.com/Azure/azure-pipeline-go/pipeline"
 	"github.com/Azure/azure-storage-blob-go/azblob"
-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/accounting"
-	"github.com/ncw/rclone/fs/config/configmap"
-	"github.com/ncw/rclone/fs/config/configstruct"
-	"github.com/ncw/rclone/fs/fserrors"
-	"github.com/ncw/rclone/fs/fshttp"
-	"github.com/ncw/rclone/fs/hash"
-	"github.com/ncw/rclone/fs/walk"
-	"github.com/ncw/rclone/lib/pacer"
 	"github.com/pkg/errors"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/accounting"
+	"github.com/rclone/rclone/fs/config/configmap"
+	"github.com/rclone/rclone/fs/config/configstruct"
+	"github.com/rclone/rclone/fs/fserrors"
+	"github.com/rclone/rclone/fs/fshttp"
+	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/walk"
+	"github.com/rclone/rclone/lib/bucket"
+	"github.com/rclone/rclone/lib/pacer"
 )

 const (
@@ -53,6 +53,11 @@ const (
 	maxUploadCutoff   = 256 * fs.MebiByte
 	defaultAccessTier = azblob.AccessTierNone
 	maxTryTimeout     = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
+	// Default storage account, key and blob endpoint for emulator support,
+	// though it is a base64 key checked in here, it is publicly available secret.
+	emulatorAccount      = "devstoreaccount1"
+	emulatorAccountKey   = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
+	emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1"
 )

 // Register with Fs
@@ -63,13 +68,17 @@ func init() {
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name: "account",
-			Help: "Storage Account Name (leave blank to use connection string or SAS URL)",
+			Help: "Storage Account Name (leave blank to use SAS URL or Emulator)",
 		}, {
 			Name: "key",
-			Help: "Storage Account Key (leave blank to use connection string or SAS URL)",
+			Help: "Storage Account Key (leave blank to use SAS URL or Emulator)",
 		}, {
 			Name: "sas_url",
-			Help: "SAS URL for container level access only\n(leave blank if using account/key or connection string)",
+			Help: "SAS URL for container level access only\n(leave blank if using account/key or Emulator)",
+		}, {
+			Name:    "use_emulator",
+			Help:    "Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint)",
+			Default: false,
 		}, {
 			Name: "endpoint",
 			Help: "Endpoint for the service\nLeave blank normally.",
@@ -129,23 +138,25 @@ type Options struct {
 	ChunkSize     fs.SizeSuffix `config:"chunk_size"`
 	ListChunkSize uint          `config:"list_chunk"`
 	AccessTier    string        `config:"access_tier"`
+	UseEmulator   bool          `config:"use_emulator"`
 }

 // Fs represents a remote azure server
 type Fs struct {
 	name     string             // name of this remote
 	root     string             // the path we are working on if any
 	opt      Options            // parsed config options
 	features *fs.Features       // optional features
 	client   *http.Client       // http client we are using
 	svcURL   *azblob.ServiceURL // reference to serviceURL
-	cntURL           *azblob.ContainerURL  // reference to containerURL
-	container        string                // the container we are working on
-	containerOKMu    sync.Mutex            // mutex to protect container OK
-	containerOK      bool                  // true if we have created the container
-	containerDeleted bool                  // true if we have deleted the container
-	pacer            *fs.Pacer             // To pace and retry the API calls
-	uploadToken      *pacer.TokenDispenser // control concurrency
+	cntURLcacheMu sync.Mutex                      // mutex to protect cntURLcache
+	cntURLcache   map[string]*azblob.ContainerURL // reference to containerURL per container
+	rootContainer string                          // container part of root (if any)
+	rootDirectory string                          // directory part of root (if any)
+	isLimited     bool                            // if limited to one container
+	cache         *bucket.Cache                   // cache for container creation status
+	pacer         *fs.Pacer                       // To pace and retry the API calls
+	uploadToken   *pacer.TokenDispenser           // control concurrency
 }

 // Object describes a azure object
@@ -169,18 +180,18 @@ func (f *Fs) Name() string {
 // Root of the remote (as passed into NewFs)
 func (f *Fs) Root() string {
-	if f.root == "" {
-		return f.container
-	}
-	return f.container + "/" + f.root
+	return f.root
 }

 // String converts this Fs to a string
 func (f *Fs) String() string {
-	if f.root == "" {
-		return fmt.Sprintf("Azure container %s", f.container)
+	if f.rootContainer == "" {
+		return fmt.Sprintf("Azure root")
 	}
-	return fmt.Sprintf("Azure container %s path %s", f.container, f.root)
+	if f.rootDirectory == "" {
+		return fmt.Sprintf("Azure container %s", f.rootContainer)
+	}
+	return fmt.Sprintf("Azure container %s path %s", f.rootContainer, f.rootDirectory)
 }

 // Features returns the optional features of this Fs
@@ -188,21 +199,23 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }

-// Pattern to match a azure path
-var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
-
-// parseParse parses a azure 'url'
-func parsePath(path string) (container, directory string, err error) {
-	parts := matcher.FindStringSubmatch(path)
-	if parts == nil {
-		err = errors.Errorf("couldn't find container in azure path %q", path)
-	} else {
-		container, directory = parts[1], parts[2]
-		directory = strings.Trim(directory, "/")
-	}
+// parsePath parses a remote 'url'
+func parsePath(path string) (root string) {
+	root = strings.Trim(path, "/")
 	return
 }

+// split returns container and containerPath from the rootRelativePath
+// relative to f.root
+func (f *Fs) split(rootRelativePath string) (containerName, containerPath string) {
+	return bucket.Split(path.Join(f.root, rootRelativePath))
+}
+
+// split returns container and containerPath from the object
+func (o *Object) split() (container, containerPath string) {
+	return o.fs.split(o.remote)
+}
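The new split helpers lean on lib/bucket to peel the container off the front of a path. Assuming bucket.Split treats the first path segment as the bucket, as its use throughout this diff implies:

	// bucket.Split("container")           -> ("container", "")
	// bucket.Split("container/dir/a.txt") -> ("container", "dir/a.txt")
	container, containerPath := bucket.Split(path.Join(f.root, remote))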
 // validateAccessTier checks if azureblob supports user supplied tier
 func validateAccessTier(tier string) bool {
 	switch tier {
@@ -307,8 +320,15 @@ func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline
 	return pipeline.NewPipeline(factories, pipeline.Options{HTTPSender: httpClientFactory(f.client), Log: o.Log})
 }

+// setRoot changes the root of the Fs
+func (f *Fs) setRoot(root string) {
+	f.root = parsePath(root)
+	f.rootContainer, f.rootDirectory = bucket.Split(f.root)
+}
+
 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	ctx := context.Background()
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
@@ -327,10 +347,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if opt.ListChunkSize > maxListChunkSize {
 		return nil, errors.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
 	}
-	container, directory, err := parsePath(root)
-	if err != nil {
-		return nil, err
-	}
 	if opt.Endpoint == "" {
 		opt.Endpoint = storageDefaultBaseURL
 	}
@@ -345,26 +361,38 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	f := &Fs{
 		name:        name,
 		opt:         *opt,
-		container:   container,
-		root:        directory,
 		pacer:       fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 		uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
 		client:      fshttp.NewClient(fs.Config),
+		cache:       bucket.NewCache(),
+		cntURLcache: make(map[string]*azblob.ContainerURL, 1),
 	}
+	f.setRoot(root)
 	f.features = (&fs.Features{
 		ReadMimeType:  true,
 		WriteMimeType: true,
 		BucketBased:   true,
-		SetTier:       true,
-		GetTier:       true,
+		BucketBasedRootOK: true,
+		SetTier:           true,
+		GetTier:           true,
 	}).Fill(f)

 	var (
 		u          *url.URL
 		serviceURL azblob.ServiceURL
-		containerURL azblob.ContainerURL
 	)
 	switch {
+	case opt.UseEmulator:
+		credential, err := azblob.NewSharedKeyCredential(emulatorAccount, emulatorAccountKey)
+		if err != nil {
+			return nil, errors.Wrapf(err, "Failed to parse credentials")
+		}
+		u, err = url.Parse(emulatorBlobEndpoint)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
+		}
+		pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
+		serviceURL = azblob.NewServiceURL(*u, pipeline)
 	case opt.Account != "" && opt.Key != "":
 		credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
 		if err != nil {
@@ -377,7 +405,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		}
 		pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
 		serviceURL = azblob.NewServiceURL(*u, pipeline)
-		containerURL = serviceURL.NewContainerURL(container)
 	case opt.SASURL != "":
 		u, err = url.Parse(opt.SASURL)
 		if err != nil {
@@ -388,38 +415,30 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		// Check if we have container level SAS or account level sas
 		parts := azblob.NewBlobURLParts(*u)
 		if parts.ContainerName != "" {
-			if container != "" && parts.ContainerName != container {
+			if f.rootContainer != "" && parts.ContainerName != f.rootContainer {
 				return nil, errors.New("Container name in SAS URL and container provided in command do not match")
 			}
-			f.container = parts.ContainerName
-			containerURL = azblob.NewContainerURL(*u, pipeline)
+			containerURL := azblob.NewContainerURL(*u, pipeline)
+			f.cntURLcache[parts.ContainerName] = &containerURL
+			f.isLimited = true
 		} else {
 			serviceURL = azblob.NewServiceURL(*u, pipeline)
-			containerURL = serviceURL.NewContainerURL(container)
 		}
 	default:
 		return nil, errors.New("Need account+key or connectionString or sasURL")
 	}
 	f.svcURL = &serviceURL
-	f.cntURL = &containerURL

-	if f.root != "" {
-		f.root += "/"
+	if f.rootContainer != "" && f.rootDirectory != "" {
 		// Check to see if the (container,directory) is actually an existing file
 		oldRoot := f.root
-		remote := path.Base(directory)
-		f.root = path.Dir(directory)
-		if f.root == "." {
-			f.root = ""
-		} else {
-			f.root += "/"
-		}
-		_, err := f.NewObject(remote)
+		newRoot, leaf := path.Split(oldRoot)
+		f.setRoot(newRoot)
+		_, err := f.NewObject(ctx, leaf)
 		if err != nil {
 			if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
 				// File doesn't exist or is a directory so return old f
-				f.root = oldRoot
+				f.setRoot(oldRoot)
 				return f, nil
 			}
 			return nil, err
@@ -430,6 +449,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	return f, nil
 }

+// return the container URL for the container passed in
+func (f *Fs) cntURL(container string) (containerURL *azblob.ContainerURL) {
+	f.cntURLcacheMu.Lock()
+	defer f.cntURLcacheMu.Unlock()
+	var ok bool
+	if containerURL, ok = f.cntURLcache[container]; !ok {
+		cntURL := f.svcURL.NewContainerURL(container)
+		containerURL = &cntURL
+		f.cntURLcache[container] = containerURL
+	}
+	return containerURL
+}
+
 // Return an Object from a path
 //
 // If it can't be found it returns the error fs.ErrorObjectNotFound.
@@ -454,13 +487,13 @@ func (f *Fs) newObjectWithInfo(remote string, info *azblob.BlobItem) (fs.Object,
 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 	return f.newObjectWithInfo(remote, nil)
 }

 // getBlobReference creates an empty blob reference with no metadata
-func (f *Fs) getBlobReference(remote string) azblob.BlobURL {
-	return f.cntURL.NewBlobURL(f.root + remote)
+func (f *Fs) getBlobReference(container, containerPath string) azblob.BlobURL {
+	return f.cntURL(container).NewBlobURL(containerPath)
 }

 // updateMetadataWithModTime adds the modTime passed in to o.meta.
@@ -496,16 +529,18 @@ type listFn func(remote string, object *azblob.BlobItem, isDirectory bool) error
 // the container and root supplied
 //
 // dir is the starting directory, "" for root
-func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
-	f.containerOKMu.Lock()
-	deleted := f.containerDeleted
-	f.containerOKMu.Unlock()
-	if deleted {
+//
+// The remote has prefix removed from it and if addContainer is set then
+// it adds the container to the start.
+func (f *Fs) list(ctx context.Context, container, directory, prefix string, addContainer bool, recurse bool, maxResults uint, fn listFn) error {
+	if f.cache.IsDeleted(container) {
 		return fs.ErrorDirNotFound
 	}
-	root := f.root
-	if dir != "" {
-		root += dir + "/"
+	if prefix != "" {
+		prefix += "/"
+	}
+	if directory != "" {
+		directory += "/"
 	}
 	delimiter := ""
 	if !recurse {
@@ -520,16 +555,14 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
 			UncommittedBlobs: false,
 			Deleted:          false,
 		},
-		Prefix:     root,
+		Prefix:     directory,
 		MaxResults: int32(maxResults),
 	}
-	ctx := context.Background()
-	directoryMarkers := map[string]struct{}{}
 	for marker := (azblob.Marker{}); marker.NotDone(); {
 		var response *azblob.ListBlobsHierarchySegmentResponse
 		err := f.pacer.Call(func() (bool, error) {
 			var err error
-			response, err = f.cntURL.ListBlobsHierarchySegment(ctx, marker, delimiter, options)
+			response, err = f.cntURL(container).ListBlobsHierarchySegment(ctx, marker, delimiter, options)
 			return f.shouldRetry(err)
 		})
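With containers no longer baked into the Fs, list() reconstructs rclone-visible names from raw blob names. The mapping, condensed from the hunks that follow:

	// A blob name comes back as prefix+rest; strip the listing prefix and,
	// when listing from the root of the remote, put the container back on.
	remote := file.Name[len(prefix):]
	if addContainer {
		remote = path.Join(container, remote)
	}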
@@ -549,26 +582,17 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
 			// if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
 			//	return nil
 			// }
-			if !strings.HasPrefix(file.Name, f.root) {
+			if !strings.HasPrefix(file.Name, prefix) {
 				fs.Debugf(f, "Odd name received %q", file.Name)
 				continue
 			}
-			remote := file.Name[len(f.root):]
+			remote := file.Name[len(prefix):]
 			if isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote) {
-				if strings.HasSuffix(remote, "/") {
-					remote = remote[:len(remote)-1]
-				}
-				err = fn(remote, file, true)
-				if err != nil {
-					return err
-				}
-				// Keep track of directory markers. If recursing then
-				// there will be no Prefixes so no need to keep track
-				if !recurse {
-					directoryMarkers[remote] = struct{}{}
-				}
 				continue // skip directory marker
 			}
+			if addContainer {
+				remote = path.Join(container, remote)
+			}
 			// Send object
 			err = fn(remote, file, false)
 			if err != nil {
@@ -578,14 +602,13 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
 		// Send the subdirectories
 		for _, remote := range response.Segment.BlobPrefixes {
 			remote := strings.TrimRight(remote.Name, "/")
-			if !strings.HasPrefix(remote, f.root) {
+			if !strings.HasPrefix(remote, prefix) {
 				fs.Debugf(f, "Odd directory name received %q", remote)
 				continue
 			}
-			remote = remote[len(f.root):]
-			// Don't send if already sent as a directory marker
-			if _, found := directoryMarkers[remote]; found {
-				continue
+			remote = remote[len(prefix):]
+			if addContainer {
+				remote = path.Join(container, remote)
 			}
 			// Send object
 			err = fn(remote, nil, true)
@@ -610,19 +633,9 @@ func (f *Fs) itemToDirEntry(remote string, object *azblob.BlobItem, isDirectory
 	return o, nil
 }

-// mark the container as being OK
-func (f *Fs) markContainerOK() {
-	if f.container != "" {
-		f.containerOKMu.Lock()
-		f.containerOK = true
-		f.containerDeleted = false
-		f.containerOKMu.Unlock()
-	}
-}
-
 // listDir lists a single directory
-func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
-	err = f.list(dir, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
+func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
+	err = f.list(ctx, container, directory, prefix, addContainer, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(remote, object, isDirectory)
 		if err != nil {
 			return err
@@ -636,17 +649,24 @@ func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
 		return nil, err
 	}
 	// container must be present if listing succeeded
-	f.markContainerOK()
+	f.cache.MarkOK(container)
 	return entries, nil
 }

 // listContainers returns all the containers to out
-func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
-	if dir != "" {
-		return nil, fs.ErrorListBucketRequired
+func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err error) {
+	if f.isLimited {
+		f.cntURLcacheMu.Lock()
+		for container := range f.cntURLcache {
+			d := fs.NewDir(container, time.Time{})
+			entries = append(entries, d)
+		}
+		f.cntURLcacheMu.Unlock()
+		return entries, nil
 	}
 	err = f.listContainersToFn(func(container *azblob.ContainerItem) error {
 		d := fs.NewDir(container.Name, container.Properties.LastModified)
+		f.cache.MarkOK(container.Name)
 		entries = append(entries, d)
 		return nil
 	})
@@ -665,11 +685,15 @@ func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
-	if f.container == "" {
-		return f.listContainers(dir)
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	container, directory := f.split(dir)
+	if container == "" {
+		if directory != "" {
+			return nil, fs.ErrorListBucketRequired
+		}
+		return f.listContainers(ctx)
 	}
-	return f.listDir(dir)
+	return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
 }

 // ListR lists the objects and directories of the Fs starting
@@ -688,23 +712,44 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 //
 // Don't implement this unless you have a more efficient way
 // of listing recursively that doing a directory traversal.
-func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
-	if f.container == "" {
-		return fs.ErrorListBucketRequired
-	}
+func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
+	container, directory := f.split(dir)
 	list := walk.NewListRHelper(callback)
-	err = f.list(dir, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
-		entry, err := f.itemToDirEntry(remote, object, isDirectory)
+	listR := func(container, directory, prefix string, addContainer bool) error {
+		return f.list(ctx, container, directory, prefix, addContainer, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
+			entry, err := f.itemToDirEntry(remote, object, isDirectory)
+			if err != nil {
+				return err
+			}
+			return list.Add(entry)
+		})
+	}
+	if container == "" {
+		entries, err := f.listContainers(ctx)
 		if err != nil {
 			return err
 		}
-		return list.Add(entry)
-	})
-	if err != nil {
-		return err
+		for _, entry := range entries {
+			err = list.Add(entry)
+			if err != nil {
+				return err
+			}
+			container := entry.Remote()
+			err = listR(container, "", f.rootDirectory, true)
+			if err != nil {
+				return err
+			}
+			// container must be present if listing succeeded
+			f.cache.MarkOK(container)
+		}
+	} else {
+		err = listR(container, directory, f.rootDirectory, f.rootContainer == "")
+		if err != nil {
+			return err
+		}
+		// container must be present if listing succeeded
+		f.cache.MarkOK(container)
 	}
-	// container must be present if listing succeeded
-	f.markContainerOK()
 	return list.Flush()
 }
@@ -745,95 +790,52 @@ func (f *Fs) listContainersToFn(fn listContainerFn) error {
 // Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	// Temporary Object under construction
 	fs := &Object{
 		fs:     f,
 		remote: src.Remote(),
 	}
-	return fs, fs.Update(in, src, options...)
-}
-
-// Check if the container exists
-//
-// NB this can return incorrect results if called immediately after container deletion
-func (f *Fs) dirExists() (bool, error) {
-	options := azblob.ListBlobsSegmentOptions{
-		Details: azblob.BlobListingDetails{
-			Copy:             false,
-			Metadata:         false,
-			Snapshots:        false,
-			UncommittedBlobs: false,
-			Deleted:          false,
-		},
-		MaxResults: 1,
-	}
-	err := f.pacer.Call(func() (bool, error) {
-		ctx := context.Background()
-		_, err := f.cntURL.ListBlobsHierarchySegment(ctx, azblob.Marker{}, "", options)
-		return f.shouldRetry(err)
-	})
-	if err == nil {
-		return true, nil
-	}
-	// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
-	if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
-		return false, nil
-	}
-	return false, err
+	return fs, fs.Update(ctx, in, src, options...)
 }

 // Mkdir creates the container if it doesn't exist
-func (f *Fs) Mkdir(dir string) error {
-	f.containerOKMu.Lock()
-	defer f.containerOKMu.Unlock()
-	if f.containerOK {
-		return nil
-	}
-	if !f.containerDeleted {
-		exists, err := f.dirExists()
-		if err == nil {
-			f.containerOK = exists
-		}
-		if err != nil || exists {
-			return err
-		}
-	}
-
-	// now try to create the container
-	err := f.pacer.Call(func() (bool, error) {
-		ctx := context.Background()
-		_, err := f.cntURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
-		if err != nil {
-			if storageErr, ok := err.(azblob.StorageError); ok {
-				switch storageErr.ServiceCode() {
-				case azblob.ServiceCodeContainerAlreadyExists:
-					f.containerOK = true
-					return false, nil
-				case azblob.ServiceCodeContainerBeingDeleted:
-					// From https://docs.microsoft.com/en-us/rest/api/storageservices/delete-container
-					// When a container is deleted, a container with the same name cannot be created
-					// for at least 30 seconds; the container may not be available for more than 30
-					// seconds if the service is still processing the request.
-					time.Sleep(6 * time.Second) // default 10 retries will be 60 seconds
-					f.containerDeleted = true
-					return true, err
-				}
-			}
-		}
-		return f.shouldRetry(err)
-	})
-	if err == nil {
-		f.containerOK = true
-		f.containerDeleted = false
-	}
-	return errors.Wrap(err, "failed to make container")
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
+	container, _ := f.split(dir)
+	return f.makeContainer(ctx, container)
 }

-// isEmpty checks to see if a given directory is empty and returns an error if not
-func (f *Fs) isEmpty(dir string) (err error) {
+// makeContainer creates the container if it doesn't exist
+func (f *Fs) makeContainer(ctx context.Context, container string) error {
+	return f.cache.Create(container, func() error {
+		// now try to create the container
+		return f.pacer.Call(func() (bool, error) {
+			_, err := f.cntURL(container).Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
+			if err != nil {
+				if storageErr, ok := err.(azblob.StorageError); ok {
+					switch storageErr.ServiceCode() {
+					case azblob.ServiceCodeContainerAlreadyExists:
+						return false, nil
+					case azblob.ServiceCodeContainerBeingDeleted:
+						// From https://docs.microsoft.com/en-us/rest/api/storageservices/delete-container
+						// When a container is deleted, a container with the same name cannot be created
+						// for at least 30 seconds; the container may not be available for more than 30
+						// seconds if the service is still processing the request.
+						time.Sleep(6 * time.Second) // default 10 retries will be 60 seconds
+						f.cache.MarkDeleted(container)
+						return true, err
+					}
+				}
+			}
+			return f.shouldRetry(err)
+		})
+	}, nil)
+}
+
+// isEmpty checks to see if a given (container, directory) is empty and returns an error if not
+func (f *Fs) isEmpty(ctx context.Context, container, directory string) (err error) {
 	empty := true
-	err = f.list(dir, true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
+	err = f.list(ctx, container, directory, f.rootDirectory, f.rootContainer == "", true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
 		empty = false
 		return nil
 	})
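makeContainer now funnels creation through the shared bucket.Cache, so concurrent operations create each container at most once and remember deletions across the Fs. A hedged sketch of the cache's role as used in this diff (the creation helper is hypothetical):

	// Create runs the callback only if the container isn't already known
	// to exist; the final nil is an optional "does it exist?" probe.
	err := f.cache.Create(container, func() error {
		return createContainer(ctx, container) // hypothetical creation call
	}, nil)
	// f.cache.MarkOK / MarkDeleted / IsDeleted keep that shared view in
	// sync after listings, deletions and ContainerBeingDeleted errors.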
@@ -848,47 +850,42 @@ func (f *Fs) isEmpty(dir string) (err error) {
 // deleteContainer deletes the container. It can delete a full
 // container so use isEmpty if you don't want that.
-func (f *Fs) deleteContainer() error {
-	f.containerOKMu.Lock()
-	defer f.containerOKMu.Unlock()
-	options := azblob.ContainerAccessConditions{}
-	ctx := context.Background()
-	err := f.pacer.Call(func() (bool, error) {
-		_, err := f.cntURL.GetProperties(ctx, azblob.LeaseAccessConditions{})
-		if err == nil {
-			_, err = f.cntURL.Delete(ctx, options)
-		}
-		if err != nil {
-			// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
-			if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
-				return false, fs.ErrorDirNotFound
-			}
-			return f.shouldRetry(err)
-		}
-		return f.shouldRetry(err)
-	})
-	if err == nil {
-		f.containerOK = false
-		f.containerDeleted = true
-	}
-	return errors.Wrap(err, "failed to delete container")
+func (f *Fs) deleteContainer(ctx context.Context, container string) error {
+	return f.cache.Remove(container, func() error {
+		options := azblob.ContainerAccessConditions{}
+		return f.pacer.Call(func() (bool, error) {
+			_, err := f.cntURL(container).GetProperties(ctx, azblob.LeaseAccessConditions{})
+			if err == nil {
+				_, err = f.cntURL(container).Delete(ctx, options)
+			}
+			if err != nil {
+				// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
+				if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
+					return false, fs.ErrorDirNotFound
+				}
+				return f.shouldRetry(err)
+			}
+			return f.shouldRetry(err)
+		})
+	})
 }

 // Rmdir deletes the container if the fs is at the root
 //
 // Returns an error if it isn't empty
-func (f *Fs) Rmdir(dir string) error {
-	err := f.isEmpty(dir)
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+	container, directory := f.split(dir)
+	if container == "" || directory != "" {
+		return nil
+	}
+	err := f.isEmpty(ctx, container, directory)
 	if err != nil {
 		return err
 	}
-	if f.root != "" || dir != "" {
-		return nil
-	}
-	return f.deleteContainer()
+	return f.deleteContainer(ctx, container)
 }

 // Precision of the remote
@@ -902,13 +899,14 @@ func (f *Fs) Hashes() hash.Set {
 }

 // Purge deletes all the files and directories including the old versions.
-func (f *Fs) Purge() error {
+func (f *Fs) Purge(ctx context.Context) error {
 	dir := "" // forward compat!
-	if f.root != "" || dir != "" {
-		// Delegate to caller if not root container
+	container, directory := f.split(dir)
+	if container == "" || directory != "" {
+		// Delegate to caller if not root of a container
 		return fs.ErrorCantPurge
 	}
-	return f.deleteContainer()
+	return f.deleteContainer(ctx, container)
 }

 // Copy src to this remote using server side copy operations.
@@ -920,8 +918,9 @@ func (f *Fs) Purge() error {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
-	err := f.Mkdir("")
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+	dstContainer, dstPath := f.split(remote)
+	err := f.makeContainer(ctx, dstContainer)
 	if err != nil {
 		return nil, err
 	}
@@ -930,7 +929,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 		fs.Debugf(src, "Can't copy - not same remote type")
 		return nil, fs.ErrorCantCopy
 	}
-	dstBlobURL := f.getBlobReference(remote)
+	dstBlobURL := f.getBlobReference(dstContainer, dstPath)
 	srcBlobURL := srcObj.getBlobReference()

 	source, err := url.Parse(srcBlobURL.String())
@@ -939,7 +938,6 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 	}

 	options := azblob.BlobAccessConditions{}
-	ctx := context.Background()
 	var startCopy *azblob.BlobStartCopyFromURLResponse

 	err = f.pacer.Call(func() (bool, error) {
@@ -960,7 +958,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 		copyStatus = getMetadata.CopyStatus()
 	}

-	return f.NewObject(remote)
+	return f.NewObject(ctx, remote)
 }

 // ------------------------------------------------------------
@@ -984,7 +982,7 @@ func (o *Object) Remote() string {
 }

 // Hash returns the MD5 of an object returning a lowercase hex string
-func (o *Object) Hash(t hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 	if t != hash.MD5 {
 		return "", hash.ErrUnsupported
 	}
@@ -1064,7 +1062,8 @@ func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItem) (err error) {
 // getBlobReference creates an empty blob reference with no metadata
 func (o *Object) getBlobReference() azblob.BlobURL {
-	return o.fs.getBlobReference(o.remote)
+	container, directory := o.split()
+	return o.fs.getBlobReference(container, directory)
 }

 // clearMetaData clears enough metadata so readMetaData will re-read it
@@ -1116,7 +1115,7 @@ func (o *Object) parseTimeString(timeString string) (err error) {
 		fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
 		return err
 	}
-	o.modTime = time.Unix(unixMilliseconds/1E3, (unixMilliseconds%1E3)*1E6).UTC()
+	o.modTime = time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC()
 	return nil
 }
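The 1E3 → 1e3 change is purely stylistic (gofmt and lint prefer lowercase exponents); the arithmetic still splits a Unix-milliseconds timestamp into whole seconds plus nanoseconds. A worked example with an arbitrary timestamp:

	// e.g. 1569409455123 ms since the epoch
	ms := int64(1569409455123)
	t := time.Unix(ms/1e3, (ms%1e3)*1e6).UTC() // 123 ms -> 123,000,000 ns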
@@ -1124,14 +1123,14 @@ func (o *Object) parseTimeString(timeString string) (err error) {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *Object) ModTime() (result time.Time) {
+func (o *Object) ModTime(ctx context.Context) (result time.Time) {
 	// The error is logged in readMetaData
 	_ = o.readMetaData()
 	return o.modTime
 }

 // SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(modTime time.Time) error {
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	// Make sure o.meta is not nil
 	if o.meta == nil {
 		o.meta = make(map[string]string, 1)
@@ -1140,7 +1139,6 @@ func (o *Object) SetModTime(modTime time.Time) error {
 	o.meta[modTimeKey] = modTime.Format(timeFormatOut)

 	blob := o.getBlobReference()
-	ctx := context.Background()
 	err := o.fs.pacer.Call(func() (bool, error) {
 		_, err := blob.SetMetadata(ctx, o.meta, azblob.BlobAccessConditions{})
 		return o.fs.shouldRetry(err)
@@ -1158,14 +1156,14 @@ func (o *Object) Storable() bool {
 }

 // Open an object for read
-func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	// Offset and Count for range download
 	var offset int64
 	var count int64
 	if o.AccessTier() == azblob.AccessTierArchive {
 		return nil, errors.Errorf("Blob in archive tier, you need to set tier to hot or cool first")
 	}
+	fs.FixRangeOption(options, o.size)
 	for _, option := range options {
 		switch x := option.(type) {
 		case *fs.RangeOption:
@@ -1182,7 +1180,6 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 		}
 	}
 	blob := o.getBlobReference()
-	ctx := context.Background()
 	ac := azblob.BlobAccessConditions{}
 	var dowloadResponse *azblob.DownloadResponse
 	err = o.fs.pacer.Call(func() (bool, error) {
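The added fs.FixRangeOption clamps any requested range to the object's actual size before the loop decodes it into offset and count. From the caller's side the option looks like this (a sketch; End is the inclusive last byte, HTTP-range style):

	// Read the first KiB of the object.
	in, err := o.Open(ctx, &fs.RangeOption{Start: 0, End: 1023})
	if err != nil {
		return err
	}
	defer fs.CheckClose(in, &err)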
@@ -1371,26 +1368,27 @@ outer:
 // Update the object with the contents of the io.Reader, modTime and size
 //
 // The new object may have been created if an error is returned
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
-	err = o.fs.Mkdir("")
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+	container, _ := o.split()
+	err = o.fs.makeContainer(ctx, container)
 	if err != nil {
 		return err
 	}
 	size := src.Size()
 	// Update Mod time
-	o.updateMetadataWithModTime(src.ModTime())
+	o.updateMetadataWithModTime(src.ModTime(ctx))
 	if err != nil {
 		return err
 	}

 	blob := o.getBlobReference()
 	httpHeaders := azblob.BlobHTTPHeaders{}
-	httpHeaders.ContentType = fs.MimeType(o)
+	httpHeaders.ContentType = fs.MimeType(ctx, o)
 	// Compute the Content-MD5 of the file, for multiparts uploads it
 	// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
 	// Note: If multipart, a MD5 checksum will also be computed for each uploaded block
 	// in order to validate its integrity during transport
-	if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" {
+	if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
 		sourceMD5bytes, err := hex.DecodeString(sourceMD5)
 		if err == nil {
 			httpHeaders.ContentMD5 = sourceMD5bytes
@@ -1408,14 +1406,13 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
// FIXME Until https://github.com/Azure/azure-storage-blob-go/pull/75 // FIXME Until https://github.com/Azure/azure-storage-blob-go/pull/75
// is merged the SDK can't upload a single blob of exactly the chunk // is merged the SDK can't upload a single blob of exactly the chunk
// size, so upload with a multpart upload to work around. // size, so upload with a multpart upload to work around.
// See: https://github.com/ncw/rclone/issues/2653 // See: https://github.com/rclone/rclone/issues/2653
multipartUpload := size >= int64(o.fs.opt.UploadCutoff) multipartUpload := size >= int64(o.fs.opt.UploadCutoff)
if size == int64(o.fs.opt.ChunkSize) { if size == int64(o.fs.opt.ChunkSize) {
multipartUpload = true multipartUpload = true
fs.Debugf(o, "Setting multipart upload for file of chunk size (%d) to work around SDK bug", size) fs.Debugf(o, "Setting multipart upload for file of chunk size (%d) to work around SDK bug", size)
} }
ctx := context.Background()
// Don't retry, return a retry error instead // Don't retry, return a retry error instead
err = o.fs.pacer.CallNoRetry(func() (bool, error) { err = o.fs.pacer.CallNoRetry(func() (bool, error) {
if multipartUpload { if multipartUpload {
@@ -1448,11 +1445,10 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
} }
// Remove an object // Remove an object
func (o *Object) Remove() error { func (o *Object) Remove(ctx context.Context) error {
blob := o.getBlobReference() blob := o.getBlobReference()
snapShotOptions := azblob.DeleteSnapshotsOptionNone snapShotOptions := azblob.DeleteSnapshotsOptionNone
ac := azblob.BlobAccessConditions{} ac := azblob.BlobAccessConditions{}
ctx := context.Background()
return o.fs.pacer.Call(func() (bool, error) { return o.fs.pacer.Call(func() (bool, error) {
_, err := blob.Delete(ctx, snapShotOptions, ac) _, err := blob.Delete(ctx, snapShotOptions, ac)
return o.fs.shouldRetry(err) return o.fs.shouldRetry(err)
@@ -1460,7 +1456,7 @@ func (o *Object) Remove() error {
} }
// MimeType of an Object if known, "" otherwise // MimeType of an Object if known, "" otherwise
func (o *Object) MimeType() string { func (o *Object) MimeType(ctx context.Context) string {
return o.mimeType return o.mimeType
} }
@@ -1512,4 +1508,6 @@ var (
_ fs.ListRer = &Fs{} _ fs.ListRer = &Fs{}
_ fs.Object = &Object{} _ fs.Object = &Object{}
_ fs.MimeTyper = &Object{} _ fs.MimeTyper = &Object{}
_ fs.GetTierer = &Object{}
_ fs.SetTierer = &Object{}
) )
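For orientation, a minimal caller-side sketch of the pattern applied throughout this diff: the context is created once at the entry point and forwarded into every backend call, instead of each method minting its own context.Background(). The names f and remote below are placeholders, not part of the diff:

    ctx := context.Background() // created once by the caller
    obj, err := f.NewObject(ctx, remote)
    if err != nil {
        return err
    }
    in, err := obj.Open(ctx) // ctx now reaches the azblob SDK calls
    if err != nil {
        return err
    }
    defer func() { _ = in.Close() }()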


@@ -7,8 +7,8 @@ package azureblob
 import (
 	"testing"
 
-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fstest/fstests"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fstest/fstests"
 )
 
 // TestIntegration runs integration tests against the remote


@@ -7,7 +7,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/ncw/rclone/fs/fserrors"
+	"github.com/rclone/rclone/fs/fserrors"
 )
 
 // Error describes a B2 error response
@@ -50,7 +50,7 @@ type Timestamp time.Time
 // MarshalJSON turns a Timestamp into JSON (in UTC)
 func (t *Timestamp) MarshalJSON() (out []byte, err error) {
 	timestamp := (*time.Time)(t).UTC().UnixNano()
-	return []byte(strconv.FormatInt(timestamp/1E6, 10)), nil
+	return []byte(strconv.FormatInt(timestamp/1e6, 10)), nil
 }
 
 // UnmarshalJSON turns JSON into a Timestamp
@@ -59,7 +59,7 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
 	if err != nil {
 		return err
 	}
-	*t = Timestamp(time.Unix(timestamp/1E3, (timestamp%1E3)*1E6).UTC())
+	*t = Timestamp(time.Unix(timestamp/1e3, (timestamp%1e3)*1e6).UTC())
 	return nil
 }
@@ -189,6 +189,21 @@ type GetUploadURLResponse struct {
 	AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_file.
 }
 
+// GetDownloadAuthorizationRequest is passed to b2_get_download_authorization
+type GetDownloadAuthorizationRequest struct {
+	BucketID               string `json:"bucketId"`                       // The ID of the bucket that you want to upload to.
+	FileNamePrefix         string `json:"fileNamePrefix"`                 // The file name prefix of files the download authorization token will allow access to.
+	ValidDurationInSeconds int64  `json:"validDurationInSeconds"`         // The number of seconds before the authorization token will expire. The minimum value is 1 second. The maximum value is 604800 which is one week in seconds.
+	B2ContentDisposition   string `json:"b2ContentDisposition,omitempty"` // optional - If this is present, download requests using the returned authorization must include the same value for b2ContentDisposition.
+}
+
+// GetDownloadAuthorizationResponse is received from b2_get_download_authorization
+type GetDownloadAuthorizationResponse struct {
+	BucketID           string `json:"bucketId"`           // The unique ID of the bucket.
+	FileNamePrefix     string `json:"fileNamePrefix"`     // The file name prefix of files the download authorization token will allow access to.
+	AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when downloading files, see b2_download_file_by_name.
+}
+
 // FileInfo is received from b2_upload_file, b2_get_file_info and b2_finish_large_file
 type FileInfo struct {
 	ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
@@ -311,3 +326,14 @@ type CancelLargeFileResponse struct {
 	AccountID string `json:"accountId"` // The identifier for the account.
 	BucketID  string `json:"bucketId"`  // The unique ID of the bucket.
 }
+
+// CopyFileRequest is as passed to b2_copy_file
+type CopyFileRequest struct {
+	SourceID          string            `json:"sourceFileId"`                  // The ID of the source file being copied.
+	Name              string            `json:"fileName"`                      // The name of the new file being created.
+	Range             string            `json:"range,omitempty"`               // The range of bytes to copy. If not provided, the whole source file will be copied.
+	MetadataDirective string            `json:"metadataDirective,omitempty"`   // The strategy for how to populate metadata for the new file: COPY or REPLACE
+	ContentType       string            `json:"contentType,omitempty"`         // The MIME type of the content of the file (REPLACE only)
+	Info              map[string]string `json:"fileInfo,omitempty"`            // This field stores the metadata that will be stored with the file. (REPLACE only)
+	DestBucketID      string            `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used
+}
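A hedged sketch of how the new request/response pair would be used with the call pattern the rest of this diff follows (f.srv, f.pacer, f.shouldRetry and ctx are the backend's existing plumbing; bucketID and the prefix are placeholders):

    opts := rest.Opts{
        Method: "POST",
        Path:   "/b2_get_download_authorization",
    }
    var request = api.GetDownloadAuthorizationRequest{
        BucketID:               bucketID,
        FileNamePrefix:         "photos/",
        ValidDurationInSeconds: 3600, // one hour
    }
    var response api.GetDownloadAuthorizationResponse
    err := f.pacer.Call(func() (bool, error) {
        resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
        return f.shouldRetry(ctx, resp, err)
    })
    // On success, response.AuthorizationToken authorizes
    // b2_download_file_by_name requests for files under "photos/".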


@@ -4,8 +4,8 @@ import (
 	"testing"
 	"time"
 
-	"github.com/ncw/rclone/backend/b2/api"
-	"github.com/ncw/rclone/fstest"
+	"github.com/rclone/rclone/backend/b2/api"
+	"github.com/rclone/rclone/fstest"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )

(File diff suppressed because it is too large.)


@@ -4,7 +4,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/ncw/rclone/fstest"
+	"github.com/rclone/rclone/fstest"
 )
 
 // Test b2 string encoding


@@ -4,8 +4,8 @@ package b2
 import (
 	"testing"
 
-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fstest/fstests"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fstest/fstests"
 )
 
 // TestIntegration runs integration tests against the remote


@@ -6,6 +6,7 @@ package b2
 
 import (
 	"bytes"
+	"context"
 	"crypto/sha1"
 	"encoding/hex"
 	"fmt"
@@ -14,12 +15,12 @@ import (
 	"strings"
 	"sync"
 
-	"github.com/ncw/rclone/backend/b2/api"
-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/accounting"
-	"github.com/ncw/rclone/fs/hash"
-	"github.com/ncw/rclone/lib/rest"
 	"github.com/pkg/errors"
+	"github.com/rclone/rclone/backend/b2/api"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/accounting"
+	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/lib/rest"
 )
 
 type hashAppendingReader struct {
@@ -80,7 +81,7 @@ type largeUpload struct {
 }
 
 // newLargeUpload starts an upload of object o from in with metadata in src
-func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
+func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
 	remote := o.remote
 	size := src.Size()
 	parts := int64(0)
@@ -98,33 +99,34 @@ func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *lar
 		sha1SliceSize = parts
 	}
 
-	modTime := src.ModTime()
+	modTime := src.ModTime(ctx)
 	opts := rest.Opts{
 		Method: "POST",
 		Path:   "/b2_start_large_file",
 	}
-	bucketID, err := f.getBucketID()
+	bucket, bucketPath := o.split()
+	bucketID, err := f.getBucketID(ctx, bucket)
 	if err != nil {
 		return nil, err
 	}
 	var request = api.StartLargeFileRequest{
 		BucketID:    bucketID,
-		Name:        o.fs.root + remote,
-		ContentType: fs.MimeType(src),
+		Name:        bucketPath,
+		ContentType: fs.MimeType(ctx, src),
 		Info: map[string]string{
 			timeKey: timeString(modTime),
 		},
 	}
 	// Set the SHA1 if known
 	if !o.fs.opt.DisableCheckSum {
-		if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
+		if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
 			request.Info[sha1Key] = calculatedSha1
 		}
 	}
 	var response api.StartLargeFileResponse
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.srv.CallJSON(&opts, &request, &response)
-		return f.shouldRetry(resp, err)
+		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, err
@@ -148,7 +150,7 @@ func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *lar
 // getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
 //
 // This should be returned with returnUploadURL when finished
-func (up *largeUpload) getUploadURL() (upload *api.GetUploadPartURLResponse, err error) {
+func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
 	up.uploadMu.Lock()
 	defer up.uploadMu.Unlock()
 	if len(up.uploads) == 0 {
@@ -160,8 +162,8 @@ func (up *largeUpload) getUploadURL() (upload *api.GetUploadPartURLResponse, err
 			ID: up.id,
 		}
 		err := up.f.pacer.Call(func() (bool, error) {
-			resp, err := up.f.srv.CallJSON(&opts, &request, &upload)
-			return up.f.shouldRetry(resp, err)
+			resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
+			return up.f.shouldRetry(ctx, resp, err)
 		})
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to get upload URL")
@@ -190,12 +192,12 @@ func (up *largeUpload) clearUploadURL() {
 }
 
 // Transfer a chunk
-func (up *largeUpload) transferChunk(part int64, body []byte) error {
+func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
 	err := up.f.pacer.Call(func() (bool, error) {
 		fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
 
 		// Get upload URL
-		upload, err := up.getUploadURL()
+		upload, err := up.getUploadURL(ctx)
 		if err != nil {
 			return false, err
 		}
@@ -239,8 +241,8 @@ func (up *largeUpload) transferChunk(part int64, body []byte) error {
 		var response api.UploadPartResponse
 
-		resp, err := up.f.srv.CallJSON(&opts, nil, &response)
-		retry, err := up.f.shouldRetry(resp, err)
+		resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
+		retry, err := up.f.shouldRetry(ctx, resp, err)
 		if err != nil {
 			fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
 		}
@@ -262,7 +264,7 @@ func (up *largeUpload) transferChunk(part int64, body []byte) error {
 }
 
 // finish closes off the large upload
-func (up *largeUpload) finish() error {
+func (up *largeUpload) finish(ctx context.Context) error {
 	fs.Debugf(up.o, "Finishing large file upload with %d parts", up.parts)
 	opts := rest.Opts{
 		Method: "POST",
@@ -274,8 +276,8 @@ func (up *largeUpload) finish() error {
 	}
 	var response api.FileInfo
 	err := up.f.pacer.Call(func() (bool, error) {
-		resp, err := up.f.srv.CallJSON(&opts, &request, &response)
-		return up.f.shouldRetry(resp, err)
+		resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
+		return up.f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return err
@@ -284,7 +286,7 @@ func (up *largeUpload) finish() error {
 }
 
 // cancel aborts the large upload
-func (up *largeUpload) cancel() error {
+func (up *largeUpload) cancel(ctx context.Context) error {
 	opts := rest.Opts{
 		Method: "POST",
 		Path:   "/b2_cancel_large_file",
@@ -294,18 +296,18 @@ func (up *largeUpload) cancel() error {
 	}
 	var response api.CancelLargeFileResponse
 	err := up.f.pacer.Call(func() (bool, error) {
-		resp, err := up.f.srv.CallJSON(&opts, &request, &response)
-		return up.f.shouldRetry(resp, err)
+		resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
+		return up.f.shouldRetry(ctx, resp, err)
 	})
 	return err
 }
 
-func (up *largeUpload) managedTransferChunk(wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
+func (up *largeUpload) managedTransferChunk(ctx context.Context, wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
 	wg.Add(1)
 	go func(part int64, buf []byte) {
 		defer wg.Done()
 		defer up.f.putUploadBlock(buf)
-		err := up.transferChunk(part, buf)
+		err := up.transferChunk(ctx, part, buf)
 		if err != nil {
 			select {
 			case errs <- err:
@@ -315,7 +317,7 @@ func (up *largeUpload) managedTransferChunk(wg *sync.WaitGroup, errs chan error,
 	}(part, buf)
 }
 
-func (up *largeUpload) finishOrCancelOnError(err error, errs chan error) error {
+func (up *largeUpload) finishOrCancelOnError(ctx context.Context, err error, errs chan error) error {
 	if err == nil {
 		select {
 		case err = <-errs:
@@ -324,19 +326,19 @@ func (up *largeUpload) finishOrCancelOnError(err error, errs chan error) error {
 	}
 	if err != nil {
 		fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err)
-		cancelErr := up.cancel()
+		cancelErr := up.cancel(ctx)
 		if cancelErr != nil {
 			fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr)
 		}
 		return err
 	}
-	return up.finish()
+	return up.finish(ctx)
 }
 
 // Stream uploads the chunks from the input, starting with a required initial
 // chunk. Assumes the file size is unknown and will upload until the input
 // reaches EOF.
-func (up *largeUpload) Stream(initialUploadBlock []byte) (err error) {
+func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
 	fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
 	errs := make(chan error, 1)
 	hasMoreParts := true
@@ -344,7 +346,7 @@ func (up *largeUpload) Stream(initialUploadBlock []byte) (err error) {
 
 	// Transfer initial chunk
 	up.size = int64(len(initialUploadBlock))
-	up.managedTransferChunk(&wg, errs, 1, initialUploadBlock)
+	up.managedTransferChunk(ctx, &wg, errs, 1, initialUploadBlock)
 
 outer:
 	for part := int64(2); hasMoreParts; part++ {
@@ -386,16 +388,16 @@ outer:
 		}
 
 		// Transfer the chunk
-		up.managedTransferChunk(&wg, errs, part, buf)
+		up.managedTransferChunk(ctx, &wg, errs, part, buf)
 	}
 	wg.Wait()
 	up.sha1s = up.sha1s[:up.parts]
 
-	return up.finishOrCancelOnError(err, errs)
+	return up.finishOrCancelOnError(ctx, err, errs)
 }
 
 // Upload uploads the chunks from the input
-func (up *largeUpload) Upload() error {
+func (up *largeUpload) Upload(ctx context.Context) error {
 	fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
 	remaining := up.size
 	errs := make(chan error, 1)
@@ -426,10 +428,10 @@ outer:
 		}
 
 		// Transfer the chunk
-		up.managedTransferChunk(&wg, errs, part, buf)
+		up.managedTransferChunk(ctx, &wg, errs, part, buf)
 		remaining -= reqSize
 	}
 	wg.Wait()
 
-	return up.finishOrCancelOnError(err, errs)
+	return up.finishOrCancelOnError(ctx, err, errs)
 }
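The managedTransferChunk / finishOrCancelOnError pair above relies on a buffered channel of capacity 1, so only the first chunk error is kept and later failures never block their goroutines. A stand-alone sketch of that pattern (doChunk is a hypothetical stand-in for transferChunk):

    errs := make(chan error, 1)
    var wg sync.WaitGroup
    for part := int64(1); part <= 4; part++ {
        wg.Add(1)
        go func(part int64) {
            defer wg.Done()
            if err := doChunk(part); err != nil {
                select {
                case errs <- err: // keep the first error
                default: // channel already holds one - drop this error
                }
            }
        }(part)
    }
    wg.Wait()
    var err error
    select {
    case err = <-errs: // surface the first failure, if any
    default:
    }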


@@ -202,3 +202,23 @@ type CommitUpload struct {
 		ContentModifiedAt Time `json:"content_modified_at"`
 	} `json:"attributes"`
 }
+
+// ConfigJSON defines the shape of a box config.json
+type ConfigJSON struct {
+	BoxAppSettings AppSettings `json:"boxAppSettings"`
+	EnterpriseID   string      `json:"enterpriseID"`
+}
+
+// AppSettings defines the shape of the boxAppSettings within box config.json
+type AppSettings struct {
+	ClientID     string  `json:"clientID"`
+	ClientSecret string  `json:"clientSecret"`
+	AppAuth      AppAuth `json:"appAuth"`
+}
+
+// AppAuth defines the shape of the appAuth within boxAppSettings in config.json
+type AppAuth struct {
+	PublicKeyID string `json:"publicKeyID"`
+	PrivateKey  string `json:"privateKey"`
+	Passphrase  string `json:"passphrase"`
+}


@@ -10,8 +10,13 @@ package box
 // FIXME box can copy a directory
 
 import (
+	"context"
+	"crypto/rsa"
+	"encoding/json"
+	"encoding/pem"
 	"fmt"
 	"io"
+	"io/ioutil"
 	"log"
 	"net/http"
 	"net/url"
@@ -20,20 +25,26 @@ import (
 	"strings"
 	"time"
 
-	"github.com/ncw/rclone/backend/box/api"
-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/config"
-	"github.com/ncw/rclone/fs/config/configmap"
-	"github.com/ncw/rclone/fs/config/configstruct"
-	"github.com/ncw/rclone/fs/config/obscure"
-	"github.com/ncw/rclone/fs/fserrors"
-	"github.com/ncw/rclone/fs/hash"
-	"github.com/ncw/rclone/lib/dircache"
-	"github.com/ncw/rclone/lib/oauthutil"
-	"github.com/ncw/rclone/lib/pacer"
-	"github.com/ncw/rclone/lib/rest"
+	"github.com/rclone/rclone/lib/jwtutil"
+
+	"github.com/youmark/pkcs8"
+
 	"github.com/pkg/errors"
+	"github.com/rclone/rclone/backend/box/api"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config"
+	"github.com/rclone/rclone/fs/config/configmap"
+	"github.com/rclone/rclone/fs/config/configstruct"
+	"github.com/rclone/rclone/fs/config/obscure"
+	"github.com/rclone/rclone/fs/fserrors"
+	"github.com/rclone/rclone/fs/fshttp"
+	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/lib/dircache"
+	"github.com/rclone/rclone/lib/oauthutil"
+	"github.com/rclone/rclone/lib/pacer"
+	"github.com/rclone/rclone/lib/rest"
 	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/jws"
 )
 
 const (
@@ -48,6 +59,7 @@ const (
 	listChunks          = 1000             // chunk size to read directory listings
 	minUploadCutoff     = 50000000         // upload cutoff can be no lower than this
 	defaultUploadCutoff = 50 * 1024 * 1024
+	tokenURL            = "https://api.box.com/oauth2/token"
 )
 
 // Globals
@@ -72,9 +84,34 @@ func init() {
 		Description: "Box",
 		NewFs:       NewFs,
 		Config: func(name string, m configmap.Mapper) {
-			err := oauthutil.Config("box", name, m, oauthConfig)
-			if err != nil {
-				log.Fatalf("Failed to configure token: %v", err)
+			jsonFile, ok := m.Get("box_config_file")
+			boxSubType, boxSubTypeOk := m.Get("box_sub_type")
+			var err error
+			if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
+				boxConfig, err := getBoxConfig(jsonFile)
+				if err != nil {
+					log.Fatalf("Failed to configure token: %v", err)
+				}
+				privateKey, err := getDecryptedPrivateKey(boxConfig)
+				if err != nil {
+					log.Fatalf("Failed to configure token: %v", err)
+				}
+				claims, err := getClaims(boxConfig, boxSubType)
+				if err != nil {
+					log.Fatalf("Failed to configure token: %v", err)
+				}
+				signingHeaders := getSigningHeaders(boxConfig)
+				queryParams := getQueryParams(boxConfig)
+				client := fshttp.NewClient(fs.Config)
+				err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
+				if err != nil {
+					log.Fatalf("Failed to configure token with jwt authentication: %v", err)
+				}
+			} else {
+				err = oauthutil.Config("box", name, m, oauthConfig)
+				if err != nil {
+					log.Fatalf("Failed to configure token with oauth authentication: %v", err)
+				}
 			}
 		},
 		Options: []fs.Option{{
@@ -83,6 +120,19 @@ func init() {
 		}, {
 			Name: config.ConfigClientSecret,
 			Help: "Box App Client Secret\nLeave blank normally.",
+		}, {
+			Name: "box_config_file",
+			Help: "Box App config.json location\nLeave blank normally.",
+		}, {
+			Name:    "box_sub_type",
+			Default: "user",
+			Examples: []fs.OptionExample{{
+				Value: "user",
+				Help:  "Rclone should act on behalf of a user",
+			}, {
+				Value: "enterprise",
+				Help:  "Rclone should act on behalf of a service account",
+			}},
 		}, {
 			Name: "upload_cutoff",
 			Help: "Cutoff for switching to multipart upload (>= 50MB).",
@@ -97,6 +147,74 @@ func init() {
 	})
 }
 
+func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
+	file, err := ioutil.ReadFile(configFile)
+	if err != nil {
+		return nil, errors.Wrap(err, "box: failed to read Box config")
+	}
+	err = json.Unmarshal(file, &boxConfig)
+	if err != nil {
+		return nil, errors.Wrap(err, "box: failed to parse Box config")
+	}
+	return boxConfig, nil
+}
+
+func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
+	val, err := jwtutil.RandomHex(20)
+	if err != nil {
+		return nil, errors.Wrap(err, "box: failed to generate random string for jti")
+	}
+
+	claims = &jws.ClaimSet{
+		Iss: boxConfig.BoxAppSettings.ClientID,
+		Sub: boxConfig.EnterpriseID,
+		Aud: tokenURL,
+		Iat: time.Now().Unix(),
+		Exp: time.Now().Add(time.Second * 45).Unix(),
+		PrivateClaims: map[string]interface{}{
+			"box_sub_type": boxSubType,
+			"aud":          tokenURL,
+			"jti":          val,
+		},
+	}
+
+	return claims, nil
+}
+
+func getSigningHeaders(boxConfig *api.ConfigJSON) *jws.Header {
+	signingHeaders := &jws.Header{
+		Algorithm: "RS256",
+		Typ:       "JWT",
+		KeyID:     boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
+	}
+
+	return signingHeaders
+}
+
+func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
+	queryParams := map[string]string{
+		"client_id":     boxConfig.BoxAppSettings.ClientID,
+		"client_secret": boxConfig.BoxAppSettings.ClientSecret,
+	}
+
+	return queryParams
+}
+
+func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
+	block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
+	if len(rest) > 0 {
+		return nil, errors.Wrap(err, "box: extra data included in private key")
+	}
+
+	rsaKey, err := pkcs8.ParsePKCS8PrivateKey(block.Bytes, []byte(boxConfig.BoxAppSettings.AppAuth.Passphrase))
+	if err != nil {
+		return nil, errors.Wrap(err, "box: failed to decrypt private key")
+	}
+
+	return rsaKey.(*rsa.PrivateKey), nil
+}
+
 // Options defines the configuration for this backend
 type Options struct {
 	UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
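A hedged sketch of how these helpers presumably combine inside jwtutil.Config: the claims are signed into a JWT assertion with the decrypted private key (jws.Encode is the golang.org/x/oauth2/jws helper imported above) and exchanged at tokenURL for an OAuth token via the standard JWT bearer grant. The internals of rclone's jwtutil are assumed here, not shown:

    claims, err := getClaims(boxConfig, boxSubType)
    if err != nil {
        log.Fatal(err)
    }
    assertion, err := jws.Encode(getSigningHeaders(boxConfig), claims, privateKey)
    if err != nil {
        log.Fatal(err)
    }
    values := url.Values{
        "grant_type": {"urn:ietf:params:oauth:grant-type:jwt-bearer"},
        "assertion":  {assertion},
    }
    for k, v := range getQueryParams(boxConfig) { // client_id and client_secret
        values.Set(k, v)
    }
    resp, err := client.PostForm(tokenURL, values) // client from fshttp.NewClient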
@@ -193,9 +311,9 @@ func restoreReservedChars(x string) string {
 }
 
 // readMetaDataForPath reads the metadata from the path
-func (f *Fs) readMetaDataForPath(path string) (info *api.Item, err error) {
+func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
 	// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
-	leaf, directoryID, err := f.dirCache.FindRootAndPath(path, false)
+	leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, path, false)
 	if err != nil {
 		if err == fs.ErrorDirNotFound {
 			return nil, fs.ErrorObjectNotFound
@@ -203,7 +321,7 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.Item, err error) {
 		return nil, err
 	}
 
-	found, err := f.listAll(directoryID, false, true, func(item *api.Item) bool {
+	found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
 		if item.Name == leaf {
 			info = item
 			return true
@@ -238,6 +356,7 @@ func errorHandler(resp *http.Response) error {
 
 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	ctx := context.Background()
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
@@ -271,7 +390,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Renew the token in the background
 	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
-		_, err := f.readMetaDataForPath("")
+		_, err := f.readMetaDataForPath(ctx, "")
 		return err
 	})
@@ -279,7 +398,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	f.dirCache = dircache.New(root, rootID, f)
 
 	// Find the current root
-	err = f.dirCache.FindRoot(false)
+	err = f.dirCache.FindRoot(ctx, false)
 	if err != nil {
 		// Assume it is a file
 		newRoot, remote := dircache.SplitPath(root)
@@ -287,12 +406,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
 		tempF.root = newRoot
 		// Make new Fs which is the parent
-		err = tempF.dirCache.FindRoot(false)
+		err = tempF.dirCache.FindRoot(ctx, false)
 		if err != nil {
 			// No root so return old f
 			return f, nil
 		}
-		_, err := tempF.newObjectWithInfo(remote, nil)
+		_, err := tempF.newObjectWithInfo(ctx, remote, nil)
 		if err != nil {
 			if err == fs.ErrorObjectNotFound {
 				// File doesn't exist so return old f
@@ -303,7 +422,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		f.features.Fill(&tempF)
 		// XXX: update the old f here instead of returning tempF, since
 		// `features` were already filled with functions having *f as a receiver.
-		// See https://github.com/ncw/rclone/issues/2182
+		// See https://github.com/rclone/rclone/issues/2182
 		f.dirCache = tempF.dirCache
 		f.root = tempF.root
 		// return an error with an fs which points to the parent
@@ -323,7 +442,7 @@ func (f *Fs) rootSlash() string {
 // Return an Object from a path
 //
 // If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error) {
+func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) {
 	o := &Object{
 		fs:     f,
 		remote: remote,
@@ -333,7 +452,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error)
 		// Set info
 		err = o.setMetaData(info)
 	} else {
-		err = o.readMetaData() // reads info and meta, returning an error
+		err = o.readMetaData(ctx) // reads info and meta, returning an error
 	}
 	if err != nil {
 		return nil, err
@@ -343,14 +462,14 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error)
 
 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
-	return f.newObjectWithInfo(remote, nil)
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
+	return f.newObjectWithInfo(ctx, remote, nil)
 }
 
 // FindLeaf finds a directory of name leaf in the folder with ID pathID
-func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
+func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
 	// Find the leaf in pathID
-	found, err = f.listAll(pathID, true, false, func(item *api.Item) bool {
+	found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
 		if item.Name == leaf {
 			pathIDOut = item.ID
 			return true
@@ -368,7 +487,7 @@ func fieldsValue() url.Values {
 }
 
 // CreateDir makes a directory with pathID as parent and name leaf
-func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
+func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
 	// fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
 	var resp *http.Response
 	var info *api.Item
@@ -384,7 +503,7 @@ func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
 		},
 	}
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, &mkdir, &info)
+		resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -406,7 +525,7 @@ type listAllFn func(*api.Item) bool
 // Lists the directory required calling the user function on each item found
 //
 // If the user fn ever returns true then it early exits with found = true
-func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
+func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
 	opts := rest.Opts{
 		Method: "GET",
 		Path:   "/folders/" + dirID + "/items",
@@ -421,7 +540,7 @@ OUTER:
 		var result api.FolderItems
 		var resp *http.Response
 		err = f.pacer.Call(func() (bool, error) {
-			resp, err = f.srv.CallJSON(&opts, nil, &result)
+			resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
 			return shouldRetry(resp, err)
 		})
 		if err != nil {
@@ -467,17 +586,17 @@ OUTER:
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
-	err = f.dirCache.FindRoot(false)
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	err = f.dirCache.FindRoot(ctx, false)
 	if err != nil {
 		return nil, err
 	}
-	directoryID, err := f.dirCache.FindDir(dir, false)
+	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
 	if err != nil {
 		return nil, err
 	}
 	var iErr error
-	_, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
+	_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
 		remote := path.Join(dir, info.Name)
 		if info.Type == api.ItemTypeFolder {
 			// cache the directory ID for later lookups
@@ -486,7 +605,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 			// FIXME more info from dir?
 			entries = append(entries, d)
 		} else if info.Type == api.ItemTypeFile {
-			o, err := f.newObjectWithInfo(remote, info)
+			o, err := f.newObjectWithInfo(ctx, remote, info)
 			if err != nil {
 				iErr = err
 				return true
@@ -510,9 +629,9 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 // Returns the object, leaf, directoryID and error
 //
 // Used to create new objects
-func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
+func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
 	// Create the directory for the object if it doesn't exist
-	leaf, directoryID, err = f.dirCache.FindRootAndPath(remote, true)
+	leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
 	if err != nil {
 		return
 	}
@@ -529,22 +648,22 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
 // Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil)
 	switch err {
 	case nil:
-		return existingObj, existingObj.Update(in, src, options...)
+		return existingObj, existingObj.Update(ctx, in, src, options...)
 	case fs.ErrorObjectNotFound:
 		// Not found so create it
-		return f.PutUnchecked(in, src)
+		return f.PutUnchecked(ctx, in, src)
 	default:
 		return nil, err
 	}
 }
 
 // PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	return f.Put(in, src, options...)
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	return f.Put(ctx, in, src, options...)
 }
// PutUnchecked the object into the container // PutUnchecked the object into the container
@@ -554,56 +673,56 @@ func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption
// Copy the reader in to the new object which is returned // Copy the reader in to the new object which is returned
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
remote := src.Remote() remote := src.Remote()
size := src.Size() size := src.Size()
modTime := src.ModTime() modTime := src.ModTime(ctx)
o, _, _, err := f.createObject(remote, modTime, size) o, _, _, err := f.createObject(ctx, remote, modTime, size)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return o, o.Update(in, src, options...) return o, o.Update(ctx, in, src, options...)
} }
// Mkdir creates the container if it doesn't exist // Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(dir string) error { func (f *Fs) Mkdir(ctx context.Context, dir string) error {
err := f.dirCache.FindRoot(true) err := f.dirCache.FindRoot(ctx, true)
if err != nil { if err != nil {
return err return err
} }
if dir != "" { if dir != "" {
_, err = f.dirCache.FindDir(dir, true) _, err = f.dirCache.FindDir(ctx, dir, true)
} }
return err return err
} }
// deleteObject removes an object by ID // deleteObject removes an object by ID
func (f *Fs) deleteObject(id string) error { func (f *Fs) deleteObject(ctx context.Context, id string) error {
opts := rest.Opts{ opts := rest.Opts{
Method: "DELETE", Method: "DELETE",
Path: "/files/" + id, Path: "/files/" + id,
NoResponse: true, NoResponse: true,
} }
return f.pacer.Call(func() (bool, error) { return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(&opts) resp, err := f.srv.Call(ctx, &opts)
return shouldRetry(resp, err) return shouldRetry(resp, err)
}) })
} }
// purgeCheck removes the root directory, if check is set then it // purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in // refuses to do so if it has anything in
func (f *Fs) purgeCheck(dir string, check bool) error { func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
root := path.Join(f.root, dir) root := path.Join(f.root, dir)
if root == "" { if root == "" {
return errors.New("can't purge root directory") return errors.New("can't purge root directory")
} }
dc := f.dirCache dc := f.dirCache
err := dc.FindRoot(false) err := dc.FindRoot(ctx, false)
if err != nil { if err != nil {
return err return err
} }
rootID, err := dc.FindDir(dir, false) rootID, err := dc.FindDir(ctx, dir, false)
if err != nil { if err != nil {
return err return err
} }
@@ -617,7 +736,7 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
opts.Parameters.Set("recursive", strconv.FormatBool(!check)) opts.Parameters.Set("recursive", strconv.FormatBool(!check))
var resp *http.Response var resp *http.Response
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(&opts) resp, err = f.srv.Call(ctx, &opts)
return shouldRetry(resp, err) return shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
@@ -633,8 +752,8 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
// Rmdir deletes the root folder // Rmdir deletes the root folder
// //
// Returns an error if it isn't empty // Returns an error if it isn't empty
func (f *Fs) Rmdir(dir string) error { func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.purgeCheck(dir, true) return f.purgeCheck(ctx, dir, true)
} }
// Precision return the precision of this Fs // Precision return the precision of this Fs
@@ -651,13 +770,13 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
// If it isn't possible then return fs.ErrorCantCopy // If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object) srcObj, ok := src.(*Object)
if !ok { if !ok {
fs.Debugf(src, "Can't copy - not same remote type") fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy return nil, fs.ErrorCantCopy
} }
err := srcObj.readMetaData() err := srcObj.readMetaData(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -669,7 +788,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
} }
// Create temporary object // Create temporary object
dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size) dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -690,7 +809,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
var resp *http.Response var resp *http.Response
var info *api.Item var info *api.Item
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, &copyFile, &info) resp, err = f.srv.CallJSON(ctx, &opts, &copyFile, &info)
return shouldRetry(resp, err) return shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
@@ -708,12 +827,12 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
// Optional interface: Only implement this if you have a way of // Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the // deleting all the files quicker than just running Remove() on the
// result of List() // result of List()
func (f *Fs) Purge() error { func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck("", false) return f.purgeCheck(ctx, "", false)
} }
// move a file or folder // move a file or folder
func (f *Fs) move(endpoint, id, leaf, directoryID string) (info *api.Item, err error) { func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (info *api.Item, err error) {
// Move the object // Move the object
opts := rest.Opts{ opts := rest.Opts{
Method: "PUT", Method: "PUT",
@@ -728,7 +847,7 @@ func (f *Fs) move(endpoint, id, leaf, directoryID string) (info *api.Item, err e
} }
var resp *http.Response var resp *http.Response
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, &move, &info) resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
return shouldRetry(resp, err) return shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
@@ -746,7 +865,7 @@ func (f *Fs) move(endpoint, id, leaf, directoryID string) (info *api.Item, err e
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
// If it isn't possible then return fs.ErrorCantMove // If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object) srcObj, ok := src.(*Object)
if !ok { if !ok {
fs.Debugf(src, "Can't move - not same remote type") fs.Debugf(src, "Can't move - not same remote type")
@@ -754,13 +873,13 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
} }
// Create temporary object // Create temporary object
dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size) dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Do the move // Do the move
info, err := f.move("/files/", srcObj.id, leaf, directoryID) info, err := f.move(ctx, "/files/", srcObj.id, leaf, directoryID)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -780,7 +899,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
// If it isn't possible then return fs.ErrorCantDirMove // If it isn't possible then return fs.ErrorCantDirMove
// //
// If destination exists then return fs.ErrorDirExists // If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs) srcFs, ok := src.(*Fs)
if !ok { if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type") fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -796,14 +915,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
} }
// find the root src directory // find the root src directory
err := srcFs.dirCache.FindRoot(false) err := srcFs.dirCache.FindRoot(ctx, false)
if err != nil { if err != nil {
return err return err
} }
// find the root dst directory // find the root dst directory
if dstRemote != "" { if dstRemote != "" {
err = f.dirCache.FindRoot(true) err = f.dirCache.FindRoot(ctx, true)
if err != nil { if err != nil {
return err return err
} }
@@ -819,14 +938,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
if dstRemote == "" { if dstRemote == "" {
findPath = f.root findPath = f.root
} }
leaf, directoryID, err = f.dirCache.FindPath(findPath, true) leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true)
if err != nil { if err != nil {
return err return err
} }
// Check destination does not exist // Check destination does not exist
if dstRemote != "" { if dstRemote != "" {
_, err = f.dirCache.FindDir(dstRemote, false) _, err = f.dirCache.FindDir(ctx, dstRemote, false)
if err == fs.ErrorDirNotFound { if err == fs.ErrorDirNotFound {
// OK // OK
} else if err != nil { } else if err != nil {
@@ -837,13 +956,13 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
} }
// Find ID of src // Find ID of src
srcID, err := srcFs.dirCache.FindDir(srcRemote, false) srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
if err != nil { if err != nil {
return err return err
} }
// Do the move // Do the move
_, err = f.move("/folders/", srcID, leaf, directoryID) _, err = f.move(ctx, "/folders/", srcID, leaf, directoryID)
if err != nil { if err != nil {
return err return err
} }
@@ -852,8 +971,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
} }
// PublicLink adds a "readable by anyone with link" permission on the given file or folder. // PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(remote string) (string, error) { func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
id, err := f.dirCache.FindDir(remote, false) id, err := f.dirCache.FindDir(ctx, remote, false)
var opts rest.Opts var opts rest.Opts
if err == nil { if err == nil {
fs.Debugf(f, "attempting to share directory '%s'", remote) fs.Debugf(f, "attempting to share directory '%s'", remote)
@@ -865,7 +984,7 @@ func (f *Fs) PublicLink(remote string) (string, error) {
} }
} else { } else {
fs.Debugf(f, "attempting to share single file '%s'", remote) fs.Debugf(f, "attempting to share single file '%s'", remote)
o, err := f.NewObject(remote) o, err := f.NewObject(ctx, remote)
if err != nil { if err != nil {
return "", err return "", err
} }
@@ -885,7 +1004,7 @@ func (f *Fs) PublicLink(remote string) (string, error) {
var info api.Item var info api.Item
var resp *http.Response var resp *http.Response
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, &shareLink, &info) resp, err = f.srv.CallJSON(ctx, &opts, &shareLink, &info)
return shouldRetry(resp, err) return shouldRetry(resp, err)
}) })
return info.SharedLink.URL, err return info.SharedLink.URL, err
@@ -928,7 +1047,7 @@ func (o *Object) srvPath() string {
} }
// Hash returns the SHA-1 of an object returning a lowercase hex string // Hash returns the SHA-1 of an object returning a lowercase hex string
func (o *Object) Hash(t hash.Type) (string, error) { func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.SHA1 { if t != hash.SHA1 {
return "", hash.ErrUnsupported return "", hash.ErrUnsupported
} }
@@ -937,7 +1056,7 @@ func (o *Object) Hash(t hash.Type) (string, error) {
// Size returns the size of an object in bytes // Size returns the size of an object in bytes
func (o *Object) Size() int64 { func (o *Object) Size() int64 {
err := o.readMetaData() err := o.readMetaData(context.TODO())
if err != nil { if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err) fs.Logf(o, "Failed to read metadata: %v", err)
return 0 return 0
@@ -962,11 +1081,11 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
// readMetaData gets the metadata if it hasn't already been fetched // readMetaData gets the metadata if it hasn't already been fetched
// //
// it also sets the info // it also sets the info
func (o *Object) readMetaData() (err error) { func (o *Object) readMetaData(ctx context.Context) (err error) {
if o.hasMetaData { if o.hasMetaData {
return nil return nil
} }
-    info, err := o.fs.readMetaDataForPath(o.remote)
+    info, err := o.fs.readMetaDataForPath(ctx, o.remote)
     if err != nil {
         if apiErr, ok := err.(*api.Error); ok {
             if apiErr.Code == "not_found" || apiErr.Code == "trashed" {
@@ -983,8 +1102,8 @@ func (o *Object) readMetaData() (err error) {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *Object) ModTime() time.Time {
-    err := o.readMetaData()
+func (o *Object) ModTime(ctx context.Context) time.Time {
+    err := o.readMetaData(ctx)
     if err != nil {
         fs.Logf(o, "Failed to read metadata: %v", err)
         return time.Now()
@@ -993,7 +1112,7 @@ func (o *Object) ModTime() time.Time {
 }
 
 // setModTime sets the modification time of the local fs object
-func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
+func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item, error) {
     opts := rest.Opts{
         Method: "PUT",
         Path:   "/files/" + o.id,
@@ -1004,15 +1123,15 @@ func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
     }
     var info *api.Item
     err := o.fs.pacer.Call(func() (bool, error) {
-        resp, err := o.fs.srv.CallJSON(&opts, &update, &info)
+        resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
         return shouldRetry(resp, err)
     })
     return info, err
 }
 
 // SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(modTime time.Time) error {
-    info, err := o.setModTime(modTime)
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
+    info, err := o.setModTime(ctx, modTime)
     if err != nil {
         return err
     }
@@ -1025,7 +1144,7 @@ func (o *Object) Storable() bool {
 }
 
 // Open an object for read
-func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
     if o.id == "" {
         return nil, errors.New("can't download - no id")
     }
@@ -1037,7 +1156,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
         Options: options,
     }
     err = o.fs.pacer.Call(func() (bool, error) {
-        resp, err = o.fs.srv.Call(&opts)
+        resp, err = o.fs.srv.Call(ctx, &opts)
         return shouldRetry(resp, err)
     })
     if err != nil {
@@ -1049,7 +1168,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 // upload does a single non-multipart upload
 //
 // This is recommended for less than 50 MB of content
-func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
+func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
     upload := api.UploadFile{
         Name:              replaceReservedChars(leaf),
         ContentModifiedAt: api.Time(modTime),
@@ -1076,7 +1195,7 @@ func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Tim
         opts.Path = "/files/content"
     }
     err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-        resp, err = o.fs.srv.CallJSON(&opts, &upload, &result)
+        resp, err = o.fs.srv.CallJSON(ctx, &opts, &upload, &result)
         return shouldRetry(resp, err)
     })
     if err != nil {
@@ -1093,32 +1212,32 @@ func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Tim
 // If existing is set then it updates the object rather than creating a new one
 //
 // The new object may have been created if an error is returned
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
     o.fs.tokenRenewer.Start()
     defer o.fs.tokenRenewer.Stop()
 
     size := src.Size()
-    modTime := src.ModTime()
+    modTime := src.ModTime(ctx)
     remote := o.Remote()
 
     // Create the directory for the object if it doesn't exist
-    leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(remote, true)
+    leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, remote, true)
     if err != nil {
         return err
     }
 
     // Upload with simple or multipart
     if size <= int64(o.fs.opt.UploadCutoff) {
-        err = o.upload(in, leaf, directoryID, modTime)
+        err = o.upload(ctx, in, leaf, directoryID, modTime)
     } else {
-        err = o.uploadMultipart(in, leaf, directoryID, size, modTime)
+        err = o.uploadMultipart(ctx, in, leaf, directoryID, size, modTime)
     }
     return err
 }
 
 // Remove an object
-func (o *Object) Remove() error {
-    return o.fs.deleteObject(o.id)
+func (o *Object) Remove(ctx context.Context) error {
+    return o.fs.deleteObject(ctx, o.id)
 }
 
 // ID returns the ID of the Object if known, or "" if not
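The mechanical pattern in every hunk above is the same: each method grows a leading context.Context parameter, and that context is handed down through the pacer closure into the rest client, so cancelling the caller's context aborts the API call in flight instead of letting it run to completion. A minimal standalone sketch of the idea, using only the standard library rather than rclone's rest client:

package main

import (
    "context"
    "fmt"
    "net/http"
    "time"
)

// doCall issues one API request tied to ctx; cancelling ctx (or hitting
// its deadline) aborts the request immediately.
func doCall(ctx context.Context, url string) error {
    req, err := http.NewRequest("GET", url, nil)
    if err != nil {
        return err
    }
    resp, err := http.DefaultClient.Do(req.WithContext(ctx))
    if err != nil {
        return err
    }
    defer resp.Body.Close()
    return nil
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    defer cancel()
    if err := doCall(ctx, "https://example.com"); err != nil {
        fmt.Println("call failed:", err)
    }
}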

backend/box/box_test.go

@@ -4,8 +4,8 @@ package box_test
 import (
     "testing"
 
-    "github.com/ncw/rclone/backend/box"
-    "github.com/ncw/rclone/fstest/fstests"
+    "github.com/rclone/rclone/backend/box"
+    "github.com/rclone/rclone/fstest/fstests"
 )
 
 // TestIntegration runs integration tests against the remote
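The import rewrites in this file and the ones below track the repository's move from github.com/ncw/rclone to github.com/rclone/rclone. With Go modules the canonical path is declared once and every import in the tree has to agree with it; the relevant go.mod line (rest of the file omitted) looks like:

module github.com/rclone/rclone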

backend/box/upload.go

@@ -4,6 +4,7 @@ package box
 import (
     "bytes"
+    "context"
     "crypto/sha1"
     "encoding/base64"
     "encoding/json"
@@ -14,15 +15,15 @@ import (
     "sync"
     "time"
 
-    "github.com/ncw/rclone/backend/box/api"
-    "github.com/ncw/rclone/fs"
-    "github.com/ncw/rclone/fs/accounting"
-    "github.com/ncw/rclone/lib/rest"
     "github.com/pkg/errors"
+    "github.com/rclone/rclone/backend/box/api"
+    "github.com/rclone/rclone/fs"
+    "github.com/rclone/rclone/fs/accounting"
+    "github.com/rclone/rclone/lib/rest"
 )
 
 // createUploadSession creates an upload session for the object
-func (o *Object) createUploadSession(leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
+func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
     opts := rest.Opts{
         Method: "POST",
         Path:   "/files/upload_sessions",
@@ -41,7 +42,7 @@ func (o *Object) createUploadSession(leaf, directoryID string, size int64) (resp
     }
     var resp *http.Response
     err = o.fs.pacer.Call(func() (bool, error) {
-        resp, err = o.fs.srv.CallJSON(&opts, &request, &response)
+        resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, &response)
         return shouldRetry(resp, err)
     })
     return
@@ -53,7 +54,7 @@ func sha1Digest(digest []byte) string {
 }
 
 // uploadPart uploads a part in an upload session
-func (o *Object) uploadPart(SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
+func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
     chunkSize := int64(len(chunk))
     sha1sum := sha1.Sum(chunk)
     opts := rest.Opts{
@@ -70,7 +71,7 @@ func (o *Object) uploadPart(SessionID string, offset, totalSize int64, chunk []b
     var resp *http.Response
     err = o.fs.pacer.Call(func() (bool, error) {
         opts.Body = wrap(bytes.NewReader(chunk))
-        resp, err = o.fs.srv.CallJSON(&opts, nil, &response)
+        resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &response)
         return shouldRetry(resp, err)
     })
     if err != nil {
@@ -80,7 +81,7 @@ func (o *Object) uploadPart(SessionID string, offset, totalSize int64, chunk []b
 }
 
 // commitUpload finishes an upload session
-func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
+func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
     opts := rest.Opts{
         Method: "POST",
         Path:   "/files/upload_sessions/" + SessionID + "/commit",
@@ -97,14 +98,14 @@ func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.T
     var body []byte
     var resp *http.Response
     // For discussion of this value see:
-    // https://github.com/ncw/rclone/issues/2054
+    // https://github.com/rclone/rclone/issues/2054
     maxTries := o.fs.opt.CommitRetries
     const defaultDelay = 10
     var tries int
 outer:
     for tries = 0; tries < maxTries; tries++ {
         err = o.fs.pacer.Call(func() (bool, error) {
-            resp, err = o.fs.srv.CallJSON(&opts, &request, nil)
+            resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
             if err != nil {
                 return shouldRetry(resp, err)
             }
@@ -112,7 +113,7 @@ outer:
             return shouldRetry(resp, err)
         })
         delay := defaultDelay
-        why := "unknown"
+        var why string
         if err != nil {
             // Sometimes we get 400 Error with
             // parts_mismatch immediately after uploading
@@ -154,7 +155,7 @@ outer:
 }
 
 // abortUpload cancels an upload session
-func (o *Object) abortUpload(SessionID string) (err error) {
+func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error) {
     opts := rest.Opts{
         Method:     "DELETE",
         Path:       "/files/upload_sessions/" + SessionID,
@@ -163,16 +164,16 @@ func (o *Object) abortUpload(SessionID string) (err error) {
     }
     var resp *http.Response
     err = o.fs.pacer.Call(func() (bool, error) {
-        resp, err = o.fs.srv.Call(&opts)
+        resp, err = o.fs.srv.Call(ctx, &opts)
         return shouldRetry(resp, err)
     })
     return err
 }
 
 // uploadMultipart uploads a file using multipart upload
-func (o *Object) uploadMultipart(in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
+func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
     // Create upload session
-    session, err := o.createUploadSession(leaf, directoryID, size)
+    session, err := o.createUploadSession(ctx, leaf, directoryID, size)
     if err != nil {
         return errors.Wrap(err, "multipart upload create session failed")
     }
@@ -183,7 +184,7 @@ func (o *Object) uploadMultipart(in io.Reader, leaf, directoryID string, size in
     defer func() {
         if err != nil {
             fs.Debugf(o, "Cancelling multipart upload: %v", err)
-            cancelErr := o.abortUpload(session.ID)
+            cancelErr := o.abortUpload(ctx, session.ID)
             if cancelErr != nil {
                 fs.Logf(o, "Failed to cancel multipart upload: %v", err)
             }
@@ -235,7 +236,7 @@ outer:
             defer wg.Done()
             defer o.fs.uploadToken.Put()
             fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
-            partResponse, err := o.uploadPart(session.ID, position, size, buf, wrap)
+            partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap)
             if err != nil {
                 err = errors.Wrap(err, "multipart upload failed to upload part")
                 select {
@@ -263,7 +264,7 @@ outer:
     }
 
     // Finalise the upload session
-    result, err := o.commitUpload(session.ID, parts, modTime, hash.Sum(nil))
+    result, err := o.commitUpload(ctx, session.ID, parts, modTime, hash.Sum(nil))
     if err != nil {
         return errors.Wrap(err, "multipart upload failed to finalize")
     }
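Taken together, createUploadSession, uploadPart, commitUpload and abortUpload form the usual chunked-upload state machine: open a session, stream the parts, commit, and abort the session from a deferred cleanup when any step fails. A minimal sketch of that shape, assuming the context import and a hypothetical sessionAPI interface in place of the real Box endpoints:

// sessionAPI is a hypothetical stand-in for the upload-session endpoints.
type sessionAPI interface {
    Create(ctx context.Context) (id string, err error)
    UploadPart(ctx context.Context, id string, chunk []byte) error
    Commit(ctx context.Context, id string) error
    Abort(ctx context.Context, id string) error
}

// uploadChunked mirrors the control flow of uploadMultipart above. The
// named return err is what lets the deferred cleanup observe failure.
func uploadChunked(ctx context.Context, chunks [][]byte, svc sessionAPI) (err error) {
    id, err := svc.Create(ctx)
    if err != nil {
        return err
    }
    defer func() {
        if err != nil {
            _ = svc.Abort(ctx, id) // best effort; keep the original error
        }
    }()
    for _, chunk := range chunks {
        if err = svc.UploadPart(ctx, id, chunk); err != nil {
            return err
        }
    }
    return svc.Commit(ctx, id)
}

Because every step takes the same ctx, cancelling the caller's context tears the whole upload down as one unit.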

backend/cache/cache.go

@@ -18,19 +18,19 @@ import (
     "syscall"
     "time"
 
-    "github.com/ncw/rclone/backend/crypt"
-    "github.com/ncw/rclone/fs"
-    "github.com/ncw/rclone/fs/cache"
-    "github.com/ncw/rclone/fs/config"
-    "github.com/ncw/rclone/fs/config/configmap"
-    "github.com/ncw/rclone/fs/config/configstruct"
-    "github.com/ncw/rclone/fs/config/obscure"
-    "github.com/ncw/rclone/fs/fspath"
-    "github.com/ncw/rclone/fs/hash"
-    "github.com/ncw/rclone/fs/rc"
-    "github.com/ncw/rclone/fs/walk"
-    "github.com/ncw/rclone/lib/atexit"
     "github.com/pkg/errors"
+    "github.com/rclone/rclone/backend/crypt"
+    "github.com/rclone/rclone/fs"
+    "github.com/rclone/rclone/fs/cache"
+    "github.com/rclone/rclone/fs/config"
+    "github.com/rclone/rclone/fs/config/configmap"
+    "github.com/rclone/rclone/fs/config/configstruct"
+    "github.com/rclone/rclone/fs/config/obscure"
+    "github.com/rclone/rclone/fs/fspath"
+    "github.com/rclone/rclone/fs/hash"
+    "github.com/rclone/rclone/fs/rc"
+    "github.com/rclone/rclone/fs/walk"
+    "github.com/rclone/rclone/lib/atexit"
     "golang.org/x/time/rate"
 )
@@ -509,7 +509,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
     if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
         pollInterval := make(chan time.Duration, 1)
         pollInterval <- time.Duration(f.opt.ChunkCleanInterval)
-        doChangeNotify(f.receiveChangeNotify, pollInterval)
+        doChangeNotify(context.Background(), f.receiveChangeNotify, pollInterval)
     }
 
     f.features = (&fs.Features{
@@ -600,7 +600,7 @@ is used on top of the cache.
     return f, fsErr
 }
 
-func (f *Fs) httpStats(in rc.Params) (out rc.Params, err error) {
+func (f *Fs) httpStats(ctx context.Context, in rc.Params) (out rc.Params, err error) {
     out = make(rc.Params)
     m, err := f.Stats()
     if err != nil {
@@ -627,7 +627,7 @@ func (f *Fs) unwrapRemote(remote string) string {
     return remote
 }
 
-func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) {
+func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params, err error) {
     out = make(rc.Params)
     remoteInt, ok := in["remote"]
     if !ok {
@@ -672,7 +672,7 @@ func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) {
     return out, nil
 }
 
-func (f *Fs) rcFetch(in rc.Params) (rc.Params, error) {
+func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
     type chunkRange struct {
         start, end int64
     }
@@ -777,18 +777,18 @@ func (f *Fs) rcFetch(in rc.Params) (rc.Params, error) {
     for _, pair := range files {
         file, remote := pair[0], pair[1]
         var status fileStatus
-        o, err := f.NewObject(remote)
+        o, err := f.NewObject(ctx, remote)
         if err != nil {
             fetchedChunks[file] = fileStatus{Error: err.Error()}
             continue
         }
         co := o.(*Object)
-        err = co.refreshFromSource(true)
+        err = co.refreshFromSource(ctx, true)
         if err != nil {
             fetchedChunks[file] = fileStatus{Error: err.Error()}
             continue
         }
-        handle := NewObjectHandle(co, f)
+        handle := NewObjectHandle(ctx, co, f)
         handle.UseMemory = false
         handle.scaleWorkers(1)
         walkChunkRanges(crs, co.Size(), func(chunk int64) {
@@ -874,7 +874,7 @@ func (f *Fs) notifyChangeUpstream(remote string, entryType fs.EntryType) {
 // ChangeNotify can subscribe multiple callers
 // this is coupled with the wrapped fs ChangeNotify (if it supports it)
 // and also notifies other caches (i.e VFS) to clear out whenever something changes
-func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
+func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
     f.parentsForgetMu.Lock()
     defer f.parentsForgetMu.Unlock()
     fs.Debugf(f, "subscribing to ChangeNotify")
@@ -921,7 +921,7 @@ func (f *Fs) TempUploadWaitTime() time.Duration {
 }
 
 // NewObject finds the Object at remote.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
     var err error
 
     fs.Debugf(f, "new object '%s'", remote)
@@ -940,16 +940,16 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
     // search for entry in source or temp fs
     var obj fs.Object
     if f.opt.TempWritePath != "" {
-        obj, err = f.tempFs.NewObject(remote)
+        obj, err = f.tempFs.NewObject(ctx, remote)
         // not found in temp fs
         if err != nil {
             fs.Debugf(remote, "find: not found in local cache fs")
-            obj, err = f.Fs.NewObject(remote)
+            obj, err = f.Fs.NewObject(ctx, remote)
         } else {
             fs.Debugf(obj, "find: found in local cache fs")
         }
     } else {
-        obj, err = f.Fs.NewObject(remote)
+        obj, err = f.Fs.NewObject(ctx, remote)
     }
 
     // not found in either fs
@@ -959,13 +959,13 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
     }
 
     // cache the new entry
-    co = ObjectFromOriginal(f, obj).persist()
+    co = ObjectFromOriginal(ctx, f, obj).persist()
     fs.Debugf(co, "find: cached object")
     return co, nil
 }
 
 // List the objects and directories in dir into entries
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
     fs.Debugf(f, "list '%s'", dir)
     cd := ShallowDirectory(f, dir)
@@ -995,12 +995,12 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
         fs.Debugf(dir, "list: temp fs entries: %v", queuedEntries)
 
         for _, queuedRemote := range queuedEntries {
-            queuedEntry, err := f.tempFs.NewObject(f.cleanRootFromPath(queuedRemote))
+            queuedEntry, err := f.tempFs.NewObject(ctx, f.cleanRootFromPath(queuedRemote))
             if err != nil {
                 fs.Debugf(dir, "list: temp file not found in local fs: %v", err)
                 continue
             }
-            co := ObjectFromOriginal(f, queuedEntry).persist()
+            co := ObjectFromOriginal(ctx, f, queuedEntry).persist()
             fs.Debugf(co, "list: cached temp object")
             cachedEntries = append(cachedEntries, co)
         }
@@ -1008,7 +1008,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
     }
 
     // search from the source
-    sourceEntries, err := f.Fs.List(dir)
+    sourceEntries, err := f.Fs.List(ctx, dir)
     if err != nil {
         return nil, err
     }
@@ -1046,11 +1046,11 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
             if i < tmpCnt && cachedEntries[i].Remote() == oRemote {
                 continue
             }
-            co := ObjectFromOriginal(f, o).persist()
+            co := ObjectFromOriginal(ctx, f, o).persist()
             cachedEntries = append(cachedEntries, co)
             fs.Debugf(dir, "list: cached object: %v", co)
         case fs.Directory:
-            cdd := DirectoryFromOriginal(f, o)
+            cdd := DirectoryFromOriginal(ctx, f, o)
             // check if the dir isn't expired and add it in cache if it isn't
             if cdd2, err := f.cache.GetDir(cdd.abs()); err != nil || time.Now().Before(cdd2.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
                 batchDirectories = append(batchDirectories, cdd)
@@ -1080,8 +1080,8 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
     return cachedEntries, nil
 }
 
-func (f *Fs) recurse(dir string, list *walk.ListRHelper) error {
-    entries, err := f.List(dir)
+func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error {
+    entries, err := f.List(ctx, dir)
     if err != nil {
         return err
     }
@@ -1089,7 +1089,7 @@ func (f *Fs) recurse(dir string, list *walk.ListRHelper) error {
     for i := 0; i < len(entries); i++ {
         innerDir, ok := entries[i].(fs.Directory)
         if ok {
-            err := f.recurse(innerDir.Remote(), list)
+            err := f.recurse(ctx, innerDir.Remote(), list)
             if err != nil {
                 return err
             }
@@ -1106,21 +1106,21 @@ func (f *Fs) recurse(dir string, list *walk.ListRHelper) error {
 // ListR lists the objects and directories of the Fs starting
 // from dir recursively into out.
-func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
+func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
     fs.Debugf(f, "list recursively from '%s'", dir)
 
     // we check if the source FS supports ListR
     // if it does, we'll use that to get all the entries, cache them and return
     do := f.Fs.Features().ListR
     if do != nil {
-        return do(dir, func(entries fs.DirEntries) error {
+        return do(ctx, dir, func(entries fs.DirEntries) error {
             // we got called back with a set of entries so let's cache them and call the original callback
             for _, entry := range entries {
                 switch o := entry.(type) {
                 case fs.Object:
-                    _ = f.cache.AddObject(ObjectFromOriginal(f, o))
+                    _ = f.cache.AddObject(ObjectFromOriginal(ctx, f, o))
                 case fs.Directory:
-                    _ = f.cache.AddDir(DirectoryFromOriginal(f, o))
+                    _ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o))
                 default:
                     return errors.Errorf("Unknown object type %T", entry)
                 }
@@ -1133,7 +1133,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
     // if we're here, we're gonna do a standard recursive traversal and cache everything
     list := walk.NewListRHelper(callback)
-    err = f.recurse(dir, list)
+    err = f.recurse(ctx, dir, list)
     if err != nil {
         return err
     }
@@ -1142,9 +1142,9 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
 }
 
 // Mkdir makes the directory (container, bucket)
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
     fs.Debugf(f, "mkdir '%s'", dir)
-    err := f.Fs.Mkdir(dir)
+    err := f.Fs.Mkdir(ctx, dir)
     if err != nil {
         return err
     }
@@ -1172,7 +1172,7 @@ func (f *Fs) Mkdir(dir string) error {
 }
 
 // Rmdir removes the directory (container, bucket) if empty
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
     fs.Debugf(f, "rmdir '%s'", dir)
 
     if f.opt.TempWritePath != "" {
@@ -1182,9 +1182,9 @@ func (f *Fs) Rmdir(dir string) error {
         // we check if the source exists on the remote and make the same move on it too if it does
         // otherwise, we skip this step
-        _, err := f.UnWrap().List(dir)
+        _, err := f.UnWrap().List(ctx, dir)
         if err == nil {
-            err := f.Fs.Rmdir(dir)
+            err := f.Fs.Rmdir(ctx, dir)
             if err != nil {
                 return err
             }
@@ -1192,10 +1192,10 @@ func (f *Fs) Rmdir(dir string) error {
         }
 
         var queuedEntries []*Object
-        err = walk.ListR(f.tempFs, dir, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
+        err = walk.ListR(ctx, f.tempFs, dir, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
            for _, o := range entries {
                if oo, ok := o.(fs.Object); ok {
-                   co := ObjectFromOriginal(f, oo)
+                   co := ObjectFromOriginal(ctx, f, oo)
                    queuedEntries = append(queuedEntries, co)
                }
            }
@@ -1212,7 +1212,7 @@ func (f *Fs) Rmdir(dir string) error {
             }
         }
     } else {
-        err := f.Fs.Rmdir(dir)
+        err := f.Fs.Rmdir(ctx, dir)
         if err != nil {
             return err
         }
@@ -1243,7 +1243,7 @@ func (f *Fs) Rmdir(dir string) error {
 
 // DirMove moves src, srcRemote to this remote at dstRemote
 // using server side move operations.
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
     fs.Debugf(f, "move dir '%s'/'%s' -> '%s'/'%s'", src.Root(), srcRemote, f.Root(), dstRemote)
 
     do := f.Fs.Features().DirMove
@@ -1265,8 +1265,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
     f.backgroundRunner.pause()
     defer f.backgroundRunner.play()
 
-    _, errInWrap := srcFs.UnWrap().List(srcRemote)
-    _, errInTemp := f.tempFs.List(srcRemote)
+    _, errInWrap := srcFs.UnWrap().List(ctx, srcRemote)
+    _, errInTemp := f.tempFs.List(ctx, srcRemote)
     // not found in either fs
     if errInWrap != nil && errInTemp != nil {
         return fs.ErrorDirNotFound
@@ -1275,7 +1275,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
     // we check if the source exists on the remote and make the same move on it too if it does
     // otherwise, we skip this step
     if errInWrap == nil {
-        err := do(srcFs.UnWrap(), srcRemote, dstRemote)
+        err := do(ctx, srcFs.UnWrap(), srcRemote, dstRemote)
         if err != nil {
             return err
         }
@@ -1288,10 +1288,10 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
         }
 
         var queuedEntries []*Object
-        err := walk.ListR(f.tempFs, srcRemote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
+        err := walk.ListR(ctx, f.tempFs, srcRemote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
            for _, o := range entries {
                if oo, ok := o.(fs.Object); ok {
-                   co := ObjectFromOriginal(f, oo)
+                   co := ObjectFromOriginal(ctx, f, oo)
                    queuedEntries = append(queuedEntries, co)
                    if co.tempFileStartedUpload() {
                        fs.Errorf(co, "can't move - upload has already started. need to finish that")
@@ -1312,16 +1312,16 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
            fs.Errorf(srcRemote, "dirmove: can't move dir in temp fs")
            return fs.ErrorCantDirMove
        }
-       err = do(f.tempFs, srcRemote, dstRemote)
+       err = do(ctx, f.tempFs, srcRemote, dstRemote)
        if err != nil {
            return err
        }
-       err = f.cache.ReconcileTempUploads(f)
+       err = f.cache.ReconcileTempUploads(ctx, f)
        if err != nil {
            return err
        }
    } else {
-       err := do(srcFs.UnWrap(), srcRemote, dstRemote)
+       err := do(ctx, srcFs.UnWrap(), srcRemote, dstRemote)
        if err != nil {
            return err
        }
@@ -1427,10 +1427,10 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i
     }
 }
 
-type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
+type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
 
 // put in to the remote path
-func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
+func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
     var err error
     var obj fs.Object
@@ -1441,7 +1441,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
         _ = f.cache.ExpireDir(parentCd)
         f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
 
-        obj, err = f.tempFs.Put(in, src, options...)
+        obj, err = f.tempFs.Put(ctx, in, src, options...)
         if err != nil {
             fs.Errorf(obj, "put: failed to upload in temp fs: %v", err)
             return nil, err
@@ -1456,14 +1456,14 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
     // if cache writes is enabled write it first through cache
     } else if f.opt.StoreWrites {
         f.cacheReader(in, src, func(inn io.Reader) {
-            obj, err = put(inn, src, options...)
+            obj, err = put(ctx, inn, src, options...)
         })
         if err == nil {
             fs.Debugf(obj, "put: uploaded to remote fs and saved in cache")
         }
     // last option: save it directly in remote fs
     } else {
-        obj, err = put(in, src, options...)
+        obj, err = put(ctx, in, src, options...)
         if err == nil {
             fs.Debugf(obj, "put: uploaded to remote fs")
         }
@@ -1475,7 +1475,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
     }
 
     // cache the new file
-    cachedObj := ObjectFromOriginal(f, obj)
+    cachedObj := ObjectFromOriginal(ctx, f, obj)
     // deleting cached chunks and info to be replaced with new ones
     _ = f.cache.RemoveObject(cachedObj.abs())
@@ -1498,33 +1498,33 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
 }
 
 // Put in to the remote path with the modTime given of the given size
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
     fs.Debugf(f, "put data at '%s'", src.Remote())
-    return f.put(in, src, options, f.Fs.Put)
+    return f.put(ctx, in, src, options, f.Fs.Put)
 }
 
 // PutUnchecked uploads the object
-func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
     do := f.Fs.Features().PutUnchecked
     if do == nil {
         return nil, errors.New("can't PutUnchecked")
     }
     fs.Debugf(f, "put data unchecked in '%s'", src.Remote())
-    return f.put(in, src, options, do)
+    return f.put(ctx, in, src, options, do)
 }
 
 // PutStream uploads the object
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
     do := f.Fs.Features().PutStream
     if do == nil {
         return nil, errors.New("can't PutStream")
     }
     fs.Debugf(f, "put data streaming in '%s'", src.Remote())
-    return f.put(in, src, options, do)
+    return f.put(ctx, in, src, options, do)
 }
 
 // Copy src to this remote using server side copy operations.
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
     fs.Debugf(f, "copy obj '%s' -> '%s'", src, remote)
 
     do := f.Fs.Features().Copy
@@ -1544,7 +1544,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
         return nil, fs.ErrorCantCopy
     }
     // refresh from source or abort
-    if err := srcObj.refreshFromSource(false); err != nil {
+    if err := srcObj.refreshFromSource(ctx, false); err != nil {
         fs.Errorf(f, "can't copy %v - %v", src, err)
         return nil, fs.ErrorCantCopy
     }
@@ -1563,7 +1563,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
         }
     }
 
-    obj, err := do(srcObj.Object, remote)
+    obj, err := do(ctx, srcObj.Object, remote)
     if err != nil {
         fs.Errorf(srcObj, "error moving in cache: %v", err)
         return nil, err
@@ -1571,7 +1571,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
     fs.Debugf(obj, "copy: file copied")
 
     // persist new
-    co := ObjectFromOriginal(f, obj).persist()
+    co := ObjectFromOriginal(ctx, f, obj).persist()
     fs.Debugf(co, "copy: added to cache")
     // expire the destination path
     parentCd := NewDirectory(f, cleanPath(path.Dir(co.Remote())))
@@ -1598,7 +1598,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 }
 
 // Move src to this remote using server side move operations.
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
     fs.Debugf(f, "moving obj '%s' -> %s", src, remote)
 
     // if source fs doesn't support move abort
@@ -1619,7 +1619,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
         return nil, fs.ErrorCantMove
     }
     // refresh from source or abort
-    if err := srcObj.refreshFromSource(false); err != nil {
+    if err := srcObj.refreshFromSource(ctx, false); err != nil {
         fs.Errorf(f, "can't move %v - %v", src, err)
         return nil, fs.ErrorCantMove
     }
@@ -1655,7 +1655,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
         fs.Debugf(srcObj, "move: queued file moved to %v", remote)
     }
 
-    obj, err := do(srcObj.Object, remote)
+    obj, err := do(ctx, srcObj.Object, remote)
     if err != nil {
         fs.Errorf(srcObj, "error moving: %v", err)
         return nil, err
@@ -1680,7 +1680,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
     // advertise to ChangeNotify if wrapped doesn't do that
     f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
     // persist new
-    cachedObj := ObjectFromOriginal(f, obj).persist()
+    cachedObj := ObjectFromOriginal(ctx, f, obj).persist()
     fs.Debugf(cachedObj, "move: added to cache")
     // expire new parent
     parentCd = NewDirectory(f, cleanPath(path.Dir(cachedObj.Remote())))
@@ -1702,7 +1702,7 @@ func (f *Fs) Hashes() hash.Set {
 }
 
 // Purge all files in the root and the root directory
-func (f *Fs) Purge() error {
+func (f *Fs) Purge(ctx context.Context) error {
     fs.Infof(f, "purging cache")
     f.cache.Purge()
@@ -1711,7 +1711,7 @@ func (f *Fs) Purge() error {
         return nil
     }
 
-    err := do()
+    err := do(ctx)
     if err != nil {
         return err
     }
@@ -1720,7 +1720,7 @@ func (f *Fs) Purge() error {
 }
 
 // CleanUp the trash in the Fs
-func (f *Fs) CleanUp() error {
+func (f *Fs) CleanUp(ctx context.Context) error {
     f.CleanUpCache(false)
 
     do := f.Fs.Features().CleanUp
@@ -1728,16 +1728,16 @@ func (f *Fs) CleanUp() error {
         return nil
     }
 
-    return do()
+    return do(ctx)
 }
 
 // About gets quota information from the Fs
-func (f *Fs) About() (*fs.Usage, error) {
+func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
     do := f.Fs.Features().About
     if do == nil {
         return nil, errors.New("About not supported")
     }
-    return do()
+    return do(ctx)
 }
 
 // Stats returns stats about the cache storage
@@ -1864,6 +1864,24 @@ func cleanPath(p string) string {
     return p
 }
 
+// UserInfo returns info about the connected user
+func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
+    do := f.Fs.Features().UserInfo
+    if do == nil {
+        return nil, fs.ErrorNotImplemented
+    }
+    return do(ctx)
+}
+
+// Disconnect the current user
+func (f *Fs) Disconnect(ctx context.Context) error {
+    do := f.Fs.Features().Disconnect
+    if do == nil {
+        return fs.ErrorNotImplemented
+    }
+    return do(ctx)
+}
+
 // Check the interfaces are satisfied
 var (
     _ fs.Fs             = (*Fs)(nil)
@@ -1879,4 +1897,6 @@ var (
     _ fs.ListRer        = (*Fs)(nil)
     _ fs.ChangeNotifier = (*Fs)(nil)
     _ fs.Abouter        = (*Fs)(nil)
+    _ fs.UserInfoer     = (*Fs)(nil)
+    _ fs.Disconnecter   = (*Fs)(nil)
 )
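The trailing var block is Go's compile-time interface check idiom: assigning a typed nil to a blank interface-typed variable costs nothing at runtime but fails the build the moment *Fs stops satisfying one of the listed interfaces, which is what guards the new fs.UserInfoer and fs.Disconnecter implementations above. The idiom in isolation:

package main

import "io"

type nopWriter struct{}

func (nopWriter) Write(p []byte) (int, error) { return len(p), nil }

// Compilation fails on this line if nopWriter ever loses Write.
var _ io.Writer = nopWriter{}

func main() {}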

backend/cache/cache_internal_test.go

@@ -4,6 +4,7 @@ package cache_test
 import (
     "bytes"
+    "context"
     "encoding/base64"
     goflag "flag"
     "fmt"
@@ -21,19 +22,20 @@ import (
     "testing"
     "time"
 
-    "github.com/ncw/rclone/backend/cache"
-    "github.com/ncw/rclone/backend/crypt"
-    _ "github.com/ncw/rclone/backend/drive"
-    "github.com/ncw/rclone/backend/local"
-    "github.com/ncw/rclone/fs"
-    "github.com/ncw/rclone/fs/config"
-    "github.com/ncw/rclone/fs/config/configmap"
-    "github.com/ncw/rclone/fs/object"
-    "github.com/ncw/rclone/fs/rc"
-    "github.com/ncw/rclone/fstest"
-    "github.com/ncw/rclone/vfs"
-    "github.com/ncw/rclone/vfs/vfsflags"
     "github.com/pkg/errors"
+    "github.com/rclone/rclone/backend/cache"
+    "github.com/rclone/rclone/backend/crypt"
+    _ "github.com/rclone/rclone/backend/drive"
+    "github.com/rclone/rclone/backend/local"
+    "github.com/rclone/rclone/fs"
+    "github.com/rclone/rclone/fs/config"
+    "github.com/rclone/rclone/fs/config/configmap"
+    "github.com/rclone/rclone/fs/object"
+    "github.com/rclone/rclone/fs/rc"
+    "github.com/rclone/rclone/fstest"
+    "github.com/rclone/rclone/lib/random"
+    "github.com/rclone/rclone/vfs"
+    "github.com/rclone/rclone/vfs/vfsflags"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
 )
@@ -120,7 +122,7 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) {
     require.NoError(t, err)
     listRootInner, err := runInstance.list(t, rootFs, innerFolder)
     require.NoError(t, err)
-    listInner, err := rootFs2.List("")
+    listInner, err := rootFs2.List(context.Background(), "")
     require.NoError(t, err)
 
     require.Len(t, listRoot, 1)
@@ -138,10 +140,10 @@ func TestInternalVfsCache(t *testing.T) {
     rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
     defer runInstance.cleanupFs(t, rootFs, boltDb)
 
-    err := rootFs.Mkdir("test")
+    err := rootFs.Mkdir(context.Background(), "test")
     require.NoError(t, err)
     runInstance.writeObjectString(t, rootFs, "test/second", "content")
-    _, err = rootFs.List("test")
+    _, err = rootFs.List(context.Background(), "test")
     require.NoError(t, err)
 
     testReader := runInstance.randomReader(t, testSize)
@@ -266,7 +268,7 @@ func TestInternalObjNotFound(t *testing.T) {
     rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
     defer runInstance.cleanupFs(t, rootFs, boltDb)
 
-    obj, err := rootFs.NewObject("404")
+    obj, err := rootFs.NewObject(context.Background(), "404")
     require.Error(t, err)
     require.Nil(t, obj)
 }
@@ -354,8 +356,8 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
         testData2, err = base64.StdEncoding.DecodeString(cryptedText2Base64)
         require.NoError(t, err)
     } else {
-        testData1 = []byte(fstest.RandomString(100))
-        testData2 = []byte(fstest.RandomString(200))
+        testData1 = []byte(random.String(100))
+        testData2 = []byte(random.String(200))
     }
 
     // write the object
@@ -445,7 +447,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
     require.NoError(t, err)
     log.Printf("original size: %v", originalSize)
 
-    o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
+    o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
     require.NoError(t, err)
     expectedSize := int64(len([]byte("test content")))
     var data2 []byte
@@ -457,7 +459,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
         data2 = []byte("test content")
     }
     objInfo := object.NewStaticObjectInfo(runInstance.encryptRemoteIfNeeded(t, "data.bin"), time.Now(), int64(len(data2)), true, nil, cfs.UnWrap())
-    err = o.Update(bytes.NewReader(data2), objInfo)
+    err = o.Update(context.Background(), bytes.NewReader(data2), objInfo)
     require.NoError(t, err)
     require.Equal(t, int64(len(data2)), o.Size())
     log.Printf("updated size: %v", len(data2))
@@ -503,9 +505,9 @@ func TestInternalMoveWithNotify(t *testing.T) {
     } else {
         testData = []byte("test content")
     }
-    _ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test"))
-    _ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/one"))
-    _ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/second"))
+    _ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test"))
+    _ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/one"))
+    _ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/second"))
     srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData)
 
     // list in mount
@@ -515,7 +517,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
     require.NoError(t, err)
 
     // move file
-    _, err = cfs.UnWrap().Features().Move(srcObj, dstName)
+    _, err = cfs.UnWrap().Features().Move(context.Background(), srcObj, dstName)
     require.NoError(t, err)
 
     err = runInstance.retryBlock(func() error {
@@ -589,9 +591,9 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
     } else {
         testData = []byte("test content")
     }
-    err = rootFs.Mkdir("test")
+    err = rootFs.Mkdir(context.Background(), "test")
     require.NoError(t, err)
-    err = rootFs.Mkdir("test/one")
+    err = rootFs.Mkdir(context.Background(), "test/one")
     require.NoError(t, err)
     srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData)
@@ -608,7 +610,7 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
     require.False(t, found)
 
     // move file
-    _, err = cfs.UnWrap().Features().Move(srcObj, dstName)
+    _, err = cfs.UnWrap().Features().Move(context.Background(), srcObj, dstName)
     require.NoError(t, err)
 
     err = runInstance.retryBlock(func() error {
@@ -670,23 +672,23 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
     runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
 
     // update in the wrapped fs
-    o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
+    o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
     require.NoError(t, err)
     wrappedTime := time.Now().Add(-1 * time.Hour)
-    err = o.SetModTime(wrappedTime)
+    err = o.SetModTime(context.Background(), wrappedTime)
     require.NoError(t, err)
 
     // get a new instance from the cache
-    co, err := rootFs.NewObject("data.bin")
+    co, err := rootFs.NewObject(context.Background(), "data.bin")
     require.NoError(t, err)
-    require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
+    require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String())
 
     cfs.DirCacheFlush() // flush the cache
 
     // get a new instance from the cache
-    co, err = rootFs.NewObject("data.bin")
+    co, err = rootFs.NewObject(context.Background(), "data.bin")
     require.NoError(t, err)
-    require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
+    require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
 }
 
 func TestInternalChangeSeenAfterRc(t *testing.T) {
@@ -713,19 +715,19 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
     runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
 
     // update in the wrapped fs
-    o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
+    o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
     require.NoError(t, err)
     wrappedTime := time.Now().Add(-1 * time.Hour)
-    err = o.SetModTime(wrappedTime)
+    err = o.SetModTime(context.Background(), wrappedTime)
     require.NoError(t, err)
 
     // get a new instance from the cache
-    co, err := rootFs.NewObject("data.bin")
+    co, err := rootFs.NewObject(context.Background(), "data.bin")
     require.NoError(t, err)
-    require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
+    require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String())
 
     // Call the rc function
-    m, err := cacheExpire.Fn(rc.Params{"remote": "data.bin"})
+    m, err := cacheExpire.Fn(context.Background(), rc.Params{"remote": "data.bin"})
     require.NoError(t, err)
     require.Contains(t, m, "status")
     require.Contains(t, m, "message")
@@ -733,9 +735,9 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
     require.Contains(t, m["message"], "cached file cleared")
 
     // get a new instance from the cache
-    co, err = rootFs.NewObject("data.bin")
+    co, err = rootFs.NewObject(context.Background(), "data.bin")
     require.NoError(t, err)
-    require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
+    require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
     _, err = runInstance.list(t, rootFs, "")
     require.NoError(t, err)
@@ -749,7 +751,7 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
     require.Len(t, li1, 1)
 
     // Call the rc function
-    m, err = cacheExpire.Fn(rc.Params{"remote": "/"})
+    m, err = cacheExpire.Fn(context.Background(), rc.Params{"remote": "/"})
     require.NoError(t, err)
     require.Contains(t, m, "status")
     require.Contains(t, m, "message")
@@ -794,7 +796,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
     // create some rand test data
    testData := randStringBytes(int(int64(totalChunks-1)*chunkSize + chunkSize/2))
    runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
-    o, err := cfs.NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
+    o, err := cfs.NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
     require.NoError(t, err)
     co, ok := o.(*cache.Object)
     require.True(t, ok)
@@ -833,7 +835,7 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
     require.NoError(t, err)
     require.Len(t, l, 1)
 
-    err = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/third"))
+    err = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/third"))
     require.NoError(t, err)
 
     l, err = runInstance.list(t, rootFs, "test")
@@ -868,14 +870,14 @@ func TestInternalBug2117(t *testing.T) {
     cfs, err := runInstance.getCacheFs(rootFs)
     require.NoError(t, err)
 
-    err = cfs.UnWrap().Mkdir("test")
+    err = cfs.UnWrap().Mkdir(context.Background(), "test")
     require.NoError(t, err)
     for i := 1; i <= 4; i++ {
-        err = cfs.UnWrap().Mkdir(fmt.Sprintf("test/dir%d", i))
+        err = cfs.UnWrap().Mkdir(context.Background(), fmt.Sprintf("test/dir%d", i))
         require.NoError(t, err)
 
         for j := 1; j <= 4; j++ {
-            err = cfs.UnWrap().Mkdir(fmt.Sprintf("test/dir%d/dir%d", i, j))
+            err = cfs.UnWrap().Mkdir(context.Background(), fmt.Sprintf("test/dir%d/dir%d", i, j))
             require.NoError(t, err)
 
             runInstance.writeObjectString(t, cfs.UnWrap(), fmt.Sprintf("test/dir%d/dir%d/test.txt", i, j), "test")
@@ -1080,10 +1082,10 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
     }
 
     if purge {
-        _ = f.Features().Purge()
+        _ = f.Features().Purge(context.Background())
         require.NoError(t, err)
     }
-    err = f.Mkdir("")
+    err = f.Mkdir(context.Background(), "")
     require.NoError(t, err)
     if r.useMount && !r.isMounted {
         r.mountFs(t, f)
@@ -1097,7 +1099,7 @@ func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
         r.unmountFs(t, f)
     }
 
-    err := f.Features().Purge()
+    err := f.Features().Purge(context.Background())
     require.NoError(t, err)
     cfs, err := r.getCacheFs(f)
     require.NoError(t, err)
@@ -1199,7 +1201,7 @@ func (r *run) writeRemoteReader(t *testing.T, f fs.Fs, remote string, in io.Read
 func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {
     in := bytes.NewReader(data)
     _ = r.writeObjectReader(t, f, remote, in)
-    o, err := f.NewObject(remote)
+    o, err := f.NewObject(context.Background(), remote)
     require.NoError(t, err)
     require.Equal(t, int64(len(data)), o.Size())
     return o
@@ -1208,7 +1210,7 @@ func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte
 func (r *run) writeObjectReader(t *testing.T, f fs.Fs, remote string, in io.Reader) fs.Object {
     modTime := time.Now()
     objInfo := object.NewStaticObjectInfo(remote, modTime, -1, true, nil, f)
-    obj, err := f.Put(in, objInfo)
+    obj, err := f.Put(context.Background(), in, objInfo)
     require.NoError(t, err)
     if r.useMount {
         r.vfs.WaitForWriters(10 * time.Second)
@@ -1228,18 +1230,18 @@ func (r *run) updateObjectRemote(t *testing.T, f fs.Fs, remote string, data1 []b
         err = ioutil.WriteFile(path.Join(r.mntDir, remote), data2, 0600)
         require.NoError(t, err)
         r.vfs.WaitForWriters(10 * time.Second)
-        obj, err = f.NewObject(remote)
+        obj, err = f.NewObject(context.Background(), remote)
     } else {
         in1 := bytes.NewReader(data1)
         in2 := bytes.NewReader(data2)
         objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
         objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
 
-        obj, err = f.Put(in1, objInfo1)
+        obj, err = f.Put(context.Background(), in1, objInfo1)
         require.NoError(t, err)
-        obj, err = f.NewObject(remote)
+        obj, err = f.NewObject(context.Background(), remote)
         require.NoError(t, err)
-        err = obj.Update(in2, objInfo2)
+        err = obj.Update(context.Background(), in2, objInfo2)
     }
     require.NoError(t, err)
@@ -1268,7 +1270,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
             return checkSample, err
         }
     } else {
-        co, err := f.NewObject(remote)
+        co, err := f.NewObject(context.Background(), remote)
         if err != nil {
             return checkSample, err
         }
@@ -1283,7 +1285,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
 func (r *run) readDataFromObj(t *testing.T, o fs.Object, offset, end int64, noLengthCheck bool) []byte {
     size := end - offset
     checkSample := make([]byte, size)
-    reader, err := o.Open(&fs.SeekOption{Offset: offset})
+    reader, err := o.Open(context.Background(), &fs.SeekOption{Offset: offset})
     require.NoError(t, err)
     totalRead, err := io.ReadFull(reader, checkSample)
     if (err == io.EOF || err == io.ErrUnexpectedEOF) && noLengthCheck {
@@ -1300,7 +1302,7 @@ func (r *run) mkdir(t *testing.T, f fs.Fs, remote string) {
     if r.useMount {
         err = os.Mkdir(path.Join(r.mntDir, remote), 0700)
     } else {
-        err = f.Mkdir(remote)
+        err = f.Mkdir(context.Background(), remote)
     }
     require.NoError(t, err)
 }
@@ -1312,11 +1314,11 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
         err = os.Remove(path.Join(r.mntDir, remote))
     } else {
         var obj fs.Object
-        obj, err = f.NewObject(remote)
+        obj, err = f.NewObject(context.Background(), remote)
         if err != nil {
-            err = f.Rmdir(remote)
+            err = f.Rmdir(context.Background(), remote)
         } else {
-            err = obj.Remove()
+            err = obj.Remove(context.Background())
         }
     }
@@ -1334,7 +1336,7 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error)
         }
     } else {
         var list fs.DirEntries
-        list, err = f.List(remote)
+        list, err = f.List(context.Background(), remote)
         for _, ll := range list {
             l = append(l, ll)
         }
@@ -1353,7 +1355,7 @@ func (r *run) listPath(t *testing.T, f fs.Fs, remote string) []string {
         }
     } else {
         var list fs.DirEntries
-        list, err = f.List(remote)
+        list, err = f.List(context.Background(), remote)
         for _, ll := range list {
             l = append(l, ll.Remote())
         }
@@ -1393,7 +1395,7 @@ func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
         }
         r.vfs.WaitForWriters(10 * time.Second)
     } else if rootFs.Features().DirMove != nil {
-        err = rootFs.Features().DirMove(rootFs, src, dst)
+        err = rootFs.Features().DirMove(context.Background(), rootFs, src, dst)
        if err != nil {
            return err
        }
@@ -1415,11 +1417,11 @@ func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
        }
        r.vfs.WaitForWriters(10 * time.Second)
    } else if rootFs.Features().Move != nil {
-        obj1, err := rootFs.NewObject(src)
+        obj1, err := rootFs.NewObject(context.Background(), src)
        if err != nil {
            return err
        }
-        _, err = rootFs.Features().Move(obj1, dst)
+        _, err = rootFs.Features().Move(context.Background(), obj1, dst)
if err != nil { if err != nil {
return err return err
} }
@@ -1441,11 +1443,11 @@ func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
} }
r.vfs.WaitForWriters(10 * time.Second) r.vfs.WaitForWriters(10 * time.Second)
} else if rootFs.Features().Copy != nil { } else if rootFs.Features().Copy != nil {
obj, err := rootFs.NewObject(src) obj, err := rootFs.NewObject(context.Background(), src)
if err != nil { if err != nil {
return err return err
} }
_, err = rootFs.Features().Copy(obj, dst) _, err = rootFs.Features().Copy(context.Background(), obj, dst)
if err != nil { if err != nil {
return err return err
} }
@@ -1467,11 +1469,11 @@ func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error)
} }
return fi.ModTime(), nil return fi.ModTime(), nil
} }
obj1, err := rootFs.NewObject(src) obj1, err := rootFs.NewObject(context.Background(), src)
if err != nil { if err != nil {
return time.Time{}, err return time.Time{}, err
} }
return obj1.ModTime(), nil return obj1.ModTime(context.Background()), nil
} }
func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) { func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
@@ -1484,7 +1486,7 @@ func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
} }
return fi.Size(), nil return fi.Size(), nil
} }
obj1, err := rootFs.NewObject(src) obj1, err := rootFs.NewObject(context.Background(), src)
if err != nil { if err != nil {
return int64(0), err return int64(0), err
} }
@@ -1507,14 +1509,14 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
_, err = f.WriteString(data + append) _, err = f.WriteString(data + append)
} else { } else {
var obj1 fs.Object var obj1 fs.Object
obj1, err = rootFs.NewObject(src) obj1, err = rootFs.NewObject(context.Background(), src)
if err != nil { if err != nil {
return err return err
} }
data1 := []byte(data + append) data1 := []byte(data + append)
r := bytes.NewReader(data1) r := bytes.NewReader(data1)
objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs) objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
err = obj1.Update(r, objInfo1) err = obj1.Update(context.Background(), r, objInfo1)
} }
return err return err
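The change running through this whole compare is mechanical: every fs.Fs and fs.Object method gains a leading context.Context parameter, with tests passing context.Background() and not-yet-plumbed call sites passing context.TODO(). A minimal sketch of the post-migration calling convention; the helper name readAll is illustrative, not from the diff:

    package example

    import (
    	"context"
    	"io/ioutil"

    	"github.com/rclone/rclone/fs"
    )

    // readAll sketches the migrated call style: NewObject and Open
    // now take a context as their first argument.
    func readAll(ctx context.Context, f fs.Fs, remote string) ([]byte, error) {
    	obj, err := f.NewObject(ctx, remote) // was f.NewObject(remote)
    	if err != nil {
    		return nil, err
    	}
    	in, err := obj.Open(ctx) // was obj.Open()
    	if err != nil {
    		return nil, err
    	}
    	defer func() { _ = in.Close() }()
    	return ioutil.ReadAll(in)
    }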


@@ -9,9 +9,9 @@ import (
 	"bazil.org/fuse"
 	fusefs "bazil.org/fuse/fs"
-	"github.com/ncw/rclone/cmd/mount"
-	"github.com/ncw/rclone/cmd/mountlib"
-	"github.com/ncw/rclone/fs"
+	"github.com/rclone/rclone/cmd/mount"
+	"github.com/rclone/rclone/cmd/mountlib"
+	"github.com/rclone/rclone/fs"
 	"github.com/stretchr/testify/require"
 )


@@ -9,10 +9,10 @@ import (
 	"time"
 	"github.com/billziss-gh/cgofuse/fuse"
-	"github.com/ncw/rclone/cmd/cmount"
-	"github.com/ncw/rclone/cmd/mountlib"
-	"github.com/ncw/rclone/fs"
 	"github.com/pkg/errors"
+	"github.com/rclone/rclone/cmd/cmount"
+	"github.com/rclone/rclone/cmd/mountlib"
+	"github.com/rclone/rclone/fs"
 	"github.com/stretchr/testify/require"
 )


@@ -7,9 +7,9 @@ package cache_test
 import (
 	"testing"
-	"github.com/ncw/rclone/backend/cache"
-	_ "github.com/ncw/rclone/backend/local"
-	"github.com/ncw/rclone/fstest/fstests"
+	"github.com/rclone/rclone/backend/cache"
+	_ "github.com/rclone/rclone/backend/local"
+	"github.com/rclone/rclone/fstest/fstests"
 )
 // TestIntegration runs integration tests against the remote


@@ -3,6 +3,7 @@
 package cache_test
 import (
+	"context"
 	"fmt"
 	"math/rand"
 	"os"
@@ -11,9 +12,9 @@ import (
 	"testing"
 	"time"
-	"github.com/ncw/rclone/backend/cache"
-	_ "github.com/ncw/rclone/backend/drive"
-	"github.com/ncw/rclone/fs"
+	"github.com/rclone/rclone/backend/cache"
+	_ "github.com/rclone/rclone/backend/drive"
+	"github.com/rclone/rclone/fs"
 	"github.com/stretchr/testify/require"
 )
@@ -85,11 +86,11 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
-	err := rootFs.Mkdir("one")
+	err := rootFs.Mkdir(context.Background(), "one")
 	require.NoError(t, err)
-	err = rootFs.Mkdir("one/test")
+	err = rootFs.Mkdir(context.Background(), "one/test")
 	require.NoError(t, err)
-	err = rootFs.Mkdir("second")
+	err = rootFs.Mkdir(context.Background(), "second")
 	require.NoError(t, err)
 	// create some rand test data
@@ -122,11 +123,11 @@ func TestInternalUploadTempPathCleaned(t *testing.T) {
 		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
-	err := rootFs.Mkdir("one")
+	err := rootFs.Mkdir(context.Background(), "one")
 	require.NoError(t, err)
-	err = rootFs.Mkdir("one/test")
+	err = rootFs.Mkdir(context.Background(), "one/test")
 	require.NoError(t, err)
-	err = rootFs.Mkdir("second")
+	err = rootFs.Mkdir(context.Background(), "second")
 	require.NoError(t, err)
 	// create some rand test data
@@ -165,7 +166,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
-	err := rootFs.Mkdir("test")
+	err := rootFs.Mkdir(context.Background(), "test")
 	require.NoError(t, err)
 	minSize := 5242880
 	maxSize := 10485760
@@ -233,9 +234,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	err = runInstance.dirMove(t, rootFs, "test", "second")
 	if err != errNotSupported {
 		require.NoError(t, err)
-		_, err = rootFs.NewObject("test/one")
+		_, err = rootFs.NewObject(context.Background(), "test/one")
 		require.Error(t, err)
-		_, err = rootFs.NewObject("second/one")
+		_, err = rootFs.NewObject(context.Background(), "second/one")
 		require.NoError(t, err)
 		// validate that it exists in temp fs
 		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -256,7 +257,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	err = runInstance.rm(t, rootFs, "test")
 	require.Error(t, err)
 	require.Contains(t, err.Error(), "directory not empty")
-	_, err = rootFs.NewObject("test/one")
+	_, err = rootFs.NewObject(context.Background(), "test/one")
 	require.NoError(t, err)
 	// validate that it exists in temp fs
 	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -270,9 +271,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	if err != errNotSupported {
 		require.NoError(t, err)
 		// try to read from it
-		_, err = rootFs.NewObject("test/one")
+		_, err = rootFs.NewObject(context.Background(), "test/one")
 		require.Error(t, err)
-		_, err = rootFs.NewObject("test/second")
+		_, err = rootFs.NewObject(context.Background(), "test/second")
 		require.NoError(t, err)
 		data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
 		require.NoError(t, err)
@@ -289,9 +290,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
 	if err != errNotSupported {
 		require.NoError(t, err)
-		_, err = rootFs.NewObject("test/one")
+		_, err = rootFs.NewObject(context.Background(), "test/one")
 		require.NoError(t, err)
-		_, err = rootFs.NewObject("test/third")
+		_, err = rootFs.NewObject(context.Background(), "test/third")
 		require.NoError(t, err)
 		data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
 		require.NoError(t, err)
@@ -306,7 +307,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	// test Remove -- allowed
 	err = runInstance.rm(t, rootFs, "test/one")
 	require.NoError(t, err)
-	_, err = rootFs.NewObject("test/one")
+	_, err = rootFs.NewObject(context.Background(), "test/one")
 	require.Error(t, err)
 	// validate that it doesn't exist in temp fs
 	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -318,7 +319,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	require.NoError(t, err)
 	err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
 	require.NoError(t, err)
-	obj2, err := rootFs.NewObject("test/one")
+	obj2, err := rootFs.NewObject(context.Background(), "test/one")
 	require.NoError(t, err)
 	data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
 	require.Equal(t, "one content updated", string(data2))
@@ -366,7 +367,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
 	err = runInstance.dirMove(t, rootFs, "test", "second")
 	if err != errNotSupported {
 		require.Error(t, err)
-		_, err = rootFs.NewObject("test/one")
+		_, err = rootFs.NewObject(context.Background(), "test/one")
 		require.NoError(t, err)
 		// validate that it exists in temp fs
 		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -378,7 +379,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
 	// test Rmdir
 	err = runInstance.rm(t, rootFs, "test")
 	require.Error(t, err)
-	_, err = rootFs.NewObject("test/one")
+	_, err = rootFs.NewObject(context.Background(), "test/one")
 	require.NoError(t, err)
 	// validate that it doesn't exist in temp fs
 	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -389,9 +390,9 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
 	if err != errNotSupported {
 		require.Error(t, err)
 		// try to read from it
-		_, err = rootFs.NewObject("test/one")
+		_, err = rootFs.NewObject(context.Background(), "test/one")
 		require.NoError(t, err)
-		_, err = rootFs.NewObject("test/second")
+		_, err = rootFs.NewObject(context.Background(), "test/second")
 		require.Error(t, err)
 		// validate that it exists in temp fs
 		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -404,9 +405,9 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
 	err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
 	if err != errNotSupported {
 		require.NoError(t, err)
-		_, err = rootFs.NewObject("test/one")
+		_, err = rootFs.NewObject(context.Background(), "test/one")
 		require.NoError(t, err)
-		_, err = rootFs.NewObject("test/third")
+		_, err = rootFs.NewObject(context.Background(), "test/third")
 		require.NoError(t, err)
 		data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
 		require.NoError(t, err)
@@ -421,7 +422,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
 	// test Remove
 	err = runInstance.rm(t, rootFs, "test/one")
 	require.Error(t, err)
-	_, err = rootFs.NewObject("test/one")
+	_, err = rootFs.NewObject(context.Background(), "test/one")
 	require.NoError(t, err)
 	// validate that it doesn't exist in temp fs
 	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))


@@ -3,10 +3,11 @@
 package cache
 import (
+	"context"
 	"path"
 	"time"
-	"github.com/ncw/rclone/fs"
+	"github.com/rclone/rclone/fs"
 )
 // Directory is a generic dir that stores basic information about it
@@ -55,7 +56,7 @@ func ShallowDirectory(f *Fs, remote string) *Directory {
 }
 // DirectoryFromOriginal builds one from a generic fs.Directory
-func DirectoryFromOriginal(f *Fs, d fs.Directory) *Directory {
+func DirectoryFromOriginal(ctx context.Context, f *Fs, d fs.Directory) *Directory {
 	var cd *Directory
 	fullRemote := path.Join(f.Root(), d.Remote())
@@ -67,7 +68,7 @@ func DirectoryFromOriginal(f *Fs, d fs.Directory) *Directory {
 		CacheFs:      f,
 		Name:         name,
 		Dir:          dir,
-		CacheModTime: d.ModTime().UnixNano(),
+		CacheModTime: d.ModTime(ctx).UnixNano(),
 		CacheSize:    d.Size(),
 		CacheItems:   d.Items(),
 		CacheType:    "Directory",
@@ -110,7 +111,7 @@ func (d *Directory) parentRemote() string {
 }
 // ModTime returns the cached ModTime
-func (d *Directory) ModTime() time.Time {
+func (d *Directory) ModTime(ctx context.Context) time.Time {
 	return time.Unix(0, d.CacheModTime)
 }


@@ -3,6 +3,7 @@
 package cache
 import (
+	"context"
 	"fmt"
 	"io"
 	"path"
@@ -11,9 +12,9 @@ import (
 	"sync"
 	"time"
-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/operations"
 	"github.com/pkg/errors"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/operations"
 )
 var uploaderMap = make(map[string]*backgroundWriter)
@@ -40,6 +41,7 @@ func initBackgroundUploader(fs *Fs) (*backgroundWriter, error) {
 // Handle is managing the read/write/seek operations on an open handle
 type Handle struct {
+	ctx          context.Context
 	cachedObject *Object
 	cfs          *Fs
 	memory       *Memory
@@ -58,8 +60,9 @@ type Handle struct {
 }
 // NewObjectHandle returns a new Handle for an existing Object
-func NewObjectHandle(o *Object, cfs *Fs) *Handle {
+func NewObjectHandle(ctx context.Context, o *Object, cfs *Fs) *Handle {
 	r := &Handle{
+		ctx:          ctx,
 		cachedObject: o,
 		cfs:          cfs,
 		offset:       0,
@@ -351,7 +354,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
 	r := w.rc
 	if w.rc == nil {
 		r, err = w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
-			return w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
+			return w.r.cachedObject.Object.Open(w.r.ctx, &fs.RangeOption{Start: offset, End: end - 1})
 		})
 		if err != nil {
 			return nil, err
@@ -361,7 +364,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
 	if !closeOpen {
 		if do, ok := r.(fs.RangeSeeker); ok {
-			_, err = do.RangeSeek(offset, io.SeekStart, end-offset)
+			_, err = do.RangeSeek(w.r.ctx, offset, io.SeekStart, end-offset)
 			return r, err
 		} else if do, ok := r.(io.Seeker); ok {
 			_, err = do.Seek(offset, io.SeekStart)
@@ -371,7 +374,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
 	_ = w.rc.Close()
 	return w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
-		r, err = w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
+		r, err = w.r.cachedObject.Object.Open(w.r.ctx, &fs.RangeOption{Start: offset, End: end - 1})
 		if err != nil {
 			return nil, err
 		}
@@ -449,7 +452,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
 	// we seem to be getting only errors so we abort
 	if err != nil {
 		fs.Errorf(w, "object open failed %v: %v", chunkStart, err)
-		err = w.r.cachedObject.refreshFromSource(true)
+		err = w.r.cachedObject.refreshFromSource(w.r.ctx, true)
 		if err != nil {
 			fs.Errorf(w, "%v", err)
 		}
@@ -462,7 +465,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
 	sourceRead, err = io.ReadFull(w.rc, data)
 	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
 		fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err)
-		err = w.r.cachedObject.refreshFromSource(true)
+		err = w.r.cachedObject.refreshFromSource(w.r.ctx, true)
 		if err != nil {
 			fs.Errorf(w, "%v", err)
 		}
@@ -588,7 +591,7 @@ func (b *backgroundWriter) run() {
 		remote := b.fs.cleanRootFromPath(absPath)
 		b.notify(remote, BackgroundUploadStarted, nil)
 		fs.Infof(remote, "background upload: started upload")
-		err = operations.MoveFile(b.fs.UnWrap(), b.fs.tempFs, remote, remote)
+		err = operations.MoveFile(context.TODO(), b.fs.UnWrap(), b.fs.tempFs, remote, remote)
 		if err != nil {
 			b.notify(remote, BackgroundUploadError, err)
 			_ = b.fs.cache.rollbackPendingUpload(absPath)
@@ -598,14 +601,14 @@ func (b *backgroundWriter) run() {
 		// clean empty dirs up to root
 		thisDir := cleanPath(path.Dir(remote))
 		for thisDir != "" {
-			thisList, err := b.fs.tempFs.List(thisDir)
+			thisList, err := b.fs.tempFs.List(context.TODO(), thisDir)
 			if err != nil {
 				break
 			}
 			if len(thisList) > 0 {
 				break
 			}
-			err = b.fs.tempFs.Rmdir(thisDir)
+			err = b.fs.tempFs.Rmdir(context.TODO(), thisDir)
 			fs.Debugf(thisDir, "cleaned from temp path")
 			if err != nil {
 				break
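As the backgroundWriter hunk above shows, operations.MoveFile now takes a leading context as well, with context.TODO() because the upload runs in a goroutine that has no request context to inherit. A sketch of a call under the new signature; dstFs, srcFs and remote are placeholders, not names from the diff:

    package example

    import (
    	"context"

    	"github.com/rclone/rclone/fs"
    	"github.com/rclone/rclone/fs/operations"
    )

    // moveUp sketches the background uploader's move under the new API:
    // operations.MoveFile(ctx, fdst, fsrc, dstRemote, srcRemote).
    func moveUp(dstFs, srcFs fs.Fs, remote string) error {
    	// no request context is available in the upload goroutine,
    	// hence context.TODO() at the real call site above
    	return operations.MoveFile(context.TODO(), dstFs, srcFs, remote, remote)
    }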


@@ -3,15 +3,16 @@
 package cache
 import (
+	"context"
 	"io"
 	"path"
 	"sync"
 	"time"
-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/hash"
-	"github.com/ncw/rclone/lib/readers"
 	"github.com/pkg/errors"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/lib/readers"
 )
 const (
@@ -68,7 +69,7 @@ func NewObject(f *Fs, remote string) *Object {
 }
 // ObjectFromOriginal builds one from a generic fs.Object
-func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
+func ObjectFromOriginal(ctx context.Context, f *Fs, o fs.Object) *Object {
 	var co *Object
 	fullRemote := cleanPath(path.Join(f.Root(), o.Remote()))
 	dir, name := path.Split(fullRemote)
@@ -92,13 +93,13 @@ func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
 		CacheType: cacheType,
 		CacheTs:   time.Now(),
 	}
-	co.updateData(o)
+	co.updateData(ctx, o)
 	return co
 }
-func (o *Object) updateData(source fs.Object) {
+func (o *Object) updateData(ctx context.Context, source fs.Object) {
 	o.Object = source
-	o.CacheModTime = source.ModTime().UnixNano()
+	o.CacheModTime = source.ModTime(ctx).UnixNano()
 	o.CacheSize = source.Size()
 	o.CacheStorable = source.Storable()
 	o.CacheTs = time.Now()
@@ -130,20 +131,20 @@ func (o *Object) abs() string {
 }
 // ModTime returns the cached ModTime
-func (o *Object) ModTime() time.Time {
-	_ = o.refresh()
+func (o *Object) ModTime(ctx context.Context) time.Time {
+	_ = o.refresh(ctx)
 	return time.Unix(0, o.CacheModTime)
 }
 // Size returns the cached Size
 func (o *Object) Size() int64 {
-	_ = o.refresh()
+	_ = o.refresh(context.TODO())
 	return o.CacheSize
 }
 // Storable returns the cached Storable
 func (o *Object) Storable() bool {
-	_ = o.refresh()
+	_ = o.refresh(context.TODO())
 	return o.CacheStorable
 }
@@ -151,18 +152,18 @@ func (o *Object) Storable() bool {
 // all these conditions must be true to ignore a refresh
 // 1. cache ts didn't expire yet
 // 2. is not pending a notification from the wrapped fs
-func (o *Object) refresh() error {
+func (o *Object) refresh(ctx context.Context) error {
 	isNotified := o.CacheFs.isNotifiedRemote(o.Remote())
 	isExpired := time.Now().After(o.CacheTs.Add(time.Duration(o.CacheFs.opt.InfoAge)))
 	if !isExpired && !isNotified {
 		return nil
 	}
-	return o.refreshFromSource(true)
+	return o.refreshFromSource(ctx, true)
 }
 // refreshFromSource requests the original FS for the object in case it comes from a cached entry
-func (o *Object) refreshFromSource(force bool) error {
+func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
 	o.refreshMutex.Lock()
 	defer o.refreshMutex.Unlock()
 	var err error
@@ -172,29 +173,29 @@ func (o *Object) refreshFromSource(force bool) error {
 		return nil
 	}
 	if o.isTempFile() {
-		liveObject, err = o.ParentFs.NewObject(o.Remote())
+		liveObject, err = o.ParentFs.NewObject(ctx, o.Remote())
 		err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
 	} else {
-		liveObject, err = o.CacheFs.Fs.NewObject(o.Remote())
+		liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote())
 		err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
 	}
 	if err != nil {
 		fs.Errorf(o, "error refreshing object in : %v", err)
 		return err
 	}
-	o.updateData(liveObject)
+	o.updateData(ctx, liveObject)
 	o.persist()
 	return nil
 }
 // SetModTime sets the ModTime of this object
-func (o *Object) SetModTime(t time.Time) error {
-	if err := o.refreshFromSource(false); err != nil {
+func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
+	if err := o.refreshFromSource(ctx, false); err != nil {
 		return err
 	}
-	err := o.Object.SetModTime(t)
+	err := o.Object.SetModTime(ctx, t)
 	if err != nil {
 		return err
 	}
@@ -207,19 +208,19 @@ func (o *Object) SetModTime(t time.Time) error {
 }
 // Open is used to request a specific part of the file using fs.RangeOption
-func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
 	var err error
 	if o.Object == nil {
-		err = o.refreshFromSource(true)
+		err = o.refreshFromSource(ctx, true)
 	} else {
-		err = o.refresh()
+		err = o.refresh(ctx)
 	}
 	if err != nil {
 		return nil, err
 	}
-	cacheReader := NewObjectHandle(o, o.CacheFs)
+	cacheReader := NewObjectHandle(ctx, o, o.CacheFs)
 	var offset, limit int64 = 0, -1
 	for _, option := range options {
 		switch x := option.(type) {
@@ -238,8 +239,8 @@ func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
 }
 // Update will change the object data
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-	if err := o.refreshFromSource(false); err != nil {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+	if err := o.refreshFromSource(ctx, false); err != nil {
 		return err
 	}
 	// pause background uploads if active
@@ -254,7 +255,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	fs.Debugf(o, "updating object contents with size %v", src.Size())
 	// FIXME use reliable upload
-	err := o.Object.Update(in, src, options...)
+	err := o.Object.Update(ctx, in, src, options...)
 	if err != nil {
 		fs.Errorf(o, "error updating source: %v", err)
 		return err
@@ -265,7 +266,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	// advertise to ChangeNotify if wrapped doesn't do that
 	o.CacheFs.notifyChangeUpstreamIfNeeded(o.Remote(), fs.EntryObject)
-	o.CacheModTime = src.ModTime().UnixNano()
+	o.CacheModTime = src.ModTime(ctx).UnixNano()
 	o.CacheSize = src.Size()
 	o.CacheHashes = make(map[hash.Type]string)
 	o.CacheTs = time.Now()
@@ -275,8 +276,8 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 }
 // Remove deletes the object from both the cache and the source
-func (o *Object) Remove() error {
-	if err := o.refreshFromSource(false); err != nil {
+func (o *Object) Remove(ctx context.Context) error {
+	if err := o.refreshFromSource(ctx, false); err != nil {
 		return err
 	}
 	// pause background uploads if active
@@ -288,7 +289,7 @@ func (o *Object) Remove() error {
 			return errors.Errorf("%v is currently uploading, can't delete", o)
 		}
 	}
-	err := o.Object.Remove()
+	err := o.Object.Remove(ctx)
 	if err != nil {
 		return err
 	}
@@ -306,8 +307,8 @@ func (o *Object) Remove() error {
 // Hash requests a hash of the object and stores in the cache
 // since it might or might not be called, this is lazy loaded
-func (o *Object) Hash(ht hash.Type) (string, error) {
-	_ = o.refresh()
+func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
+	_ = o.refresh(ctx)
 	if o.CacheHashes == nil {
 		o.CacheHashes = make(map[hash.Type]string)
 	}
@@ -316,10 +317,10 @@ func (o *Object) Hash(ht hash.Type) (string, error) {
 	if found {
 		return cachedHash, nil
 	}
-	if err := o.refreshFromSource(false); err != nil {
+	if err := o.refreshFromSource(ctx, false); err != nil {
 		return "", err
 	}
-	liveHash, err := o.Object.Hash(ht)
+	liveHash, err := o.Object.Hash(ctx, ht)
 	if err != nil {
 		return "", err
 	}
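The Hash hunk above keeps the lazy-loading behaviour (compute on first request, then serve from CacheHashes) while threading the context through to the wrapped object. A sketch of the migrated caller side; md5Of is an illustrative name, not from the diff:

    package example

    import (
    	"context"

    	"github.com/rclone/rclone/fs"
    	"github.com/rclone/rclone/fs/hash"
    )

    // md5Of asks an object for its MD5 under the new signature; the
    // cache backend memoizes the result per hash type.
    func md5Of(ctx context.Context, o fs.Object) (string, error) {
    	return o.Hash(ctx, hash.MD5)
    }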


@@ -14,8 +14,8 @@ import (
 	"sync"
 	"time"
-	"github.com/ncw/rclone/fs"
 	cache "github.com/patrickmn/go-cache"
+	"github.com/rclone/rclone/fs"
 	"golang.org/x/net/websocket"
 )


@@ -7,9 +7,9 @@ import (
 	"strings"
 	"time"
-	"github.com/ncw/rclone/fs"
 	cache "github.com/patrickmn/go-cache"
 	"github.com/pkg/errors"
+	"github.com/rclone/rclone/fs"
 )
 // Memory is a wrapper of transient storage for a go-cache store


@@ -4,6 +4,7 @@ package cache
 import (
 	"bytes"
+	"context"
 	"encoding/binary"
 	"encoding/json"
 	"fmt"
@@ -16,9 +17,9 @@ import (
 	"time"
 	bolt "github.com/coreos/bbolt"
-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/walk"
 	"github.com/pkg/errors"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/walk"
 )
 // Constants
@@ -1014,7 +1015,7 @@ func (b *Persistent) SetPendingUploadToStarted(remote string) error {
 }
 // ReconcileTempUploads will recursively look for all the files in the temp directory and add them to the queue
-func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
+func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) error {
 	return b.db.Update(func(tx *bolt.Tx) error {
 		_ = tx.DeleteBucket([]byte(tempBucket))
 		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
@@ -1023,7 +1024,7 @@ func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
 		}
 		var queuedEntries []fs.Object
-		err = walk.ListR(cacheFs.tempFs, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
+		err = walk.ListR(ctx, cacheFs.tempFs, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
			for _, o := range entries {
 				if oo, ok := o.(fs.Object); ok {
 					queuedEntries = append(queuedEntries, oo)
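walk.ListR gains the same leading context, so the reconcile pass can be cancelled along with its caller. A sketch of the migrated listing call, collecting objects the way ReconcileTempUploads does; the function name and variables are placeholders:

    package example

    import (
    	"context"

    	"github.com/rclone/rclone/fs"
    	"github.com/rclone/rclone/fs/walk"
    )

    // listObjects recurses from the root, keeping only objects,
    // mirroring the walk.ListR call in the hunk above.
    func listObjects(ctx context.Context, f fs.Fs) (objs []fs.Object, err error) {
    	err = walk.ListR(ctx, f, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
    		for _, o := range entries {
    			if oo, ok := o.(fs.Object); ok {
    				objs = append(objs, oo)
    			}
    		}
    		return nil
    	})
    	return objs, err
    }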


@@ -2,6 +2,7 @@ package crypt
 import (
 	"bytes"
+	"context"
 	"crypto/aes"
 	gocipher "crypto/cipher"
 	"crypto/rand"
@@ -13,10 +14,10 @@ import (
 	"sync"
 	"unicode/utf8"
-	"github.com/ncw/rclone/backend/crypt/pkcs7"
-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/accounting"
 	"github.com/pkg/errors"
+	"github.com/rclone/rclone/backend/crypt/pkcs7"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rfjakob/eme"
 	"golang.org/x/crypto/nacl/secretbox"
 	"golang.org/x/crypto/scrypt"
@@ -68,7 +69,7 @@ type ReadSeekCloser interface {
 }
 // OpenRangeSeek opens the file handle at the offset with the limit given
-type OpenRangeSeek func(offset, limit int64) (io.ReadCloser, error)
+type OpenRangeSeek func(ctx context.Context, offset, limit int64) (io.ReadCloser, error)
 // Cipher is used to swap out the encryption implementations
 type Cipher interface {
@@ -85,7 +86,7 @@ type Cipher interface {
 	// DecryptData
 	DecryptData(io.ReadCloser) (io.ReadCloser, error)
 	// DecryptDataSeek decrypt at a given position
-	DecryptDataSeek(open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
+	DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
 	// EncryptedSize calculates the size of the data when encrypted
 	EncryptedSize(int64) int64
 	// DecryptedSize calculates the size of the data when decrypted
@@ -755,22 +756,22 @@ func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
 }
 // newDecrypterSeek creates a new file handle decrypting on the fly
-func (c *cipher) newDecrypterSeek(open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
+func (c *cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
 	var rc io.ReadCloser
 	doRangeSeek := false
 	setLimit := false
 	// Open initially with no seek
 	if offset == 0 && limit < 0 {
 		// If no offset or limit then open whole file
-		rc, err = open(0, -1)
+		rc, err = open(ctx, 0, -1)
 	} else if offset == 0 {
 		// If no offset open the header + limit worth of the file
 		_, underlyingLimit, _, _ := calculateUnderlying(offset, limit)
-		rc, err = open(0, int64(fileHeaderSize)+underlyingLimit)
+		rc, err = open(ctx, 0, int64(fileHeaderSize)+underlyingLimit)
 		setLimit = true
 	} else {
 		// Otherwise just read the header to start with
-		rc, err = open(0, int64(fileHeaderSize))
+		rc, err = open(ctx, 0, int64(fileHeaderSize))
 		doRangeSeek = true
 	}
 	if err != nil {
@@ -783,7 +784,7 @@ func (c *cipher) newDecrypterSeek(open OpenRangeSeek, offset, limit int64) (fh *
 	}
 	fh.open = open // will be called by fh.RangeSeek
 	if doRangeSeek {
-		_, err = fh.RangeSeek(offset, io.SeekStart, limit)
+		_, err = fh.RangeSeek(ctx, offset, io.SeekStart, limit)
 		if err != nil {
 			_ = fh.Close()
 			return nil, err
@@ -903,7 +904,7 @@ func calculateUnderlying(offset, limit int64) (underlyingOffset, underlyingLimit
 // limiting the total length to limit.
 //
 // RangeSeek with a limit of < 0 is equivalent to a regular Seek.
-func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, error) {
+func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, limit int64) (int64, error) {
 	fh.mu.Lock()
 	defer fh.mu.Unlock()
@@ -930,7 +931,7 @@ func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, er
 	// Can we seek underlying stream directly?
 	if do, ok := fh.rc.(fs.RangeSeeker); ok {
 		// Seek underlying stream directly
-		_, err := do.RangeSeek(underlyingOffset, 0, underlyingLimit)
+		_, err := do.RangeSeek(ctx, underlyingOffset, 0, underlyingLimit)
 		if err != nil {
 			return 0, fh.finish(err)
 		}
@@ -940,7 +941,7 @@ func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, er
 		fh.rc = nil
 		// Re-open the underlying object with the offset given
-		rc, err := fh.open(underlyingOffset, underlyingLimit)
+		rc, err := fh.open(ctx, underlyingOffset, underlyingLimit)
 		if err != nil {
 			return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit"))
 		}
@@ -969,7 +970,7 @@ func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, er
 // Seek implements the io.Seeker interface
 func (fh *decrypter) Seek(offset int64, whence int) (int64, error) {
-	return fh.RangeSeek(offset, whence, -1)
+	return fh.RangeSeek(context.TODO(), offset, whence, -1)
 }
 // finish sets the final error and tidies up
@@ -1043,8 +1044,8 @@ func (c *cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
 // The open function must return a ReadCloser opened to the offset supplied
 //
 // You must use this form of DecryptData if you might want to Seek the file handle
-func (c *cipher) DecryptDataSeek(open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
-	out, err := c.newDecrypterSeek(open, offset, limit)
+func (c *cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
+	out, err := c.newDecrypterSeek(ctx, open, offset, limit)
 	if err != nil {
 		return nil, err
 	}
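The OpenRangeSeek callback now receives the context as well, so a RangeSeek that has to re-open the underlying object stays cancellable. A sketch of wiring an object's ranged Open into DecryptDataSeek under the new signatures, written as if inside the crypt package; o and c are assumed values, and the no-limit branch via fs.SeekOption is an assumption for illustration:

    // openDecrypted is a sketch, not code from this compare.
    func openDecrypted(ctx context.Context, o fs.Object, c Cipher, offset, limit int64) (ReadSeekCloser, error) {
    	open := func(ctx context.Context, off, lim int64) (io.ReadCloser, error) {
    		if lim < 0 {
    			return o.Open(ctx, &fs.SeekOption{Offset: off})
    		}
    		return o.Open(ctx, &fs.RangeOption{Start: off, End: off + lim - 1})
    	}
    	return c.DecryptDataSeek(ctx, open, offset, limit)
    }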


@@ -2,6 +2,7 @@ package crypt
 import (
 	"bytes"
+	"context"
 	"encoding/base32"
 	"fmt"
 	"io"
@@ -9,8 +10,8 @@ import (
 	"strings"
 	"testing"
-	"github.com/ncw/rclone/backend/crypt/pkcs7"
 	"github.com/pkg/errors"
+	"github.com/rclone/rclone/backend/crypt/pkcs7"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -704,16 +705,16 @@ var (
 // Test test infrastructure first!
 func TestRandomSource(t *testing.T) {
-	source := newRandomSource(1E8)
-	sink := newRandomSource(1E8)
+	source := newRandomSource(1e8)
+	sink := newRandomSource(1e8)
 	n, err := io.Copy(sink, source)
 	assert.NoError(t, err)
-	assert.Equal(t, int64(1E8), n)
-	source = newRandomSource(1E8)
+	assert.Equal(t, int64(1e8), n)
+	source = newRandomSource(1e8)
 	buf := make([]byte, 16)
 	_, _ = source.Read(buf)
-	sink = newRandomSource(1E8)
+	sink = newRandomSource(1e8)
 	_, err = io.Copy(sink, source)
 	assert.Error(t, err, "Error in stream")
 }
@@ -753,23 +754,23 @@ func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
 }
 func TestEncryptDecrypt1(t *testing.T) {
-	testEncryptDecrypt(t, 1, 1E7)
+	testEncryptDecrypt(t, 1, 1e7)
 }
 func TestEncryptDecrypt32(t *testing.T) {
-	testEncryptDecrypt(t, 32, 1E8)
+	testEncryptDecrypt(t, 32, 1e8)
 }
 func TestEncryptDecrypt4096(t *testing.T) {
-	testEncryptDecrypt(t, 4096, 1E8)
+	testEncryptDecrypt(t, 4096, 1e8)
 }
 func TestEncryptDecrypt65536(t *testing.T) {
-	testEncryptDecrypt(t, 65536, 1E8)
+	testEncryptDecrypt(t, 65536, 1e8)
 }
 func TestEncryptDecrypt65537(t *testing.T) {
-	testEncryptDecrypt(t, 65537, 1E8)
+	testEncryptDecrypt(t, 65537, 1e8)
 }
 var (
@@ -802,7 +803,7 @@ func TestEncryptData(t *testing.T) {
 	} {
 		c, err := newCipher(NameEncryptionStandard, "", "", true)
 		assert.NoError(t, err)
-		c.cryptoRand = newRandomSource(1E8) // nodge the crypto rand generator
+		c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
 		// Check encode works
 		buf := bytes.NewBuffer(test.in)
@@ -825,7 +826,7 @@ func TestEncryptData(t *testing.T) {
 func TestNewEncrypter(t *testing.T) {
 	c, err := newCipher(NameEncryptionStandard, "", "", true)
 	assert.NoError(t, err)
-	c.cryptoRand = newRandomSource(1E8) // nodge the crypto rand generator
+	c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
 	z := &zeroes{}
@@ -852,7 +853,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
 	fh, err := c.newEncrypter(in, nil)
 	assert.NoError(t, err)
-	n, err := io.CopyN(ioutil.Discard, fh, 1E6)
+	n, err := io.CopyN(ioutil.Discard, fh, 1e6)
 	assert.Equal(t, io.ErrUnexpectedEOF, err)
 	assert.Equal(t, int64(32), n)
 }
@@ -884,7 +885,7 @@ func (c *closeDetector) Close() error {
 func TestNewDecrypter(t *testing.T) {
 	c, err := newCipher(NameEncryptionStandard, "", "", true)
 	assert.NoError(t, err)
-	c.cryptoRand = newRandomSource(1E8) // nodge the crypto rand generator
+	c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
 	cd := newCloseDetector(bytes.NewBuffer(file0))
 	fh, err := c.newDecrypter(cd)
@@ -935,7 +936,7 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
 	fh, err := c.newDecrypter(in)
 	assert.NoError(t, err)
-	n, err := io.CopyN(ioutil.Discard, fh, 1E6)
+	n, err := io.CopyN(ioutil.Discard, fh, 1e6)
 	assert.Equal(t, io.ErrUnexpectedEOF, err)
 	assert.Equal(t, int64(16), n)
 }
@@ -965,7 +966,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 	// Open stream with a seek of underlyingOffset
 	var reader io.ReadCloser
-	open := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
+	open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
 		end := len(ciphertext)
 		if underlyingLimit >= 0 {
 			end = int(underlyingOffset + underlyingLimit)
@@ -1006,7 +1007,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 			if offset+limit > len(plaintext) {
 				continue
 			}
-			rc, err := c.DecryptDataSeek(open, int64(offset), int64(limit))
+			rc, err := c.DecryptDataSeek(context.Background(), open, int64(offset), int64(limit))
 			assert.NoError(t, err)
 			check(rc, offset, limit)
@@ -1014,14 +1015,14 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 	}
 	// Try decoding it with a single open and lots of seeks
-	fh, err := c.DecryptDataSeek(open, 0, -1)
+	fh, err := c.DecryptDataSeek(context.Background(), open, 0, -1)
 	assert.NoError(t, err)
 	for _, offset := range trials {
 		for _, limit := range limits {
 			if offset+limit > len(plaintext) {
 				continue
 			}
-			_, err := fh.RangeSeek(int64(offset), io.SeekStart, int64(limit))
+			_, err := fh.RangeSeek(context.Background(), int64(offset), io.SeekStart, int64(limit))
 			assert.NoError(t, err)
 			check(fh, offset, limit)
@@ -1072,7 +1073,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 	} {
 		what := fmt.Sprintf("offset = %d, limit = %d", test.offset, test.limit)
 		callCount := 0
-		testOpen := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
+		testOpen := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
 			switch callCount {
 			case 0:
 				assert.Equal(t, int64(0), underlyingOffset, what)
@@ -1084,11 +1085,11 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 				t.Errorf("Too many calls %d for %s", callCount+1, what)
 			}
 			callCount++
-			return open(underlyingOffset, underlyingLimit)
+			return open(ctx, underlyingOffset, underlyingLimit)
 		}
-		fh, err := c.DecryptDataSeek(testOpen, 0, -1)
+		fh, err := c.DecryptDataSeek(context.Background(), testOpen, 0, -1)
 		assert.NoError(t, err)
-		gotOffset, err := fh.RangeSeek(test.offset, io.SeekStart, test.limit)
+		gotOffset, err := fh.RangeSeek(context.Background(), test.offset, io.SeekStart, test.limit)
 		assert.NoError(t, err)
 		assert.Equal(t, gotOffset, test.offset)
 	}


@@ -2,19 +2,20 @@
package crypt package crypt
import ( import (
"context"
"fmt" "fmt"
"io" "io"
"strings" "strings"
"time" "time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fspath"
"github.com/ncw/rclone/fs/hash"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
) )
// Globals // Globals
@@ -232,7 +233,7 @@ func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
} }
// Encrypt an directory file name to entries. // Encrypt an directory file name to entries.
func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) { func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) {
remote := dir.Remote() remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote) decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil { if err != nil {
@@ -242,18 +243,18 @@ func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) {
if f.opt.ShowMapping { if f.opt.ShowMapping {
fs.Logf(decryptedRemote, "Encrypts to %q", remote) fs.Logf(decryptedRemote, "Encrypts to %q", remote)
} }
*entries = append(*entries, f.newDir(dir)) *entries = append(*entries, f.newDir(ctx, dir))
} }
// Encrypt some directory entries. This alters entries returning it as newEntries. // Encrypt some directory entries. This alters entries returning it as newEntries.
func (f *Fs) encryptEntries(entries fs.DirEntries) (newEntries fs.DirEntries, err error) { func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
newEntries = entries[:0] // in place filter newEntries = entries[:0] // in place filter
for _, entry := range entries { for _, entry := range entries {
switch x := entry.(type) { switch x := entry.(type) {
case fs.Object: case fs.Object:
f.add(&newEntries, x) f.add(&newEntries, x)
case fs.Directory: case fs.Directory:
f.addDir(&newEntries, x) f.addDir(ctx, &newEntries, x)
default: default:
return nil, errors.Errorf("Unknown object type %T", entry) return nil, errors.Errorf("Unknown object type %T", entry)
} }
@@ -270,12 +271,12 @@ func (f *Fs) encryptEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
//
// This should return ErrDirNotFound if the directory isn't
// found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
-   entries, err = f.Fs.List(f.cipher.EncryptDirName(dir))
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+   entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir))
    if err != nil {
        return nil, err
    }
-   return f.encryptEntries(entries)
+   return f.encryptEntries(ctx, entries)
}

// ListR lists the objects and directories of the Fs starting
@@ -294,9 +295,9 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
//
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
-func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
-   return f.Fs.Features().ListR(f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error {
-       newEntries, err := f.encryptEntries(entries)
+func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
+   return f.Fs.Features().ListR(ctx, f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error {
+       newEntries, err := f.encryptEntries(ctx, entries)
        if err != nil {
            return err
        }
@@ -305,18 +306,18 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
}

// NewObject finds the Object at remote.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
-   o, err := f.Fs.NewObject(f.cipher.EncryptFileName(remote))
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
+   o, err := f.Fs.NewObject(ctx, f.cipher.EncryptFileName(remote))
    if err != nil {
        return nil, err
    }
    return f.newObject(o), nil
}
-type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
+type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)

// put implements Put or PutStream
-func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
+func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
    // Encrypt the data into wrappedIn
    wrappedIn, err := f.cipher.EncryptData(in)
    if err != nil {
@@ -342,7 +343,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
    }

    // Transfer the data
-   o, err := put(wrappedIn, f.newObjectInfo(src), options...)
+   o, err := put(ctx, wrappedIn, f.newObjectInfo(src), options...)
    if err != nil {
        return nil, err
    }
@@ -351,13 +352,13 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
    if ht != hash.None && hasher != nil {
        srcHash := hasher.Sums()[ht]
        var dstHash string
-       dstHash, err = o.Hash(ht)
+       dstHash, err = o.Hash(ctx, ht)
        if err != nil {
            return nil, errors.Wrap(err, "failed to read destination hash")
        }
        if srcHash != "" && dstHash != "" && srcHash != dstHash {
            // remove object
-           err = o.Remove()
+           err = o.Remove(ctx)
            if err != nil {
                fs.Errorf(o, "Failed to remove corrupted object: %v", err)
            }
@@ -373,13 +374,13 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-   return f.put(in, src, options, f.Fs.Put)
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+   return f.put(ctx, in, src, options, f.Fs.Put)
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-   return f.put(in, src, options, f.Fs.Features().PutStream)
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+   return f.put(ctx, in, src, options, f.Fs.Features().PutStream)
}
// Hashes returns the supported hash sets.
@@ -390,15 +391,15 @@ func (f *Fs) Hashes() hash.Set {
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
-func (f *Fs) Mkdir(dir string) error {
-   return f.Fs.Mkdir(f.cipher.EncryptDirName(dir))
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
+   return f.Fs.Mkdir(ctx, f.cipher.EncryptDirName(dir))
}

// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
-func (f *Fs) Rmdir(dir string) error {
-   return f.Fs.Rmdir(f.cipher.EncryptDirName(dir))
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+   return f.Fs.Rmdir(ctx, f.cipher.EncryptDirName(dir))
}

// Purge all files in the root and the root directory
@@ -407,12 +408,12 @@ func (f *Fs) Rmdir(dir string) error {
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
-func (f *Fs) Purge() error {
+func (f *Fs) Purge(ctx context.Context) error {
    do := f.Fs.Features().Purge
    if do == nil {
        return fs.ErrorCantPurge
    }
-   return do()
+   return do(ctx)
}
// Copy src to this remote using server side copy operations.
@@ -424,7 +425,7 @@ func (f *Fs) Purge() error {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
    do := f.Fs.Features().Copy
    if do == nil {
        return nil, fs.ErrorCantCopy
@@ -433,7 +434,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
    if !ok {
        return nil, fs.ErrorCantCopy
    }
-   oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
+   oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote))
    if err != nil {
        return nil, err
    }
@@ -449,7 +450,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
    do := f.Fs.Features().Move
    if do == nil {
        return nil, fs.ErrorCantMove
@@ -458,7 +459,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
    if !ok {
        return nil, fs.ErrorCantMove
    }
-   oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
+   oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote))
    if err != nil {
        return nil, err
    }
@@ -473,7 +474,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
    do := f.Fs.Features().DirMove
    if do == nil {
        return fs.ErrorCantDirMove
@@ -483,14 +484,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
        fs.Debugf(srcFs, "Can't move directory - not same remote type")
        return fs.ErrorCantDirMove
    }
-   return do(srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote))
+   return do(ctx, srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote))
}
// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
-func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    do := f.Fs.Features().PutUnchecked
    if do == nil {
        return nil, errors.New("can't PutUnchecked")
@@ -499,7 +500,7 @@ func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOpt
    if err != nil {
        return nil, err
    }
-   o, err := do(wrappedIn, f.newObjectInfo(src))
+   o, err := do(ctx, wrappedIn, f.newObjectInfo(src))
    if err != nil {
        return nil, err
    }
@@ -510,21 +511,21 @@ func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOpt
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
-func (f *Fs) CleanUp() error {
+func (f *Fs) CleanUp(ctx context.Context) error {
    do := f.Fs.Features().CleanUp
    if do == nil {
        return errors.New("can't CleanUp")
    }
-   return do()
+   return do(ctx)
}

// About gets quota information from the Fs
-func (f *Fs) About() (*fs.Usage, error) {
+func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
    do := f.Fs.Features().About
    if do == nil {
        return nil, errors.New("About not supported")
    }
-   return do()
+   return do(ctx)
}
// UnWrap returns the Fs that this Fs is wrapping
@@ -556,10 +557,10 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
// src with it, and calculates the hash given by HashType on the fly
//
// Note that we break lots of encapsulation in this function.
-func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
+func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
    // Read the nonce - opening the file is sufficient to read the nonce in
    // use a limited read so we only read the header
-   in, err := o.Object.Open(&fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
+   in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
    if err != nil {
        return "", errors.Wrap(err, "failed to open object to read nonce")
    }
@@ -589,7 +590,7 @@ func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr
    }

    // Open the src for input
-   in, err = src.Open()
+   in, err = src.Open(ctx)
    if err != nil {
        return "", errors.Wrap(err, "failed to open src")
    }
@@ -616,16 +617,16 @@ func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr

// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
-func (f *Fs) MergeDirs(dirs []fs.Directory) error {
+func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
    do := f.Fs.Features().MergeDirs
    if do == nil {
        return errors.New("MergeDirs not supported")
    }
    out := make([]fs.Directory, len(dirs))
    for i, dir := range dirs {
-       out[i] = fs.NewDirCopy(dir).SetRemote(f.cipher.EncryptDirName(dir.Remote()))
+       out[i] = fs.NewDirCopy(ctx, dir).SetRemote(f.cipher.EncryptDirName(dir.Remote()))
    }
-   return do(out)
+   return do(ctx, out)
}

// DirCacheFlush resets the directory cache - used in testing
@@ -638,23 +639,23 @@ func (f *Fs) DirCacheFlush() {
}

// PublicLink generates a public link to the remote path (usually readable by anyone)
-func (f *Fs) PublicLink(remote string) (string, error) {
+func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
    do := f.Fs.Features().PublicLink
    if do == nil {
        return "", errors.New("PublicLink not supported")
    }
-   o, err := f.NewObject(remote)
+   o, err := f.NewObject(ctx, remote)
    if err != nil {
        // assume it is a directory
-       return do(f.cipher.EncryptDirName(remote))
+       return do(ctx, f.cipher.EncryptDirName(remote))
    }
-   return do(o.(*Object).Object.Remote())
+   return do(ctx, o.(*Object).Object.Remote())
}

// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
-func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
+func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
    do := f.Fs.Features().ChangeNotify
    if do == nil {
        return
@@ -680,7 +681,7 @@ func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalCha
        }
        notifyFunc(decrypted, entryType)
    }
-   do(wrappedNotifyFunc, pollIntervalChan)
+   do(ctx, wrappedNotifyFunc, pollIntervalChan)
}
// Object describes a wrapped for being read from the Fs
@@ -733,7 +734,7 @@ func (o *Object) Size() int64 {

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
-func (o *Object) Hash(ht hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
    return "", hash.ErrUnsupported
}
@@ -743,7 +744,7 @@ func (o *Object) UnWrap() fs.Object {
}

// Open opens the file for read. Call Close() on the returned io.ReadCloser
-func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
    var openOptions []fs.OpenOption
    var offset, limit int64 = 0, -1
    for _, option := range options {
@@ -757,10 +758,10 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
            openOptions = append(openOptions, option)
        }
    }
-   rc, err = o.f.cipher.DecryptDataSeek(func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
+   rc, err = o.f.cipher.DecryptDataSeek(ctx, func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
        if underlyingOffset == 0 && underlyingLimit < 0 {
            // Open with no seek
-           return o.Object.Open(openOptions...)
+           return o.Object.Open(ctx, openOptions...)
        }
        // Open stream with a range of underlyingOffset, underlyingLimit
        end := int64(-1)
@@ -771,7 +772,7 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
            }
        }
        newOpenOptions := append(openOptions, &fs.RangeOption{Start: underlyingOffset, End: end})
-       return o.Object.Open(newOpenOptions...)
+       return o.Object.Open(ctx, newOpenOptions...)
    }, offset, limit)
    if err != nil {
        return nil, err
@@ -780,17 +781,17 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
}

// Update in to the object with the modTime given of the given size
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-   update := func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-       return o.Object, o.Object.Update(in, src, options...)
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+   update := func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+       return o.Object, o.Object.Update(ctx, in, src, options...)
    }
-   _, err := o.f.put(in, src, options, update)
+   _, err := o.f.put(ctx, in, src, options, update)
    return err
}

// newDir returns a dir with the Name decrypted
-func (f *Fs) newDir(dir fs.Directory) fs.Directory {
-   newDir := fs.NewDirCopy(dir)
+func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
+   newDir := fs.NewDirCopy(ctx, dir)
    remote := dir.Remote()
    decryptedRemote, err := f.cipher.DecryptDirName(remote)
    if err != nil {
@@ -801,6 +802,24 @@ func (f *Fs) newDir(dir fs.Directory) fs.Directory {
    return newDir
}

+// UserInfo returns info about the connected user
+func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
+   do := f.Fs.Features().UserInfo
+   if do == nil {
+       return nil, fs.ErrorNotImplemented
+   }
+   return do(ctx)
+}
+
+// Disconnect the current user
+func (f *Fs) Disconnect(ctx context.Context) error {
+   do := f.Fs.Features().Disconnect
+   if do == nil {
+       return fs.ErrorNotImplemented
+   }
+   return do(ctx)
+}
+
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source
//
// This encrypts the remote name and adjusts the size
@@ -837,7 +856,7 @@ func (o *ObjectInfo) Size() int64 {

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
-func (o *ObjectInfo) Hash(hash hash.Type) (string, error) {
+func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
    return "", nil
}
@@ -887,6 +906,8 @@ var (
    _ fs.DirCacheFlusher = (*Fs)(nil)
    _ fs.ChangeNotifier  = (*Fs)(nil)
    _ fs.PublicLinker    = (*Fs)(nil)
+   _ fs.UserInfoer      = (*Fs)(nil)
+   _ fs.Disconnecter    = (*Fs)(nil)
    _ fs.ObjectInfo      = (*ObjectInfo)(nil)
    _ fs.Object          = (*Object)(nil)
    _ fs.ObjectUnWrapper = (*Object)(nil)
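
The crypt changes above are almost entirely mechanical: every method gains a leading ctx context.Context and forwards it, unchanged, to the wrapped remote. Below is a minimal self-contained sketch of that wrapping pattern; the Lister interface and the base/wrapper types are illustrative stand-ins written for this document, not rclone APIs.

package main

import (
	"context"
	"fmt"
)

// Lister mirrors the shape of the rewritten methods: a leading
// context.Context on every call.
type Lister interface {
	List(ctx context.Context, dir string) ([]string, error)
}

// base stands in for the wrapped remote; a real backend would hand
// ctx on to its HTTP client here.
type base struct{}

func (base) List(ctx context.Context, dir string) ([]string, error) {
	return []string{"file1", "file2"}, ctx.Err()
}

// wrapper stands in for crypt's Fs: it rewrites the arguments and
// forwards the same ctx, just as f.Fs.List(ctx, f.cipher.EncryptDirName(dir))
// does in the diff above.
type wrapper struct{ wrapped Lister }

func (w wrapper) List(ctx context.Context, dir string) ([]string, error) {
	return w.wrapped.List(ctx, "encrypted/"+dir)
}

func main() {
	entries, err := wrapper{base{}}.List(context.Background(), "dir")
	fmt.Println(entries, err)
}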


@@ -6,13 +6,13 @@ import (
    "path/filepath"
    "testing"

-   "github.com/ncw/rclone/backend/crypt"
-   _ "github.com/ncw/rclone/backend/drive" // for integration tests
-   _ "github.com/ncw/rclone/backend/local"
-   _ "github.com/ncw/rclone/backend/swift" // for integration tests
-   "github.com/ncw/rclone/fs/config/obscure"
-   "github.com/ncw/rclone/fstest"
-   "github.com/ncw/rclone/fstest/fstests"
+   "github.com/rclone/rclone/backend/crypt"
+   _ "github.com/rclone/rclone/backend/drive" // for integration tests
+   _ "github.com/rclone/rclone/backend/local"
+   _ "github.com/rclone/rclone/backend/swift" // for integration tests
+   "github.com/rclone/rclone/fs/config/obscure"
+   "github.com/rclone/rclone/fstest"
+   "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote

File diff suppressed because it is too large


@@ -2,6 +2,7 @@ package drive

import (
    "bytes"
+   "context"
    "encoding/json"
    "io"
    "io/ioutil"
@@ -10,11 +11,11 @@ import (
    "strings"
    "testing"

-   _ "github.com/ncw/rclone/backend/local"
-   "github.com/ncw/rclone/fs"
-   "github.com/ncw/rclone/fs/operations"
-   "github.com/ncw/rclone/fstest/fstests"
    "github.com/pkg/errors"
+   _ "github.com/rclone/rclone/backend/local"
+   "github.com/rclone/rclone/fs"
+   "github.com/rclone/rclone/fs/operations"
+   "github.com/rclone/rclone/fstest/fstests"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "google.golang.org/api/drive/v3"
@@ -195,7 +196,7 @@ func (f *Fs) InternalTestDocumentImport(t *testing.T) {
    _, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
    require.NoError(t, err)

-   err = operations.CopyFile(f, testFilesFs, "example2.doc", "example2.doc")
+   err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.doc", "example2.doc")
    require.NoError(t, err)
}
@@ -209,7 +210,7 @@ func (f *Fs) InternalTestDocumentUpdate(t *testing.T) {
    _, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
    require.NoError(t, err)

-   err = operations.CopyFile(f, testFilesFs, "example2.xlsx", "example1.ods")
+   err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.xlsx", "example1.ods")
    require.NoError(t, err)
}
@@ -220,10 +221,10 @@ func (f *Fs) InternalTestDocumentExport(t *testing.T) {
    f.exportExtensions, _, err = parseExtensions("txt")
    require.NoError(t, err)

-   obj, err := f.NewObject("example2.txt")
+   obj, err := f.NewObject(context.Background(), "example2.txt")
    require.NoError(t, err)

-   rc, err := obj.Open()
+   rc, err := obj.Open(context.Background())
    require.NoError(t, err)
    defer func() { require.NoError(t, rc.Close()) }()
@@ -246,10 +247,10 @@ func (f *Fs) InternalTestDocumentLink(t *testing.T) {
    f.exportExtensions, _, err = parseExtensions("link.html")
    require.NoError(t, err)

-   obj, err := f.NewObject("example2.link.html")
+   obj, err := f.NewObject(context.Background(), "example2.link.html")
    require.NoError(t, err)

-   rc, err := obj.Open()
+   rc, err := obj.Open(context.Background())
    require.NoError(t, err)
    defer func() { require.NoError(t, rc.Close()) }()


@@ -5,8 +5,8 @@ package drive
import (
    "testing"

-   "github.com/ncw/rclone/fs"
-   "github.com/ncw/rclone/fstest/fstests"
+   "github.com/rclone/rclone/fs"
+   "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote


@@ -11,6 +11,7 @@
package drive

import (
+   "context"
    "encoding/json"
    "fmt"
    "io"
@@ -19,10 +20,10 @@ import (
    "regexp"
    "strconv"

-   "github.com/ncw/rclone/fs"
-   "github.com/ncw/rclone/fs/fserrors"
-   "github.com/ncw/rclone/lib/readers"
    "github.com/pkg/errors"
+   "github.com/rclone/rclone/fs"
+   "github.com/rclone/rclone/fs/fserrors"
+   "github.com/rclone/rclone/lib/readers"
    "google.golang.org/api/drive/v3"
    "google.golang.org/api/googleapi"
)
@@ -50,15 +51,13 @@ type resumableUpload struct {
}

// Upload the io.Reader in of size bytes with contentType and info
-func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
+func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
    params := url.Values{
        "alt":        {"json"},
        "uploadType": {"resumable"},
        "fields":     {partialFields},
    }
-   if f.isTeamDrive {
-       params.Set("supportsTeamDrives", "true")
-   }
+   params.Set("supportsAllDrives", "true")
    if f.opt.KeepRevisionForever {
        params.Set("keepRevisionForever", "true")
    }
@@ -83,6 +82,7 @@ func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string
    if err != nil {
        return false, err
    }
+   req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
    googleapi.Expand(req.URL, map[string]string{
        "fileId": fileID,
    })
@@ -108,12 +108,13 @@ func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string
        MediaType:     contentType,
        ContentLength: size,
    }
-   return rx.Upload()
+   return rx.Upload(ctx)
}

// Make an http.Request for the range passed in
-func (rx *resumableUpload) makeRequest(start int64, body io.ReadSeeker, reqSize int64) *http.Request {
+func (rx *resumableUpload) makeRequest(ctx context.Context, start int64, body io.ReadSeeker, reqSize int64) *http.Request {
    req, _ := http.NewRequest("POST", rx.URI, body)
+   req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
    req.ContentLength = reqSize
    if reqSize != 0 {
        req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
@@ -131,8 +132,8 @@ var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)

// Query drive for the amount transferred so far
//
// If error is nil, then start should be valid
-func (rx *resumableUpload) transferStatus() (start int64, err error) {
-   req := rx.makeRequest(0, nil, 0)
+func (rx *resumableUpload) transferStatus(ctx context.Context) (start int64, err error) {
+   req := rx.makeRequest(ctx, 0, nil, 0)
    res, err := rx.f.client.Do(req)
    if err != nil {
        return 0, err
@@ -159,9 +160,9 @@ func (rx *resumableUpload) transferStatus() (start int64, err error) {
}

// Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
-func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
+func (rx *resumableUpload) transferChunk(ctx context.Context, start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
    _, _ = chunk.Seek(0, io.SeekStart)
-   req := rx.makeRequest(start, chunk, chunkSize)
+   req := rx.makeRequest(ctx, start, chunk, chunkSize)
    res, err := rx.f.client.Do(req)
    if err != nil {
        return 599, err
@@ -194,7 +195,7 @@ func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunk

// Upload uploads the chunks from the input
// It retries each chunk using the pacer and --low-level-retries
-func (rx *resumableUpload) Upload() (*drive.File, error) {
+func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
    start := int64(0)
    var StatusCode int
    var err error
@@ -209,7 +210,7 @@ func (rx *resumableUpload) Upload() (*drive.File, error) {
        // Transfer the chunk
        err = rx.f.pacer.Call(func() (bool, error) {
            fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
-           StatusCode, err = rx.transferChunk(start, chunk, reqSize)
+           StatusCode, err = rx.transferChunk(ctx, start, chunk, reqSize)
            again, err := shouldRetry(err)
            if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
                again = false
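
The `req = req.WithContext(ctx)` lines above attach the caller's context to each upload request, so cancellation propagates into the HTTP layer; the comments note that go1.13 can do the same in one call. A small self-contained sketch of both forms (the URL is a placeholder, not a real endpoint):

package main

import (
	"context"
	"net/http"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Pre-go1.13 form used in the diff: build the request, then attach ctx.
	req, err := http.NewRequest("POST", "https://upload.example.com/", nil)
	if err != nil {
		panic(err)
	}
	req = req.WithContext(ctx)

	// go1.13 form the comments refer to: one call does both.
	req2, err := http.NewRequestWithContext(ctx, "POST", "https://upload.example.com/", nil)
	if err != nil {
		panic(err)
	}
	_, _ = req, req2
}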


@@ -5,7 +5,7 @@ import (
    "fmt"
    "testing"

-   "github.com/ncw/rclone/backend/dropbox/dbhash"
+   "github.com/rclone/rclone/backend/dropbox/dbhash"
    "github.com/stretchr/testify/assert"
)


@@ -22,6 +22,7 @@ of path_display and all will be well.
*/

import (
+   "context"
    "fmt"
    "io"
    "log"
@@ -37,17 +38,17 @@ import (
    "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
    "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
    "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
-   "github.com/ncw/rclone/fs"
-   "github.com/ncw/rclone/fs/config"
-   "github.com/ncw/rclone/fs/config/configmap"
-   "github.com/ncw/rclone/fs/config/configstruct"
-   "github.com/ncw/rclone/fs/config/obscure"
-   "github.com/ncw/rclone/fs/fserrors"
-   "github.com/ncw/rclone/fs/hash"
-   "github.com/ncw/rclone/lib/oauthutil"
-   "github.com/ncw/rclone/lib/pacer"
-   "github.com/ncw/rclone/lib/readers"
    "github.com/pkg/errors"
+   "github.com/rclone/rclone/fs"
+   "github.com/rclone/rclone/fs/config"
+   "github.com/rclone/rclone/fs/config/configmap"
+   "github.com/rclone/rclone/fs/config/configstruct"
+   "github.com/rclone/rclone/fs/config/obscure"
+   "github.com/rclone/rclone/fs/fserrors"
+   "github.com/rclone/rclone/fs/hash"
+   "github.com/rclone/rclone/lib/oauthutil"
+   "github.com/rclone/rclone/lib/pacer"
+   "github.com/rclone/rclone/lib/readers"
    "golang.org/x/oauth2"
)
@@ -441,7 +442,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *files.FileMetadata) (fs.Obje

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
    return f.newObjectWithInfo(remote, nil)
}
@@ -454,7 +455,7 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
//
// This should return ErrDirNotFound if the directory isn't
// found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    root := f.slashRoot
    if dir != "" {
        root += "/" + dir
@@ -541,22 +542,22 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    // Temporary Object under construction
    o := &Object{
        fs:     f,
        remote: src.Remote(),
    }
-   return o, o.Update(in, src, options...)
+   return o, o.Update(ctx, in, src, options...)
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-   return f.Put(in, src, options...)
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+   return f.Put(ctx, in, src, options...)
}

// Mkdir creates the container if it doesn't exist
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
    root := path.Join(f.slashRoot, dir)

    // can't create or run metadata on root
@@ -586,7 +587,7 @@ func (f *Fs) Mkdir(dir string) error {
// Rmdir deletes the container
//
// Returns an error if it isn't empty
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    root := path.Join(f.slashRoot, dir)

    // can't remove root
@@ -642,7 +643,7 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
    srcObj, ok := src.(*Object)
    if !ok {
        fs.Debugf(src, "Can't copy - not same remote type")
@@ -687,7 +688,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
-func (f *Fs) Purge() (err error) {
+func (f *Fs) Purge(ctx context.Context) (err error) {
    // Let dropbox delete the filesystem tree
    err = f.pacer.Call(func() (bool, error) {
        _, err = f.srv.DeleteV2(&files.DeleteArg{Path: f.slashRoot})
@@ -705,7 +706,7 @@ func (f *Fs) Purge() (err error) {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
    srcObj, ok := src.(*Object)
    if !ok {
        fs.Debugf(src, "Can't move - not same remote type")
@@ -745,7 +746,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
}

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
-func (f *Fs) PublicLink(remote string) (link string, err error) {
+func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
    absPath := "/" + path.Join(f.Root(), remote)
    fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
    createArg := sharing.CreateSharedLinkWithSettingsArg{
@@ -798,7 +799,7 @@ func (f *Fs) PublicLink(remote string) (link string, err error) {
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
    srcFs, ok := src.(*Fs)
    if !ok {
        fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -834,7 +835,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
}

// About gets quota information
-func (f *Fs) About() (usage *fs.Usage, err error) {
+func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
    var q *users.SpaceUsage
    err = f.pacer.Call(func() (bool, error) {
        q, err = f.users.GetSpaceUsage()
@@ -886,7 +887,7 @@ func (o *Object) Remote() string {
}

// Hash returns the dropbox special hash
-func (o *Object) Hash(t hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
    if t != hash.Dropbox {
        return "", hash.ErrUnsupported
    }
@@ -948,7 +949,7 @@ func (o *Object) readMetaData() (err error) {
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
-func (o *Object) ModTime() time.Time {
+func (o *Object) ModTime(ctx context.Context) time.Time {
    err := o.readMetaData()
    if err != nil {
        fs.Debugf(o, "Failed to read metadata: %v", err)
@@ -960,7 +961,7 @@ func (o *Object) ModTime() time.Time {
// SetModTime sets the modification time of the local fs object
//
// Commits the datastore
-func (o *Object) SetModTime(modTime time.Time) error {
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
    // Dropbox doesn't have a way of doing this so returning this
    // error will cause the file to be deleted first then
    // re-uploaded to set the time.
@@ -973,7 +974,8 @@ func (o *Object) Storable() bool {
}

// Open an object for read
-func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+   fs.FixRangeOption(options, o.bytes)
    headers := fs.OpenOptionHeaders(options)
    arg := files.DownloadArg{Path: o.remotePath(), ExtraHeaders: headers}
    err = o.fs.pacer.Call(func() (bool, error) {
@@ -1099,7 +1101,7 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
    remote := o.remotePath()
    if ignoredFiles.MatchString(remote) {
        fs.Logf(o, "File name disallowed - not uploading")
@@ -1108,7 +1110,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
    commitInfo := files.NewCommitInfo(o.remotePath())
    commitInfo.Mode.Tag = "overwrite"
    // The Dropbox API only accepts timestamps in UTC with second precision.
-   commitInfo.ClientModified = src.ModTime().UTC().Round(time.Second)
+   commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second)

    size := src.Size()
    var err error
@@ -1128,7 +1130,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}

// Remove an object
-func (o *Object) Remove() (err error) {
+func (o *Object) Remove(ctx context.Context) (err error) {
    err = o.fs.pacer.Call(func() (bool, error) {
        _, err = o.fs.srv.DeleteV2(&files.DeleteArg{Path: o.remotePath()})
        return shouldRetry(err)
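
One behavioural detail worth noting in the dropbox diff: ClientModified is now derived from src.ModTime(ctx), and, as the code comment says, the Dropbox API only accepts UTC timestamps with second precision. A quick self-contained illustration of that rounding (the timestamp is made up for the example):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Sub-second precision is discarded before the commit, matching
	// src.ModTime(ctx).UTC().Round(time.Second) in the diff above.
	t := time.Date(2019, 9, 25, 10, 17, 7, 499000000, time.UTC)
	fmt.Println(t.Round(time.Second)) // 2019-09-25 10:17:07 +0000 UTC
}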


@@ -4,8 +4,8 @@ package dropbox
import (
    "testing"

-   "github.com/ncw/rclone/fs"
-   "github.com/ncw/rclone/fstest/fstests"
+   "github.com/rclone/rclone/fs"
+   "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
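
The test files in this comparison are all cut off after the imports; their bodies share the same one-function boilerplate. Roughly, from memory of rclone's test convention at the time (treat the exact Opt fields and remote name as assumptions, not part of this diff):

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestDropbox:",     // assumed config name for this backend
		NilObject:  (*Object)(nil),    // lets fstests check typed-nil handling
	})
}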

backend/fichier/api.go Normal file

@@ -0,0 +1,389 @@
package fichier
import (
"context"
"io"
"net/http"
"regexp"
"strconv"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/rest"
)
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
var isAlphaNumeric = regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString
func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
request := DownloadRequest{
URL: url,
Single: 1,
}
opts := rest.Opts{
Method: "POST",
Path: "/download/get_token.cgi",
}
var token GetTokenResponse
err := f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
}
return &token, nil
}
func fileFromSharedFile(file *SharedFile) File {
return File{
URL: file.Link,
Filename: file.Filename,
Size: file.Size,
}
}
func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntries, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: "https://1fichier.com/dir/",
Path: id,
Parameters: map[string][]string{"json": {"1"}},
}
var sharedFiles SharedFolderResponse
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, &sharedFiles)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
}
entries = make([]fs.DirEntry, len(sharedFiles))
for i, sharedFile := range sharedFiles {
entries[i] = f.newObjectFromFile(ctx, "", fileFromSharedFile(&sharedFile))
}
return entries, nil
}
func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesList, err error) {
// fs.Debugf(f, "Requesting files for dir `%s`", directoryID)
request := ListFilesRequest{
FolderID: directoryID,
}
opts := rest.Opts{
Method: "POST",
Path: "/file/ls.cgi",
}
filesList = &FilesList{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, filesList)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
}
return filesList, nil
}
func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *FoldersList, err error) {
// fs.Debugf(f, "Requesting folders for id `%s`", directoryID)
request := ListFolderRequest{
FolderID: directoryID,
}
opts := rest.Opts{
Method: "POST",
Path: "/folder/ls.cgi",
}
foldersList = &FoldersList{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, foldersList)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list folders")
}
// fs.Debugf(f, "Got FoldersList for id `%s`", directoryID)
return foldersList, err
}
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
}
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
files, err := f.listFiles(ctx, folderID)
if err != nil {
return nil, err
}
folders, err := f.listFolders(ctx, folderID)
if err != nil {
return nil, err
}
entries = make([]fs.DirEntry, len(files.Items)+len(folders.SubFolders))
for i, item := range files.Items {
item.Filename = restoreReservedChars(item.Filename)
entries[i] = f.newObjectFromFile(ctx, dir, item)
}
for i, folder := range folders.SubFolders {
createDate, err := time.Parse("2006-01-02 15:04:05", folder.CreateDate)
if err != nil {
return nil, err
}
folder.Name = restoreReservedChars(folder.Name)
fullPath := getRemote(dir, folder.Name)
folderID := strconv.Itoa(folder.ID)
entries[len(files.Items)+i] = fs.NewDir(fullPath, createDate).SetID(folderID)
// fs.Debugf(f, "Put Path `%s` for id `%d` into dircache", fullPath, folder.ID)
f.dirCache.Put(fullPath, folderID)
}
return entries, nil
}
func (f *Fs) newObjectFromFile(ctx context.Context, dir string, item File) *Object {
return &Object{
fs: f,
remote: getRemote(dir, item.Filename),
file: item,
}
}
func getRemote(dir, fileName string) string {
if dir == "" {
return fileName
}
return dir + "/" + fileName
}
func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (response *MakeFolderResponse, err error) {
name := replaceReservedChars(leaf)
// fs.Debugf(f, "Creating folder `%s` in id `%s`", name, directoryID)
request := MakeFolderRequest{
FolderID: folderID,
Name: name,
}
opts := rest.Opts{
Method: "POST",
Path: "/folder/mkdir.cgi",
}
response = &MakeFolderResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create folder")
}
// fs.Debugf(f, "Created Folder `%s` in id `%s`", name, directoryID)
return response, err
}
func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (response *GenericOKResponse, err error) {
// fs.Debugf(f, "Removing folder with id `%s`", directoryID)
request := &RemoveFolderRequest{
FolderID: folderID,
}
opts := rest.Opts{
Method: "POST",
Path: "/folder/rm.cgi",
}
response = &GenericOKResponse{}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't remove folder")
}
if response.Status != "OK" {
return nil, errors.New("Can't remove non-empty dir")
}
// fs.Debugf(f, "Removed Folder with id `%s`", directoryID)
return response, nil
}
func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKResponse, err error) {
request := &RemoveFileRequest{
Files: []RmFile{
{url},
},
}
opts := rest.Opts{
Method: "POST",
Path: "/file/rm.cgi",
}
response = &GenericOKResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't remove file")
}
// fs.Debugf(f, "Removed file with url `%s`", url)
return response, nil
}
func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse, err error) {
// fs.Debugf(f, "Requesting Upload node")
opts := rest.Opts{
Method: "GET",
ContentType: "application/json", // 1Fichier API is bad
Path: "/upload/get_upload_server.cgi",
}
response = &GetUploadNodeResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "didnt got an upload node")
}
// fs.Debugf(f, "Got Upload node")
return response, err
}
func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
// fs.Debugf(f, "Uploading File `%s`", fileName)
fileName = replaceReservedChars(fileName)
if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
return nil, errors.New("Invalid UploadID")
}
opts := rest.Opts{
Method: "POST",
Path: "/upload.cgi",
Parameters: map[string][]string{
"id": {uploadID},
},
NoResponse: true,
Body: in,
ContentLength: &size,
MultipartContentName: "file[]",
MultipartFileName: fileName,
MultipartParams: map[string][]string{
"did": {folderID},
},
}
if node != "" {
opts.RootURL = "https://" + node
}
err = f.pacer.CallNoRetry(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, nil)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't upload file")
}
// fs.Debugf(f, "Uploaded File `%s`", fileName)
return response, err
}
func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (response *EndFileUploadResponse, err error) {
// fs.Debugf(f, "Ending File Upload `%s`", uploadID)
if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
return nil, errors.New("Invalid UploadID")
}
opts := rest.Opts{
Method: "GET",
Path: "/end.pl",
RootURL: "https://" + nodeurl,
Parameters: map[string][]string{
"xid": {uploadID},
},
ExtraHeaders: map[string]string{
"JSON": "1",
},
}
response = &EndFileUploadResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't finish file upload")
}
return response, err
}
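
Every API helper above follows the same call shape: wrap the REST request in f.pacer.Call and let shouldRetry decide, from the HTTP status or transport error, whether the pacer should re-run the closure. Below is a self-contained toy version of that control flow; the call function is a deliberately simplified stand-in for rclone's pacer, not its real implementation.

package main

import (
	"errors"
	"fmt"
	"net/http"
	"time"
)

// Same status codes as retryErrorCodes in api.go above.
var retryable = map[int]bool{429: true, 500: true, 502: true, 503: true, 504: true, 509: true}

// shouldRetry mirrors api.go in spirit: retry on a transport error or
// a retryable HTTP status.
func shouldRetry(resp *http.Response, err error) (bool, error) {
	if err != nil {
		return true, err
	}
	return resp != nil && retryable[resp.StatusCode], err
}

// call is a toy pacer: retry with a doubling sleep, starting from the
// ~3 calls per second the 1Fichier API recommends.
func call(fn func() (*http.Response, error)) error {
	sleep := 334 * time.Millisecond
	for try := 0; try < 10; try++ {
		resp, err := fn()
		if again, _ := shouldRetry(resp, err); !again {
			return err
		}
		time.Sleep(sleep)
		sleep *= 2
	}
	return errors.New("retries exhausted")
}

func main() {
	n := 0
	err := call(func() (*http.Response, error) {
		n++
		if n < 3 {
			return &http.Response{StatusCode: 503}, nil // retried twice
		}
		return &http.Response{StatusCode: 200}, nil
	})
	fmt.Println(n, err) // 3 <nil>
}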

backend/fichier/fichier.go Normal file

@@ -0,0 +1,411 @@
package fichier
import (
"context"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
const (
rootID = "0"
apiBaseURL = "https://api.1fichier.com/v1"
minSleep = 334 * time.Millisecond // 3 API calls per second is recommended
maxSleep = 5 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
func init() {
fs.Register(&fs.RegInfo{
Name: "fichier",
Description: "1Fichier",
Config: func(name string, config configmap.Mapper) {
},
NewFs: NewFs,
Options: []fs.Option{
{
Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
Name: "api_key",
},
{
Help: "If you want to download a shared folder, add this parameter",
Name: "shared_folder",
Required: false,
Advanced: true,
},
},
})
}
// Options defines the configuration for this backend
type Options struct {
APIKey string `config:"api_key"`
SharedFolder string `config:"shared_folder"`
}
// Fs is the interface a cloud storage system must provide
type Fs struct {
root string
name string
features *fs.Features
dirCache *dircache.DirCache
baseClient *http.Client
options *Options
pacer *fs.Pacer
rest *rest.Client
}
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
folderID, err := strconv.Atoi(pathID)
if err != nil {
return "", false, err
}
folders, err := f.listFolders(ctx, folderID)
if err != nil {
return "", false, err
}
for _, folder := range folders.SubFolders {
if folder.Name == leaf {
pathIDOut := strconv.Itoa(folder.ID)
return pathIDOut, true, nil
}
}
return "", false, nil
}
// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
folderID, err := strconv.Atoi(pathID)
if err != nil {
return "", err
}
resp, err := f.makeFolder(ctx, leaf, folderID)
if err != nil {
return "", err
}
return strconv.Itoa(resp.FolderID), err
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("1Fichier root '%s'", f.root)
}
// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Hashes returns the supported hash types of the filesystem
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.Whirlpool)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// NewFs makes a new Fs object from the path
//
// The path is of the form remote:path
//
// Remotes are looked up in the config file. If the remote isn't
// found then NotFoundInConfigFile will be returned.
//
// On Windows avoid single character remote names as they can be mixed
// up with drive letters.
func NewFs(name string, rootleaf string, config configmap.Mapper) (fs.Fs, error) {
root := replaceReservedChars(rootleaf)
opt := new(Options)
err := configstruct.Set(config, opt)
if err != nil {
return nil, err
}
// If using a Shared Folder override root
if opt.SharedFolder != "" {
root = ""
}
//workaround for wonky parser
root = strings.Trim(root, "/")
f := &Fs{
name: name,
root: root,
options: opt,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
baseClient: &http.Client{},
}
f.features = (&fs.Features{
DuplicateFiles: true,
CanHaveEmptyDirectories: true,
}).Fill(f)
client := fshttp.NewClient(fs.Config)
f.rest = rest.NewClient(client).SetRoot(apiBaseURL)
f.rest.SetHeader("Authorization", "Bearer "+f.options.APIKey)
f.dirCache = dircache.New(root, rootID, f)
ctx := context.Background()
// Find the current root
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
tempF := *f
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
tempF.root = newRoot
// Make new Fs which is the parent
err = tempF.dirCache.FindRoot(ctx, false)
if err != nil {
// No root so return old f
return f, nil
}
_, err := tempF.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
return f, nil
}
return nil, err
}
f.features.Fill(&tempF)
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.options.SharedFolder != "" {
return f.listSharedFiles(ctx, f.options.SharedFolder)
}
dirContent, err := f.listDir(ctx, dir)
if err != nil {
return nil, err
}
return dirContent, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, fs.ErrorObjectNotFound
}
return nil, err
}
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
files, err := f.listFiles(ctx, folderID)
if err != nil {
return nil, err
}
for _, file := range files.Items {
if file.Filename == leaf {
path, ok := f.dirCache.GetInv(directoryID)
if !ok {
return nil, errors.New("cannot find dir in dircache")
}
return f.newObjectFromFile(ctx, path, file), nil
}
}
return nil, fs.ErrorObjectNotFound
}
// Put in to the remote path with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
// return an error or upload it properly (rather than e.g. calling panic).
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
existingObj, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src, options...)
default:
return nil, err
}
}
// putUnchecked uploads the object with the given name and size
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
if size > int64(100e9) {
return nil, errors.New("file too big, can't upload")
} else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
nodeResponse, err := f.getUploadNode(ctx)
if err != nil {
return nil, err
}
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return nil, err
}
_, err = f.uploadFile(ctx, in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL)
if err != nil {
return nil, err
}
fileUploadResponse, err := f.endUpload(ctx, nodeResponse.ID, nodeResponse.URL)
if err != nil {
return nil, err
}
if len(fileUploadResponse.Links) != 1 {
return nil, errors.New("unexpected number of files")
}
link := fileUploadResponse.Links[0]
fileSize, err := strconv.ParseInt(link.Size, 10, 64)
if err != nil {
return nil, err
}
return &Object{
fs: f,
remote: remote,
file: File{
ACL: 0,
CDN: 0,
Checksum: link.Whirlpool,
ContentType: "",
Date: time.Now().Format("2006-01-02 15:04:05"),
Filename: link.Filename,
Pass: 0,
Size: int(fileSize),
URL: link.Download,
},
}, nil
}
// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.putUnchecked(ctx, in, src.Remote(), src.Size(), options...)
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
err := f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true)
}
return err
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
err := f.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
}
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return err
}
_, err = f.removeFolder(ctx, dir, folderID)
if err != nil {
return err
}
f.dirCache.FlushDir(dir)
return nil
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ dircache.DirCacher = (*Fs)(nil)
)
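FindLeaf and CreateDir above are all that dircache needs: it resolves a path like "a/b/c" one segment at a time, looking each leaf up in its parent folder and caching the IDs it discovers. A minimal, self-contained sketch of that walk — the lookup table stands in for listFolders, and all names here are hypothetical, not the rclone API:

package main

import (
	"fmt"
	"strings"
)

// tree is a stand-in for the remote: parentID+"/"+leaf -> folderID.
var tree = map[string]string{
	"0/a": "1",
	"1/b": "2",
	"2/c": "3",
}

// findLeaf mimics the FindLeaf contract: look one name up in one folder.
func findLeaf(pathID, leaf string) (string, bool) {
	id, ok := tree[pathID+"/"+leaf]
	return id, ok
}

// findPath resolves "a/b/c" the way dircache does: segment by segment,
// starting from the root ID ("0" here, rootID in the backend).
func findPath(p string) (string, bool) {
	id := "0"
	for _, leaf := range strings.Split(p, "/") {
		next, ok := findLeaf(id, leaf)
		if !ok {
			return "", false
		}
		id = next
	}
	return id, true
}

func main() {
	fmt.Println(findPath("a/b/c")) // 3 true
	fmt.Println(findPath("a/x"))   // "" false
}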

backend/fichier/fichier_test.go Normal file

@@ -0,0 +1,17 @@
// Test 1Fichier filesystem interface
package fichier
import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fs.Config.LogLevel = fs.LogLevelDebug
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFichier:",
})
}

backend/fichier/object.go Normal file

@@ -0,0 +1,158 @@
package fichier
import (
"context"
"io"
"net/http"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/rest"
)
// Object is a filesystem like object provided by an Fs
type Object struct {
fs *Fs
remote string
file File
}
// String returns a description of the Object
func (o *Object) String() string {
return o.file.Filename
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o *Object) ModTime(ctx context.Context) time.Time {
modTime, err := time.Parse("2006-01-02 15:04:05", o.file.Date)
if err != nil {
return time.Now()
}
return modTime
}
// Size returns the size of the file
func (o *Object) Size() int64 {
return int64(o.file.Size)
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.fs
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.Whirlpool {
return "", hash.ErrUnsupported
}
return o.file.Checksum, nil
}
// Storable says whether this object can be stored
func (o *Object) Storable() bool {
return true
}
// SetModTime sets the metadata on the object to set the modification date
func (o *Object) SetModTime(context.Context, time.Time) error {
return fs.ErrorCantSetModTime
//return errors.New("setting modtime is not supported for 1fichier remotes")
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
fs.FixRangeOption(options, int64(o.file.Size))
downloadToken, err := o.fs.getDownloadToken(ctx, o.file.URL)
if err != nil {
return nil, err
}
var resp *http.Response
opts := rest.Opts{
Method: "GET",
RootURL: downloadToken.URL,
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.rest.Call(ctx, &opts)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
return resp.Body, err
}
// Update in to the object with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
if src.Size() < 0 {
return errors.New("refusing to update with unknown size")
}
// upload with new size but old name
info, err := o.fs.putUnchecked(ctx, in, o.Remote(), src.Size(), options...)
if err != nil {
return err
}
// Delete duplicate after successful upload
err = o.Remove(ctx)
if err != nil {
return errors.Wrap(err, "failed to remove old version")
}
// Replace guts of old object with new one
*o = *info.(*Object)
return nil
}
// Remove removes this object
func (o *Object) Remove(ctx context.Context) error {
// fs.Debugf(f, "Removing file `%s` with url `%s`", o.file.Filename, o.file.URL)
_, err := o.fs.deleteFile(ctx, o.file.URL)
if err != nil {
return err
}
return nil
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
return o.file.ContentType
}
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
return o.file.URL
}
// Check the interfaces are satisfied
var (
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)
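Update above is the usual upload-then-swap pattern for a remote with no in-place overwrite: upload the replacement first (1Fichier tolerates duplicate names), delete the old copy only once the upload has succeeded, then copy the new object's fields over the receiver so existing handles stay valid. A stdlib-only sketch of the same idea, with hypothetical upload/remove helpers standing in for the API calls:

package main

import (
	"errors"
	"fmt"
)

type object struct {
	name string
	size int64
}

// upload and remove stand in for the remote API calls.
func upload(name string, size int64) (*object, error) {
	if size < 0 {
		return nil, errors.New("refusing to update with unknown size")
	}
	return &object{name: name, size: size}, nil
}

func remove(o *object) error { return nil }

// update uploads the new data first, removes the old copy only after
// success, then replaces the guts of the old object so callers keep
// a valid handle.
func update(o *object, size int64) error {
	newObj, err := upload(o.name, size) // may briefly create a duplicate
	if err != nil {
		return err
	}
	if err := remove(o); err != nil {
		return fmt.Errorf("failed to remove old version: %w", err)
	}
	*o = *newObj
	return nil
}

func main() {
	o := &object{name: "report.txt", size: 10}
	_ = update(o, 42)
	fmt.Println(o.size) // 42
}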

backend/fichier/replace.go Normal file

@@ -0,0 +1,71 @@
/*
Translate file names for 1fichier
1Fichier reserved characters
The following characters are 1Fichier reserved characters, and can't
be used in 1Fichier folder and file names.
*/
package fichier
import (
"regexp"
"strings"
)
// charMap holds replacements for characters
//
// 1Fichier has a restricted set of characters compared to other cloud
// storage systems, so we map these to the FULLWIDTH Unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
charMap = map[rune]rune{
'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
'<': '＜', // FULLWIDTH LESS-THAN SIGN
'>': '＞', // FULLWIDTH GREATER-THAN SIGN
'"': '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
'\'': '＇', // FULLWIDTH APOSTROPHE
'$': '＄', // FULLWIDTH DOLLAR SIGN
'`': '｀', // FULLWIDTH GRAVE ACCENT
' ': '␠', // SYMBOL FOR SPACE
}
invCharMap map[rune]rune
fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
)
func init() {
// Create inverse charMap
invCharMap = make(map[rune]rune, len(charMap))
for k, v := range charMap {
invCharMap[v] = k
}
}
// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
// file names can't start with space either
in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
// Replace reserved characters
return strings.Map(func(c rune) rune {
if replacement, ok := charMap[c]; ok && c != ' ' {
return replacement
}
return c
}, in)
}
// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
return strings.Map(func(c rune) rune {
if replacement, ok := invCharMap[c]; ok {
return replacement
}
return c
}, in)
}

backend/fichier/replace_test.go Normal file

@@ -0,0 +1,24 @@
package fichier
import "testing"
func TestReplace(t *testing.T) {
for _, test := range []struct {
in string
out string
}{
{"", ""},
{"abc 123", "abc 123"},
{"\"'<>/\\$`", `＂＇＜＞/＼＄｀`},
{" leading space", "␠leading space"},
} {
got := replaceReservedChars(test.in)
if got != test.out {
t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
}
got2 := restoreReservedChars(got)
if got2 != test.in {
t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
}
}
}

backend/fichier/structs.go Normal file

@@ -0,0 +1,120 @@
package fichier
// ListFolderRequest is the request structure of the corresponding request
type ListFolderRequest struct {
FolderID int `json:"folder_id"`
}
// ListFilesRequest is the request structure of the corresponding request
type ListFilesRequest struct {
FolderID int `json:"folder_id"`
}
// DownloadRequest is the request structure of the corresponding request
type DownloadRequest struct {
URL string `json:"url"`
Single int `json:"single"`
}
// RemoveFolderRequest is the request structure of the corresponding request
type RemoveFolderRequest struct {
FolderID int `json:"folder_id"`
}
// RemoveFileRequest is the request structure of the corresponding request
type RemoveFileRequest struct {
Files []RmFile `json:"files"`
}
// RmFile is the request structure of the corresponding request
type RmFile struct {
URL string `json:"url"`
}
// GenericOKResponse is the response structure of the corresponding request
type GenericOKResponse struct {
Status string `json:"status"`
Message string `json:"message"`
}
// MakeFolderRequest is the request structure of the corresponding request
type MakeFolderRequest struct {
Name string `json:"name"`
FolderID int `json:"folder_id"`
}
// MakeFolderResponse is the response structure of the corresponding request
type MakeFolderResponse struct {
Name string `json:"name"`
FolderID int `json:"folder_id"`
}
// GetUploadNodeResponse is the response structure of the corresponding request
type GetUploadNodeResponse struct {
ID string `json:"id"`
URL string `json:"url"`
}
// GetTokenResponse is the response structure of the corresponding request
type GetTokenResponse struct {
URL string `json:"url"`
Status string `json:"Status"`
Message string `json:"Message"`
}
// SharedFolderResponse is the response structure of the corresponding request
type SharedFolderResponse []SharedFile
// SharedFile is the structure how 1Fichier returns a shared File
type SharedFile struct {
Filename string `json:"filename"`
Link string `json:"link"`
Size int `json:"size"`
}
// EndFileUploadResponse is the response structure of the corresponding request
type EndFileUploadResponse struct {
Incoming int `json:"incoming"`
Links []struct {
Download string `json:"download"`
Filename string `json:"filename"`
Remove string `json:"remove"`
Size string `json:"size"`
Whirlpool string `json:"whirlpool"`
} `json:"links"`
}
// File is the structure how 1Fichier returns a File
type File struct {
ACL int `json:"acl"`
CDN int `json:"cdn"`
Checksum string `json:"checksum"`
ContentType string `json:"content-type"`
Date string `json:"date"`
Filename string `json:"filename"`
Pass int `json:"pass"`
Size int `json:"size"`
URL string `json:"url"`
}
// FilesList is the structure how 1Fichier returns a list of files
type FilesList struct {
Items []File `json:"items"`
Status string `json:"Status"`
}
// Folder is the structure how 1Fichier returns a Folder
type Folder struct {
CreateDate string `json:"create_date"`
ID int `json:"id"`
Name string `json:"name"`
Pass int `json:"pass"`
}
// FoldersList is the structure how 1Fichier returns a list of Folders
type FoldersList struct {
FolderID int `json:"folder_id"`
Name string `json:"name"`
Status string `json:"Status"`
SubFolders []Folder `json:"sub_folders"`
}
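Since these structs mirror 1Fichier's JSON bodies field for field, decoding a response is a single json.Unmarshal. A quick sketch with a made-up payload (the values are illustrative, not captured from the real API):

package main

import (
	"encoding/json"
	"fmt"
)

// File and FilesList mirror (a subset of) the definitions above.
type File struct {
	Filename string `json:"filename"`
	Size     int    `json:"size"`
	URL      string `json:"url"`
}

type FilesList struct {
	Items  []File `json:"items"`
	Status string `json:"Status"`
}

func main() {
	// Hypothetical response body for a listFiles call.
	body := []byte(`{"Status":"OK","items":[{"filename":"a.txt","size":3,"url":"https://1fichier.com/?xxxx"}]}`)
	var list FilesList
	if err := json.Unmarshal(body, &list); err != nil {
		panic(err)
	}
	fmt.Println(list.Status, list.Items[0].Filename) // OK a.txt
}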

backend/ftp/ftp.go

@@ -2,6 +2,8 @@
 package ftp
 
 import (
+	"context"
+	"crypto/tls"
 	"io"
 	"net/textproto"
 	"os"
@@ -10,14 +12,14 @@ import (
 	"time"
 
 	"github.com/jlaffaye/ftp"
-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/config/configmap"
-	"github.com/ncw/rclone/fs/config/configstruct"
-	"github.com/ncw/rclone/fs/config/obscure"
-	"github.com/ncw/rclone/fs/hash"
-	"github.com/ncw/rclone/lib/pacer"
-	"github.com/ncw/rclone/lib/readers"
 	"github.com/pkg/errors"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config/configmap"
+	"github.com/rclone/rclone/fs/config/configstruct"
+	"github.com/rclone/rclone/fs/config/obscure"
+	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/lib/pacer"
+	"github.com/rclone/rclone/lib/readers"
 )
 
 // Register with Fs
@@ -46,11 +48,20 @@ func init() {
 			Help:       "FTP password",
 			IsPassword: true,
 			Required:   true,
+		}, {
+			Name:    "tls",
+			Help:    "Use FTP over TLS (Implicit)",
+			Default: false,
 		}, {
 			Name:     "concurrency",
 			Help:     "Maximum number of FTP simultaneous connections, 0 for unlimited",
 			Default:  0,
 			Advanced: true,
+		}, {
+			Name:     "no_check_certificate",
+			Help:     "Do not verify the TLS certificate of the server",
+			Default:  false,
+			Advanced: true,
 		},
 	},
 })
@@ -58,11 +69,13 @@ func init() {
 
 // Options defines the configuration for this backend
 type Options struct {
-	Host        string `config:"host"`
-	User        string `config:"user"`
-	Pass        string `config:"pass"`
-	Port        string `config:"port"`
-	Concurrency int    `config:"concurrency"`
+	Host              string `config:"host"`
+	User              string `config:"user"`
+	Pass              string `config:"pass"`
+	Port              string `config:"port"`
+	TLS               bool   `config:"tls"`
+	Concurrency       int    `config:"concurrency"`
+	SkipVerifyTLSCert bool   `config:"no_check_certificate"`
 }
 
 // Fs represents a remote FTP server
@@ -120,7 +133,15 @@ func (f *Fs) Features() *fs.Features {
 // Open a new connection to the FTP server.
 func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
 	fs.Debugf(f, "Connecting to FTP server")
-	c, err := ftp.DialTimeout(f.dialAddr, fs.Config.ConnectTimeout)
+	ftpConfig := []ftp.DialOption{ftp.DialWithTimeout(fs.Config.ConnectTimeout)}
+	if f.opt.TLS {
+		tlsConfig := &tls.Config{
+			ServerName:         f.opt.Host,
+			InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
+		}
+		ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
+	}
+	c, err := ftp.Dial(f.dialAddr, ftpConfig...)
 	if err != nil {
 		fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
 		return nil, errors.Wrap(err, "ftpConnection Dial")
@@ -182,6 +203,7 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
 
 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
+	ctx := context.Background()
 	// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
 	// Parse config into Options struct
 	opt := new(Options)
@@ -203,7 +225,11 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
 	}
 	dialAddr := opt.Host + ":" + port
-	u := "ftp://" + path.Join(dialAddr+"/", root)
+	protocol := "ftp://"
+	if opt.TLS {
+		protocol = "ftps://"
+	}
+	u := protocol + path.Join(dialAddr+"/", root)
 	f := &Fs{
 		name: name,
 		root: root,
@@ -230,7 +256,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
 		if f.root == "." {
 			f.root = ""
 		}
-		_, err := f.NewObject(remote)
+		_, err := f.NewObject(ctx, remote)
 		if err != nil {
 			if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
 				// File doesn't exist so return old f
@@ -295,7 +321,7 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
 
 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(remote string) (o fs.Object, err error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
 	// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
 	entry, err := f.findItem(remote)
 	if err != nil {
@@ -339,7 +365,7 @@ func (f *Fs) dirExists(remote string) (exists bool, err error) {
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 	// defer fs.Trace(dir, "curlevel=%d", curlevel)("")
 	c, err := f.getFtpConnection()
 	if err != nil {
@@ -429,7 +455,7 @@ func (f *Fs) Precision() time.Duration {
 // May create the object even if it returns an error - if so
 // will return the object and the error, otherwise will return
 // nil and the error
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	// fs.Debugf(f, "Trying to put file %s", src.Remote())
 	err := f.mkParentDir(src.Remote())
 	if err != nil {
@@ -439,13 +465,13 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
 		fs:     f,
 		remote: src.Remote(),
 	}
-	err = o.Update(in, src, options...)
+	err = o.Update(ctx, in, src, options...)
 	return o, err
 }
 
 // PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	return f.Put(in, src, options...)
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	return f.Put(ctx, in, src, options...)
 }
 
 // getInfo reads the FileInfo for a path
@@ -523,7 +549,7 @@ func (f *Fs) mkParentDir(remote string) error {
 }
 
 // Mkdir creates the directory if it doesn't exist
-func (f *Fs) Mkdir(dir string) (err error) {
+func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
 	// defer fs.Trace(dir, "")("err=%v", &err)
 	root := path.Join(f.root, dir)
 	return f.mkdir(root)
@@ -532,7 +558,7 @@ func (f *Fs) Mkdir(dir string) (err error) {
 // Rmdir removes the directory (container, bucket) if empty
 //
 // Return an error if it doesn't exist or isn't empty
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	c, err := f.getFtpConnection()
 	if err != nil {
 		return errors.Wrap(translateErrorFile(err), "Rmdir")
@@ -543,7 +569,7 @@ func (f *Fs) Rmdir(dir string) error {
 }
 
 // Move renames a remote file object
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't move - not same remote type")
@@ -565,7 +591,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 	if err != nil {
 		return nil, errors.Wrap(err, "Move Rename failed")
 	}
-	dstObj, err := f.NewObject(remote)
+	dstObj, err := f.NewObject(ctx, remote)
 	if err != nil {
 		return nil, errors.Wrap(err, "Move NewObject failed")
 	}
@@ -580,7 +606,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 	srcFs, ok := src.(*Fs)
 	if !ok {
 		fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -643,7 +669,7 @@ func (o *Object) Remote() string {
 }
 
 // Hash returns the hash of an object returning a lowercase hex string
-func (o *Object) Hash(t hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 	return "", hash.ErrUnsupported
 }
 
@@ -653,12 +679,12 @@ func (o *Object) Size() int64 {
 }
 
 // ModTime returns the modification time of the object
-func (o *Object) ModTime() time.Time {
+func (o *Object) ModTime(ctx context.Context) time.Time {
 	return o.info.ModTime
 }
 
 // SetModTime sets the modification time of the object
-func (o *Object) SetModTime(modTime time.Time) error {
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	return nil
 }
@@ -719,7 +745,7 @@ func (f *ftpReadCloser) Close() error {
 }
 
 // Open an object for read
-func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
 	// defer fs.Trace(o, "")("rc=%v, err=%v", &rc, &err)
 	path := path.Join(o.fs.root, o.remote)
 	var offset, limit int64 = 0, -1
@@ -753,7 +779,7 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
 // Copy the reader into the object updating modTime and size
 //
 // The new object may have been created if an error is returned
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
 	// defer fs.Trace(o, "src=%v", src)("err=%v", &err)
 	path := path.Join(o.fs.root, o.remote)
 	// remove the file if upload failed
@@ -763,7 +789,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	// may still be dealing with it for a moment. A sleep isn't ideal but I haven't been
 	// able to think of a better method to find out if the server has finished - ncw
 	time.Sleep(1 * time.Second)
-	removeErr := o.Remove()
+	removeErr := o.Remove(ctx)
 	if removeErr != nil {
 		fs.Debugf(o, "Failed to remove: %v", removeErr)
 	} else {
@@ -789,7 +815,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 }
 
 // Remove an object
-func (o *Object) Remove() (err error) {
+func (o *Object) Remove(ctx context.Context) (err error) {
 	// defer fs.Trace(o, "")("err=%v", &err)
 	path := path.Join(o.fs.root, o.remote)
 	// Check if it's a directory or a file
@@ -798,7 +824,7 @@ func (o *Object) Remove() (err error) {
 		return err
 	}
 	if info.IsDir {
-		err = o.fs.Rmdir(o.remote)
+		err = o.fs.Rmdir(ctx, o.remote)
 	} else {
 		c, err := o.fs.getFtpConnection()
 		if err != nil {
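The dial change above reduces to building a slice of ftp.DialOption values and passing it to ftp.Dial. A minimal sketch of an implicit-TLS connection with github.com/jlaffaye/ftp, using the same options the diff uses — host, port and timeout here are placeholders:

package main

import (
	"crypto/tls"
	"log"
	"time"

	"github.com/jlaffaye/ftp"
)

func main() {
	opts := []ftp.DialOption{ftp.DialWithTimeout(30 * time.Second)}
	// Implicit TLS: the TCP connection is wrapped in TLS before any
	// FTP command is sent, which is what the new "tls" option enables.
	opts = append(opts, ftp.DialWithTLS(&tls.Config{
		ServerName: "ftp.example.com",
		// InsecureSkipVerify: true, // what no_check_certificate maps to
	}))
	c, err := ftp.Dial("ftp.example.com:990", opts...)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = c.Quit() }()
	log.Println("connected")
}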

backend/ftp/ftp_test.go

@@ -4,8 +4,8 @@ package ftp_test
 import (
 	"testing"
 
-	"github.com/ncw/rclone/backend/ftp"
-	"github.com/ncw/rclone/fstest/fstests"
+	"github.com/rclone/rclone/backend/ftp"
+	"github.com/rclone/rclone/fstest/fstests"
 )
 
 // TestIntegration runs integration tests against the remote

backend/googlecloudstorage/googlecloudstorage.go

@@ -23,23 +23,22 @@ import (
"net/http" "net/http"
"os" "os"
"path" "path"
"regexp"
"strings" "strings"
"sync"
"time" "time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"golang.org/x/oauth2" "golang.org/x/oauth2"
"golang.org/x/oauth2/google" "golang.org/x/oauth2/google"
"google.golang.org/api/googleapi" "google.golang.org/api/googleapi"
@@ -61,7 +60,7 @@ const (
var ( var (
// Description of how to auth for this app // Description of how to auth for this app
storageConfig = &oauth2.Config{ storageConfig = &oauth2.Config{
Scopes: []string{storage.DevstorageFullControlScope}, Scopes: []string{storage.DevstorageReadWriteScope},
Endpoint: google.Endpoint, Endpoint: google.Endpoint,
ClientID: rcloneClientID, ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
@@ -264,16 +263,16 @@ type Options struct {
// Fs represents a remote storage server // Fs represents a remote storage server
type Fs struct { type Fs struct {
name string // name of this remote name string // name of this remote
root string // the path we are working on if any root string // the path we are working on if any
opt Options // parsed options opt Options // parsed options
features *fs.Features // optional features features *fs.Features // optional features
svc *storage.Service // the connection to the storage server svc *storage.Service // the connection to the storage server
client *http.Client // authorized client client *http.Client // authorized client
bucket string // the bucket we are working on rootBucket string // bucket part of root (if any)
bucketOKMu sync.Mutex // mutex to protect bucket OK rootDirectory string // directory part of root (if any)
bucketOK bool // true if we have created the bucket cache *bucket.Cache // cache of bucket status
pacer *fs.Pacer // To pace the API calls pacer *fs.Pacer // To pace the API calls
} }
// Object describes a storage object // Object describes a storage object
@@ -298,18 +297,18 @@ func (f *Fs) Name() string {
// Root of the remote (as passed into NewFs) // Root of the remote (as passed into NewFs)
func (f *Fs) Root() string { func (f *Fs) Root() string {
if f.root == "" { return f.root
return f.bucket
}
return f.bucket + "/" + f.root
} }
// String converts this Fs to a string // String converts this Fs to a string
func (f *Fs) String() string { func (f *Fs) String() string {
if f.root == "" { if f.rootBucket == "" {
return fmt.Sprintf("Storage bucket %s", f.bucket) return fmt.Sprintf("GCS root")
} }
return fmt.Sprintf("Storage bucket %s path %s", f.bucket, f.root) if f.rootDirectory == "" {
return fmt.Sprintf("GCS bucket %s", f.rootBucket)
}
return fmt.Sprintf("GCS bucket %s path %s", f.rootBucket, f.rootDirectory)
} }
// Features returns the optional features of this Fs // Features returns the optional features of this Fs
@@ -341,21 +340,23 @@ func shouldRetry(err error) (again bool, errOut error) {
return again, err return again, err
} }
// Pattern to match a storage path // parsePath parses a remote 'url'
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`) func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
// parseParse parses a storage 'url'
func parsePath(path string) (bucket, directory string, err error) {
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = errors.Errorf("couldn't find bucket in storage path %q", path)
} else {
bucket, directory = parts[1], parts[2]
directory = strings.Trim(directory, "/")
}
return return
} }
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
return bucket.Split(path.Join(f.root, rootRelativePath))
}
// split returns bucket and bucketPath from the object
func (o *Object) split() (bucket, bucketPath string) {
return o.fs.split(o.remote)
}
func getServiceAccountClient(credentialsData []byte) (*http.Client, error) { func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...) conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
if err != nil { if err != nil {
@@ -365,8 +366,15 @@ func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
} }
// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
f.root = parsePath(root)
f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}
// NewFs constructs an Fs from the path, bucket:path // NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
var oAuthClient *http.Client var oAuthClient *http.Client
// Parse config into Options struct // Parse config into Options struct
@@ -406,22 +414,19 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
} }
} }
bucket, directory, err := parsePath(root)
if err != nil {
return nil, err
}
f := &Fs{ f := &Fs{
name: name, name: name,
bucket: bucket, root: root,
root: directory, opt: *opt,
opt: *opt, pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))), cache: bucket.NewCache(),
} }
f.setRoot(root)
f.features = (&fs.Features{ f.features = (&fs.Features{
ReadMimeType: true, ReadMimeType: true,
WriteMimeType: true, WriteMimeType: true,
BucketBased: true, BucketBased: true,
BucketBasedRootOK: true,
}).Fill(f) }).Fill(f)
// Create a new authorized Drive client. // Create a new authorized Drive client.
@@ -431,20 +436,18 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client") return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
} }
if f.root != "" { if f.rootBucket != "" && f.rootDirectory != "" {
f.root += "/"
// Check to see if the object exists // Check to see if the object exists
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.Get(bucket, directory).Do() _, err = f.svc.Objects.Get(f.rootBucket, f.rootDirectory).Context(ctx).Do()
return shouldRetry(err) return shouldRetry(err)
}) })
if err == nil { if err == nil {
f.root = path.Dir(directory) newRoot := path.Dir(f.root)
if f.root == "." { if newRoot == "." {
f.root = "" newRoot = ""
} else {
f.root += "/"
} }
f.setRoot(newRoot)
// return an error with an fs which points to the parent // return an error with an fs which points to the parent
return f, fs.ErrorIsFile return f, fs.ErrorIsFile
} }
@@ -455,7 +458,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Return an Object from a path // Return an Object from a path
// //
// If it can't be found it returns the error fs.ErrorObjectNotFound. // If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *storage.Object) (fs.Object, error) { func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage.Object) (fs.Object, error) {
o := &Object{ o := &Object{
fs: f, fs: f,
remote: remote, remote: remote,
@@ -463,7 +466,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *storage.Object) (fs.Object,
if info != nil { if info != nil {
o.setMetaData(info) o.setMetaData(info)
} else { } else {
err := o.readMetaData() // reads info and meta, returning an error err := o.readMetaData(ctx) // reads info and meta, returning an error
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -473,8 +476,8 @@ func (f *Fs) newObjectWithInfo(remote string, info *storage.Object) (fs.Object,
// NewObject finds the Object at remote. If it can't be found // NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound. // it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) { func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil) return f.newObjectWithInfo(ctx, remote, nil)
} }
// listFn is called from list to handle an object. // listFn is called from list to handle an object.
@@ -485,20 +488,24 @@ type listFn func(remote string, object *storage.Object, isDirectory bool) error
// dir is the starting directory, "" for root // dir is the starting directory, "" for root
// //
// Set recurse to read sub directories // Set recurse to read sub directories
func (f *Fs) list(dir string, recurse bool, fn listFn) (err error) { //
root := f.root // The remote has prefix removed from it and if addBucket is set
rootLength := len(root) // then it adds the bucket to the start.
if dir != "" { func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) (err error) {
root += dir + "/" if prefix != "" {
prefix += "/"
} }
list := f.svc.Objects.List(f.bucket).Prefix(root).MaxResults(listChunks) if directory != "" {
directory += "/"
}
list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks)
if !recurse { if !recurse {
list = list.Delimiter("/") list = list.Delimiter("/")
} }
for { for {
var objects *storage.Objects var objects *storage.Objects
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
objects, err = list.Do() objects, err = list.Context(ctx).Do()
return shouldRetry(err) return shouldRetry(err)
}) })
if err != nil { if err != nil {
@@ -511,31 +518,36 @@ func (f *Fs) list(dir string, recurse bool, fn listFn) (err error) {
} }
if !recurse { if !recurse {
var object storage.Object var object storage.Object
for _, prefix := range objects.Prefixes { for _, remote := range objects.Prefixes {
if !strings.HasSuffix(prefix, "/") { if !strings.HasSuffix(remote, "/") {
continue continue
} }
err = fn(prefix[rootLength:len(prefix)-1], &object, true) if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote)
continue
}
remote = remote[len(prefix) : len(remote)-1]
if addBucket {
remote = path.Join(bucket, remote)
}
err = fn(remote, &object, true)
if err != nil { if err != nil {
return err return err
} }
} }
} }
for _, object := range objects.Items { for _, object := range objects.Items {
if !strings.HasPrefix(object.Name, root) { if !strings.HasPrefix(object.Name, prefix) {
fs.Logf(f, "Odd name received %q", object.Name) fs.Logf(f, "Odd name received %q", object.Name)
continue continue
} }
remote := object.Name[rootLength:] remote := object.Name[len(prefix):]
isDirectory := strings.HasSuffix(remote, "/")
if addBucket {
remote = path.Join(bucket, remote)
}
// is this a directory marker? // is this a directory marker?
if (strings.HasSuffix(remote, "/") || remote == "") && object.Size == 0 { if isDirectory && object.Size == 0 {
if recurse && remote != "" {
// add a directory in if --fast-list since will have no prefixes
err = fn(remote[:len(remote)-1], object, true)
if err != nil {
return err
}
}
continue // skip directory marker continue // skip directory marker
} }
err = fn(remote, object, false) err = fn(remote, object, false)
@@ -552,32 +564,23 @@ func (f *Fs) list(dir string, recurse bool, fn listFn) (err error) {
} }
// Convert a list item into a DirEntry // Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) { func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) {
if isDirectory { if isDirectory {
d := fs.NewDir(remote, time.Time{}).SetSize(int64(object.Size)) d := fs.NewDir(remote, time.Time{}).SetSize(int64(object.Size))
return d, nil return d, nil
} }
o, err := f.newObjectWithInfo(remote, object) o, err := f.newObjectWithInfo(ctx, remote, object)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return o, nil return o, nil
} }
// mark the bucket as being OK
func (f *Fs) markBucketOK() {
if f.bucket != "" {
f.bucketOKMu.Lock()
f.bucketOK = true
f.bucketOKMu.Unlock()
}
}
// listDir lists a single directory // listDir lists a single directory
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) { func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
// List the objects // List the objects
err = f.list(dir, false, func(remote string, object *storage.Object, isDirectory bool) error { err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory) entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
if err != nil { if err != nil {
return err return err
} }
@@ -590,15 +593,12 @@ func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
return nil, err return nil, err
} }
// bucket must be present if listing succeeded // bucket must be present if listing succeeded
f.markBucketOK() f.cache.MarkOK(bucket)
return entries, err return entries, err
} }
// listBuckets lists the buckets // listBuckets lists the buckets
func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) { func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
if dir != "" {
return nil, fs.ErrorListBucketRequired
}
if f.opt.ProjectNumber == "" { if f.opt.ProjectNumber == "" {
return nil, errors.New("can't list buckets without project number") return nil, errors.New("can't list buckets without project number")
} }
@@ -606,7 +606,7 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
for { for {
var buckets *storage.Buckets var buckets *storage.Buckets
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
buckets, err = listBuckets.Do() buckets, err = listBuckets.Context(ctx).Do()
return shouldRetry(err) return shouldRetry(err)
}) })
if err != nil { if err != nil {
@@ -633,11 +633,15 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
// //
// This should return ErrDirNotFound if the directory isn't // This should return ErrDirNotFound if the directory isn't
// found. // found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.bucket == "" { bucket, directory := f.split(dir)
return f.listBuckets(dir) if bucket == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
} }
return f.listDir(dir) return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
} }
// ListR lists the objects and directories of the Fs starting // ListR lists the objects and directories of the Fs starting
@@ -656,23 +660,44 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
// //
// Don't implement this unless you have a more efficient way // Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal. // of listing recursively that doing a directory traversal.
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) { func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
if f.bucket == "" { bucket, directory := f.split(dir)
return fs.ErrorListBucketRequired
}
list := walk.NewListRHelper(callback) list := walk.NewListRHelper(callback)
err = f.list(dir, true, func(remote string, object *storage.Object, isDirectory bool) error { listR := func(bucket, directory, prefix string, addBucket bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory) return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
if err != nil {
return err
}
return list.Add(entry)
})
}
if bucket == "" {
entries, err := f.listBuckets(ctx)
if err != nil { if err != nil {
return err return err
} }
return list.Add(entry) for _, entry := range entries {
}) err = list.Add(entry)
if err != nil { if err != nil {
return err return err
}
bucket := entry.Remote()
err = listR(bucket, "", f.rootDirectory, true)
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
}
} else {
err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
} }
// bucket must be present if listing succeeded
f.markBucketOK()
return list.Flush() return list.Flush()
} }
@@ -681,94 +706,88 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
// Copy the reader in to the new object which is returned // Copy the reader in to the new object which is returned
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction // Temporary Object under construction
o := &Object{ o := &Object{
fs: f, fs: f,
remote: src.Remote(), remote: src.Remote(),
} }
return o, o.Update(in, src, options...) return o, o.Update(ctx, in, src, options...)
} }
// PutStream uploads to the remote path with the modTime given of indeterminate size // PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...) return f.Put(ctx, in, src, options...)
} }
// Mkdir creates the bucket if it doesn't exist // Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(dir string) (err error) { func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
f.bucketOKMu.Lock() bucket, _ := f.split(dir)
defer f.bucketOKMu.Unlock() return f.makeBucket(ctx, bucket)
if f.bucketOK { }
return nil
}
// List something from the bucket to see if it exists. Doing it like this enables the use of a
// service account that only has the "Storage Object Admin" role. See #2193 for details.
err = f.pacer.Call(func() (bool, error) { // makeBucket creates the bucket if it doesn't exist
_, err = f.svc.Objects.List(f.bucket).MaxResults(1).Do() func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
return shouldRetry(err) return f.cache.Create(bucket, func() error {
}) // List something from the bucket to see if it exists. Doing it like this enables the use of a
if err == nil { // service account that only has the "Storage Object Admin" role. See #2193 for details.
// Bucket already exists err = f.pacer.Call(func() (bool, error) {
f.bucketOK = true _, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
return nil return shouldRetry(err)
} else if gErr, ok := err.(*googleapi.Error); ok { })
if gErr.Code != http.StatusNotFound { if err == nil {
// Bucket already exists
return nil
} else if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code != http.StatusNotFound {
return errors.Wrap(err, "failed to get bucket")
}
} else {
return errors.Wrap(err, "failed to get bucket") return errors.Wrap(err, "failed to get bucket")
} }
} else {
return errors.Wrap(err, "failed to get bucket")
}
if f.opt.ProjectNumber == "" { if f.opt.ProjectNumber == "" {
return errors.New("can't make bucket without project number") return errors.New("can't make bucket without project number")
} }
bucket := storage.Bucket{ bucket := storage.Bucket{
Name: f.bucket, Name: bucket,
Location: f.opt.Location, Location: f.opt.Location,
StorageClass: f.opt.StorageClass, StorageClass: f.opt.StorageClass,
}
if f.opt.BucketPolicyOnly {
bucket.IamConfiguration = &storage.BucketIamConfiguration{
BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
Enabled: true,
},
} }
} if f.opt.BucketPolicyOnly {
err = f.pacer.Call(func() (bool, error) { bucket.IamConfiguration = &storage.BucketIamConfiguration{
insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket) BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
if !f.opt.BucketPolicyOnly { Enabled: true,
insertBucket.PredefinedAcl(f.opt.BucketACL) },
}
} }
_, err = insertBucket.Do() return f.pacer.Call(func() (bool, error) {
return shouldRetry(err) insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
}) if !f.opt.BucketPolicyOnly {
if err == nil { insertBucket.PredefinedAcl(f.opt.BucketACL)
f.bucketOK = true }
} _, err = insertBucket.Context(ctx).Do()
return err return shouldRetry(err)
})
}, nil)
} }
// Rmdir deletes the bucket if the fs is at the root // Rmdir deletes the bucket if the fs is at the root
// //
// Returns an error if it isn't empty: Error 409: The bucket you tried // Returns an error if it isn't empty: Error 409: The bucket you tried
// to delete was not empty. // to delete was not empty.
func (f *Fs) Rmdir(dir string) (err error) { func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
f.bucketOKMu.Lock() bucket, directory := f.split(dir)
defer f.bucketOKMu.Unlock() if bucket == "" || directory != "" {
if f.root != "" || dir != "" {
return nil return nil
} }
err = f.pacer.Call(func() (bool, error) { return f.cache.Remove(bucket, func() error {
err = f.svc.Buckets.Delete(f.bucket).Do() return f.pacer.Call(func() (bool, error) {
return shouldRetry(err) err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
return shouldRetry(err)
})
}) })
if err == nil {
f.bucketOK = false
}
return err
} }
// Precision returns the precision // Precision returns the precision
@@ -785,8 +804,9 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
// If it isn't possible then return fs.ErrorCantCopy // If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
err := f.Mkdir("") dstBucket, dstPath := f.split(remote)
err := f.makeBucket(ctx, dstBucket)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -795,6 +815,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
fs.Debugf(src, "Can't copy - not same remote type") fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy return nil, fs.ErrorCantCopy
} }
srcBucket, srcPath := srcObj.split()
// Temporary Object under construction // Temporary Object under construction
dstObj := &Object{ dstObj := &Object{
@@ -802,13 +823,13 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
remote: remote, remote: remote,
} }
	srcBucket, srcPath := srcObj.split()
	dstBucket, dstPath := f.split(remote)
	var newObject *storage.Object
	err = f.pacer.Call(func() (bool, error) {
		copyObject := f.svc.Objects.Copy(srcBucket, srcPath, dstBucket, dstPath, nil)
		if !f.opt.BucketPolicyOnly {
			copyObject.DestinationPredefinedAcl(f.opt.ObjectACL)
		}
		newObject, err = copyObject.Context(ctx).Do()
		return shouldRetry(err)
	})
	if err != nil {

@@ -845,7 +866,7 @@ func (o *Object) Remote() string {
}

// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}

@@ -891,24 +912,33 @@ func (o *Object) setMetaData(info *storage.Object) {
	}
}

// readObjectInfo reads the definition for an object
func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) {
	bucket, bucketPath := o.split()
	err = o.fs.pacer.Call(func() (bool, error) {
		object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
		return shouldRetry(err)
	})
	if err != nil {
		if gErr, ok := err.(*googleapi.Error); ok {
			if gErr.Code == http.StatusNotFound {
				return nil, fs.ErrorObjectNotFound
			}
		}
		return nil, err
	}
	return object, nil
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
	if !o.modTime.IsZero() {
		return nil
	}
	object, err := o.readObjectInfo(ctx)
	if err != nil {
		return err
	}
	o.setMetaData(object)

@@ -919,8 +949,8 @@ func (o *Object) readMetaData() (err error) {
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
	err := o.readMetaData(ctx)
	if err != nil {
		// fs.Logf(o, "Failed to read metadata: %v", err)
		return time.Now()

@@ -936,16 +966,28 @@ func metadataFromModTime(modTime time.Time) map[string]string {
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
	// read the complete existing object first
	object, err := o.readObjectInfo(ctx)
	if err != nil {
		return err
	}
	// Add the mtime to the existing metadata
	mtime := modTime.Format(timeFormatOut)
	if object.Metadata == nil {
		object.Metadata = make(map[string]string, 1)
	}
	object.Metadata[metaMtime] = mtime
	// Copy the object to itself to update the metadata
	// Using PATCH requires too many permissions
	bucket, bucketPath := o.split()
	var newObject *storage.Object
	err = o.fs.pacer.Call(func() (bool, error) {
		copyObject := o.fs.svc.Objects.Copy(bucket, bucketPath, bucket, bucketPath, object)
		if !o.fs.opt.BucketPolicyOnly {
			copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
		}
		newObject, err = copyObject.Context(ctx).Do()
		return shouldRetry(err)
	})
	if err != nil {

@@ -961,11 +1003,13 @@ func (o *Object) Storable() bool {
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	req, err := http.NewRequest("GET", o.url, nil)
	if err != nil {
		return nil, err
	}
	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
	fs.FixRangeOption(options, o.bytes)
	fs.OpenOptionAddHTTPHeaders(req.Header, options)
	var res *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {

@@ -992,27 +1036,27 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	bucket, bucketPath := o.split()
	err := o.fs.makeBucket(ctx, bucket)
	if err != nil {
		return err
	}
	modTime := src.ModTime(ctx)
	object := storage.Object{
		Bucket:      bucket,
		Name:        bucketPath,
		ContentType: fs.MimeType(ctx, src),
		Metadata:    metadataFromModTime(modTime),
	}
	var newObject *storage.Object
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		insertObject := o.fs.svc.Objects.Insert(bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
		if !o.fs.opt.BucketPolicyOnly {
			insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
		}
		newObject, err = insertObject.Context(ctx).Do()
		return shouldRetry(err)
	})
	if err != nil {

@@ -1024,16 +1068,17 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}

// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
	bucket, bucketPath := o.split()
	err = o.fs.pacer.Call(func() (bool, error) {
		err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
		return shouldRetry(err)
	})
	return err
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
	return o.mimeType
}
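
For context: SetModTime above now reads the whole object and copies it onto itself rather than calling Objects.Patch, since Patch needs permissions rclone may not have. A minimal standalone sketch of that pattern against google.golang.org/api/storage/v1 (svc, bucket, name and the RFC3339 mtime format are assumptions of the sketch, not taken verbatim from the backend):

func setMtime(ctx context.Context, svc *storage.Service, bucket, name string, modTime time.Time) error {
	// Read the complete existing object first so its other metadata survives
	object, err := svc.Objects.Get(bucket, name).Context(ctx).Do()
	if err != nil {
		return err
	}
	if object.Metadata == nil {
		object.Metadata = make(map[string]string, 1)
	}
	object.Metadata["mtime"] = modTime.Format(time.RFC3339Nano)
	// Copying the object to itself rewrites the metadata in a single request
	_, err = svc.Objects.Copy(bucket, name, bucket, name, object).Context(ctx).Do()
	return err
}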


@@ -5,8 +5,8 @@ package googlecloudstorage_test
import (
	"testing"

	"github.com/rclone/rclone/backend/googlecloudstorage"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote


@@ -0,0 +1,148 @@
// This file contains the albums abstraction
package googlephotos
import (
"path"
"strings"
"sync"
"github.com/rclone/rclone/backend/googlephotos/api"
)
// All the albums
type albums struct {
mu sync.Mutex
dupes map[string][]*api.Album // duplicated names
byID map[string]*api.Album //..indexed by ID
byTitle map[string]*api.Album //..indexed by Title
path map[string][]string // partial album names to directory
}
// Create a new album
func newAlbums() *albums {
return &albums{
dupes: map[string][]*api.Album{},
byID: map[string]*api.Album{},
byTitle: map[string]*api.Album{},
path: map[string][]string{},
}
}
// add an album
func (as *albums) add(album *api.Album) {
// Munge the name of the album into a sensible path name
album.Title = path.Clean(album.Title)
if album.Title == "." || album.Title == "/" {
album.Title = addID("", album.ID)
}
as.mu.Lock()
as._add(album)
as.mu.Unlock()
}
// _add an album - call with lock held
func (as *albums) _add(album *api.Album) {
// update dupes by title
dupes := as.dupes[album.Title]
dupes = append(dupes, album)
as.dupes[album.Title] = dupes
// Dedupe the album name if necessary
if len(dupes) >= 2 {
// If this is the first dupe, then need to adjust the first one
if len(dupes) == 2 {
firstAlbum := dupes[0]
as._del(firstAlbum)
as._add(firstAlbum)
// undo add of firstAlbum to dupes
as.dupes[album.Title] = dupes
}
album.Title = addID(album.Title, album.ID)
}
// Store the new album
as.byID[album.ID] = album
as.byTitle[album.Title] = album
// Store the partial paths
dir, leaf := album.Title, ""
for dir != "" {
i := strings.LastIndex(dir, "/")
if i >= 0 {
dir, leaf = dir[:i], dir[i+1:]
} else {
dir, leaf = "", dir
}
dirs := as.path[dir]
found := false
for _, dir := range dirs {
if dir == leaf {
found = true
}
}
if !found {
as.path[dir] = append(as.path[dir], leaf)
}
}
}
// del an album
func (as *albums) del(album *api.Album) {
as.mu.Lock()
as._del(album)
as.mu.Unlock()
}
// _del an album - call with lock held
func (as *albums) _del(album *api.Album) {
// We leave the album in dupes so that deleting it doesn't cause other albums to get renamed
// Remove from byID and byTitle
delete(as.byID, album.ID)
delete(as.byTitle, album.Title)
// Remove from paths
dir, leaf := album.Title, ""
for dir != "" {
// Can't delete if this dir exists anywhere in the path structure
if _, found := as.path[dir]; found {
break
}
i := strings.LastIndex(dir, "/")
if i >= 0 {
dir, leaf = dir[:i], dir[i+1:]
} else {
dir, leaf = "", dir
}
dirs := as.path[dir]
for i, dir := range dirs {
if dir == leaf {
dirs = append(dirs[:i], dirs[i+1:]...)
break
}
}
if len(dirs) == 0 {
delete(as.path, dir)
} else {
as.path[dir] = dirs
}
}
}
// get an album by title
func (as *albums) get(title string) (album *api.Album, ok bool) {
as.mu.Lock()
defer as.mu.Unlock()
album, ok = as.byTitle[title]
return album, ok
}
// getDirs gets directories below an album path
func (as *albums) getDirs(albumPath string) (dirs []string, ok bool) {
as.mu.Lock()
defer as.mu.Unlock()
dirs, ok = as.path[albumPath]
return dirs, ok
}
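
A quick sketch of how this index behaves (in-package code, illustrative values):

	as := newAlbums()
	as.add(&api.Album{ID: "1", Title: "holiday/rome"})
	as.add(&api.Album{ID: "2", Title: "holiday"})
	album, ok := as.get("holiday/rome") // ok == true, album.ID == "1"
	dirs, ok := as.getDirs("holiday")   // ok == true, dirs == []string{"rome"}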


@@ -0,0 +1,311 @@
package googlephotos
import (
"testing"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/stretchr/testify/assert"
)
func TestNewAlbums(t *testing.T) {
albums := newAlbums()
assert.NotNil(t, albums.dupes)
assert.NotNil(t, albums.byID)
assert.NotNil(t, albums.byTitle)
assert.NotNil(t, albums.path)
}
func TestAlbumsAdd(t *testing.T) {
albums := newAlbums()
assert.Equal(t, map[string][]*api.Album{}, albums.dupes)
assert.Equal(t, map[string]*api.Album{}, albums.byID)
assert.Equal(t, map[string]*api.Album{}, albums.byTitle)
assert.Equal(t, map[string][]string{}, albums.path)
a1 := &api.Album{
Title: "one",
ID: "1",
}
albums.add(a1)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one"},
}, albums.path)
a2 := &api.Album{
Title: "two",
ID: "2",
}
albums.add(a2)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
"2": a2,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
"two": a2,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two"},
}, albums.path)
// Add a duplicate
a2a := &api.Album{
Title: "two",
ID: "2a",
}
albums.add(a2a)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
"2": a2,
"2a": a2a,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}"},
}, albums.path)
// Add a sub directory
a1sub := &api.Album{
Title: "one/sub",
ID: "1sub",
}
albums.add(a1sub)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
"2": a2,
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
"one/sub": a1sub,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}"},
"one": []string{"sub"},
}, albums.path)
// Add a weird path
a0 := &api.Album{
Title: "/../././..////.",
ID: "0",
}
albums.add(a0)
assert.Equal(t, map[string][]*api.Album{
"{0}": []*api.Album{a0},
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"0": a0,
"1": a1,
"2": a2,
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"{0}": a0,
"one": a1,
"one/sub": a1sub,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}", "{0}"},
"one": []string{"sub"},
}, albums.path)
}
func TestAlbumsDel(t *testing.T) {
albums := newAlbums()
a1 := &api.Album{
Title: "one",
ID: "1",
}
albums.add(a1)
a2 := &api.Album{
Title: "two",
ID: "2",
}
albums.add(a2)
// Add a duplicate
a2a := &api.Album{
Title: "two",
ID: "2a",
}
albums.add(a2a)
// Add a sub directory
a1sub := &api.Album{
Title: "one/sub",
ID: "1sub",
}
albums.add(a1sub)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
"2": a2,
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
"one/sub": a1sub,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}"},
"one": []string{"sub"},
}, albums.path)
albums.del(a1)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"2": a2,
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one/sub": a1sub,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}"},
"one": []string{"sub"},
}, albums.path)
albums.del(a2)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one/sub": a1sub,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2a}"},
"one": []string{"sub"},
}, albums.path)
albums.del(a2a)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one/sub": a1sub,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one"},
"one": []string{"sub"},
}, albums.path)
albums.del(a1sub)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{}, albums.byID)
assert.Equal(t, map[string]*api.Album{}, albums.byTitle)
assert.Equal(t, map[string][]string{}, albums.path)
}
func TestAlbumsGet(t *testing.T) {
albums := newAlbums()
a1 := &api.Album{
Title: "one",
ID: "1",
}
albums.add(a1)
album, ok := albums.get("one")
assert.Equal(t, true, ok)
assert.Equal(t, a1, album)
album, ok = albums.get("notfound")
assert.Equal(t, false, ok)
assert.Nil(t, album)
}
func TestAlbumsGetDirs(t *testing.T) {
albums := newAlbums()
a1 := &api.Album{
Title: "one",
ID: "1",
}
albums.add(a1)
dirs, ok := albums.getDirs("")
assert.Equal(t, true, ok)
assert.Equal(t, []string{"one"}, dirs)
dirs, ok = albums.getDirs("notfound")
assert.Equal(t, false, ok)
assert.Nil(t, dirs)
}


@@ -0,0 +1,190 @@
package api
import (
"fmt"
"time"
)
// ErrorDetails holds the internals of the Error type
type ErrorDetails struct {
Code int `json:"code"`
Message string `json:"message"`
Status string `json:"status"`
}
// Error is returned on errors
type Error struct {
Details ErrorDetails `json:"error"`
}
// Error satisfies the error interface
func (e *Error) Error() string {
return fmt.Sprintf("%s (%d %s)", e.Details.Message, e.Details.Code, e.Details.Status)
}
// Album of photos
type Album struct {
ID string `json:"id,omitempty"`
Title string `json:"title"`
ProductURL string `json:"productUrl,omitempty"`
MediaItemsCount string `json:"mediaItemsCount,omitempty"`
CoverPhotoBaseURL string `json:"coverPhotoBaseUrl,omitempty"`
CoverPhotoMediaItemID string `json:"coverPhotoMediaItemId,omitempty"`
IsWriteable bool `json:"isWriteable,omitempty"`
}
// ListAlbums is returned from albums.list and sharedAlbums.list
type ListAlbums struct {
Albums []Album `json:"albums"`
SharedAlbums []Album `json:"sharedAlbums"`
NextPageToken string `json:"nextPageToken"`
}
// CreateAlbum creates an Album
type CreateAlbum struct {
Album *Album `json:"album"`
}
// MediaItem is a photo or video
type MediaItem struct {
ID string `json:"id"`
ProductURL string `json:"productUrl"`
BaseURL string `json:"baseUrl"`
MimeType string `json:"mimeType"`
MediaMetadata struct {
CreationTime time.Time `json:"creationTime"`
Width string `json:"width"`
Height string `json:"height"`
Photo struct {
} `json:"photo"`
} `json:"mediaMetadata"`
Filename string `json:"filename"`
}
// MediaItems is returned from mediaitems.list, mediaitems.search
type MediaItems struct {
MediaItems []MediaItem `json:"mediaItems"`
NextPageToken string `json:"nextPageToken"`
}
// Content categories
// NONE Default content category. This category is ignored when any other category is used in the filter.
// LANDSCAPES Media items containing landscapes.
// RECEIPTS Media items containing receipts.
// CITYSCAPES Media items containing cityscapes.
// LANDMARKS Media items containing landmarks.
// SELFIES Media items that are selfies.
// PEOPLE Media items containing people.
// PETS Media items containing pets.
// WEDDINGS Media items from weddings.
// BIRTHDAYS Media items from birthdays.
// DOCUMENTS Media items containing documents.
// TRAVEL Media items taken during travel.
// ANIMALS Media items containing animals.
// FOOD Media items containing food.
// SPORT Media items from sporting events.
// NIGHT Media items taken at night.
// PERFORMANCES Media items from performances.
// WHITEBOARDS Media items containing whiteboards.
// SCREENSHOTS Media items that are screenshots.
// UTILITY Media items that are considered to be utility. These include, but aren't limited to documents, screenshots, whiteboards etc.
// ARTS Media items containing art.
// CRAFTS Media items containing crafts.
// FASHION Media items related to fashion.
// HOUSES Media items containing houses.
// GARDENS Media items containing gardens.
// FLOWERS Media items containing flowers.
// HOLIDAYS Media items taken of holidays.
// MediaTypes
// ALL_MEDIA Treated as if no filters are applied. All media types are included.
// VIDEO All media items that are considered videos. This also includes movies the user has created using the Google Photos app.
// PHOTO All media items that are considered photos. This includes .bmp, .gif, .ico, .jpg (and other spellings), .tiff, .webp and special photo types such as iOS live photos, Android motion photos, panoramas, photospheres.
// Features
// NONE Treated as if no filters are applied. All features are included.
// FAVORITES Media items that the user has marked as favorites in the Google Photos app.
// Date is used as part of SearchFilter
type Date struct {
Year int `json:"year,omitempty"`
Month int `json:"month,omitempty"`
Day int `json:"day,omitempty"`
}
// DateFilter is used to add date ranges to media item queries
type DateFilter struct {
Dates []Date `json:"dates,omitempty"`
Ranges []struct {
StartDate Date `json:"startDate,omitempty"`
EndDate Date `json:"endDate,omitempty"`
} `json:"ranges,omitempty"`
}
// ContentFilter is used to add content categories to media item queries
type ContentFilter struct {
IncludedContentCategories []string `json:"includedContentCategories,omitempty"`
ExcludedContentCategories []string `json:"excludedContentCategories,omitempty"`
}
// MediaTypeFilter is used to add media types to media item queries
type MediaTypeFilter struct {
MediaTypes []string `json:"mediaTypes,omitempty"`
}
// FeatureFilter is used to add features to media item queries
type FeatureFilter struct {
IncludedFeatures []string `json:"includedFeatures,omitempty"`
}
// Filters combines all the filter types for media item queries
type Filters struct {
DateFilter *DateFilter `json:"dateFilter,omitempty"`
ContentFilter *ContentFilter `json:"contentFilter,omitempty"`
MediaTypeFilter *MediaTypeFilter `json:"mediaTypeFilter,omitempty"`
FeatureFilter *FeatureFilter `json:"featureFilter,omitempty"`
IncludeArchivedMedia *bool `json:"includeArchivedMedia,omitempty"`
ExcludeNonAppCreatedData *bool `json:"excludeNonAppCreatedData,omitempty"`
}
// SearchFilter is used with mediaItems.search
type SearchFilter struct {
AlbumID string `json:"albumId,omitempty"`
PageSize int `json:"pageSize"`
PageToken string `json:"pageToken,omitempty"`
Filters *Filters `json:"filters,omitempty"`
}
// SimpleMediaItem is part of NewMediaItem
type SimpleMediaItem struct {
UploadToken string `json:"uploadToken"`
}
// NewMediaItem is a single media item for upload
type NewMediaItem struct {
Description string `json:"description"`
SimpleMediaItem SimpleMediaItem `json:"simpleMediaItem"`
}
// BatchCreateRequest creates media items from upload tokens
type BatchCreateRequest struct {
AlbumID string `json:"albumId,omitempty"`
NewMediaItems []NewMediaItem `json:"newMediaItems"`
}
// BatchCreateResponse is returned from BatchCreateRequest
type BatchCreateResponse struct {
NewMediaItemResults []struct {
UploadToken string `json:"uploadToken"`
Status struct {
Message string `json:"message"`
Code int `json:"code"`
} `json:"status"`
MediaItem MediaItem `json:"mediaItem"`
} `json:"newMediaItemResults"`
}
// BatchRemoveItems is for removing items from an album
type BatchRemoveItems struct {
MediaItemIds []string `json:"mediaItemIds"`
}
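
Since these structs define the JSON wire format, here is a sketch of what a mediaItems.search body built from them marshals to (in-package code using encoding/json; values are illustrative):

	filter := SearchFilter{
		PageSize: 100,
		Filters: &Filters{
			DateFilter:      &DateFilter{Dates: []Date{{Year: 2019, Month: 9}}},
			MediaTypeFilter: &MediaTypeFilter{MediaTypes: []string{"PHOTO"}},
		},
	}
	body, _ := json.Marshal(&filter)
	// string(body) == `{"pageSize":100,"filters":{"dateFilter":{"dates":[{"year":2019,"month":9}]},"mediaTypeFilter":{"mediaTypes":["PHOTO"]}}}`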

File diff suppressed because it is too large


@@ -0,0 +1,307 @@
package googlephotos
import (
"context"
"fmt"
"io/ioutil"
"net/http"
"path"
"testing"
"time"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
// We have two different files here as Google Photos will
// deduplicate them otherwise, which confuses the tests as the
// filename is then unexpected.
fileNameAlbum = "rclone-test-image1.jpg"
fileNameUpload = "rclone-test-image2.jpg"
)
// Wrapper to override the remote for an object
type overrideRemoteObject struct {
fs.Object
remote string
}
// Remote returns the overridden remote name
func (o *overrideRemoteObject) Remote() string {
return o.remote
}
func TestIntegration(t *testing.T) {
ctx := context.Background()
fstest.Initialise()
// Create Fs
if *fstest.RemoteName == "" {
*fstest.RemoteName = "TestGooglePhotos:"
}
f, err := fs.NewFs(*fstest.RemoteName)
if err == fs.ErrorNotFoundInConfigFile {
t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err))
}
require.NoError(t, err)
// Create local Fs pointing at testfiles
localFs, err := fs.NewFs("testfiles")
require.NoError(t, err)
t.Run("CreateAlbum", func(t *testing.T) {
albumName := "album/rclone-test-" + random.String(24)
err = f.Mkdir(ctx, albumName)
require.NoError(t, err)
remote := albumName + "/" + fileNameAlbum
t.Run("PutFile", func(t *testing.T) {
srcObj, err := localFs.NewObject(ctx, fileNameAlbum)
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote})
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()
remoteWithID := addFileID(remote, dstObj.(*Object).id)
t.Run("ObjectFs", func(t *testing.T) {
assert.Equal(t, f, dstObj.Fs())
})
t.Run("ObjectString", func(t *testing.T) {
assert.Equal(t, remote, dstObj.String())
assert.Equal(t, "<nil>", (*Object)(nil).String())
})
t.Run("ObjectHash", func(t *testing.T) {
h, err := dstObj.Hash(ctx, hash.MD5)
assert.Equal(t, "", h)
assert.Equal(t, hash.ErrUnsupported, err)
})
t.Run("ObjectSize", func(t *testing.T) {
assert.Equal(t, int64(-1), dstObj.Size())
f.(*Fs).opt.ReadSize = true
defer func() {
f.(*Fs).opt.ReadSize = false
}()
size := dstObj.Size()
assert.True(t, size > 1000, fmt.Sprintf("Size too small %d", size))
})
t.Run("ObjectSetModTime", func(t *testing.T) {
err := dstObj.SetModTime(ctx, time.Now())
assert.Equal(t, fs.ErrorCantSetModTime, err)
})
t.Run("ObjectStorable", func(t *testing.T) {
assert.True(t, dstObj.Storable())
})
t.Run("ObjectOpen", func(t *testing.T) {
in, err := dstObj.Open(ctx)
require.NoError(t, err)
buf, err := ioutil.ReadAll(in)
require.NoError(t, err)
require.NoError(t, in.Close())
assert.True(t, len(buf) > 1000)
contentType := http.DetectContentType(buf[:512])
assert.Equal(t, "image/jpeg", contentType)
})
t.Run("CheckFileInAlbum", func(t *testing.T) {
entries, err := f.List(ctx, albumName)
require.NoError(t, err)
assert.Equal(t, 1, len(entries))
assert.Equal(t, remote, entries[0].Remote())
assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
})
// Check it is there in the day/month/year hierarchy
// 2013-07-26 is the creation date of the photo
checkPresent := func(t *testing.T, objPath string) {
entries, err := f.List(ctx, objPath)
require.NoError(t, err)
found := false
for _, entry := range entries {
leaf := path.Base(entry.Remote())
if leaf == fileNameAlbum || leaf == remoteWithID {
found = true
}
}
assert.True(t, found, fmt.Sprintf("didn't find %q in %q", fileNameAlbum, objPath))
}
t.Run("CheckInByYear", func(t *testing.T) {
checkPresent(t, "media/by-year/2013")
})
t.Run("CheckInByMonth", func(t *testing.T) {
checkPresent(t, "media/by-month/2013/2013-07")
})
t.Run("CheckInByDay", func(t *testing.T) {
checkPresent(t, "media/by-day/2013/2013-07-26")
})
t.Run("NewObject", func(t *testing.T) {
o, err := f.NewObject(ctx, remote)
require.NoError(t, err)
require.Equal(t, remote, o.Remote())
})
t.Run("NewObjectWithID", func(t *testing.T) {
o, err := f.NewObject(ctx, remoteWithID)
require.NoError(t, err)
require.Equal(t, remoteWithID, o.Remote())
})
t.Run("NewFsIsFile", func(t *testing.T) {
fNew, err := fs.NewFs(*fstest.RemoteName + remote)
assert.Equal(t, fs.ErrorIsFile, err)
leaf := path.Base(remote)
o, err := fNew.NewObject(ctx, leaf)
require.NoError(t, err)
require.Equal(t, leaf, o.Remote())
})
t.Run("RemoveFileFromAlbum", func(t *testing.T) {
err = dstObj.Remove(ctx)
require.NoError(t, err)
time.Sleep(time.Second)
// Check album empty
entries, err := f.List(ctx, albumName)
require.NoError(t, err)
assert.Equal(t, 0, len(entries))
})
})
// remove the album
err = f.Rmdir(ctx, albumName)
require.Error(t, err) // FIXME doesn't work yet
})
t.Run("UploadMkdir", func(t *testing.T) {
assert.NoError(t, f.Mkdir(ctx, "upload/dir"))
assert.NoError(t, f.Mkdir(ctx, "upload/dir/subdir"))
t.Run("List", func(t *testing.T) {
entries, err := f.List(ctx, "upload")
require.NoError(t, err)
assert.Equal(t, 1, len(entries))
assert.Equal(t, "upload/dir", entries[0].Remote())
entries, err = f.List(ctx, "upload/dir")
require.NoError(t, err)
assert.Equal(t, 1, len(entries))
assert.Equal(t, "upload/dir/subdir", entries[0].Remote())
})
t.Run("Rmdir", func(t *testing.T) {
assert.NoError(t, f.Rmdir(ctx, "upload/dir/subdir"))
assert.NoError(t, f.Rmdir(ctx, "upload/dir"))
})
t.Run("ListEmpty", func(t *testing.T) {
entries, err := f.List(ctx, "upload")
require.NoError(t, err)
assert.Equal(t, 0, len(entries))
_, err = f.List(ctx, "upload/dir")
assert.Equal(t, fs.ErrorDirNotFound, err)
})
})
t.Run("Upload", func(t *testing.T) {
uploadDir := "upload/dir/subdir"
remote := path.Join(uploadDir, fileNameUpload)
srcObj, err := localFs.NewObject(ctx, fileNameUpload)
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote})
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()
remoteWithID := addFileID(remote, dstObj.(*Object).id)
t.Run("List", func(t *testing.T) {
entries, err := f.List(ctx, uploadDir)
require.NoError(t, err)
require.Equal(t, 1, len(entries))
assert.Equal(t, remote, entries[0].Remote())
assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
})
t.Run("NewObject", func(t *testing.T) {
o, err := f.NewObject(ctx, remote)
require.NoError(t, err)
require.Equal(t, remote, o.Remote())
})
t.Run("NewObjectWithID", func(t *testing.T) {
o, err := f.NewObject(ctx, remoteWithID)
require.NoError(t, err)
require.Equal(t, remoteWithID, o.Remote())
})
})
t.Run("Name", func(t *testing.T) {
assert.Equal(t, (*fstest.RemoteName)[:len(*fstest.RemoteName)-1], f.Name())
})
t.Run("Root", func(t *testing.T) {
assert.Equal(t, "", f.Root())
})
t.Run("String", func(t *testing.T) {
assert.Equal(t, `Google Photos path ""`, f.String())
})
t.Run("Features", func(t *testing.T) {
features := f.Features()
assert.False(t, features.CaseInsensitive)
assert.True(t, features.ReadMimeType)
})
t.Run("Precision", func(t *testing.T) {
assert.Equal(t, fs.ModTimeNotSupported, f.Precision())
})
t.Run("Hashes", func(t *testing.T) {
assert.Equal(t, hash.Set(hash.None), f.Hashes())
})
}
func TestAddID(t *testing.T) {
assert.Equal(t, "potato {123}", addID("potato", "123"))
assert.Equal(t, "{123}", addID("", "123"))
}
func TestFileAddID(t *testing.T) {
assert.Equal(t, "potato {123}.txt", addFileID("potato.txt", "123"))
assert.Equal(t, "potato {123}", addFileID("potato", "123"))
assert.Equal(t, "{123}", addFileID("", "123"))
}
func TestFindID(t *testing.T) {
assert.Equal(t, "", findID("potato"))
ID := "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
assert.Equal(t, ID, findID("potato {"+ID+"}.txt"))
ID = ID[1:]
assert.Equal(t, "", findID("potato {"+ID+"}.txt"))
}


@@ -0,0 +1,335 @@
// Store the parsing of file patterns
package googlephotos
import (
"context"
"fmt"
"path"
"regexp"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/rclone/rclone/fs"
)
// lister describes the subset of the interfaces on Fs needed for the
// file pattern parsing
type lister interface {
listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error)
listAlbums(ctx context.Context, shared bool) (all *albums, err error)
listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
dirTime() time.Time
}
// dirPattern describes a single directory pattern
type dirPattern struct {
re string // match for the path
match *regexp.Regexp // compiled match
canUpload bool // true if can upload here
canMkdir bool // true if can make a directory here
isFile bool // true if this is a file
isUpload bool // true if this is the upload directory
// function to turn a match into DirEntries
toEntries func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error)
}
// dirPatterns is a slice of all the directory patterns
type dirPatterns []dirPattern
// patterns describes the layout of the google photos backend file system.
//
// NB no trailing / on paths
var patterns = dirPatterns{
{
re: `^$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
return fs.DirEntries{
fs.NewDir(prefix+"media", f.dirTime()),
fs.NewDir(prefix+"album", f.dirTime()),
fs.NewDir(prefix+"shared-album", f.dirTime()),
fs.NewDir(prefix+"upload", f.dirTime()),
}, nil
},
},
{
re: `^upload(?:/(.*))?$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
return f.listUploads(ctx, match[0])
},
canUpload: true,
canMkdir: true,
isUpload: true,
},
{
re: `^upload/(.*)$`,
isFile: true,
canUpload: true,
isUpload: true,
},
{
re: `^media$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
return fs.DirEntries{
fs.NewDir(prefix+"all", f.dirTime()),
fs.NewDir(prefix+"by-year", f.dirTime()),
fs.NewDir(prefix+"by-month", f.dirTime()),
fs.NewDir(prefix+"by-day", f.dirTime()),
}, nil
},
},
{
re: `^media/all$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
return f.listDir(ctx, prefix, api.SearchFilter{})
},
},
{
re: `^media/all/([^/]+)$`,
isFile: true,
},
{
re: `^media/by-year$`,
toEntries: years,
},
{
re: `^media/by-year/(\d{4})$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
filter, err := yearMonthDayFilter(ctx, f, match)
if err != nil {
return nil, err
}
return f.listDir(ctx, prefix, filter)
},
},
{
re: `^media/by-year/(\d{4})/([^/]+)$`,
isFile: true,
},
{
re: `^media/by-month$`,
toEntries: years,
},
{
re: `^media/by-month/(\d{4})$`,
toEntries: months,
},
{
re: `^media/by-month/\d{4}/(\d{4})-(\d{2})$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
filter, err := yearMonthDayFilter(ctx, f, match)
if err != nil {
return nil, err
}
return f.listDir(ctx, prefix, filter)
},
},
{
re: `^media/by-month/\d{4}/(\d{4})-(\d{2})/([^/]+)$`,
isFile: true,
},
{
re: `^media/by-day$`,
toEntries: years,
},
{
re: `^media/by-day/(\d{4})$`,
toEntries: days,
},
{
re: `^media/by-day/\d{4}/(\d{4})-(\d{2})-(\d{2})$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
filter, err := yearMonthDayFilter(ctx, f, match)
if err != nil {
return nil, err
}
return f.listDir(ctx, prefix, filter)
},
},
{
re: `^media/by-day/\d{4}/(\d{4})-(\d{2})-(\d{2})/([^/]+)$`,
isFile: true,
},
{
re: `^album$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
return albumsToEntries(ctx, f, false, prefix, "")
},
},
{
re: `^album/(.+)$`,
canMkdir: true,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
return albumsToEntries(ctx, f, false, prefix, match[1])
},
},
{
re: `^album/(.+?)/([^/]+)$`,
canUpload: true,
isFile: true,
},
{
re: `^shared-album$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
return albumsToEntries(ctx, f, true, prefix, "")
},
},
{
re: `^shared-album/(.+)$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
return albumsToEntries(ctx, f, true, prefix, match[1])
},
},
{
re: `^shared-album/(.+?)/([^/]+)$`,
isFile: true,
},
}.mustCompile()
// mustCompile compiles the regexps in the dirPatterns
func (ds dirPatterns) mustCompile() dirPatterns {
for i := range ds {
pattern := &ds[i]
pattern.match = regexp.MustCompile(pattern.re)
}
return ds
}
// match finds the path passed in within the matching structure and
// returns the parameters and a pointer to the match, or nil.
func (ds dirPatterns) match(root string, itemPath string, isFile bool) (match []string, prefix string, pattern *dirPattern) {
itemPath = strings.Trim(itemPath, "/")
absPath := path.Join(root, itemPath)
prefix = strings.Trim(absPath[len(root):], "/")
if prefix != "" {
prefix += "/"
}
for i := range ds {
pattern = &ds[i]
if pattern.isFile != isFile {
continue
}
match = pattern.match.FindStringSubmatch(absPath)
if match != nil {
return
}
}
return nil, "", nil
}
// Return the years from 2000 to today
// FIXME make configurable?
func years(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
currentYear := f.dirTime().Year()
for year := 2000; year <= currentYear; year++ {
entries = append(entries, fs.NewDir(prefix+fmt.Sprint(year), f.dirTime()))
}
return entries, nil
}
// Return the months in a given year
func months(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
year := match[1]
for month := 1; month <= 12; month++ {
entries = append(entries, fs.NewDir(fmt.Sprintf("%s%s-%02d", prefix, year, month), f.dirTime()))
}
return entries, nil
}
// Return the days in a given year
func days(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
year := match[1]
current, err := time.Parse("2006", year)
if err != nil {
return nil, errors.Errorf("bad year %q", match[1])
}
currentYear := current.Year()
for current.Year() == currentYear {
entries = append(entries, fs.NewDir(prefix+current.Format("2006-01-02"), f.dirTime()))
current = current.AddDate(0, 0, 1)
}
return entries, nil
}
// This creates a search filter on year/month/day as provided
func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter, err error) {
year, err := strconv.Atoi(match[1])
if err != nil || year < 1000 || year > 3000 {
return sf, errors.Errorf("bad year %q", match[1])
}
sf = api.SearchFilter{
Filters: &api.Filters{
DateFilter: &api.DateFilter{
Dates: []api.Date{
{
Year: year,
},
},
},
},
}
if len(match) >= 3 {
month, err := strconv.Atoi(match[2])
if err != nil || month < 1 || month > 12 {
return sf, errors.Errorf("bad month %q", match[2])
}
sf.Filters.DateFilter.Dates[0].Month = month
}
if len(match) >= 4 {
day, err := strconv.Atoi(match[3])
if err != nil || day < 1 || day > 31 {
return sf, errors.Errorf("bad day %q", match[3])
}
sf.Filters.DateFilter.Dates[0].Day = day
}
return sf, nil
}
// Turns an albumPath into entries
//
// These can either be synthetic directory entries if the album path
// is a prefix of another album, or actual files, or a combination of
// the two.
func albumsToEntries(ctx context.Context, f lister, shared bool, prefix string, albumPath string) (entries fs.DirEntries, err error) {
albums, err := f.listAlbums(ctx, shared)
if err != nil {
return nil, err
}
// Put in the directories
dirs, foundAlbumPath := albums.getDirs(albumPath)
if foundAlbumPath {
for _, dir := range dirs {
d := fs.NewDir(prefix+dir, f.dirTime())
dirPath := path.Join(albumPath, dir)
// if this dir is an album add more special stuff
album, ok := albums.get(dirPath)
if ok {
count, err := strconv.ParseInt(album.MediaItemsCount, 10, 64)
if err != nil {
fs.Debugf(f, "Error reading media count: %v", err)
}
d.SetID(album.ID).SetItems(count)
}
entries = append(entries, d)
}
}
// if this is an album then return a filter to list it
album, foundAlbum := albums.get(albumPath)
if foundAlbum {
filter := api.SearchFilter{AlbumID: album.ID}
newEntries, err := f.listDir(ctx, prefix, filter)
if err != nil {
return nil, err
}
entries = append(entries, newEntries...)
}
if !foundAlbumPath && !foundAlbum && albumPath != "" {
return nil, fs.ErrorDirNotFound
}
return entries, nil
}


@@ -0,0 +1,495 @@
package googlephotos
import (
"context"
"fmt"
"testing"
"time"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/dirtree"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/mockobject"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// time for directories
var startTime = fstest.Time("2019-06-24T15:53:05.999999999Z")
// mock Fs for testing patterns
type testLister struct {
t *testing.T
albums *albums
names []string
uploaded dirtree.DirTree
}
// newTestLister makes a mock for testing
func newTestLister(t *testing.T) *testLister {
return &testLister{
t: t,
albums: newAlbums(),
uploaded: dirtree.New(),
}
}
// mock listDir for testing
func (f *testLister) listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) {
for _, name := range f.names {
entries = append(entries, mockobject.New(prefix+name))
}
return entries, nil
}
// mock listAlbums for testing
func (f *testLister) listAlbums(ctx context.Context, shared bool) (all *albums, err error) {
return f.albums, nil
}
// mock listUploads for testing
func (f *testLister) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
entries, _ = f.uploaded[dir]
return entries, nil
}
// mock dirTime for testing
func (f *testLister) dirTime() time.Time {
return startTime
}
func TestPatternMatch(t *testing.T) {
for testNumber, test := range []struct {
// input
root string
itemPath string
isFile bool
// expected output
wantMatch []string
wantPrefix string
wantPattern *dirPattern
}{
{
root: "",
itemPath: "",
isFile: false,
wantMatch: []string{""},
wantPrefix: "",
wantPattern: &patterns[0],
},
{
root: "",
itemPath: "",
isFile: true,
wantMatch: nil,
wantPrefix: "",
wantPattern: nil,
},
{
root: "upload",
itemPath: "",
isFile: false,
wantMatch: []string{"upload", ""},
wantPrefix: "",
wantPattern: &patterns[1],
},
{
root: "upload/dir",
itemPath: "",
isFile: false,
wantMatch: []string{"upload/dir", "dir"},
wantPrefix: "",
wantPattern: &patterns[1],
},
{
root: "upload/file.jpg",
itemPath: "",
isFile: true,
wantMatch: []string{"upload/file.jpg", "file.jpg"},
wantPrefix: "",
wantPattern: &patterns[2],
},
{
root: "media",
itemPath: "",
isFile: false,
wantMatch: []string{"media"},
wantPrefix: "",
wantPattern: &patterns[3],
},
{
root: "",
itemPath: "media",
isFile: false,
wantMatch: []string{"media"},
wantPrefix: "media/",
wantPattern: &patterns[3],
},
{
root: "media/all",
itemPath: "",
isFile: false,
wantMatch: []string{"media/all"},
wantPrefix: "",
wantPattern: &patterns[4],
},
{
root: "media",
itemPath: "all",
isFile: false,
wantMatch: []string{"media/all"},
wantPrefix: "all/",
wantPattern: &patterns[4],
},
{
root: "media/all",
itemPath: "file.jpg",
isFile: true,
wantMatch: []string{"media/all/file.jpg", "file.jpg"},
wantPrefix: "file.jpg/",
wantPattern: &patterns[5],
},
} {
t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q,isFile=%v", testNumber, test.root, test.itemPath, test.isFile), func(t *testing.T) {
gotMatch, gotPrefix, gotPattern := patterns.match(test.root, test.itemPath, test.isFile)
assert.Equal(t, test.wantMatch, gotMatch)
assert.Equal(t, test.wantPrefix, gotPrefix)
assert.Equal(t, test.wantPattern, gotPattern)
})
}
}
func TestPatternMatchToEntries(t *testing.T) {
ctx := context.Background()
f := newTestLister(t)
f.names = []string{"file.jpg"}
f.albums.add(&api.Album{
ID: "1",
Title: "sub/one",
})
f.albums.add(&api.Album{
ID: "2",
Title: "sub",
})
f.uploaded.AddEntry(mockobject.New("upload/file1.jpg"))
f.uploaded.AddEntry(mockobject.New("upload/dir/file2.jpg"))
for testNumber, test := range []struct {
// input
root string
itemPath string
// expected output
wantMatch []string
wantPrefix string
remotes []string
}{
{
root: "",
itemPath: "",
wantMatch: []string{""},
wantPrefix: "",
remotes: []string{"media/", "album/", "shared-album/", "upload/"},
},
{
root: "upload",
itemPath: "",
wantMatch: []string{"upload", ""},
wantPrefix: "",
remotes: []string{"upload/file1.jpg", "upload/dir/"},
},
{
root: "upload",
itemPath: "dir",
wantMatch: []string{"upload/dir", "dir"},
wantPrefix: "dir/",
remotes: []string{"upload/dir/file2.jpg"},
},
{
root: "media",
itemPath: "",
wantMatch: []string{"media"},
wantPrefix: "",
remotes: []string{"all/", "by-year/", "by-month/", "by-day/"},
},
{
root: "media/all",
itemPath: "",
wantMatch: []string{"media/all"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
{
root: "media",
itemPath: "all",
wantMatch: []string{"media/all"},
wantPrefix: "all/",
remotes: []string{"all/file.jpg"},
},
{
root: "media/by-year",
itemPath: "",
wantMatch: []string{"media/by-year"},
wantPrefix: "",
remotes: []string{"2000/", "2001/", "2002/", "2003/"},
},
{
root: "media/by-year/2000",
itemPath: "",
wantMatch: []string{"media/by-year/2000", "2000"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
{
root: "media/by-month",
itemPath: "",
wantMatch: []string{"media/by-month"},
wantPrefix: "",
remotes: []string{"2000/", "2001/", "2002/", "2003/"},
},
{
root: "media/by-month/2001",
itemPath: "",
wantMatch: []string{"media/by-month/2001", "2001"},
wantPrefix: "",
remotes: []string{"2001-01/", "2001-02/", "2001-03/", "2001-04/"},
},
{
root: "media/by-month/2001/2001-01",
itemPath: "",
wantMatch: []string{"media/by-month/2001/2001-01", "2001", "01"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
{
root: "media/by-day",
itemPath: "",
wantMatch: []string{"media/by-day"},
wantPrefix: "",
remotes: []string{"2000/", "2001/", "2002/", "2003/"},
},
{
root: "media/by-day/2001",
itemPath: "",
wantMatch: []string{"media/by-day/2001", "2001"},
wantPrefix: "",
remotes: []string{"2001-01-01/", "2001-01-02/", "2001-01-03/", "2001-01-04/"},
},
{
root: "media/by-day/2001/2001-01-02",
itemPath: "",
wantMatch: []string{"media/by-day/2001/2001-01-02", "2001", "01", "02"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
{
root: "album",
itemPath: "",
wantMatch: []string{"album"},
wantPrefix: "",
remotes: []string{"sub/"},
},
{
root: "album/sub",
itemPath: "",
wantMatch: []string{"album/sub", "sub"},
wantPrefix: "",
remotes: []string{"one/", "file.jpg"},
},
{
root: "album/sub/one",
itemPath: "",
wantMatch: []string{"album/sub/one", "sub/one"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
{
root: "shared-album",
itemPath: "",
wantMatch: []string{"shared-album"},
wantPrefix: "",
remotes: []string{"sub/"},
},
{
root: "shared-album/sub",
itemPath: "",
wantMatch: []string{"shared-album/sub", "sub"},
wantPrefix: "",
remotes: []string{"one/", "file.jpg"},
},
{
root: "shared-album/sub/one",
itemPath: "",
wantMatch: []string{"shared-album/sub/one", "sub/one"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
} {
t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q", testNumber, test.root, test.itemPath), func(t *testing.T) {
match, prefix, pattern := patterns.match(test.root, test.itemPath, false)
assert.Equal(t, test.wantMatch, match)
assert.Equal(t, test.wantPrefix, prefix)
assert.NotNil(t, pattern)
assert.NotNil(t, pattern.toEntries)
entries, err := pattern.toEntries(ctx, f, prefix, match)
assert.NoError(t, err)
var remotes = []string{}
for _, entry := range entries {
remote := entry.Remote()
if _, isDir := entry.(fs.Directory); isDir {
remote += "/"
}
remotes = append(remotes, remote)
if len(remotes) >= 4 {
break // only test first 4 entries
}
}
assert.Equal(t, test.remotes, remotes)
})
}
}
func TestPatternYears(t *testing.T) {
f := newTestLister(t)
entries, err := years(context.Background(), f, "potato/", nil)
require.NoError(t, err)
year := 2000
for _, entry := range entries {
assert.Equal(t, "potato/"+fmt.Sprint(year), entry.Remote())
year++
}
}
func TestPatternMonths(t *testing.T) {
f := newTestLister(t)
entries, err := months(context.Background(), f, "potato/", []string{"", "2020"})
require.NoError(t, err)
assert.Equal(t, 12, len(entries))
for i, entry := range entries {
assert.Equal(t, fmt.Sprintf("potato/2020-%02d", i+1), entry.Remote())
}
}
func TestPatternDays(t *testing.T) {
f := newTestLister(t)
entries, err := days(context.Background(), f, "potato/", []string{"", "2020"})
require.NoError(t, err)
assert.Equal(t, 366, len(entries))
assert.Equal(t, "potato/2020-01-01", entries[0].Remote())
assert.Equal(t, "potato/2020-12-31", entries[len(entries)-1].Remote())
}
func TestPatternYearMonthDayFilter(t *testing.T) {
ctx := context.Background()
f := newTestLister(t)
// Years
sf, err := yearMonthDayFilter(ctx, f, []string{"", "2000"})
require.NoError(t, err)
assert.Equal(t, api.SearchFilter{
Filters: &api.Filters{
DateFilter: &api.DateFilter{
Dates: []api.Date{
{
Year: 2000,
},
},
},
},
}, sf)
_, err = yearMonthDayFilter(ctx, f, []string{"", "potato"})
require.Error(t, err)
_, err = yearMonthDayFilter(ctx, f, []string{"", "999"})
require.Error(t, err)
_, err = yearMonthDayFilter(ctx, f, []string{"", "4000"})
require.Error(t, err)
// Months
sf, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01"})
require.NoError(t, err)
assert.Equal(t, api.SearchFilter{
Filters: &api.Filters{
DateFilter: &api.DateFilter{
Dates: []api.Date{
{
Month: 1,
Year: 2000,
},
},
},
},
}, sf)
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "potato"})
require.Error(t, err)
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "0"})
require.Error(t, err)
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "13"})
require.Error(t, err)
// Days
sf, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "02"})
require.NoError(t, err)
assert.Equal(t, api.SearchFilter{
Filters: &api.Filters{
DateFilter: &api.DateFilter{
Dates: []api.Date{
{
Day: 2,
Month: 1,
Year: 2000,
},
},
},
},
}, sf)
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "potato"})
require.Error(t, err)
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "0"})
require.Error(t, err)
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "32"})
require.Error(t, err)
}
func TestPatternAlbumsToEntries(t *testing.T) {
f := newTestLister(t)
ctx := context.Background()
_, err := albumsToEntries(ctx, f, false, "potato/", "sub")
assert.Equal(t, fs.ErrorDirNotFound, err)
f.albums.add(&api.Album{
ID: "1",
Title: "sub/one",
})
entries, err := albumsToEntries(ctx, f, false, "potato/", "sub")
assert.NoError(t, err)
assert.Equal(t, 1, len(entries))
assert.Equal(t, "potato/one", entries[0].Remote())
_, ok := entries[0].(fs.Directory)
assert.Equal(t, true, ok)
f.albums.add(&api.Album{
ID: "1",
Title: "sub",
})
f.names = []string{"file.jpg"}
entries, err = albumsToEntries(ctx, f, false, "potato/", "sub")
assert.NoError(t, err)
assert.Equal(t, 2, len(entries))
assert.Equal(t, "potato/one", entries[0].Remote())
_, ok = entries[0].(fs.Directory)
assert.Equal(t, true, ok)
assert.Equal(t, "potato/file.jpg", entries[1].Remote())
_, ok = entries[1].(fs.Object)
assert.Equal(t, true, ok)
}

(Two binary files added, not shown: the 16 KiB test images used by the integration tests above.)


@@ -5,6 +5,7 @@
package http

import (
	"context"
	"io"
	"mime"
	"net/http"

@@ -12,15 +13,16 @@ import (
	"path"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/rest"
	"golang.org/x/net/html"
)

@@ -45,6 +47,21 @@ func init() {
			Value: "https://user:pass@example.com",
			Help:  "Connect to example.com using a username and password",
		}},
	}, {
		Name: "headers",
		Help: `Set HTTP headers for all transactions

Use this to set additional HTTP headers for all transactions

The input format is comma separated list of key,value pairs. Standard
[CSV encoding](https://godoc.org/encoding/csv) may be used.

For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.

You can set multiple headers, eg '"Cookie","name=value","Authorization","xxx"'.
`,
		Default:  fs.CommaSepList{},
		Advanced: true,
	}, {
		Name: "no_slash",
		Help: `Set this if the site doesn't end directories with /

@@ -61,6 +78,26 @@ Note that this may cause rclone to confuse genuine HTML files with
directories.`,
		Default:  false,
		Advanced: true,
	}, {
		Name: "no_head",
		Help: `Don't use HEAD requests to find file sizes in dir listing

If your site is being very slow to load then you can try this option.
Normally rclone does a HEAD request for each potential file in a
directory listing to:

- find its size
- check it really exists
- check to see if it is a directory

If you set this option, rclone will not do the HEAD request. This will mean

- directory listings are much quicker
- rclone won't have the times or sizes of any files
- some files that don't exist may be in the listing
`,
		Default:  false,
		Advanced: true,
	}},
	}
	fs.Register(fsi)
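
Taken together, a remote exercising both new options might look like this in rclone.conf (an illustrative stanza, not part of the change itself):

	[example-http]
	type = http
	url = https://example.com
	headers = Cookie,name=value,Authorization,xxx
	no_head = true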
@@ -68,8 +105,10 @@ directories.`,
// Options defines the configuration for this backend // Options defines the configuration for this backend
type Options struct { type Options struct {
Endpoint string `config:"url"` Endpoint string `config:"url"`
NoSlash bool `config:"no_slash"` NoSlash bool `config:"no_slash"`
NoHead bool `config:"no_head"`
Headers fs.CommaSepList `config:"headers"`
} }
// Fs stores the interface to the remote HTTP files // Fs stores the interface to the remote HTTP files
@@ -107,6 +146,7 @@ func statusError(res *http.Response, err error) error {
// NewFs creates a new Fs object from the name and root. It connects to // NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file. // the host specified in the config file.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
// Parse config into Options struct // Parse config into Options struct
opt := new(Options) opt := new(Options)
err := configstruct.Set(m, opt) err := configstruct.Set(m, opt)
@@ -114,6 +154,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err return nil, err
} }
if len(opt.Headers)%2 != 0 {
return nil, errors.New("odd number of headers supplied")
}
if !strings.HasSuffix(opt.Endpoint, "/") { if !strings.HasSuffix(opt.Endpoint, "/") {
opt.Endpoint += "/" opt.Endpoint += "/"
} }
@@ -139,10 +183,15 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return http.ErrUseLastResponse return http.ErrUseLastResponse
} }
// check to see if points to a file // check to see if points to a file
res, err := noRedir.Head(u.String()) req, err := http.NewRequest("HEAD", u.String(), nil)
err = statusError(res, err)
if err == nil { if err == nil {
isFile = true req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
addHeaders(req, opt)
res, err := noRedir.Do(req)
err = statusError(res, err)
if err == nil {
isFile = true
}
} }
} }
@@ -207,12 +256,12 @@ func (f *Fs) Precision() time.Duration {
} }
// NewObject creates a new remote http file object // NewObject creates a new remote http file object
func (f *Fs) NewObject(remote string) (fs.Object, error) { func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
o := &Object{ o := &Object{
fs: f, fs: f,
remote: remote, remote: remote,
} }
err := o.stat() err := o.stat(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -315,8 +364,22 @@ func parse(base *url.URL, in io.Reader) (names []string, err error) {
return names, nil return names, nil
} }
// Adds the configured headers to the request if any
func addHeaders(req *http.Request, opt *Options) {
for i := 0; i < len(opt.Headers); i += 2 {
key := opt.Headers[i]
value := opt.Headers[i+1]
req.Header.Add(key, value)
}
}
// Adds the configured headers to the request if any
func (f *Fs) addHeaders(req *http.Request) {
addHeaders(req, &f.opt)
}
// Read the directory passed in // Read the directory passed in
func (f *Fs) readDir(dir string) (names []string, err error) { func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error) {
URL := f.url(dir) URL := f.url(dir)
u, err := url.Parse(URL) u, err := url.Parse(URL)
if err != nil { if err != nil {
@@ -325,7 +388,14 @@ func (f *Fs) readDir(dir string) (names []string, err error) {
if !strings.HasSuffix(URL, "/") { if !strings.HasSuffix(URL, "/") {
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL) return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
} }
res, err := f.httpClient.Get(URL) // Do the request
req, err := http.NewRequest("GET", URL, nil)
if err != nil {
return nil, errors.Wrap(err, "readDir failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
f.addHeaders(req)
res, err := f.httpClient.Do(req)
if err == nil { if err == nil {
defer fs.CheckClose(res.Body, &err) defer fs.CheckClose(res.Body, &err)
if res.StatusCode == http.StatusNotFound { if res.StatusCode == http.StatusNotFound {
@@ -359,38 +429,57 @@ func (f *Fs) readDir(dir string) (names []string, err error) {
//
// This should return ErrDirNotFound if the directory isn't
// found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	if !strings.HasSuffix(dir, "/") && dir != "" {
		dir += "/"
	}
-	names, err := f.readDir(dir)
+	names, err := f.readDir(ctx, dir)
	if err != nil {
		return nil, errors.Wrapf(err, "error listing %q", dir)
	}
+	var (
+		entriesMu sync.Mutex // to protect entries
+		wg        sync.WaitGroup
+		in        = make(chan string, fs.Config.Checkers)
+	)
+	add := func(entry fs.DirEntry) {
+		entriesMu.Lock()
+		entries = append(entries, entry)
+		entriesMu.Unlock()
+	}
+	for i := 0; i < fs.Config.Checkers; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			for remote := range in {
+				file := &Object{
+					fs:     f,
+					remote: remote,
+				}
+				switch err := file.stat(ctx); err {
+				case nil:
+					add(file)
+				case fs.ErrorNotAFile:
+					// ...found a directory not a file
+					add(fs.NewDir(remote, timeUnset))
+				default:
+					fs.Debugf(remote, "skipping because of error: %v", err)
+				}
+			}
+		}()
+	}
	for _, name := range names {
		isDir := name[len(name)-1] == '/'
		name = strings.TrimRight(name, "/")
		remote := path.Join(dir, name)
		if isDir {
-			dir := fs.NewDir(remote, timeUnset)
-			entries = append(entries, dir)
+			add(fs.NewDir(remote, timeUnset))
		} else {
-			file := &Object{
-				fs:     f,
-				remote: remote,
-			}
-			switch err = file.stat(); err {
-			case nil:
-				entries = append(entries, file)
-			case fs.ErrorNotAFile:
-				// ...found a directory not a file
-				dir := fs.NewDir(remote, timeUnset)
-				entries = append(entries, dir)
-			default:
-				fs.Debugf(remote, "skipping because of error: %v", err)
-			}
+			in <- remote
		}
	}
+	close(in)
+	wg.Wait()
	return entries, nil
}
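The rewritten List fans the per-file stat HEAD requests out to fs.Config.Checkers goroutines: a buffered channel feeds names to the workers and a mutex guards the shared entries slice, with close(in) and wg.Wait() draining everything before the entries are returned. The same bounded worker-pool pattern in miniature (names here are ours):

package main

import (
	"fmt"
	"sync"
)

func main() {
	const workers = 4
	names := []string{"a.txt", "b.txt", "c.txt"}

	var (
		mu      sync.Mutex // protects results
		wg      sync.WaitGroup
		in      = make(chan string, workers)
		results []string
	)
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for name := range in {
				// stand-in for the per-file stat call
				mu.Lock()
				results = append(results, name)
				mu.Unlock()
			}
		}()
	}
	for _, name := range names {
		in <- name
	}
	close(in) // no more work: lets the workers' range loops end
	wg.Wait() // wait for in-flight work before reading results
	fmt.Println(results)
}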
@@ -399,12 +488,12 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return nil, errorReadOnly
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return nil, errorReadOnly
}
@@ -427,7 +516,7 @@ func (o *Object) Remote() string {
}

// Hash returns "" since HTTP (in Go or OpenSSH) doesn't support remote calculation of hashes
-func (o *Object) Hash(r hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
	return "", hash.ErrUnsupported
}
@@ -437,7 +526,7 @@ func (o *Object) Size() int64 {
}

// ModTime returns the modification time of the remote http file
-func (o *Object) ModTime() time.Time {
+func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.modTime
}
@@ -447,9 +536,21 @@ func (o *Object) url() string {
}

// stat updates the info field in the Object
-func (o *Object) stat() error {
+func (o *Object) stat(ctx context.Context) error {
+	if o.fs.opt.NoHead {
+		o.size = -1
+		o.modTime = timeUnset
+		o.contentType = fs.MimeType(ctx, o)
+		return nil
+	}
	url := o.url()
-	res, err := o.fs.httpClient.Head(url)
+	req, err := http.NewRequest("HEAD", url, nil)
+	if err != nil {
+		return errors.Wrap(err, "stat failed")
+	}
+	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
+	o.fs.addHeaders(req)
+	res, err := o.fs.httpClient.Do(req)
	if err == nil && res.StatusCode == http.StatusNotFound {
		return fs.ErrorObjectNotFound
	}
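Both readDir and stat now build the request by hand instead of calling the httpClient.Get/Head shortcuts, since only an explicit *http.Request can carry a context and the configured extra headers. As the inline comments note, Go 1.13 folds the two steps into one call; a sketch of both styles:

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Pre-go1.13 style: build the request, then attach the context.
	req, err := http.NewRequest("HEAD", "https://example.com/", nil)
	if err != nil {
		panic(err)
	}
	req = req.WithContext(ctx)

	// go1.13+ style does the same in one call.
	req2, err := http.NewRequestWithContext(ctx, "HEAD", "https://example.com/", nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(req.URL, req2.URL)
}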
@@ -480,7 +581,7 @@ func (o *Object) stat() error {
// SetModTime sets the modification and access time to the specified time
//
// it also updates the info field
-func (o *Object) SetModTime(modTime time.Time) error {
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	return errorReadOnly
}
@@ -490,17 +591,19 @@ func (o *Object) Storable() bool {
}

// Open a remote http file object for reading. Seek is supported
-func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	url := o.url()
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, errors.Wrap(err, "Open failed")
	}
+	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext

	// Add optional headers
	for k, v := range fs.OpenOptionHeaders(options) {
		req.Header.Add(k, v)
	}
+	o.fs.addHeaders(req)

	// Do the request
	res, err := o.fs.httpClient.Do(req)
@@ -517,27 +620,27 @@ func (f *Fs) Hashes() hash.Set {
}

// Mkdir makes the root directory of the Fs object
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	return errorReadOnly
}

// Remove a remote http file object
-func (o *Object) Remove() error {
+func (o *Object) Remove(ctx context.Context) error {
	return errorReadOnly
}

// Rmdir removes the root directory of the Fs object
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return errorReadOnly
}

// Update in to the object with the modTime given of the given size
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	return errorReadOnly
}

// MimeType of an Object if known, "" otherwise
-func (o *Object) MimeType() string {
+func (o *Object) MimeType(ctx context.Context) string {
	return o.contentType
}


@@ -1,6 +1,7 @@
package http

import (
+	"context"
	"fmt"
	"io/ioutil"
	"net/http"
@@ -9,14 +10,15 @@ import (
	"os"
	"path/filepath"
	"sort"
+	"strings"
	"testing"
	"time"

-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/config"
-	"github.com/ncw/rclone/fs/config/configmap"
-	"github.com/ncw/rclone/fstest"
-	"github.com/ncw/rclone/lib/rest"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config"
+	"github.com/rclone/rclone/fs/config/configmap"
+	"github.com/rclone/rclone/fstest"
+	"github.com/rclone/rclone/lib/rest"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)
@@ -25,6 +27,7 @@ var (
	remoteName = "TestHTTP"
	testPath   = "test"
	filesPath  = filepath.Join(testPath, "files")
+	headers    = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
)
// prepareServer the test server and return a function to tidy it up afterwards
@@ -32,8 +35,16 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
	// file server for test/files
	fileServer := http.FileServer(http.Dir(filesPath))

+	// test the headers are there then pass on to fileServer
+	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
+		assert.Equal(t, headers[1], r.Header.Get(headers[0]), what+headers[0])
+		assert.Equal(t, headers[3], r.Header.Get(headers[2]), what+headers[2])
+		fileServer.ServeHTTP(w, r)
+	})

	// Make the test server
-	ts := httptest.NewServer(fileServer)
+	ts := httptest.NewServer(handler)

	// Configure the remote
	config.LoadConfig()
@@ -44,8 +55,9 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
	// config.FileSet(remoteName, "url", ts.URL)
	m := configmap.Simple{
		"type": "http",
		"url":  ts.URL,
+		"headers": strings.Join(headers, ","),
	}

	// return a function to tidy up
@@ -64,7 +76,7 @@ func prepare(t *testing.T) (fs.Fs, func()) {
}

func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
-	entries, err := f.List("")
+	entries, err := f.List(context.Background(), "")
	require.NoError(t, err)

	sort.Sort(entries)
@@ -120,7 +132,7 @@ func TestListSubDir(t *testing.T) {
	f, tidy := prepare(t)
	defer tidy()

-	entries, err := f.List("three")
+	entries, err := f.List(context.Background(), "three")
	require.NoError(t, err)

	sort.Sort(entries)
@@ -138,7 +150,7 @@ func TestNewObject(t *testing.T) {
	f, tidy := prepare(t)
	defer tidy()

-	o, err := f.NewObject("four/under four.txt")
+	o, err := f.NewObject(context.Background(), "four/under four.txt")
	require.NoError(t, err)

	assert.Equal(t, "four/under four.txt", o.Remote())
@@ -148,7 +160,7 @@ func TestNewObject(t *testing.T) {
	// Test the time is correct on the object
-	tObj := o.ModTime()
+	tObj := o.ModTime(context.Background())

	fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
	require.NoError(t, err)
@@ -158,7 +170,7 @@ func TestNewObject(t *testing.T) {
	assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))

	// check object not found
-	o, err = f.NewObject("not found.txt")
+	o, err = f.NewObject(context.Background(), "not found.txt")
	assert.Nil(t, o)
	assert.Equal(t, fs.ErrorObjectNotFound, err)
}
@@ -167,11 +179,11 @@ func TestOpen(t *testing.T) {
	f, tidy := prepare(t)
	defer tidy()

-	o, err := f.NewObject("four/under four.txt")
+	o, err := f.NewObject(context.Background(), "four/under four.txt")
	require.NoError(t, err)

	// Test normal read
-	fd, err := o.Open()
+	fd, err := o.Open(context.Background())
	require.NoError(t, err)
	data, err := ioutil.ReadAll(fd)
	require.NoError(t, err)
@@ -179,7 +191,7 @@ func TestOpen(t *testing.T) {
	assert.Equal(t, "beetroot\n", string(data))

	// Test with range request
-	fd, err = o.Open(&fs.RangeOption{Start: 1, End: 5})
+	fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
	require.NoError(t, err)
	data, err = ioutil.ReadAll(fd)
	require.NoError(t, err)
@@ -191,12 +203,12 @@ func TestMimeType(t *testing.T) {
	f, tidy := prepare(t)
	defer tidy()

-	o, err := f.NewObject("four/under four.txt")
+	o, err := f.NewObject(context.Background(), "four/under four.txt")
	require.NoError(t, err)

	do, ok := o.(fs.MimeTyper)
	require.True(t, ok)
-	assert.Equal(t, "text/plain; charset=utf-8", do.MimeType())
+	assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
}

func TestIsAFileRoot(t *testing.T) {
@@ -216,7 +228,7 @@ func TestIsAFileSubDir(t *testing.T) {
	f, err := NewFs(remoteName, "three/underthree.txt", m)
	assert.Equal(t, err, fs.ErrorIsFile)

-	entries, err := f.List("")
+	entries, err := f.List(context.Background(), "")
	require.NoError(t, err)

	sort.Sort(entries)
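prepareServer wraps the http.FileServer in a checking handler, so every request the backend issues is asserted to carry the configured headers before it is served. The same wrap-and-delegate middleware pattern outside the test harness (a sketch, not the test file itself):

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
)

func main() {
	inner := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "hello")
	})
	// middleware: check a header, then delegate to the inner handler
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Header.Get("X-Potato") != "sausage" {
			http.Error(w, "missing header", http.StatusBadRequest)
			return
		}
		inner.ServeHTTP(w, r)
	})
	ts := httptest.NewServer(handler)
	defer ts.Close()

	req, err := http.NewRequest("GET", ts.URL, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-Potato", "sausage")
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	body, _ := ioutil.ReadAll(res.Body)
	fmt.Println(res.StatusCode, string(body))
}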


@@ -24,7 +24,7 @@
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="timer-test">timer-test</a></td><td align="right">09-May-2017 17:05  </td><td align="right">1.5M</td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/text.gif" alt="[TXT]"></td><td><a href="words-to-regexp.pl">words-to-regexp.pl</a></td><td align="right">01-Mar-2005 20:43  </td><td align="right">6.0K</td><td>&nbsp;</td></tr>
<tr><th colspan="5"><hr></th></tr>
-<!-- some extras from https://github.com/ncw/rclone/issues/1573 -->
+<!-- some extras from https://github.com/rclone/rclone/issues/1573 -->
<tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20100%25%20better.mp3">Now 100% better.mp3</a></td><td align="right">2017-08-01 11:41  </td><td align="right"> 0 </td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20better.mp3">Now better.mp3</a></td><td align="right">2017-08-01 11:41  </td><td align="right"> 0 </td><td>&nbsp;</td></tr>


@@ -1,11 +1,12 @@
package hubic

import (
+	"context"
	"net/http"
	"time"

-	"github.com/ncw/rclone/fs"
	"github.com/ncw/swift"
+	"github.com/rclone/rclone/fs"
)

// auth is an authenticator for swift
@@ -26,7 +27,7 @@ func newAuth(f *Fs) *auth {
func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
	const retries = 10
	for try := 1; try <= retries; try++ {
-		err = a.f.getCredentials()
+		err = a.f.getCredentials(context.TODO())
		if err == nil {
			break
		}


@@ -7,6 +7,7 @@ package hubic
// to be revisted after some actual experience.

import (
+	"context"
	"encoding/json"
	"fmt"
	"io/ioutil"
@@ -15,16 +16,16 @@ import (
	"strings"
	"time"

-	"github.com/ncw/rclone/backend/swift"
-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/config"
-	"github.com/ncw/rclone/fs/config/configmap"
-	"github.com/ncw/rclone/fs/config/configstruct"
-	"github.com/ncw/rclone/fs/config/obscure"
-	"github.com/ncw/rclone/fs/fshttp"
-	"github.com/ncw/rclone/lib/oauthutil"
	swiftLib "github.com/ncw/swift"
	"github.com/pkg/errors"
+	"github.com/rclone/rclone/backend/swift"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config"
+	"github.com/rclone/rclone/fs/config/configmap"
+	"github.com/rclone/rclone/fs/config/configstruct"
+	"github.com/rclone/rclone/fs/config/obscure"
+	"github.com/rclone/rclone/fs/fshttp"
+	"github.com/rclone/rclone/lib/oauthutil"
	"golang.org/x/oauth2"
)
@@ -115,11 +116,12 @@ func (f *Fs) String() string {
// getCredentials reads the OpenStack Credentials using the Hubic API
//
// The credentials are read into the Fs
-func (f *Fs) getCredentials() (err error) {
+func (f *Fs) getCredentials(ctx context.Context) (err error) {
	req, err := http.NewRequest("GET", "https://api.hubic.com/1.0/account/credentials", nil)
	if err != nil {
		return err
	}
+	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
	resp, err := f.client.Do(req)
	if err != nil {
		return err


@@ -4,8 +4,8 @@ package hubic_test
import (
	"testing"

-	"github.com/ncw/rclone/backend/hubic"
-	"github.com/ncw/rclone/fstest/fstests"
+	"github.com/rclone/rclone/backend/hubic"
+	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote


@@ -46,6 +46,82 @@ func (t Time) String() string { return time.Time(t).Format(timeFormat) }
// APIString returns Time string in Jottacloud API format
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }

+// TokenJSON is the struct representing the HTTP response from OAuth2
+// providers returning a token in JSON form.
+type TokenJSON struct {
+	AccessToken  string `json:"access_token"`
+	TokenType    string `json:"token_type"`
+	RefreshToken string `json:"refresh_token"`
+	ExpiresIn    int32  `json:"expires_in"` // at least PayPal returns string, while most return number
+}
+
+// JSON structures returned by new API
+
+// AllocateFileRequest to prepare an upload to Jottacloud
+type AllocateFileRequest struct {
+	Bytes    int64  `json:"bytes"`
+	Created  string `json:"created"`
+	Md5      string `json:"md5"`
+	Modified string `json:"modified"`
+	Path     string `json:"path"`
+}
+
+// AllocateFileResponse for upload requests
+type AllocateFileResponse struct {
+	Name      string `json:"name"`
+	Path      string `json:"path"`
+	State     string `json:"state"`
+	UploadID  string `json:"upload_id"`
+	UploadURL string `json:"upload_url"`
+	Bytes     int64  `json:"bytes"`
+	ResumePos int64  `json:"resume_pos"`
+}
+
+// UploadResponse after an upload
+type UploadResponse struct {
+	Name      string      `json:"name"`
+	Path      string      `json:"path"`
+	Kind      string      `json:"kind"`
+	ContentID string      `json:"content_id"`
+	Bytes     int64       `json:"bytes"`
+	Md5       string      `json:"md5"`
+	Created   int64       `json:"created"`
+	Modified  int64       `json:"modified"`
+	Deleted   interface{} `json:"deleted"`
+	Mime      string      `json:"mime"`
+}
+
+// DeviceRegistrationResponse is the response to registering a device
+type DeviceRegistrationResponse struct {
+	ClientID     string `json:"client_id"`
+	ClientSecret string `json:"client_secret"`
+}
+
+// CustomerInfo provides general information about the account. Required for finding the correct internal username.
+type CustomerInfo struct {
+	Username          string      `json:"username"`
+	Email             string      `json:"email"`
+	Name              string      `json:"name"`
+	CountryCode       string      `json:"country_code"`
+	LanguageCode      string      `json:"language_code"`
+	CustomerGroupCode string      `json:"customer_group_code"`
+	BrandCode         string      `json:"brand_code"`
+	AccountType       string      `json:"account_type"`
+	SubscriptionType  string      `json:"subscription_type"`
+	Usage             int64       `json:"usage"`
+	Qouta             int64       `json:"quota"`
+	BusinessUsage     int64       `json:"business_usage"`
+	BusinessQouta     int64       `json:"business_quota"`
+	WriteLocked       bool        `json:"write_locked"`
+	ReadLocked        bool        `json:"read_locked"`
+	LockedCause       interface{} `json:"locked_cause"`
+	WebHash           string      `json:"web_hash"`
+	AndroidHash       string      `json:"android_hash"`
+	IOSHash           string      `json:"ios_hash"`
+}
+
+// XML structures returned by the old API
+
// Flag is a hacky type for checking if an attribute is present
type Flag bool
@@ -64,15 +140,6 @@ func (f *Flag) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
	return attr, errors.New("unimplemented")
}

-// TokenJSON is the struct representing the HTTP response from OAuth2
-// providers returning a token in JSON form.
-type TokenJSON struct {
-	AccessToken  string `json:"access_token"`
-	TokenType    string `json:"token_type"`
-	RefreshToken string `json:"refresh_token"`
-	ExpiresIn    int32  `json:"expires_in"` // at least PayPal returns string, while most return number
-}

/*
GET http://www.jottacloud.com/JFS/<account>
@@ -102,8 +169,8 @@ GET http://www.jottacloud.com/JFS/<account>
</user>
*/

-// AccountInfo represents a Jottacloud account
-type AccountInfo struct {
+// DriveInfo represents a Jottacloud account
+type DriveInfo struct {
	Username    string `xml:"username"`
	AccountType string `xml:"account-type"`
	Locked      bool   `xml:"locked"`
@@ -280,43 +347,3 @@ func (e *Error) Error() string {
	}
	return out
}

-// AllocateFileRequest to prepare an upload to Jottacloud
-type AllocateFileRequest struct {
-	Bytes    int64  `json:"bytes"`
-	Created  string `json:"created"`
-	Md5      string `json:"md5"`
-	Modified string `json:"modified"`
-	Path     string `json:"path"`
-}
-
-// AllocateFileResponse for upload requests
-type AllocateFileResponse struct {
-	Name      string `json:"name"`
-	Path      string `json:"path"`
-	State     string `json:"state"`
-	UploadID  string `json:"upload_id"`
-	UploadURL string `json:"upload_url"`
-	Bytes     int64  `json:"bytes"`
-	ResumePos int64  `json:"resume_pos"`
-}
-
-// UploadResponse after an upload
-type UploadResponse struct {
-	Name      string      `json:"name"`
-	Path      string      `json:"path"`
-	Kind      string      `json:"kind"`
-	ContentID string      `json:"content_id"`
-	Bytes     int64       `json:"bytes"`
-	Md5       string      `json:"md5"`
-	Created   int64       `json:"created"`
-	Modified  int64       `json:"modified"`
-	Deleted   interface{} `json:"deleted"`
-	Mime      string      `json:"mime"`
-}
-
-// DeviceRegistrationResponse is the response to registering a device
-type DeviceRegistrationResponse struct {
-	ClientID     string `json:"client_id"`
-	ClientSecret string `json:"client_secret"`
-}
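These structs map the JSON of the newer api.jottacloud.com endpoints field-for-field through struct tags; note that only the tag has to match the wire format, so the misspelled Qouta/BusinessQouta field names still decode the quota/business_quota keys correctly. A minimal decode with a made-up response body:

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed-down copy of the CustomerInfo struct for illustration.
type CustomerInfo struct {
	Username string `json:"username"`
	Usage    int64  `json:"usage"`
	Qouta    int64  `json:"quota"` // field name typo, the tag is what matters
}

func main() {
	body := []byte(`{"username":"alice","usage":1024,"quota":5368709120}`)
	var info CustomerInfo
	if err := json.Unmarshal(body, &info); err != nil {
		panic(err)
	}
	fmt.Printf("%s has used %d of %d bytes\n", info.Username, info.Usage, info.Qouta)
}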


@@ -2,6 +2,7 @@ package jottacloud
import (
	"bytes"
+	"context"
	"crypto/md5"
	"encoding/hex"
	"fmt"
@@ -17,21 +18,21 @@ import (
	"strings"
	"time"

-	"github.com/ncw/rclone/backend/jottacloud/api"
-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/accounting"
-	"github.com/ncw/rclone/fs/config"
-	"github.com/ncw/rclone/fs/config/configmap"
-	"github.com/ncw/rclone/fs/config/configstruct"
-	"github.com/ncw/rclone/fs/config/obscure"
-	"github.com/ncw/rclone/fs/fserrors"
-	"github.com/ncw/rclone/fs/fshttp"
-	"github.com/ncw/rclone/fs/hash"
-	"github.com/ncw/rclone/fs/walk"
-	"github.com/ncw/rclone/lib/oauthutil"
-	"github.com/ncw/rclone/lib/pacer"
-	"github.com/ncw/rclone/lib/rest"
	"github.com/pkg/errors"
+	"github.com/rclone/rclone/backend/jottacloud/api"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/accounting"
+	"github.com/rclone/rclone/fs/config"
+	"github.com/rclone/rclone/fs/config/configmap"
+	"github.com/rclone/rclone/fs/config/configstruct"
+	"github.com/rclone/rclone/fs/config/obscure"
+	"github.com/rclone/rclone/fs/fserrors"
+	"github.com/rclone/rclone/fs/fshttp"
+	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/walk"
+	"github.com/rclone/rclone/lib/oauthutil"
+	"github.com/rclone/rclone/lib/pacer"
+	"github.com/rclone/rclone/lib/rest"
	"golang.org/x/oauth2"
)
@@ -43,14 +44,13 @@ const (
	defaultDevice               = "Jotta"
	defaultMountpoint           = "Archive"
	rootURL                     = "https://www.jottacloud.com/jfs/"
-	apiURL                      = "https://api.jottacloud.com/files/v1/"
+	apiURL                      = "https://api.jottacloud.com/"
	baseURL                     = "https://www.jottacloud.com/"
	tokenURL                    = "https://api.jottacloud.com/auth/v1/token"
	registerURL                 = "https://api.jottacloud.com/auth/v1/register"
	cachePrefix                 = "rclone-jcmd5-"
	rcloneClientID              = "nibfk8biu12ju7hpqomr8b1e40"
	rcloneEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
-	configUsername              = "user"
	configClientID              = "client_id"
	configClientSecret          = "client_secret"
	configDevice                = "device"
@@ -77,6 +77,7 @@ func init() {
		Description: "JottaCloud",
		NewFs:       NewFs,
		Config: func(name string, m configmap.Mapper) {
+			ctx := context.TODO()
			tokenString, ok := m.Get("token")
			if ok && tokenString != "" {
				fmt.Printf("Already have a token - refresh?\n")
@@ -86,34 +87,9 @@ func init() {
			}
			srv := rest.NewClient(fshttp.NewClient(fs.Config))

-			// ask if we should create a device specifc token: https://github.com/ncw/rclone/issues/2995
			fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
			if config.Confirm() {
-				// random generator to generate random device names
-				seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
-				randonDeviceNamePartLength := 21
-				randomDeviceNamePart := make([]byte, randonDeviceNamePartLength)
-				for i := range randomDeviceNamePart {
-					randomDeviceNamePart[i] = charset[seededRand.Intn(len(charset))]
-				}
-				randomDeviceName := "rclone-" + string(randomDeviceNamePart)
-				fs.Debugf(nil, "Trying to register device '%s'", randomDeviceName)
-				values := url.Values{}
-				values.Set("device_id", randomDeviceName)
-				// all information comes from https://github.com/ttyridal/aiojotta/wiki/Jotta-protocol-3.-Authentication#token-authentication
-				opts := rest.Opts{
-					Method:       "POST",
-					RootURL:      registerURL,
-					ContentType:  "application/x-www-form-urlencoded",
-					ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
-					Parameters:   values,
-				}
-				var deviceRegistration api.DeviceRegistrationResponse
-				_, err := srv.CallJSON(&opts, nil, &deviceRegistration)
+				deviceRegistration, err := registerDevice(ctx, srv)
				if err != nil {
					log.Fatalf("Failed to register device: %v", err)
				}
@@ -134,53 +110,14 @@ func init() {
			oauthConfig.ClientID = clientID
			oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)

-			username, ok := m.Get(configUsername)
-			if !ok {
-				log.Fatalf("No username defined")
-			}
+			fmt.Printf("Username> ")
+			username := config.ReadLine()
			password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.")

-			// prepare out token request with username and password
-			values := url.Values{}
-			values.Set("grant_type", "PASSWORD")
-			values.Set("password", password)
-			values.Set("username", username)
-			values.Set("client_id", oauthConfig.ClientID)
-			values.Set("client_secret", oauthConfig.ClientSecret)
-			opts := rest.Opts{
-				Method:      "POST",
-				RootURL:     oauthConfig.Endpoint.AuthURL,
-				ContentType: "application/x-www-form-urlencoded",
-				Parameters:  values,
-			}
-			var jsonToken api.TokenJSON
-			resp, err := srv.CallJSON(&opts, nil, &jsonToken)
+			token, err := doAuth(ctx, srv, username, password)
			if err != nil {
-				// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
-				if resp != nil {
-					if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
-						fmt.Printf("This account uses 2 factor authentication you will receive a verification code via SMS.\n")
-						fmt.Printf("Enter verification code> ")
-						authCode := config.ReadLine()
-						authCode = strings.Replace(authCode, "-", "", -1) // the sms received contains a pair of 3 digit numbers seperated by '-' but wants a single 6 digit number
-						opts.ExtraHeaders = make(map[string]string)
-						opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
-						resp, err = srv.CallJSON(&opts, nil, &jsonToken)
-					}
-				}
-				if err != nil {
-					log.Fatalf("Failed to get resource token: %v", err)
-				}
+				log.Fatalf("Failed to get oauth token: %s", err)
			}
-			var token oauth2.Token
-			token.AccessToken = jsonToken.AccessToken
-			token.RefreshToken = jsonToken.RefreshToken
-			token.TokenType = jsonToken.TokenType
-			token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)
-			// finally save them in the config
			err = oauthutil.PutToken(name, m, &token, true)
			if err != nil {
				log.Fatalf("Error while saving token: %s", err)
@@ -194,39 +131,17 @@ func init() {
			}
			srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
+			apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)

-			acc, err := getAccountInfo(srv, username)
+			device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
			if err != nil {
-				log.Fatalf("Error getting devices: %s", err)
+				log.Fatalf("Failed to setup mountpoint: %s", err)
			}
-			fmt.Printf("Please select the device to use. Normally this will be Jotta\n")
-			var deviceNames []string
-			for i := range acc.Devices {
-				deviceNames = append(deviceNames, acc.Devices[i].Name)
-			}
-			result := config.Choose("Devices", deviceNames, nil, false)
-			m.Set(configDevice, result)
-			dev, err := getDeviceInfo(srv, path.Join(username, result))
-			if err != nil {
-				log.Fatalf("Error getting Mountpoint: %s", err)
-			}
-			if len(dev.MountPoints) == 0 {
-				log.Fatalf("No Mountpoints found for this device.")
-			}
-			fmt.Printf("Please select the mountpoint to user. Normally this will be Archive\n")
-			var mountpointNames []string
-			for i := range dev.MountPoints {
-				mountpointNames = append(mountpointNames, dev.MountPoints[i].Name)
-			}
-			result = config.Choose("Mountpoints", mountpointNames, nil, false)
-			m.Set(configMountpoint, result)
+			m.Set(configDevice, device)
+			m.Set(configMountpoint, mountpoint)
		}
	},
	Options: []fs.Option{{
-		Name: configUsername,
-		Help: "User Name:",
-	}, {
		Name:    "md5_memory_limit",
		Help:    "Files bigger than this will be cached on disk to calculate the MD5 if required.",
		Default: fs.SizeSuffix(10 * 1024 * 1024),
@@ -252,7 +167,6 @@ func init() {
// Options defines the configuration for this backend
type Options struct {
-	User               string        `config:"user"`
	Device             string        `config:"device"`
	Mountpoint         string        `config:"mountpoint"`
	MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
@@ -332,8 +246,169 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

+// registerDevice register a new device for use with the jottacloud API
+func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegistrationResponse, err error) {
+	// random generator to generate random device names
+	seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
+	randonDeviceNamePartLength := 21
+	randomDeviceNamePart := make([]byte, randonDeviceNamePartLength)
+	for i := range randomDeviceNamePart {
+		randomDeviceNamePart[i] = charset[seededRand.Intn(len(charset))]
+	}
+	randomDeviceName := "rclone-" + string(randomDeviceNamePart)
+	fs.Debugf(nil, "Trying to register device '%s'", randomDeviceName)
+
+	values := url.Values{}
+	values.Set("device_id", randomDeviceName)
+
+	opts := rest.Opts{
+		Method:       "POST",
+		RootURL:      registerURL,
+		ContentType:  "application/x-www-form-urlencoded",
+		ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
+		Parameters:   values,
+	}
+
+	var deviceRegistration *api.DeviceRegistrationResponse
+	_, err = srv.CallJSON(ctx, &opts, nil, &deviceRegistration)
+	return deviceRegistration, err
+}
+
+// doAuth runs the actual token request
+func doAuth(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
+	// prepare out token request with username and password
+	values := url.Values{}
+	values.Set("grant_type", "PASSWORD")
+	values.Set("password", password)
+	values.Set("username", username)
+	values.Set("client_id", oauthConfig.ClientID)
+	values.Set("client_secret", oauthConfig.ClientSecret)
+	opts := rest.Opts{
+		Method:      "POST",
+		RootURL:     oauthConfig.Endpoint.AuthURL,
+		ContentType: "application/x-www-form-urlencoded",
+		Parameters:  values,
+	}
+
+	// do the first request
+	var jsonToken api.TokenJSON
+	resp, err := srv.CallJSON(ctx, &opts, nil, &jsonToken)
+	if err != nil {
+		// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
+		if resp != nil {
+			if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
+				fmt.Printf("This account uses 2 factor authentication you will receive a verification code via SMS.\n")
+				fmt.Printf("Enter verification code> ")
+				authCode := config.ReadLine()
+
+				authCode = strings.Replace(authCode, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
+				opts.ExtraHeaders = make(map[string]string)
+				opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
+				resp, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
+			}
+		}
+	}
+
+	token.AccessToken = jsonToken.AccessToken
+	token.RefreshToken = jsonToken.RefreshToken
+	token.TokenType = jsonToken.TokenType
+	token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)
+	return token, err
+}
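doAuth assembles the oauth2.Token by hand, turning the relative expires_in seconds from TokenJSON into an absolute Expiry deadline. Just that conversion, isolated (the field values are placeholders):

package main

import (
	"fmt"
	"time"

	"golang.org/x/oauth2"
)

func main() {
	// Value as it would arrive in the token JSON.
	expiresIn := int32(3600)

	token := oauth2.Token{
		AccessToken:  "access",
		RefreshToken: "refresh",
		TokenType:    "Bearer",
		// relative lifetime -> absolute deadline
		Expiry: time.Now().Add(time.Duration(expiresIn) * time.Second),
	}
	fmt.Println("token valid:", token.Valid(), "until", token.Expiry.Format(time.RFC3339))
}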
+// setupMountpoint sets up a custom device and mountpoint if desired by the user
+func setupMountpoint(ctx context.Context, srv *rest.Client, apiSrv *rest.Client) (device, mountpoint string, err error) {
+	cust, err := getCustomerInfo(ctx, apiSrv)
+	if err != nil {
+		return "", "", err
+	}
+	acc, err := getDriveInfo(ctx, srv, cust.Username)
+	if err != nil {
+		return "", "", err
+	}
+	var deviceNames []string
+	for i := range acc.Devices {
+		deviceNames = append(deviceNames, acc.Devices[i].Name)
+	}
+	fmt.Printf("Please select the device to use. Normally this will be Jotta\n")
+	device = config.Choose("Devices", deviceNames, nil, false)
+
+	dev, err := getDeviceInfo(ctx, srv, path.Join(cust.Username, device))
+	if err != nil {
+		return "", "", err
+	}
+	if len(dev.MountPoints) == 0 {
+		return "", "", errors.New("no mountpoints for selected device")
+	}
+	var mountpointNames []string
+	for i := range dev.MountPoints {
+		mountpointNames = append(mountpointNames, dev.MountPoints[i].Name)
+	}
+	fmt.Printf("Please select the mountpoint to user. Normally this will be Archive\n")
+	mountpoint = config.Choose("Mountpoints", mountpointNames, nil, false)
+
+	return device, mountpoint, err
+}
+
+// getCustomerInfo queries general information about the account
+func getCustomerInfo(ctx context.Context, srv *rest.Client) (info *api.CustomerInfo, err error) {
+	opts := rest.Opts{
+		Method: "GET",
+		Path:   "account/v1/customer",
+	}
+
+	_, err = srv.CallJSON(ctx, &opts, nil, &info)
+	if err != nil {
+		return nil, errors.Wrap(err, "couldn't get customer info")
+	}
+
+	return info, nil
+}
+
+// getDriveInfo queries general information about the account and the available devices and mountpoints.
+func getDriveInfo(ctx context.Context, srv *rest.Client, username string) (info *api.DriveInfo, err error) {
+	opts := rest.Opts{
+		Method: "GET",
+		Path:   username,
+	}
+
+	_, err = srv.CallXML(ctx, &opts, nil, &info)
+	if err != nil {
+		return nil, errors.Wrap(err, "couldn't get drive info")
+	}
+
+	return info, nil
+}
+
+// getDeviceInfo queries Information about a jottacloud device
+func getDeviceInfo(ctx context.Context, srv *rest.Client, path string) (info *api.JottaDevice, err error) {
+	opts := rest.Opts{
+		Method: "GET",
+		Path:   urlPathEscape(path),
+	}
+
+	_, err = srv.CallXML(ctx, &opts, nil, &info)
+	if err != nil {
+		return nil, errors.Wrap(err, "couldn't get device info")
+	}
+
+	return info, nil
+}
+
+// setEndpointURL generates the API endpoint URL
+func (f *Fs) setEndpointURL() {
+	if f.opt.Device == "" {
+		f.opt.Device = defaultDevice
+	}
+	if f.opt.Mountpoint == "" {
+		f.opt.Mountpoint = defaultMountpoint
+	}
+	f.endpointURL = urlPathEscape(path.Join(f.user, f.opt.Device, f.opt.Mountpoint))
+}
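With getCustomerInfo supplying the username up front, setEndpointURL no longer needs its own round trip: it simply fills in the Jotta/Archive defaults and escapes the user/device/mountpoint join. The path logic on its own; urlPathEscape here is a stand-in for rclone's helper of the same name, not a copy of it:

package main

import (
	"fmt"
	"net/url"
	"path"
)

// urlPathEscape escapes a path for use in a URL (stand-in implementation).
func urlPathEscape(in string) string {
	var u url.URL
	u.Path = in
	return u.String()
}

func main() {
	user, device, mountpoint := "alice", "", ""
	if device == "" {
		device = "Jotta" // defaultDevice
	}
	if mountpoint == "" {
		mountpoint = "Archive" // defaultMountpoint
	}
	fmt.Println(urlPathEscape(path.Join(user, device, mountpoint)))
	// -> alice/Jotta/Archive
}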
// readMetaDataForPath reads the metadata from the path
-func (f *Fs) readMetaDataForPath(path string) (info *api.JottaFile, err error) {
+func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.JottaFile, err error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   f.filePath(path),
@@ -341,7 +416,7 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.JottaFile, err error) {
	var result api.JottaFile
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallXML(&opts, nil, &result)
+		resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
		return shouldRetry(resp, err)
	})
@@ -361,54 +436,6 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.JottaFile, err error) {
	return &result, nil
}

-// getAccountInfo queries general information about the account.
-// Takes rest.Client and username as parameter to be easily usable
-// during config
-func getAccountInfo(srv *rest.Client, username string) (info *api.AccountInfo, err error) {
-	opts := rest.Opts{
-		Method: "GET",
-		Path:   urlPathEscape(username),
-	}
-
-	_, err = srv.CallXML(&opts, nil, &info)
-	if err != nil {
-		return nil, err
-	}
-
-	return info, nil
-}
-
-// getDeviceInfo queries Information about a jottacloud device
-func getDeviceInfo(srv *rest.Client, path string) (info *api.JottaDevice, err error) {
-	opts := rest.Opts{
-		Method: "GET",
-		Path:   urlPathEscape(path),
-	}
-
-	_, err = srv.CallXML(&opts, nil, &info)
-	if err != nil {
-		return nil, err
-	}
-
-	return info, nil
-}
-
-// setEndpointUrl reads the account id and generates the API endpoint URL
-func (f *Fs) setEndpointURL() (err error) {
-	info, err := getAccountInfo(f.srv, f.user)
-	if err != nil {
-		return errors.Wrap(err, "failed to get endpoint url")
-	}
-	if f.opt.Device == "" {
-		f.opt.Device = defaultDevice
-	}
-	if f.opt.Mountpoint == "" {
-		f.opt.Mountpoint = defaultMountpoint
-	}
-	f.endpointURL = urlPathEscape(path.Join(info.Username, f.opt.Device, f.opt.Mountpoint))
-	return nil
-}

// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
	// Decode error response
@@ -441,11 +468,6 @@ func (f *Fs) filePath(file string) string {
	return urlPathEscape(f.filePathRaw(file))
}

-// filePath returns a escaped file path (f.root, remote)
-func (o *Object) filePath() string {
-	return o.fs.filePath(o.remote)
-}

// Jottacloud requires the grant_type 'refresh_token' string
// to be uppercase and throws a 400 Bad Request if we use the
// lower case used by the oauth2 module
@@ -471,6 +493,7 @@ func grantTypeFilter(req *http.Request) {
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	ctx := context.TODO()
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
@@ -510,7 +533,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	f := &Fs{
		name:   name,
		root:   root,
-		user:   opt.User,
		opt:    *opt,
		srv:    rest.NewClient(oAuthClient).SetRoot(rootURL),
		apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
@@ -526,14 +548,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Renew the token in the background
	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
-		_, err := f.readMetaDataForPath("")
+		_, err := f.readMetaDataForPath(ctx, "")
		return err
	})

-	err = f.setEndpointURL()
+	cust, err := getCustomerInfo(ctx, f.apiSrv)
	if err != nil {
-		return nil, errors.Wrap(err, "couldn't get account info")
+		return nil, err
	}
+	f.user = cust.Username
+	f.setEndpointURL()

	if root != "" && !rootIsDir {
		// Check to see if the root actually an existing file
@@ -542,7 +566,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
		if f.root == "." {
			f.root = ""
		}
-		_, err := f.NewObject(remote)
+		_, err := f.NewObject(context.TODO(), remote)
		if err != nil {
			if errors.Cause(err) == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
				// File doesn't exist so return old f
@@ -560,7 +584,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) newObjectWithInfo(remote string, info *api.JottaFile) (fs.Object, error) {
+func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.JottaFile) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
@@ -570,7 +594,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.JottaFile) (fs.Object, e
		// Set info
		err = o.setMetaData(info)
	} else {
-		err = o.readMetaData(false) // reads info and meta, returning an error
+		err = o.readMetaData(ctx, false) // reads info and meta, returning an error
	}
	if err != nil {
		return nil, err
@@ -580,12 +604,12 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.JottaFile) (fs.Object, e
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
-	return f.newObjectWithInfo(remote, nil)
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
+	return f.newObjectWithInfo(ctx, remote, nil)
}

// CreateDir makes a directory
-func (f *Fs) CreateDir(path string) (jf *api.JottaFolder, err error) {
+func (f *Fs) CreateDir(ctx context.Context, path string) (jf *api.JottaFolder, err error) {
	// fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
	var resp *http.Response
	opts := rest.Opts{
@@ -597,7 +621,7 @@ func (f *Fs) CreateDir(path string) (jf *api.JottaFolder, err error) {
	opts.Parameters.Set("mkDir", "true")

	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallXML(&opts, nil, &jf)
+		resp, err = f.srv.CallXML(ctx, &opts, nil, &jf)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -617,8 +641,7 @@ func (f *Fs) CreateDir(path string) (jf *api.JottaFolder, err error) {
//
// This should return ErrDirNotFound if the directory isn't
// found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
-	//fmt.Printf("List: %s\n", f.filePath(dir))
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   f.filePath(dir),
@@ -627,7 +650,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
	var resp *http.Response
	var result api.JottaFolder
	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallXML(&opts, nil, &result)
+		resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
		return shouldRetry(resp, err)
	})
@@ -661,13 +684,12 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
			continue
		}
		remote := path.Join(dir, restoreReservedChars(item.Name))
-		o, err := f.newObjectWithInfo(remote, item)
+		o, err := f.newObjectWithInfo(ctx, remote, item)
		if err != nil {
			continue
		}
		entries = append(entries, o)
	}
-	//fmt.Printf("Entries: %+v\n", entries)
	return entries, nil
}
@@ -676,7 +698,7 @@ type listFileDirFn func(fs.DirEntry) error
// List the objects and directories into entries, from a
// special kind of JottaFolder representing a FileDirLis
-func (f *Fs) listFileDir(remoteStartPath string, startFolder *api.JottaFolder, fn listFileDirFn) error {
+func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolder *api.JottaFolder, fn listFileDirFn) error {
	pathPrefix := "/" + f.filePathRaw("") // Non-escaped prefix of API paths to be cut off, to be left with the remote path including the remoteStartPath
	pathPrefixLength := len(pathPrefix)
	startPath := path.Join(pathPrefix, remoteStartPath) // Non-escaped API path up to and including remoteStartPath, to decide if it should be created as a new dir object
@@ -705,7 +727,7 @@ func (f *Fs) listFileDir(remoteStartPath string, startFolder *api.JottaFolder, f
			continue
		}
		remoteFile := path.Join(remoteDir, restoreReservedChars(file.Name))
-		o, err := f.newObjectWithInfo(remoteFile, file)
+		o, err := f.newObjectWithInfo(ctx, remoteFile, file)
		if err != nil {
			return err
		}
@@ -723,18 +745,7 @@ func (f *Fs) listFileDir(remoteStartPath string, startFolder *api.JottaFolder, f
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
-//
-// This should return ErrDirNotFound if the directory isn't
-// found.
-//
-// It should call callback for each tranche of entries read.
-// These need not be returned in any particular order. If
-// callback returns an error then the listing will stop
-// immediately.
-//
-// Don't implement this unless you have a more efficient way
-// of listing recursively that doing a directory traversal.
-func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
+func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   f.filePath(dir),
@@ -745,7 +756,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
	var resp *http.Response
	var result api.JottaFolder // Could be JottaFileDirList, but JottaFolder is close enough
	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallXML(&opts, nil, &result)
+		resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -758,7 +769,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
		return errors.Wrap(err, "couldn't list files")
	}
	list := walk.NewListRHelper(callback)
-	err = f.listFileDir(dir, &result, func(entry fs.DirEntry) error {
+	err = f.listFileDir(ctx, dir, &result, func(entry fs.DirEntry) error {
		return list.Add(entry)
	})
	if err != nil {
@@ -787,17 +798,17 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	if f.opt.Device != "Jotta" {
		return nil, errors.New("upload not supported for devices other than Jotta")
	}
-	o := f.createObject(src.Remote(), src.ModTime(), src.Size())
-	return o, o.Update(in, src, options...)
+	o := f.createObject(src.Remote(), src.ModTime(ctx), src.Size())
+	return o, o.Update(ctx, in, src, options...)
}

// mkParentDir makes the parent of the native path dirPath if
// necessary and any directories above that
-func (f *Fs) mkParentDir(dirPath string) error {
+func (f *Fs) mkParentDir(ctx context.Context, dirPath string) error {
	// defer log.Trace(dirPath, "")("")
	// chop off trailing / if it exists
	if strings.HasSuffix(dirPath, "/") {
@@ -807,25 +818,25 @@ func (f *Fs) mkParentDir(dirPath string) error {
	if parent == "." {
		parent = ""
	}
-	return f.Mkdir(parent)
+	return f.Mkdir(ctx, parent)
}

// Mkdir creates the container if it doesn't exist
-func (f *Fs) Mkdir(dir string) error {
-	_, err := f.CreateDir(dir)
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
+	_, err := f.CreateDir(ctx, dir)
	return err
}
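mkParentDir trims any trailing slash, derives the parent directory, and passes it to Mkdir, mapping the "." result at the top of the tree to "", rclone's name for the root. The path.Dir call itself sits in the elided context lines, so treat its use below as an assumption; the parent computation in isolation:

package main

import (
	"fmt"
	"path"
	"strings"
)

// parentOf mirrors the trailing-slash and root handling seen in mkParentDir.
func parentOf(dirPath string) string {
	dirPath = strings.TrimSuffix(dirPath, "/")
	parent := path.Dir(dirPath) // assumed from the surrounding, elided code
	if parent == "." {
		parent = "" // "" is the root for rclone remotes
	}
	return parent
}

func main() {
	for _, p := range []string{"a/b/c/", "a/b/c", "top", ""} {
		fmt.Printf("%q -> %q\n", p, parentOf(p))
	}
}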
// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
-func (f *Fs) purgeCheck(dir string, check bool) (err error) {
+func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error) {
	root := path.Join(f.root, dir)
	if root == "" {
		return errors.New("can't purge root directory")
	}

	// check that the directory exists
-	entries, err := f.List(dir)
+	entries, err := f.List(ctx, dir)
	if err != nil {
		return err
	}
@@ -851,22 +862,21 @@ func (f *Fs) purgeCheck(dir string, check bool) (err error) {
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.Call(&opts)
+		resp, err = f.srv.Call(ctx, &opts)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return errors.Wrap(err, "couldn't purge directory")
	}
-	// TODO: Parse response?
	return nil
}

// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
-func (f *Fs) Rmdir(dir string) error {
-	return f.purgeCheck(dir, true)
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+	return f.purgeCheck(ctx, dir, true)
}
// Precision return the precision of this Fs
@@ -875,16 +885,12 @@ func (f *Fs) Precision() time.Duration {
}

// Purge deletes all the files and the container
-//
-// Optional interface: Only implement this if you have a way of
-// deleting all the files quicker than just running Remove() on the
-// result of List()
-func (f *Fs) Purge() error {
-	return f.purgeCheck("", false)
+func (f *Fs) Purge(ctx context.Context) error {
+	return f.purgeCheck(ctx, "", false)
}

// copyOrMoves copies or moves directories or files depending on the method parameter
-func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err error) {
+func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *api.JottaFile, err error) {
	opts := rest.Opts{
		Method: "POST",
		Path:   src,
@@ -895,7 +901,7 @@ func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err erro
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallXML(&opts, nil, &info)
+		resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -913,24 +919,24 @@ func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err erro
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantMove
	}

-	err := f.mkParentDir(remote)
+	err := f.mkParentDir(ctx, remote)
	if err != nil {
		return nil, err
	}
-	info, err := f.copyOrMove("cp", srcObj.filePath(), remote)
+	info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)

	if err != nil {
		return nil, errors.Wrap(err, "couldn't copy file")
	}

-	return f.newObjectWithInfo(remote, info)
+	return f.newObjectWithInfo(ctx, remote, info)
	//return f.newObjectWithInfo(remote, &result)
}
@@ -943,24 +949,24 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

-	err := f.mkParentDir(remote)
+	err := f.mkParentDir(ctx, remote)
	if err != nil {
		return nil, err
	}
-	info, err := f.copyOrMove("mv", srcObj.filePath(), remote)
+	info, err := f.copyOrMove(ctx, "mv", srcObj.filePath(), remote)

	if err != nil {
		return nil, errors.Wrap(err, "couldn't move file")
	}

-	return f.newObjectWithInfo(remote, info)
+	return f.newObjectWithInfo(ctx, remote, info)
	//return f.newObjectWithInfo(remote, result)
}
@@ -972,7 +978,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
// If it isn't possible then return fs.ErrorCantDirMove // If it isn't possible then return fs.ErrorCantDirMove
// //
// If destination exists then return fs.ErrorDirExists // If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs) srcFs, ok := src.(*Fs)
if !ok { if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type") fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -989,7 +995,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
//fmt.Printf("Move src: %s (FullPath %s), dst: %s (FullPath: %s)\n", srcRemote, srcPath, dstRemote, dstPath) //fmt.Printf("Move src: %s (FullPath %s), dst: %s (FullPath: %s)\n", srcRemote, srcPath, dstRemote, dstPath)
var err error var err error
_, err = f.List(dstRemote) _, err = f.List(ctx, dstRemote)
if err == fs.ErrorDirNotFound { if err == fs.ErrorDirNotFound {
// OK // OK
} else if err != nil { } else if err != nil {
@@ -998,7 +1004,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
return fs.ErrorDirExists return fs.ErrorDirExists
} }
_, err = f.copyOrMove("mvDir", path.Join(f.endpointURL, replaceReservedChars(srcPath))+"/", dstRemote) _, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, replaceReservedChars(srcPath))+"/", dstRemote)
if err != nil { if err != nil {
return errors.Wrap(err, "couldn't move directory") return errors.Wrap(err, "couldn't move directory")
@@ -1007,7 +1013,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
} }
// PublicLink generates a public link to the remote path (usually readable by anyone) // PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(remote string) (link string, err error) { func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
opts := rest.Opts{ opts := rest.Opts{
Method: "GET", Method: "GET",
Path: f.filePath(remote), Path: f.filePath(remote),
@@ -1023,7 +1029,7 @@ func (f *Fs) PublicLink(remote string) (link string, err error) {
var resp *http.Response var resp *http.Response
var result api.JottaFile var result api.JottaFile
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(&opts, nil, &result) resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
return shouldRetry(resp, err) return shouldRetry(resp, err)
}) })
@@ -1053,8 +1059,8 @@ func (f *Fs) PublicLink(remote string) (link string, err error) {
} }
// About gets quota information // About gets quota information
func (f *Fs) About() (*fs.Usage, error) { func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
info, err := getAccountInfo(f.srv, f.user) info, err := getDriveInfo(ctx, f.srv, f.user)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -1094,8 +1100,13 @@ func (o *Object) Remote() string {
return o.remote return o.remote
} }
// filePath returns a escaped file path (f.root, remote)
func (o *Object) filePath() string {
return o.fs.filePath(o.remote)
}
// Hash returns the MD5 of an object returning a lowercase hex string // Hash returns the MD5 of an object returning a lowercase hex string
func (o *Object) Hash(t hash.Type) (string, error) { func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 { if t != hash.MD5 {
return "", hash.ErrUnsupported return "", hash.ErrUnsupported
} }
@@ -1104,7 +1115,8 @@ func (o *Object) Hash(t hash.Type) (string, error) {
// Size returns the size of an object in bytes // Size returns the size of an object in bytes
func (o *Object) Size() int64 { func (o *Object) Size() int64 {
err := o.readMetaData(false) ctx := context.TODO()
err := o.readMetaData(ctx, false)
if err != nil { if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err) fs.Logf(o, "Failed to read metadata: %v", err)
return 0 return 0
@@ -1113,7 +1125,7 @@ func (o *Object) Size() int64 {
} }
// MimeType of an Object if known, "" otherwise // MimeType of an Object if known, "" otherwise
func (o *Object) MimeType() string { func (o *Object) MimeType(ctx context.Context) string {
return o.mimeType return o.mimeType
} }
@@ -1127,11 +1139,12 @@ func (o *Object) setMetaData(info *api.JottaFile) (err error) {
return nil return nil
} }
func (o *Object) readMetaData(force bool) (err error) { // readMetaData reads and updates the metadata for an object
func (o *Object) readMetaData(ctx context.Context, force bool) (err error) {
if o.hasMetaData && !force { if o.hasMetaData && !force {
return nil return nil
} }
info, err := o.fs.readMetaDataForPath(o.remote) info, err := o.fs.readMetaDataForPath(ctx, o.remote)
if err != nil { if err != nil {
return err return err
} }
@@ -1145,8 +1158,8 @@ func (o *Object) readMetaData(force bool) (err error) {
// //
// It attempts to read the objects mtime and if that isn't present the // It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers // LastModified returned in the http headers
func (o *Object) ModTime() time.Time { func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData(false) err := o.readMetaData(ctx, false)
if err != nil { if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err) fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now() return time.Now()
@@ -1155,7 +1168,7 @@ func (o *Object) ModTime() time.Time {
} }
// SetModTime sets the modification time of the local fs object // SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) error { func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return fs.ErrorCantSetModTime return fs.ErrorCantSetModTime
} }
@@ -1165,7 +1178,7 @@ func (o *Object) Storable() bool {
} }
// Open an object for read // Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
fs.FixRangeOption(options, o.size) fs.FixRangeOption(options, o.size)
var resp *http.Response var resp *http.Response
opts := rest.Opts{ opts := rest.Opts{
@@ -1178,7 +1191,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
opts.Parameters.Set("mode", "bin") opts.Parameters.Set("mode", "bin")
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(&opts) resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(resp, err) return shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
@@ -1249,9 +1262,9 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
// If existing is set then it updates the object rather than creating a new one // If existing is set then it updates the object rather than creating a new one
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
size := src.Size() size := src.Size()
md5String, err := src.Hash(hash.MD5) md5String, err := src.Hash(ctx, hash.MD5)
if err != nil || md5String == "" { if err != nil || md5String == "" {
// unwrap the accounting from the input, we use wrap to put it // unwrap the accounting from the input, we use wrap to put it
// back on after the buffering // back on after the buffering
@@ -1271,10 +1284,10 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
var resp *http.Response var resp *http.Response
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
Path: "allocate", Path: "files/v1/allocate",
ExtraHeaders: make(map[string]string), ExtraHeaders: make(map[string]string),
} }
fileDate := api.Time(src.ModTime()).APIString() fileDate := api.Time(src.ModTime(ctx)).APIString()
// the allocate request // the allocate request
var request = api.AllocateFileRequest{ var request = api.AllocateFileRequest{
@@ -1288,7 +1301,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
// send it // send it
var response api.AllocateFileResponse var response api.AllocateFileResponse
err = o.fs.pacer.CallNoRetry(func() (bool, error) { err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.apiSrv.CallJSON(&opts, &request, &response) resp, err = o.fs.apiSrv.CallJSON(ctx, &opts, &request, &response)
return shouldRetry(resp, err) return shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
@@ -1319,7 +1332,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
} }
// send the remaining bytes // send the remaining bytes
resp, err = o.fs.apiSrv.CallJSON(&opts, nil, &result) resp, err = o.fs.apiSrv.CallJSON(ctx, &opts, nil, &result)
if err != nil { if err != nil {
return err return err
} }
@@ -1330,15 +1343,15 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
o.md5 = result.Md5 o.md5 = result.Md5
o.modTime = time.Unix(result.Modified/1000, 0) o.modTime = time.Unix(result.Modified/1000, 0)
} else { } else {
// If the file state is COMPLETE we don't need to upload it because the file was allready found but we still ned to update our metadata // If the file state is COMPLETE we don't need to upload it because the file was already found but we still ned to update our metadata
return o.readMetaData(true) return o.readMetaData(ctx, true)
} }
return nil return nil
} }
// Remove an object // Remove an object
func (o *Object) Remove() error { func (o *Object) Remove(ctx context.Context) error {
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
Path: o.filePath(), Path: o.filePath(),
@@ -1353,7 +1366,7 @@ func (o *Object) Remove() error {
} }
return o.fs.pacer.Call(func() (bool, error) { return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallXML(&opts, nil, nil) resp, err := o.fs.srv.CallXML(ctx, &opts, nil, nil)
return shouldRetry(resp, err) return shouldRetry(resp, err)
}) })
} }
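The jottacloud changes above are part of this branch's sweep that adds a context.Context parameter to every Fs and Object method (with context.TODO() as a stopgap where an interface such as Size() could not change). A minimal, self-contained sketch of what that buys callers; the remover interface and slowObject type here are invented purely for illustration, not rclone's actual plumbing:

package main

import (
	"context"
	"fmt"
	"time"
)

// remover mirrors the reworked interface shape: every potentially
// blocking method now takes a ctx so the caller can cancel it.
type remover interface {
	Remove(ctx context.Context) error
}

type slowObject struct{}

func (slowObject) Remove(ctx context.Context) error {
	select {
	case <-time.After(5 * time.Second): // stand-in for a remote HTTP call
		return nil
	case <-ctx.Done(): // caller cancelled or timed out
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	var o remover = slowObject{}
	fmt.Println(o.Remove(ctx)) // prints "context deadline exceeded" after ~1s
}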


@@ -6,7 +6,7 @@ import (
 	"io"
 	"testing"

-	"github.com/ncw/rclone/lib/readers"
+	"github.com/rclone/rclone/lib/readers"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )


@@ -4,8 +4,8 @@ package jottacloud_test
 import (
 	"testing"

-	"github.com/ncw/rclone/backend/jottacloud"
-	"github.com/ncw/rclone/fstest/fstests"
+	"github.com/rclone/rclone/backend/jottacloud"
+	"github.com/rclone/rclone/fstest/fstests"
 )

 // TestIntegration runs integration tests against the remote


@@ -1,6 +1,7 @@
 package koofr

 import (
+	"context"
 	"encoding/base64"
 	"errors"
 	"fmt"
@@ -10,11 +11,11 @@ import (
 	"strings"
 	"time"

-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/config/configmap"
-	"github.com/ncw/rclone/fs/config/configstruct"
-	"github.com/ncw/rclone/fs/config/obscure"
-	"github.com/ncw/rclone/fs/hash"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config/configmap"
+	"github.com/rclone/rclone/fs/config/configstruct"
+	"github.com/rclone/rclone/fs/config/obscure"
+	"github.com/rclone/rclone/fs/hash"

 	httpclient "github.com/koofr/go-httpclient"
 	koofrclient "github.com/koofr/go-koofrclient"
@@ -39,6 +40,12 @@ func init() {
 			Required: false,
 			Default:  "",
 			Advanced: true,
+		}, {
+			Name:     "setmtime",
+			Help:     "Does the backend support setting modification time. Set this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
+			Default:  true,
+			Required: true,
+			Advanced: true,
 		}, {
 			Name:     "user",
 			Help:     "Your Koofr user name",
@@ -59,6 +66,7 @@ type Options struct {
 	MountID  string `config:"mountid"`
 	User     string `config:"user"`
 	Password string `config:"password"`
+	SetMTime bool   `config:"setmtime"`
 }

 // A Fs is a representation of a remote Koofr Fs
@@ -105,7 +113,7 @@ func (o *Object) Remote() string {
 }

 // ModTime returns the modification time of the Object
-func (o *Object) ModTime() time.Time {
+func (o *Object) ModTime(ctx context.Context) time.Time {
 	return time.Unix(o.info.Modified/1000, (o.info.Modified%1000)*1000*1000)
 }
@@ -120,7 +128,7 @@ func (o *Object) Fs() fs.Info {
 }

 // Hash returns an MD5 hash of the Object
-func (o *Object) Hash(typ hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, typ hash.Type) (string, error) {
 	if typ == hash.MD5 {
 		return o.info.Hash, nil
 	}
@@ -138,14 +146,15 @@ func (o *Object) Storable() bool {
 }

 // SetModTime is not supported
-func (o *Object) SetModTime(mtime time.Time) error {
-	return nil
+func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
+	return fs.ErrorCantSetModTimeWithoutDelete
 }

 // Open opens the Object for reading
-func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
 	var sOff, eOff int64 = 0, -1
+	fs.FixRangeOption(options, o.Size())
 	for _, option := range options {
 		switch x := option.(type) {
 		case *fs.SeekOption:
@@ -162,13 +171,6 @@ func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
 	if sOff == 0 && eOff < 0 {
 		return o.fs.client.FilesGet(o.fs.mountID, o.fullPath())
 	}
-	if sOff < 0 {
-		sOff = o.Size() - eOff
-		eOff = o.Size()
-	}
-	if eOff > o.Size() {
-		eOff = o.Size()
-	}
 	span := &koofrclient.FileSpan{
 		Start: sOff,
 		End:   eOff,
@@ -177,11 +179,13 @@ func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
 }

 // Update updates the Object contents
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-	putopts := &koofrclient.PutFilter{
-		ForceOverwrite:    true,
-		NoRename:          true,
-		IgnoreNonExisting: true,
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+	mtime := src.ModTime(ctx).UnixNano() / 1000 / 1000
+	putopts := &koofrclient.PutOptions{
+		ForceOverwrite:             true,
+		NoRename:                   true,
+		OverwriteIgnoreNonExisting: true,
+		SetModified:                &mtime,
 	}
 	fullPath := o.fullPath()
 	dirPath := dir(fullPath)
@@ -190,7 +194,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	if err != nil {
 		return err
 	}
-	info, err := o.fs.client.FilesPutOptions(o.fs.mountID, dirPath, name, in, putopts)
+	info, err := o.fs.client.FilesPutWithOptions(o.fs.mountID, dirPath, name, in, putopts)
 	if err != nil {
 		return err
 	}
@@ -199,7 +203,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 }

 // Remove deletes the remote Object
-func (o *Object) Remove() error {
+func (o *Object) Remove(ctx context.Context) error {
 	return o.fs.client.FilesDelete(o.fs.mountID, o.fullPath())
 }
@@ -225,7 +229,10 @@ func (f *Fs) Features() *fs.Features {
 }

 // Precision denotes that setting modification times is not supported
 func (f *Fs) Precision() time.Duration {
-	return fs.ModTimeNotSupported
+	if !f.opt.SetMTime {
+		return fs.ModTimeNotSupported
+	}
+	return time.Millisecond
 }

 // Hashes returns a set of hashes are Provided by the Fs
@@ -297,7 +304,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
 }

 // List returns a list of items in a directory
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 	files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
 	if err != nil {
 		return nil, translateErrorsDir(err)
@@ -318,7 +325,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 }

 // NewObject creates a new remote Object for a given remote path
-func (f *Fs) NewObject(remote string) (obj fs.Object, err error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (obj fs.Object, err error) {
 	info, err := f.client.FilesInfo(f.mountID, f.fullPath(remote))
 	if err != nil {
 		return nil, translateErrorsObject(err)
@@ -334,11 +341,13 @@ func (f *Fs) NewObject(remote string) (obj fs.Object, err error) {
 }

 // Put updates a remote Object
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj fs.Object, err error) {
-	putopts := &koofrclient.PutFilter{
-		ForceOverwrite:    true,
-		NoRename:          true,
-		IgnoreNonExisting: true,
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj fs.Object, err error) {
+	mtime := src.ModTime(ctx).UnixNano() / 1000 / 1000
+	putopts := &koofrclient.PutOptions{
+		ForceOverwrite:             true,
+		NoRename:                   true,
+		OverwriteIgnoreNonExisting: true,
+		SetModified:                &mtime,
 	}
 	fullPath := f.fullPath(src.Remote())
 	dirPath := dir(fullPath)
@@ -347,7 +356,7 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj
 	if err != nil {
 		return nil, err
 	}
-	info, err := f.client.FilesPutOptions(f.mountID, dirPath, name, in, putopts)
+	info, err := f.client.FilesPutWithOptions(f.mountID, dirPath, name, in, putopts)
 	if err != nil {
 		return nil, translateErrorsObject(err)
 	}
@@ -359,8 +368,8 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj
 }

 // PutStream updates a remote Object with a stream of unknown size
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	return f.Put(in, src, options...)
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	return f.Put(ctx, in, src, options...)
 }

 // isBadRequest is a predicate which holds true iff the error returned was
@@ -436,13 +445,13 @@ func (f *Fs) mkdir(fullPath string) error {
 // Mkdir creates a directory at the given remote path. Creates ancestors if
 // necessary
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 	fullPath := f.fullPath(dir)
 	return f.mkdir(fullPath)
 }

 // Rmdir removes an (empty) directory at the given remote path
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
 	if err != nil {
 		return translateErrorsDir(err)
@@ -458,24 +467,25 @@ func (f *Fs) Rmdir(dir string) error {
 }

 // Copy copies a remote Object to the given path
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	dstFullPath := f.fullPath(remote)
 	dstDir := dir(dstFullPath)
 	err := f.mkdir(dstDir)
 	if err != nil {
 		return nil, fs.ErrorCantCopy
 	}
+	mtime := src.ModTime(ctx).UnixNano() / 1000 / 1000
 	err = f.client.FilesCopy((src.(*Object)).fs.mountID,
 		(src.(*Object)).fs.fullPath((src.(*Object)).remote),
-		f.mountID, dstFullPath)
+		f.mountID, dstFullPath, koofrclient.CopyOptions{SetModified: &mtime})
 	if err != nil {
 		return nil, fs.ErrorCantCopy
 	}
-	return f.NewObject(remote)
+	return f.NewObject(ctx, remote)
 }

 // Move moves a remote Object to the given path
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	srcObj := src.(*Object)
 	dstFullPath := f.fullPath(remote)
 	dstDir := dir(dstFullPath)
@@ -488,11 +498,11 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 	if err != nil {
 		return nil, fs.ErrorCantMove
 	}
-	return f.NewObject(remote)
+	return f.NewObject(ctx, remote)
 }

 // DirMove moves a remote directory to the given path
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 	srcFs := src.(*Fs)
 	srcFullPath := srcFs.fullPath(srcRemote)
 	dstFullPath := f.fullPath(dstRemote)
@@ -512,7 +522,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 }

 // About reports space usage (with a MB precision)
-func (f *Fs) About() (*fs.Usage, error) {
+func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	mount, err := f.client.MountsDetails(f.mountID)
 	if err != nil {
 		return nil, err
@@ -528,7 +538,7 @@ func (f *Fs) About() (*fs.Usage, error) {
 }

 // Purge purges the complete Fs
-func (f *Fs) Purge() error {
+func (f *Fs) Purge(ctx context.Context) error {
 	err := translateErrorsDir(f.client.FilesDelete(f.mountID, f.fullPath("")))
 	return err
 }
@@ -580,7 +590,7 @@ func createLink(c *koofrclient.KoofrClient, mountID string, path string) (*link,
 }

 // PublicLink creates a public link to the remote path
-func (f *Fs) PublicLink(remote string) (string, error) {
+func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
 	linkData, err := createLink(f.client, f.mountID, f.fullPath(remote))
 	if err != nil {
 		return "", translateErrorsDir(err)


@@ -3,7 +3,7 @@ package koofr_test
 import (
 	"testing"

-	"github.com/ncw/rclone/fstest/fstests"
+	"github.com/rclone/rclone/fstest/fstests"
 )

 // TestIntegration runs integration tests against the remote

backend/local/aaaa (new, empty file)


@@ -3,14 +3,15 @@
 package local

 import (
+	"context"
 	"syscall"

-	"github.com/ncw/rclone/fs"
 	"github.com/pkg/errors"
+	"github.com/rclone/rclone/fs"
 )

 // About gets quota information
-func (f *Fs) About() (*fs.Usage, error) {
+func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	var s syscall.Statfs_t
 	err := syscall.Statfs(f.root, &s)
 	if err != nil {
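About on unix derives quota from syscall.Statfs. A hedged stand-alone sketch of the same arithmetic, assuming the Linux Statfs_t field layout (Bsize, Blocks, Bfree, Bavail); the path "/" is just an example:

//+build linux

package main

import (
	"fmt"
	"syscall"
)

func main() {
	var s syscall.Statfs_t
	if err := syscall.Statfs("/", &s); err != nil {
		panic(err)
	}
	bs := int64(s.Bsize)                      // fundamental block size
	fmt.Println("total:", int64(s.Blocks)*bs) // size of the filesystem
	fmt.Println("free: ", int64(s.Bfree)*bs)  // free blocks (including root's reserve)
	fmt.Println("avail:", int64(s.Bavail)*bs) // free for unprivileged users
}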


@@ -3,17 +3,18 @@
 package local

 import (
+	"context"
 	"syscall"
 	"unsafe"

-	"github.com/ncw/rclone/fs"
 	"github.com/pkg/errors"
+	"github.com/rclone/rclone/fs"
 )

 var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")

 // About gets quota information
-func (f *Fs) About() (*fs.Usage, error) {
+func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	var available, total, free int64
 	_, _, e1 := getFreeDiskSpace.Call(
 		uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(f.root))),


@@ -0,0 +1,12 @@
+//+build !linux
+
+package local
+
+import (
+	"io"
+	"os"
+)
+
+func newFadviseReadCloser(o *Object, f *os.File, offset, limit int64) io.ReadCloser {
+	return f
+}


@@ -0,0 +1,165 @@
+//+build linux
+
+package local
+
+import (
+	"io"
+	"os"
+
+	"github.com/rclone/rclone/fs"
+	"golang.org/x/sys/unix"
+)
+
+// fadvise provides means to automate freeing pages in kernel page cache for
+// a given file descriptor as the file is sequentially processed (read or
+// written).
+//
+// When copying a file to a remote backend all the file content is read by
+// kernel and put to page cache to make future reads faster.
+// This causes memory pressure visible in both memory usage and CPU consumption
+// and can even cause OOM errors in applications consuming large amounts memory.
+//
+// In case of an upload to a remote backend, there is no benefits from caching.
+//
+// fadvise would orchestrate calling POSIX_FADV_DONTNEED
+//
+// POSIX_FADV_DONTNEED attempts to free cached pages associated
+// with the specified region. This is useful, for example, while
+// streaming large files. A program may periodically request the
+// kernel to free cached data that has already been used, so that
+// more useful cached pages are not discarded instead.
+//
+// Requests to discard partial pages are ignored. It is
+// preferable to preserve needed data than discard unneeded data.
+// If the application requires that data be considered for
+// discarding, then offset and len must be page-aligned.
+//
+// The implementation may attempt to write back dirty pages in
+// the specified region, but this is not guaranteed. Any
+// unwritten dirty pages will not be freed. If the application
+// wishes to ensure that dirty pages will be released, it should
+// call fsync(2) or fdatasync(2) first.
+type fadvise struct {
+	o          *Object
+	fd         int
+	lastPos    int64
+	curPos     int64
+	windowSize int64
+
+	freePagesCh chan offsetLength
+	doneCh      chan struct{}
+}
+
+type offsetLength struct {
+	offset int64
+	length int64
+}
+
+const (
+	defaultAllowPages      = 32
+	defaultWorkerQueueSize = 64
+)
+
+func newFadvise(o *Object, fd int, offset int64) *fadvise {
+	f := &fadvise{
+		o:          o,
+		fd:         fd,
+		lastPos:    offset,
+		curPos:     offset,
+		windowSize: int64(os.Getpagesize()) * defaultAllowPages,
+
+		freePagesCh: make(chan offsetLength, defaultWorkerQueueSize),
+		doneCh:      make(chan struct{}),
+	}
+	go f.worker()
+
+	return f
+}
+
+// sequential configures readahead strategy in Linux kernel.
+//
+// Under Linux, POSIX_FADV_NORMAL sets the readahead window to the
+// default size for the backing device; POSIX_FADV_SEQUENTIAL doubles
+// this size, and POSIX_FADV_RANDOM disables file readahead entirely.
+func (f *fadvise) sequential(limit int64) bool {
+	l := int64(0)
+	if limit > 0 {
+		l = limit
+	}
+	if err := unix.Fadvise(f.fd, f.curPos, l, unix.FADV_SEQUENTIAL); err != nil {
+		fs.Debugf(f.o, "fadvise sequential failed on file descriptor %d: %s", f.fd, err)
+		return false
+	}
+	return true
+}
+
+func (f *fadvise) next(n int) {
+	f.curPos += int64(n)
+	f.freePagesIfNeeded()
+}
+
+func (f *fadvise) freePagesIfNeeded() {
+	if f.curPos >= f.lastPos+f.windowSize {
+		f.freePages()
+	}
+}
+
+func (f *fadvise) freePages() {
+	f.freePagesCh <- offsetLength{f.lastPos, f.curPos - f.lastPos}
+	f.lastPos = f.curPos
+}
+
+func (f *fadvise) worker() {
+	for p := range f.freePagesCh {
+		if err := unix.Fadvise(f.fd, p.offset, p.length, unix.FADV_DONTNEED); err != nil {
+			fs.Debugf(f.o, "fadvise dontneed failed on file descriptor %d: %s", f.fd, err)
+		}
+	}
+	close(f.doneCh)
+}
+
+func (f *fadvise) wait() {
+	close(f.freePagesCh)
+	<-f.doneCh
+}
+
+type fadviseReadCloser struct {
+	*fadvise
+	inner io.ReadCloser
+}
+
+// newFadviseReadCloser wraps os.File so that reading from that file would
+// remove already consumed pages from kernel page cache.
+// In addition to that it instructs kernel to double the readahead window to
+// make sequential reads faster.
+// See also fadvise.
+func newFadviseReadCloser(o *Object, f *os.File, offset, limit int64) io.ReadCloser {
+	r := fadviseReadCloser{
+		fadvise: newFadvise(o, int(f.Fd()), offset),
+		inner:   f,
+	}
+
+	// If syscall failed it's likely that the subsequent syscalls to that
+	// file descriptor would also fail. In that case return the provided os.File
+	// pointer.
+	if !r.sequential(limit) {
+		r.wait()
+		return f
+	}
+
+	return r
+}
+
+func (f fadviseReadCloser) Read(p []byte) (n int, err error) {
+	n, err = f.inner.Read(p)
+	f.next(n)
+	return
+}
+
+func (f fadviseReadCloser) Close() error {
+	f.freePages()
+	f.wait()
+	return f.inner.Close()
+}
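As a rough usage sketch of the pattern this new file implements (independent of the Object plumbing above): advise the kernel that the scan is sequential, then periodically drop the pages already consumed. The path /tmp/big.bin is a placeholder, and unlike newFadvise this sketch does not batch DONTNEED requests to page-aligned windows, so some of them will be ignored:

//+build linux

package main

import (
	"io"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("/tmp/big.bin") // hypothetical large file
	if err != nil {
		panic(err)
	}
	defer f.Close()
	fd := int(f.Fd())
	// ask the kernel to double the readahead window for this sequential scan
	_ = unix.Fadvise(fd, 0, 0, unix.FADV_SEQUENTIAL)
	buf := make([]byte, 1<<20)
	var done int64
	for {
		n, err := f.Read(buf)
		if n > 0 {
			// drop the pages we have just consumed from the page cache
			_ = unix.Fadvise(fd, done, int64(n), unix.FADV_DONTNEED)
			done += int64(n)
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
	}
}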


@@ -3,6 +3,7 @@ package local

 import (
 	"bytes"
+	"context"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -16,15 +17,15 @@ import (
 	"time"
 	"unicode/utf8"

-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/accounting"
-	"github.com/ncw/rclone/fs/config/configmap"
-	"github.com/ncw/rclone/fs/config/configstruct"
-	"github.com/ncw/rclone/fs/fserrors"
-	"github.com/ncw/rclone/fs/hash"
-	"github.com/ncw/rclone/lib/file"
-	"github.com/ncw/rclone/lib/readers"
 	"github.com/pkg/errors"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/accounting"
+	"github.com/rclone/rclone/fs/config/configmap"
+	"github.com/rclone/rclone/fs/config/configstruct"
+	"github.com/rclone/rclone/fs/fserrors"
+	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/lib/file"
+	"github.com/rclone/rclone/lib/readers"
 )

 // Constants
@@ -85,7 +86,7 @@ are being uploaded and aborts with a message which starts "can't copy
 - source file is being updated" if the file changes during upload.

 However on some file systems this modification time check may fail (eg
-[Glusterfs #2206](https://github.com/ncw/rclone/issues/2206)) so this
+[Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this
 check can be disabled with this flag.`,
 			Default:  false,
 			Advanced: true,
@@ -96,6 +97,24 @@ check can be disabled with this flag.`,
 			NoPrefix: true,
 			ShortOpt: "x",
 			Advanced: true,
+		}, {
+			Name: "case_sensitive",
+			Help: `Force the filesystem to report itself as case sensitive.
+
+Normally the local backend declares itself as case insensitive on
+Windows/macOS and case sensitive for everything else. Use this flag
+to override the default choice.`,
+			Default:  false,
+			Advanced: true,
+		}, {
+			Name: "case_insensitive",
+			Help: `Force the filesystem to report itself as case insensitive
+
+Normally the local backend declares itself as case insensitive on
+Windows/macOS and case sensitive for everything else. Use this flag
+to override the default choice.`,
+			Default:  false,
+			Advanced: true,
 		}},
 	}
 	fs.Register(fsi)
@@ -110,6 +129,8 @@ type Options struct {
 	NoCheckUpdated  bool `config:"no_check_updated"`
 	NoUNC           bool `config:"nounc"`
 	OneFileSystem   bool `config:"one_file_system"`
+	CaseSensitive   bool `config:"case_sensitive"`
+	CaseInsensitive bool `config:"case_insensitive"`
 }

 // Fs represents a local filesystem rooted at root
@@ -173,6 +194,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	f.features = (&fs.Features{
 		CaseInsensitive:         f.caseInsensitive(),
 		CanHaveEmptyDirectories: true,
+		IsLocal:                 true,
 	}).Fill(f)
 	if opt.FollowSymlinks {
 		f.lstat = os.Stat
@@ -228,6 +250,12 @@ func (f *Fs) Features() *fs.Features {

 // caseInsensitive returns whether the remote is case insensitive or not
 func (f *Fs) caseInsensitive() bool {
+	if f.opt.CaseSensitive {
+		return false
+	}
+	if f.opt.CaseInsensitive {
+		return true
+	}
 	// FIXME not entirely accurate since you can have case
 	// sensitive Fses on darwin and case insensitive Fses on linux.
 	// Should probably check but that would involve creating a
@@ -303,7 +331,7 @@ func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Obj

 // NewObject finds the Object at remote. If it can't be found
 // it returns the error ErrorObjectNotFound.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 	return f.newObjectWithInfo(remote, "", nil)
 }
@@ -316,7 +344,7 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 	dir = f.dirNames.Load(dir)
 	fsDirPath := f.cleanPath(filepath.Join(f.root, dir))
@@ -332,7 +360,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 		err = errors.Wrapf(err, "failed to open directory %q", dir)
 		fs.Errorf(dir, "%v", err)
 		if isPerm {
-			accounting.Stats.Error(fserrors.NoRetryError(err))
+			accounting.Stats(ctx).Error(fserrors.NoRetryError(err))
 			err = nil // ignore error but fail sync
 		}
 		return nil, err
@@ -368,7 +396,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 			if fierr != nil {
 				err = errors.Wrapf(err, "failed to read directory %q", namepath)
 				fs.Errorf(dir, "%v", fierr)
-				accounting.Stats.Error(fserrors.NoRetryError(fierr)) // fail the sync
+				accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
 				continue
 			}
 			fis = append(fis, fi)
@@ -391,7 +419,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 				// Skip bad symlinks
 				err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
 				fs.Errorf(newRemote, "Listing error: %v", err)
-				accounting.Stats.Error(err)
+				accounting.Stats(ctx).Error(err)
 				continue
 			}
 			if err != nil {
@@ -481,11 +509,11 @@ func (m *mapper) Save(in, out string) string {
 }

 // Put the Object to the local filesystem
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	remote := src.Remote()
 	// Temporary Object under construction - info filled in by Update()
 	o := f.newObject(remote, "")
-	err := o.Update(in, src, options...)
+	err := o.Update(ctx, in, src, options...)
 	if err != nil {
 		return nil, err
 	}
@@ -493,12 +521,12 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
 }

 // PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	return f.Put(in, src, options...)
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	return f.Put(ctx, in, src, options...)
 }

 // Mkdir creates the directory if it doesn't exist
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 	// FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
 	root := f.cleanPath(filepath.Join(f.root, dir))
 	err := os.MkdirAll(root, 0777)
@@ -518,7 +546,7 @@ func (f *Fs) Mkdir(dir string) error {
 // Rmdir removes the directory
 //
 // If it isn't empty it will return an error
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	root := f.cleanPath(filepath.Join(f.root, dir))
 	return os.Remove(root)
 }
@@ -574,7 +602,7 @@ func (f *Fs) readPrecision() (precision time.Duration) {
 		}

 		// If it matches - have found the precision
-		// fmt.Println("compare", fi.ModTime(), t)
+		// fmt.Println("compare", fi.ModTime(ctx), t)
 		if fi.ModTime().Equal(t) {
 			// fmt.Println("Precision detected as", duration)
 			return duration
@@ -588,7 +616,7 @@ func (f *Fs) readPrecision() (precision time.Duration) {
 // Optional interface: Only implement this if you have a way of
 // deleting all the files quicker than just running Remove() on the
 // result of List()
-func (f *Fs) Purge() error {
+func (f *Fs) Purge(ctx context.Context) error {
 	fi, err := f.lstat(f.root)
 	if err != nil {
 		return err
@@ -608,7 +636,7 @@ func (f *Fs) Purge() error {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't move - not same remote type")
@@ -667,7 +695,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 	srcFs, ok := src.(*Fs)
 	if !ok {
 		fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -732,7 +760,7 @@ func (o *Object) Remote() string {
 }

 // Hash returns the requested hash of a file as a lowercase hex string
-func (o *Object) Hash(r hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
 	// Check that the underlying file hasn't changed
 	oldtime := o.modTime
 	oldsize := o.size
@@ -750,7 +778,11 @@ func (o *Object) Hash(r hash.Type) (string, error) {
 		var in io.ReadCloser

 		if !o.translatedLink {
-			in, err = file.Open(o.path)
+			var fd *os.File
+			fd, err = file.Open(o.path)
+			if fd != nil {
+				in = newFadviseReadCloser(o, fd, 0, 0)
+			}
 		} else {
 			in, err = o.openTranslatedLink(0, -1)
 		}
@@ -783,12 +815,12 @@ func (o *Object) Size() int64 {
 }

 // ModTime returns the modification time of the object
-func (o *Object) ModTime() time.Time {
+func (o *Object) ModTime(ctx context.Context) time.Time {
 	return o.modTime
 }

 // SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(modTime time.Time) error {
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	var err error
 	if o.translatedLink {
 		err = lChtimes(o.path, modTime, modTime)
@@ -884,9 +916,9 @@ func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err
 }

 // Open an object for read
-func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	var offset, limit int64 = 0, -1
-	hashes := hash.Supported
+	var hasher *hash.MultiHasher
 	for _, option := range options {
 		switch x := option.(type) {
 		case *fs.SeekOption:
@@ -894,7 +926,12 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 		case *fs.RangeOption:
 			offset, limit = x.Decode(o.size)
 		case *fs.HashesOption:
-			hashes = x.Hashes
+			if x.Hashes.Count() > 0 {
+				hasher, err = hash.NewMultiHasherTypes(x.Hashes)
+				if err != nil {
+					return nil, err
+				}
+			}
 		default:
 			if option.Mandatory() {
 				fs.Logf(o, "Unsupported mandatory option: %v", option)
@@ -911,22 +948,22 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	if err != nil {
 		return
 	}
-	wrappedFd := readers.NewLimitedReadCloser(fd, limit)
+	wrappedFd := readers.NewLimitedReadCloser(newFadviseReadCloser(o, fd, offset, limit), limit)
 	if offset != 0 {
 		// seek the object
 		_, err = fd.Seek(offset, io.SeekStart)
 		// don't attempt to make checksums
 		return wrappedFd, err
 	}
-	hash, err := hash.NewMultiHasherTypes(hashes)
-	if err != nil {
-		return nil, err
+	if hasher == nil {
+		// no need to wrap since we don't need checksums
+		return wrappedFd, nil
 	}
-	// Update the md5sum as we go along
+	// Update the hashes as we go along
 	in = &localOpenFile{
 		o:    o,
 		in:   wrappedFd,
-		hash: hash,
+		hash: hasher,
 		fd:   fd,
 	}
 	return in, nil
@@ -948,18 +985,23 @@ func (nwc nopWriterCloser) Close() error {
 }

 // Update the object from in with modTime and size
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
 	var out io.WriteCloser
-	hashes := hash.Supported
+	var hasher *hash.MultiHasher

 	for _, option := range options {
 		switch x := option.(type) {
 		case *fs.HashesOption:
-			hashes = x.Hashes
+			if x.Hashes.Count() > 0 {
+				hasher, err = hash.NewMultiHasherTypes(x.Hashes)
+				if err != nil {
+					return err
+				}
+			}
 		}
 	}

-	err := o.mkdirAll()
+	err = o.mkdirAll()
 	if err != nil {
 		return err
 	}
@@ -984,11 +1026,9 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	}

 	// Calculate the hash of the object we are reading as we go along
-	hash, err := hash.NewMultiHasherTypes(hashes)
-	if err != nil {
-		return err
+	if hasher != nil {
+		in = io.TeeReader(in, hasher)
 	}
-	in = io.TeeReader(in, hash)

 	_, err = io.Copy(out, in)
 	closeErr := out.Close()
@@ -1024,12 +1064,14 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	}

 	// All successful so update the hashes
-	o.fs.objectHashesMu.Lock()
-	o.hashes = hash.Sums()
-	o.fs.objectHashesMu.Unlock()
+	if hasher != nil {
+		o.fs.objectHashesMu.Lock()
+		o.hashes = hasher.Sums()
+		o.fs.objectHashesMu.Unlock()
+	}

 	// Set the mtime
-	err = o.SetModTime(src.ModTime())
+	err = o.SetModTime(ctx, src.ModTime(ctx))
 	if err != nil {
 		return err
 	}
@@ -1043,7 +1085,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 // Pass in the remote desired and the size if known.
 //
 // It truncates any existing object
-func (f *Fs) OpenWriterAt(remote string, size int64) (fs.WriterAtCloser, error) {
+func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
 	// Temporary Object under construction
 	o := f.newObject(remote, "")
@@ -1093,7 +1135,7 @@ func (o *Object) lstat() error {
 }

 // Remove an object
-func (o *Object) Remove() error {
+func (o *Object) Remove(ctx context.Context) error {
 	return remove(o.path)
 }
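The Open and Update rework above computes checksums only when a HashesOption actually requests some types, rather than always hashing with every supported algorithm. A short sketch of that tee-through-the-hasher pattern, assuming the fs/hash API at this revision (NewHashSet, NewMultiHasherTypes, Sums):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"

	"github.com/rclone/rclone/fs/hash"
)

func main() {
	// Only compute the hash types the caller asked for.
	hasher, err := hash.NewMultiHasherTypes(hash.NewHashSet(hash.MD5))
	if err != nil {
		panic(err)
	}
	// Tee the data through the hasher while it is being read,
	// the same shape as Update's io.TeeReader(in, hasher).
	in := io.TeeReader(strings.NewReader("hello"), hasher)
	if _, err := io.Copy(ioutil.Discard, in); err != nil {
		panic(err)
	}
	fmt.Println(hasher.Sums()[hash.MD5]) // 5d41402abc4b2a76b9719d911017c592
}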


@@ -1,6 +1,7 @@
 package local

 import (
+	"context"
 	"io/ioutil"
 	"os"
 	"path"
@@ -9,12 +10,12 @@ import (
 	"testing"
 	"time"

-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/config/configmap"
-	"github.com/ncw/rclone/fs/hash"
-	"github.com/ncw/rclone/fstest"
-	"github.com/ncw/rclone/lib/file"
-	"github.com/ncw/rclone/lib/readers"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config/configmap"
+	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fstest"
+	"github.com/rclone/rclone/lib/file"
+	"github.com/rclone/rclone/lib/readers"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -83,6 +84,7 @@ func TestUpdatingCheck(t *testing.T) {
 }

 func TestSymlink(t *testing.T) {
+	ctx := context.Background()
 	r := fstest.NewRun(t)
 	defer r.Finalise()
 	f := r.Flocal.(*Fs)
@@ -131,7 +133,7 @@ func TestSymlink(t *testing.T) {
 	// Create a symlink
 	modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
-	file3 := r.WriteObjectTo(r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
+	file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
 	if runtime.GOOS == "windows" {
 		file3.Size = 0 // symlinks are 0 length under Windows
 	}
@@ -150,7 +152,7 @@ func TestSymlink(t *testing.T) {
 	assert.Equal(t, "file.txt", linkText)

 	// Check that NewObject gets the correct object
-	o, err := r.Flocal.NewObject("symlink2.txt" + linkSuffix)
+	o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
 	require.NoError(t, err)
 	assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
 	if runtime.GOOS != "windows" {
@@ -158,11 +160,11 @@ func TestSymlink(t *testing.T) {
 	}

 	// Check that NewObject doesn't see the non suffixed version
-	_, err = r.Flocal.NewObject("symlink2.txt")
+	_, err = r.Flocal.NewObject(ctx, "symlink2.txt")
 	require.Equal(t, fs.ErrorObjectNotFound, err)

 	// Check reading the object
-	in, err := o.Open()
+	in, err := o.Open(ctx)
 	require.NoError(t, err)
 	contents, err := ioutil.ReadAll(in)
 	require.NoError(t, err)
@@ -170,7 +172,7 @@ func TestSymlink(t *testing.T) {
 	require.NoError(t, in.Close())

 	// Check reading the object with range
-	in, err = o.Open(&fs.RangeOption{Start: 2, End: 5})
+	in, err = o.Open(ctx, &fs.RangeOption{Start: 2, End: 5})
 	require.NoError(t, err)
 	contents, err = ioutil.ReadAll(in)
 	require.NoError(t, err)


@@ -4,8 +4,8 @@ package local_test
 import (
 	"testing"

-	"github.com/ncw/rclone/backend/local"
-	"github.com/ncw/rclone/fstest/fstests"
+	"github.com/rclone/rclone/backend/local"
+	"github.com/rclone/rclone/fstest/fstests"
 )

 // TestIntegration runs integration tests against the remote


@@ -6,7 +6,7 @@ import (
 	"os"
 	"sync/atomic"

-	"github.com/ncw/rclone/fs"
+	"github.com/rclone/rclone/fs"
 	"golang.org/x/sys/unix"
 )


@@ -8,7 +8,7 @@ import (
 	"os"
 	"syscall"

-	"github.com/ncw/rclone/fs"
+	"github.com/rclone/rclone/fs"
 )

 // readDevice turns a valid os.FileInfo into a device number,


@@ -7,7 +7,7 @@ import (
 	"syscall"
 	"time"

-	"github.com/ncw/rclone/fs"
+	"github.com/rclone/rclone/fs"
 )

 const (

Some files were not shown because too many files have changed in this diff.