Mirror of https://github.com/rclone/rclone.git (synced 2026-01-24 05:13:23 +00:00)

Compare commits (42 commits)
Commits included:

48e23d8c85, 934440a9df, 29b4f211ab, bd863f8868, 66c23723e3, 58a531a203,
ba1daea072, bdcd0b4c64, 94eb9a4014, e028c006fc, 3f3f038b73, 2298834e83,
07dfb3aa11, 1382dba3c8, f1347139fa, 27a730ef8f, d0c6e5cf5a, cf9b973fe4,
ffa1dac10b, 7b0966880e, 1c4e33d4ad, 530ba66d35, b3db38ae31, c0d1869204,
89b6d89077, ef7b001626, f97a3e853e, b71ac141cc, 5932acfee3, e2ce687f93,
a3fb460c6b, 8d296d0e1d, 20a57aaccb, 50a4ed8fc4, e2b5ed6c7a, 16e7da2cb5,
52df19ad34, 693112d57e, 0edbc9578d, 7211c2dca7, af192d2507, d1a39dcc4b
appveyor.yml

@@ -40,6 +40,7 @@ build_script:
test_script:
- make GOTAGS=cmount quicktest
- make GOTAGS=cmount racequicktest

artifacts:
- path: rclone.exe
.circleci/config.yml

@@ -14,37 +14,30 @@ jobs:
    - run:
        name: Cross-compile rclone
        command: |
          docker pull rclone/xgo-cgofuse
          docker pull billziss/xgo-cgofuse
          go get -v github.com/karalabe/xgo
          xgo \
              --image=rclone/xgo-cgofuse \
              --targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
              -image=billziss/xgo-cgofuse \
              -targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
              -tags cmount \
              -dest build \
              .
          xgo \
              --targets=android/*,ios/* \
              -image=billziss/xgo-cgofuse \
              -targets=android/*,ios/* \
              -dest build \
              .

    - run:
        name: Prepare artifacts
        command: |
          mkdir -p /tmp/rclone.dist
          cp -R rclone-* /tmp/rclone.dist
          mkdir build
          cp -R rclone-* build/

    - run:
        name: Build rclone
        command: |
          go version
          go build
          docker pull golang
          docker run --rm -v "$PWD":/usr/src/rclone -w /usr/src/rclone golang go build -mod=vendor -v

    - run:
        name: Upload artifacts
        command: |
          if [[ $CIRCLE_PULL_REQUEST != "" ]]; then
          make circleci_upload
          fi
          make circleci_upload

    - store_artifacts:
        path: /tmp/rclone.dist
        path: build
.travis.yml (14 lines changed)

@@ -50,9 +50,6 @@ matrix:
  allow_failures:
    - go: tip
  include:
    - go: 1.9.x
      script:
        - make quicktest
    - go: 1.10.x
      script:
        - make quicktest

@@ -60,6 +57,9 @@ matrix:
      script:
        - make quicktest
    - go: 1.12.x
      script:
        - make quicktest
    - go: 1.13.x
      name: Linux
      env:
        - GOTAGS=cmount

@@ -69,7 +69,7 @@ matrix:
        - make build_dep
        - make check
        - make quicktest
    - go: 1.12.x
    - go: 1.13.x
      name: Go Modules / Race
      env:
        - GO111MODULE=on

@@ -77,7 +77,7 @@ matrix:
      script:
        - make quicktest
        - make racequicktest
    - go: 1.12.x
    - go: 1.13.x
      name: Other OS
      env:
        - DEPLOY=true

@@ -85,7 +85,7 @@ matrix:
      script:
        - make
        - make compile_all
    - go: 1.12.x
    - go: 1.13.x
      name: macOS
      os: osx
      env:

@@ -101,7 +101,7 @@ matrix:
        - make racequicktest
#    - os: windows
#      name: Windows
#      go: 1.12.x
#      go: 1.13.x
#      env:
#        - GOTAGS=cmount
#        - CPATH='C:\Program Files (x86)\WinFsp\inc\fuse'
MANUAL.html (generated; 65 lines changed)

@@ -17,7 +17,7 @@
<header>
<h1 class="title">rclone(1) User Manual</h1>
<p class="author">Nick Craig-Wood</p>
<p class="date">Sep 08, 2019</p>
<p class="date">Aug 26, 2019</p>
</header>
<h1 id="rclone---rsync-for-cloud-storage">Rclone - rsync for cloud storage</h1>
<p>Rclone is a command line program to sync files and directories to and from:</p>

@@ -134,20 +134,6 @@ sudo mv rclone /usr/local/bin/</code></pre>
<pre><code>cd .. && rm -rf rclone-*-osx-amd64 rclone-current-osx-amd64.zip</code></pre>
<p>Run <code>rclone config</code> to setup. See <a href="https://rclone.org/docs/">rclone config docs</a> for more details.</p>
<pre><code>rclone config</code></pre>
<h2 id="install-with-docker">Install with docker</h2>
<p>The rclone maintains a <a href="https://hub.docker.com/r/rclone/rclone">docker image for rclone</a>. These images are autobuilt by docker hub from the rclone source based on a minimal Alpine linux image.</p>
<p>The <code>:latest</code> tag will always point to the latest stable release. You can use the <code>:beta</code> tag to get the latest build from master. You can also use version tags, eg <code>:1.49.1</code>, <code>:1.49</code> or <code>:1</code>.</p>
<pre><code>$ docker pull rclone/rclone:latest
latest: Pulling from rclone/rclone
Digest: sha256:0e0ced72671989bb837fea8e88578b3fc48371aa45d209663683e24cfdaa0e11
...
$ docker run --rm rclone/rclone:latest version
rclone v1.49.1
- os/arch: linux/amd64
- go version: go1.12.9</code></pre>
<p>You will probably want to mount rclone’s config file directory or file from the host, or configure rclone with environment variables.</p>
<p>Eg to share your local config with the container</p>
<pre><code>docker run -v ~/.config/rclone:/root/.config/rclone rclone/rclone:latest listremotes</code></pre>
<h2 id="install-from-source">Install from source</h2>
<p>Make sure you have at least <a href="https://golang.org/">Go</a> 1.7 installed. <a href="https://golang.org/dl/">Download go</a> if necessary. The latest release is recommended. Then</p>
<pre><code>git clone https://github.com/rclone/rclone.git

@@ -3315,16 +3301,12 @@ rclone rc cache/expire remote=/ withData=true</code></pre>
<p>This takes the following parameters</p>
<ul>
<li>name - name of remote</li>
<li>parameters - a map of { “key”: “value” } pairs</li>
<li>type - type of the new remote</li>
</ul>
<p>See the <a href="https://rclone.org/commands/rclone_config_create/">config create command</a> command for more information on the above.</p>
<p>Authentication is required for this call.</p>
<h3 id="configdelete-delete-a-remote-in-the-config-file.-configdelete">config/delete: Delete a remote in the config file. {#config/delete}</h3>
<p>Parameters:</p>
<ul>
<li>name - name of remote to delete</li>
</ul>
<p>Parameters: - name - name of remote to delete</p>
<p>See the <a href="https://rclone.org/commands/rclone_config_delete/">config delete command</a> command for more information on the above.</p>
<p>Authentication is required for this call.</p>
<h3 id="configdump-dumps-the-config-file.-configdump">config/dump: Dumps the config file. {#config/dump}</h3>

@@ -3344,7 +3326,6 @@ rclone rc cache/expire remote=/ withData=true</code></pre>
<p>This takes the following parameters</p>
<ul>
<li>name - name of remote</li>
<li>parameters - a map of { “key”: “value” } pairs</li>
</ul>
<p>See the <a href="https://rclone.org/commands/rclone_config_password/">config password command</a> command for more information on the above.</p>
<p>Authentication is required for this call.</p>

@@ -3356,7 +3337,6 @@ rclone rc cache/expire remote=/ withData=true</code></pre>
<p>This takes the following parameters</p>
<ul>
<li>name - name of remote</li>
<li>parameters - a map of { “key”: “value” } pairs</li>
</ul>
<p>See the <a href="https://rclone.org/commands/rclone_config_update/">config update command</a> command for more information on the above.</p>
<p>Authentication is required for this call.</p>

@@ -4610,7 +4590,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
      --use-json-log                         Use json log format.
      --use-mmap                             Use mmap allocator (see docs).
      --use-server-modtime                   Use server modified time instead of object metadata
      --user-agent string                    Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.49.2")
      --user-agent string                    Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.49.0")
  -v, --verbose count                        Print lots more stuff (repeat for more)</code></pre>
<h2 id="backend-flags">Backend Flags</h2>
<p>These flags are available for every command. They control the backends and may be set in the config file.</p>

@@ -12308,41 +12288,6 @@ $ tree /tmp/b
</ul>
<!--- autogenerated options stop -->
<h1 id="changelog">Changelog</h1>
<h2 id="v1.49.2---2019-09-08">v1.49.2 - 2019-09-08</h2>
<ul>
<li>New Features
<ul>
<li>build: Add Docker workflow support (Alfonso Montero)</li>
</ul></li>
<li>Bug Fixes
<ul>
<li>accounting: Fix locking in Transfer to avoid deadlock with –progress (Nick Craig-Wood)</li>
<li>docs: Fix template argument for mktemp in install.sh (Cnly)</li>
<li>operations: Fix -u/–update with google photos / files of unknown size (Nick Craig-Wood)</li>
<li>rc: Fix docs for config/create /update /password (Nick Craig-Wood)</li>
</ul></li>
<li>Google Cloud Storage
<ul>
<li>Fix need for elevated permissions on SetModTime (Nick Craig-Wood)</li>
</ul></li>
</ul>
<h2 id="v1.49.1---2019-08-28">v1.49.1 - 2019-08-28</h2>
<p>Point release to fix config bug and google photos backend.</p>
<ul>
<li>Bug Fixes
<ul>
<li>config: Fix generated passwords being stored as empty password (Nick Craig-Wood)</li>
<li>rcd: Added missing parameter for web-gui info logs. (Chaitanya)</li>
</ul></li>
<li>Googlephotos
<ul>
<li>Fix crash on error response (Nick Craig-Wood)</li>
</ul></li>
<li>Onedrive
<ul>
<li>Fix crash on error response (Nick Craig-Wood)</li>
</ul></li>
</ul>
<h2 id="v1.49.0---2019-08-26">v1.49.0 - 2019-08-26</h2>
<ul>
<li>New backends

@@ -12357,10 +12302,8 @@ $ tree /tmp/b
<li>Experimental <a href="https://rclone.org/gui/">web GUI</a> (Chaitanya Bankanhal)</li>
<li>Implement <code>--compare-dest</code> & <code>--copy-dest</code> (yparitcher)</li>
<li>Implement <code>--suffix</code> without <code>--backup-dir</code> for backup to current dir (yparitcher)</li>
<li><code>config reconnect</code> to re-login (re-run the oauth login) for the backend. (Nick Craig-Wood)</li>
<li><code>config userinfo</code> to discover which user you are logged in as. (Nick Craig-Wood)</li>
<li><code>config disconnect</code> to disconnect you (log out) from the backend. (Nick Craig-Wood)</li>
<li>Add <code>--use-json-log</code> for JSON logging (justinalin)</li>
<li>Add <code>config reconnect</code>, <code>config userinfo</code> and <code>config disconnect</code> subcommands. (Nick Craig-Wood)</li>
<li>Add context propagation to rclone (Aleksandar Jankovic)</li>
<li>Reworking internal statistics interfaces so they work with rc jobs (Aleksandar Jankovic)</li>
<li>Add Higher units for ETA (AbelThar)</li>
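The config/create, config/password and config/update rc calls documented in the hunks above all take the same kind of JSON body: a remote name, a map of "key": "value" parameters and (for create) a type. As a minimal sketch of driving one of them over HTTP, assuming an rc server started with `rclone rcd --rc-no-auth` on the default localhost:5572 (the remote name and parameters below are made up for illustration):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Body matches the documented config/create parameters:
	// name, type, and a map of {"key": "value"} pairs.
	body, err := json.Marshal(map[string]interface{}{
		"name":       "mydrive", // hypothetical remote name
		"type":       "drive",
		"parameters": map[string]string{"scope": "drive"},
	})
	if err != nil {
		panic(err)
	}
	// rc calls are plain POSTs of JSON to http://host:port/path/of/call.
	resp, err := http.Post("http://localhost:5572/config/create",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```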
MANUAL.md (generated; 66 lines changed)

@@ -1,6 +1,6 @@
% rclone(1) User Manual
% Nick Craig-Wood
% Sep 08, 2019
% Aug 26, 2019

# Rclone - rsync for cloud storage

@@ -151,36 +151,6 @@ Run `rclone config` to setup. See [rclone config docs](https://rclone.org/docs/)

    rclone config

## Install with docker ##

The rclone maintains a [docker image for rclone](https://hub.docker.com/r/rclone/rclone).
These images are autobuilt by docker hub from the rclone source based
on a minimal Alpine linux image.

The `:latest` tag will always point to the latest stable release. You
can use the `:beta` tag to get the latest build from master. You can
also use version tags, eg `:1.49.1`, `:1.49` or `:1`.

```
$ docker pull rclone/rclone:latest
latest: Pulling from rclone/rclone
Digest: sha256:0e0ced72671989bb837fea8e88578b3fc48371aa45d209663683e24cfdaa0e11
...
$ docker run --rm rclone/rclone:latest version
rclone v1.49.1
- os/arch: linux/amd64
- go version: go1.12.9
```

You will probably want to mount rclone's config file directory or file
from the host, or configure rclone with environment variables.

Eg to share your local config with the container

```
docker run -v ~/.config/rclone:/root/.config/rclone rclone/rclone:latest listremotes
```

## Install from source ##

Make sure you have at least [Go](https://golang.org/) 1.7

@@ -7040,7 +7010,6 @@ Show statistics for the cache remote.
This takes the following parameters

- name - name of remote
- parameters - a map of \{ "key": "value" \} pairs
- type - type of the new remote

@@ -7051,7 +7020,6 @@ Authentication is required for this call.
### config/delete: Delete a remote in the config file. {#config/delete}

Parameters:

- name - name of remote to delete

See the [config delete command](https://rclone.org/commands/rclone_config_delete/) command for more information on the above.

@@ -7092,7 +7060,6 @@ Authentication is required for this call.
This takes the following parameters

- name - name of remote
- parameters - a map of \{ "key": "value" \} pairs

See the [config password command](https://rclone.org/commands/rclone_config_password/) command for more information on the above.

@@ -7113,7 +7080,6 @@ Authentication is required for this call.
This takes the following parameters

- name - name of remote
- parameters - a map of \{ "key": "value" \} pairs

See the [config update command](https://rclone.org/commands/rclone_config_update/) command for more information on the above.

@@ -8279,7 +8245,7 @@ These flags are available for every command.
      --use-json-log                         Use json log format.
      --use-mmap                             Use mmap allocator (see docs).
      --use-server-modtime                   Use server modified time instead of object metadata
      --user-agent string                    Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.49.2")
      --user-agent string                    Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.49.0")
  -v, --verbose count                        Print lots more stuff (repeat for more)
```

@@ -18500,30 +18466,6 @@ to override the default choice.

# Changelog

## v1.49.2 - 2019-09-08

* New Features
    * build: Add Docker workflow support (Alfonso Montero)
* Bug Fixes
    * accounting: Fix locking in Transfer to avoid deadlock with --progress (Nick Craig-Wood)
    * docs: Fix template argument for mktemp in install.sh (Cnly)
    * operations: Fix -u/--update with google photos / files of unknown size (Nick Craig-Wood)
    * rc: Fix docs for config/create /update /password (Nick Craig-Wood)
* Google Cloud Storage
    * Fix need for elevated permissions on SetModTime (Nick Craig-Wood)

## v1.49.1 - 2019-08-28

Point release to fix config bug and google photos backend.

* Bug Fixes
    * config: Fix generated passwords being stored as empty password (Nick Craig-Wood)
    * rcd: Added missing parameter for web-gui info logs. (Chaitanya)
* Googlephotos
    * Fix crash on error response (Nick Craig-Wood)
* Onedrive
    * Fix crash on error response (Nick Craig-Wood)

## v1.49.0 - 2019-08-26

* New backends

@@ -18535,10 +18477,8 @@ Point release to fix config bug and google photos backend.
    * Experimental [web GUI](https://rclone.org/gui/) (Chaitanya Bankanhal)
    * Implement `--compare-dest` & `--copy-dest` (yparitcher)
    * Implement `--suffix` without `--backup-dir` for backup to current dir (yparitcher)
    * `config reconnect` to re-login (re-run the oauth login) for the backend. (Nick Craig-Wood)
    * `config userinfo` to discover which user you are logged in as. (Nick Craig-Wood)
    * `config disconnect` to disconnect you (log out) from the backend. (Nick Craig-Wood)
    * Add `--use-json-log` for JSON logging (justinalin)
    * Add `config reconnect`, `config userinfo` and `config disconnect` subcommands. (Nick Craig-Wood)
    * Add context propagation to rclone (Aleksandar Jankovic)
    * Reworking internal statistics interfaces so they work with rc jobs (Aleksandar Jankovic)
    * Add Higher units for ETA (AbelThar)
MANUAL.txt (generated; 77 lines changed)

@@ -1,6 +1,6 @@
rclone(1) User Manual
Nick Craig-Wood
Sep 08, 2019
Aug 26, 2019

@@ -164,33 +164,6 @@ Run rclone config to setup. See rclone config docs for more details.

    rclone config

Install with docker

The rclone maintains a docker image for rclone. These images are
autobuilt by docker hub from the rclone source based on a minimal Alpine
linux image.

The :latest tag will always point to the latest stable release. You can
use the :beta tag to get the latest build from master. You can also use
version tags, eg :1.49.1, :1.49 or :1.

    $ docker pull rclone/rclone:latest
    latest: Pulling from rclone/rclone
    Digest: sha256:0e0ced72671989bb837fea8e88578b3fc48371aa45d209663683e24cfdaa0e11
    ...
    $ docker run --rm rclone/rclone:latest version
    rclone v1.49.1
    - os/arch: linux/amd64
    - go version: go1.12.9

You will probably want to mount rclone’s config file directory or file
from the host, or configure rclone with environment variables.

Eg to share your local config with the container

    docker run -v ~/.config/rclone:/root/.config/rclone rclone/rclone:latest listremotes

Install from source

Make sure you have at least Go 1.7 installed. Download go if necessary.

@@ -6677,7 +6650,6 @@ config/create: create the config for a remote. {#config/create}
This takes the following parameters

- name - name of remote
- parameters - a map of { “key”: “value” } pairs
- type - type of the new remote

See the config create command command for more information on the above.

@@ -6686,9 +6658,7 @@ Authentication is required for this call.

config/delete: Delete a remote in the config file. {#config/delete}

Parameters:

- name - name of remote to delete
Parameters: - name - name of remote to delete

See the config delete command command for more information on the above.

@@ -6725,7 +6695,6 @@ config/password: password the config for a remote. {#config/password}
This takes the following parameters

- name - name of remote
- parameters - a map of { “key”: “value” } pairs

See the config password command command for more information on the
above.

@@ -6746,7 +6715,6 @@ config/update: update the config for a remote. {#config/update}
This takes the following parameters

- name - name of remote
- parameters - a map of { “key”: “value” } pairs

See the config update command command for more information on the above.

@@ -7856,7 +7824,7 @@ These flags are available for every command.
          --use-json-log                         Use json log format.
          --use-mmap                             Use mmap allocator (see docs).
          --use-server-modtime                   Use server modified time instead of object metadata
          --user-agent string                    Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.49.2")
          --user-agent string                    Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.49.0")
      -v, --verbose count                        Print lots more stuff (repeat for more)

@@ -17955,37 +17923,6 @@ override the default choice.

CHANGELOG

v1.49.2 - 2019-09-08

- New Features
    - build: Add Docker workflow support (Alfonso Montero)
- Bug Fixes
    - accounting: Fix locking in Transfer to avoid deadlock with
      –progress (Nick Craig-Wood)
    - docs: Fix template argument for mktemp in install.sh (Cnly)
    - operations: Fix -u/–update with google photos / files of unknown
      size (Nick Craig-Wood)
    - rc: Fix docs for config/create /update /password (Nick
      Craig-Wood)
- Google Cloud Storage
    - Fix need for elevated permissions on SetModTime (Nick
      Craig-Wood)

v1.49.1 - 2019-08-28

Point release to fix config bug and google photos backend.

- Bug Fixes
    - config: Fix generated passwords being stored as empty password
      (Nick Craig-Wood)
    - rcd: Added missing parameter for web-gui info logs. (Chaitanya)
- Googlephotos
    - Fix crash on error response (Nick Craig-Wood)
- Onedrive
    - Fix crash on error response (Nick Craig-Wood)

v1.49.0 - 2019-08-26

- New backends

@@ -17998,13 +17935,9 @@ v1.49.0 - 2019-08-26
    - Implement --compare-dest & --copy-dest (yparitcher)
    - Implement --suffix without --backup-dir for backup to current
      dir (yparitcher)
    - config reconnect to re-login (re-run the oauth login) for the
      backend. (Nick Craig-Wood)
    - config userinfo to discover which user you are logged in as.
      (Nick Craig-Wood)
    - config disconnect to disconnect you (log out) from the backend.
      (Nick Craig-Wood)
    - Add --use-json-log for JSON logging (justinalin)
    - Add config reconnect, config userinfo and config disconnect
      subcommands. (Nick Craig-Wood)
    - Add context propagation to rclone (Aleksandar Jankovic)
    - Reworking internal statistics interfaces so they work with rc
      jobs (Aleksandar Jankovic)
README.md

@@ -14,6 +14,7 @@
[](https://circleci.com/gh/rclone/rclone/tree/master)
[](https://goreportcard.com/report/github.com/rclone/rclone)
[](https://godoc.org/github.com/rclone/rclone)
[](https://hub.docker.com/r/rclone/rclone)

# Rclone

@@ -40,6 +41,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
RELEASE.md (66 lines changed)

@@ -1,8 +1,14 @@
Extra required software for making a release
# Release

This file describes how to make the various kinds of releases

## Extra required software for making a release

* [github-release](https://github.com/aktau/github-release) for uploading packages
* pandoc for making the html and man pages

Making a release
## Making a release

* git status - make sure everything is checked in
* Check travis & appveyor builds are green
* make check

@@ -26,8 +32,8 @@ Making a release
* # announce with forum post, twitter post, G+ post

Early in the next release cycle update the vendored dependencies

* Review any pinned packages in go.mod and remove if possible
* GO111MODULE=on go get -u github.com/spf13/cobra@master
* make update
* git status
* git add new files

@@ -48,26 +54,46 @@ Can be fixed with
* GO111MODULE=on go mod vendor

Making a point release. If rclone needs a point release due to some
horrendous bug, then
* git branch v1.XX v1.XX-fixes
## Making a point release

If rclone needs a point release due to some horrendous bug then a
point release is necessary.

First make the release branch. If this is a second point release then
this will be done already.

* BASE_TAG=v1.XX # eg v1.49
* NEW_TAG=${BASE_TAG}.Y # eg v1.49.1
* echo $BASE_TAG $NEW_TAG # v1.49 v1.49.1
* git branch ${BASE_TAG} ${BASE_TAG}-fixes

Now

* git co ${BASE_TAG}-fixes
* git cherry-pick any fixes
* Test (see above)
* make NEW_TAG=v1.XX.1 tag
* make NEW_TAG=${NEW_TAG} tag
* edit docs/content/changelog.md
* make TAG=v1.43.1 doc
* git commit -a -v -m "Version v1.XX.1"
* git tag -d -v1.XX.1
* git tag -s -m "Version v1.XX.1" v1.XX.1
* git push --tags -u origin v1.XX-fixes
* make BRANCH_PATH= TAG=v1.43.1 fetch_binaries
* make TAG=v1.43.1 tarball
* make TAG=v1.43.1 sign_upload
* make TAG=v1.43.1 check_sign
* make TAG=v1.43.1 upload
* make TAG=v1.43.1 upload_website
* make TAG=v1.43.1 upload_github
* NB this overwrites the current beta so after the release, rebuild the last travis build
* make TAG=${NEW_TAG} doc
* git commit -a -v -m "Version ${NEW_TAG}"
* git tag -d ${NEW_TAG}
* git tag -s -m "Version ${NEW_TAG}" ${NEW_TAG}
* git push --tags -u origin ${BASE_TAG}-fixes
* Wait for builds to complete
* make BRANCH_PATH= TAG=${NEW_TAG} fetch_binaries
* make TAG=${NEW_TAG} tarball
* make TAG=${NEW_TAG} sign_upload
* make TAG=${NEW_TAG} check_sign
* make TAG=${NEW_TAG} upload
* make TAG=${NEW_TAG} upload_website
* make TAG=${NEW_TAG} upload_github
* NB this overwrites the current beta so we need to do this
* git co master
* make LAST_TAG=${NEW_TAG} startdev
* # cherry pick the changes to the changelog
* git checkout ${BASE_TAG}-fixes docs/content/changelog.md
* git commit --amend
* git push
* Announce!

## Making a manual build of docker
azure-pipelines.yml

@@ -49,6 +49,7 @@ strategy:
      GO_VERSION: latest
      BUILD_FLAGS: '-include "^windows/amd64" -cgo'
      MAKE_QUICKTEST: true
      MAKE_RACEQUICKTEST: true
      DEPLOY: true
    windows_386:
      imageName: windows-2019

@@ -73,12 +74,6 @@ strategy:
      GOPROXY: https://proxy.golang.org
      MAKE_QUICKTEST: true
      MAKE_RACEQUICKTEST: true
    go1.9:
      imageName: ubuntu-16.04
      gorootDir: /usr/local
      GOCACHE: '' # build caching only came in go1.10
      GO_VERSION: go1.9.7
      MAKE_QUICKTEST: true
    go1.10:
      imageName: ubuntu-16.04
      gorootDir: /usr/local

@@ -87,7 +82,12 @@ strategy:
    go1.11:
      imageName: ubuntu-16.04
      gorootDir: /usr/local
      GO_VERSION: go1.11.12
      GO_VERSION: go1.11.13
      MAKE_QUICKTEST: true
    go1.12:
      imageName: ubuntu-16.04
      gorootDir: /usr/local
      GO_VERSION: go1.12.9
      MAKE_QUICKTEST: true

pool:
backend/all/all.go

@@ -20,6 +20,7 @@ import (
    _ "github.com/rclone/rclone/backend/jottacloud"
    _ "github.com/rclone/rclone/backend/koofr"
    _ "github.com/rclone/rclone/backend/local"
    _ "github.com/rclone/rclone/backend/mailru"
    _ "github.com/rclone/rclone/backend/mega"
    _ "github.com/rclone/rclone/backend/onedrive"
    _ "github.com/rclone/rclone/backend/opendrive"
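This import block is how backends such as the newly listed mailru become available: backend/all blank-imports every backend package purely for its side effects, and each package registers itself from an init function. An abridged sketch of the shape of such a package (NewFs is the backend's constructor, defined elsewhere in the same package; fs.Register and fs.RegInfo are the real rclone registration API):

```go
package mailru

import "github.com/rclone/rclone/fs"

// init runs when the package is imported, even blank-imported as in
// backend/all, adding this backend to the global registry so that
// "type = mailru" in an rclone config section resolves to NewFs.
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "mailru",
		Description: "Mail.ru Cloud",
		NewFs:       NewFs, // the backend constructor
	})
}
```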
backend/azureblob/azureblob.go (filename inferred)

@@ -1115,7 +1115,7 @@ func (o *Object) parseTimeString(timeString string) (err error) {
        fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
        return err
    }
    o.modTime = time.Unix(unixMilliseconds/1E3, (unixMilliseconds%1E3)*1E6).UTC()
    o.modTime = time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC()
    return nil
}
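The only change in this hunk is stylistic: Go treats 1E3 and 1e3 identically, and lint prefers the lowercase exponent. The surrounding arithmetic splits a millisecond epoch value into the (seconds, nanoseconds) pair that time.Unix expects; a worked sketch:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// e.g. 1567890123456 ms since the Unix epoch
	var unixMilliseconds int64 = 1567890123456
	// time.Unix takes whole seconds plus the remainder in nanoseconds:
	// ms / 1e3 -> seconds, (ms % 1e3) * 1e6 -> nanoseconds.
	t := time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC()
	fmt.Println(t) // 2019-09-07 21:02:03.456 +0000 UTC
}
```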
backend/b2/api/types.go (filename inferred)

@@ -50,7 +50,7 @@ type Timestamp time.Time
// MarshalJSON turns a Timestamp into JSON (in UTC)
func (t *Timestamp) MarshalJSON() (out []byte, err error) {
    timestamp := (*time.Time)(t).UTC().UnixNano()
    return []byte(strconv.FormatInt(timestamp/1E6, 10)), nil
    return []byte(strconv.FormatInt(timestamp/1e6, 10)), nil
}

// UnmarshalJSON turns JSON into a Timestamp

@@ -59,7 +59,7 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
    if err != nil {
        return err
    }
    *t = Timestamp(time.Unix(timestamp/1E3, (timestamp%1E3)*1E6).UTC())
    *t = Timestamp(time.Unix(timestamp/1e3, (timestamp%1e3)*1e6).UTC())
    return nil
}
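These two hunks are the same 1E/1e spelling cleanup applied to a type that serialises times as millisecond epochs in JSON. A self-contained round-trip sketch of the same encode/decode scheme (a simplified stand-in, not the rclone type itself):

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// Timestamp marshals to/from a JSON number of milliseconds since the epoch.
type Timestamp time.Time

func (t Timestamp) MarshalJSON() ([]byte, error) {
	ns := time.Time(t).UTC().UnixNano()
	return []byte(strconv.FormatInt(ns/1e6, 10)), nil // ns -> ms
}

func (t *Timestamp) UnmarshalJSON(data []byte) error {
	ms, err := strconv.ParseInt(string(data), 10, 64)
	if err != nil {
		return err
	}
	*t = Timestamp(time.Unix(ms/1e3, (ms%1e3)*1e6).UTC()) // ms -> (s, ns)
	return nil
}

func main() {
	orig := Timestamp(time.Date(2019, 9, 8, 12, 0, 0, 456e6, time.UTC))
	out, _ := orig.MarshalJSON()
	var back Timestamp
	_ = back.UnmarshalJSON(out)
	fmt.Println(string(out), time.Time(back))
	// 1567944000456 2019-09-08 12:00:00.456 +0000 UTC
}
```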
backend/b2/b2.go (112 lines changed)

@@ -273,11 +273,11 @@ func (f *Fs) shouldRetryNoReauth(resp *http.Response, err error) (bool, error) {

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
    if resp != nil && resp.StatusCode == 401 {
        fs.Debugf(f, "Unauthorized: %v", err)
        // Reauth
        authErr := f.authorizeAccount()
        authErr := f.authorizeAccount(ctx)
        if authErr != nil {
            err = authErr
        }

@@ -393,7 +393,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        fs.Debugf(f, "Setting test header \"%s: %s\"", testModeHeader, testMode)
    }
    f.fillBufferTokens()
    err = f.authorizeAccount()
    err = f.authorizeAccount(ctx)
    if err != nil {
        return nil, errors.Wrap(err, "failed to authorize account")
    }

@@ -431,7 +431,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {

// authorizeAccount gets the API endpoint and auth token. Can be used
// for reauthentication too.
func (f *Fs) authorizeAccount() error {
func (f *Fs) authorizeAccount(ctx context.Context) error {
    f.authMu.Lock()
    defer f.authMu.Unlock()
    opts := rest.Opts{

@@ -443,7 +443,7 @@ func (f *Fs) authorizeAccount() error {
        ExtraHeaders: map[string]string{"Authorization": ""}, // unset the Authorization for this request
    }
    err := f.pacer.Call(func() (bool, error) {
        resp, err := f.srv.CallJSON(&opts, nil, &f.info)
        resp, err := f.srv.CallJSON(ctx, &opts, nil, &f.info)
        return f.shouldRetryNoReauth(resp, err)
    })
    if err != nil {
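The pattern being changed throughout this file is the same everywhere: each HTTP call site gains a ctx parameter, handed both to CallJSON (so the request itself can be cancelled) and to shouldRetry (so a cancelled context can end the retry loop). A generic sketch of that shape using only the standard library (callWithRetry and shouldRetry here are stand-ins for rclone's pacer and rest machinery, and example.com is a placeholder endpoint):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// shouldRetry mirrors the new signature above: with ctx available it can
// refuse to retry once the call has been cancelled.
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if ctx.Err() != nil {
		return false, ctx.Err() // cancelled: stop the retry loop
	}
	if resp != nil && (resp.StatusCode == 401 || resp.StatusCode >= 500) {
		return true, err // reauth / server trouble: worth another attempt
	}
	return err != nil, err
}

// callWithRetry plays the role of pacer.Call: keep invoking fn until it
// reports success or a non-retryable error.
func callWithRetry(ctx context.Context, fn func(context.Context) (bool, error)) error {
	for {
		retry, err := fn(ctx)
		if !retry {
			return err
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(100 * time.Millisecond):
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	err := callWithRetry(ctx, func(ctx context.Context) (bool, error) {
		// NewRequestWithContext ties cancellation to the HTTP call itself,
		// the same effect as passing ctx into rest.Client.CallJSON.
		req, err := http.NewRequestWithContext(ctx, "GET", "https://example.com/", nil)
		if err != nil {
			return false, err
		}
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return shouldRetry(ctx, nil, err)
		}
		resp.Body.Close()
		return shouldRetry(ctx, resp, nil)
	})
	fmt.Println("err:", err)
}
```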
@@ -466,10 +466,10 @@ func (f *Fs) hasPermission(permission string) bool {
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
//
// This should be returned with returnUploadURL when finished
func (f *Fs) getUploadURL(bucket string) (upload *api.GetUploadURLResponse, err error) {
func (f *Fs) getUploadURL(ctx context.Context, bucket string) (upload *api.GetUploadURLResponse, err error) {
    f.uploadMu.Lock()
    defer f.uploadMu.Unlock()
    bucketID, err := f.getBucketID(bucket)
    bucketID, err := f.getBucketID(ctx, bucket)
    if err != nil {
        return nil, err
    }

@@ -489,8 +489,8 @@ func (f *Fs) getUploadURL(bucket string) (upload *api.GetUploadURLResponse, err
        BucketID: bucketID,
    }
    err = f.pacer.Call(func() (bool, error) {
        resp, err := f.srv.CallJSON(&opts, &request, &upload)
        return f.shouldRetry(resp, err)
        resp, err := f.srv.CallJSON(ctx, &opts, &request, &upload)
        return f.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return nil, errors.Wrap(err, "failed to get upload URL")

@@ -609,7 +609,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
    if !recurse {
        delimiter = "/"
    }
    bucketID, err := f.getBucketID(bucket)
    bucketID, err := f.getBucketID(ctx, bucket)
    if err != nil {
        return err
    }

@@ -636,8 +636,8 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
    for {
        var response api.ListFileNamesResponse
        err := f.pacer.Call(func() (bool, error) {
            resp, err := f.srv.CallJSON(&opts, &request, &response)
            return f.shouldRetry(resp, err)
            resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
            return f.shouldRetry(ctx, resp, err)
        })
        if err != nil {
            return err

@@ -727,7 +727,7 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB

// listBuckets returns all the buckets to out
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
    err = f.listBucketsToFn(func(bucket *api.Bucket) error {
    err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
        d := fs.NewDir(bucket.Name, time.Time{})
        entries = append(entries, d)
        return nil

@@ -820,7 +820,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
type listBucketFn func(*api.Bucket) error

// listBucketsToFn lists the buckets to the function supplied
func (f *Fs) listBucketsToFn(fn listBucketFn) error {
func (f *Fs) listBucketsToFn(ctx context.Context, fn listBucketFn) error {
    var account = api.ListBucketsRequest{
        AccountID: f.info.AccountID,
        BucketID:  f.info.Allowed.BucketID,

@@ -832,8 +832,8 @@ func (f *Fs) listBucketsToFn(fn listBucketFn) error {
        Path: "/b2_list_buckets",
    }
    err := f.pacer.Call(func() (bool, error) {
        resp, err := f.srv.CallJSON(&opts, &account, &response)
        return f.shouldRetry(resp, err)
        resp, err := f.srv.CallJSON(ctx, &opts, &account, &response)
        return f.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return err

@@ -862,14 +862,14 @@ func (f *Fs) listBucketsToFn(fn listBucketFn) error {

// getbucketType finds the bucketType for the current bucket name
// can be one of allPublic. allPrivate, or snapshot
func (f *Fs) getbucketType(bucket string) (bucketType string, err error) {
func (f *Fs) getbucketType(ctx context.Context, bucket string) (bucketType string, err error) {
    f.bucketTypeMutex.Lock()
    bucketType = f._bucketType[bucket]
    f.bucketTypeMutex.Unlock()
    if bucketType != "" {
        return bucketType, nil
    }
    err = f.listBucketsToFn(func(bucket *api.Bucket) error {
    err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
        // listBucketsToFn reads bucket Types
        return nil
    })

@@ -897,14 +897,14 @@ func (f *Fs) clearBucketType(bucket string) {
}

// getBucketID finds the ID for the current bucket name
func (f *Fs) getBucketID(bucket string) (bucketID string, err error) {
func (f *Fs) getBucketID(ctx context.Context, bucket string) (bucketID string, err error) {
    f.bucketIDMutex.Lock()
    bucketID = f._bucketID[bucket]
    f.bucketIDMutex.Unlock()
    if bucketID != "" {
        return bucketID, nil
    }
    err = f.listBucketsToFn(func(bucket *api.Bucket) error {
    err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
        // listBucketsToFn sets IDs
        return nil
    })

@@ -970,15 +970,15 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
    }
    var response api.Bucket
    err := f.pacer.Call(func() (bool, error) {
        resp, err := f.srv.CallJSON(&opts, &request, &response)
        return f.shouldRetry(resp, err)
        resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
        return f.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        if apiErr, ok := err.(*api.Error); ok {
            if apiErr.Code == "duplicate_bucket_name" {
                // Check this is our bucket - buckets are globally unique and this
                // might be someone elses.
                _, getBucketErr := f.getBucketID(bucket)
                _, getBucketErr := f.getBucketID(ctx, bucket)
                if getBucketErr == nil {
                    // found so it is our bucket
                    return nil

@@ -1009,7 +1009,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
        Method: "POST",
        Path:   "/b2_delete_bucket",
    }
    bucketID, err := f.getBucketID(bucket)
    bucketID, err := f.getBucketID(ctx, bucket)
    if err != nil {
        return err
    }

@@ -1019,8 +1019,8 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    }
    var response api.Bucket
    err = f.pacer.Call(func() (bool, error) {
        resp, err := f.srv.CallJSON(&opts, &request, &response)
        return f.shouldRetry(resp, err)
        resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
        return f.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return errors.Wrap(err, "failed to delete bucket")

@@ -1038,8 +1038,8 @@ func (f *Fs) Precision() time.Duration {
}

// hide hides a file on the remote
func (f *Fs) hide(bucket, bucketPath string) error {
    bucketID, err := f.getBucketID(bucket)
func (f *Fs) hide(ctx context.Context, bucket, bucketPath string) error {
    bucketID, err := f.getBucketID(ctx, bucket)
    if err != nil {
        return err
    }

@@ -1053,8 +1053,8 @@ func (f *Fs) hide(bucket, bucketPath string) error {
    }
    var response api.File
    err = f.pacer.Call(func() (bool, error) {
        resp, err := f.srv.CallJSON(&opts, &request, &response)
        return f.shouldRetry(resp, err)
        resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
        return f.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        if apiErr, ok := err.(*api.Error); ok {

@@ -1070,7 +1070,7 @@ func (f *Fs) hide(bucket, bucketPath string) error {
}

// deleteByID deletes a file version given Name and ID
func (f *Fs) deleteByID(ID, Name string) error {
func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
    opts := rest.Opts{
        Method: "POST",
        Path:   "/b2_delete_file_version",

@@ -1081,8 +1081,8 @@ func (f *Fs) deleteByID(ID, Name string) error {
    }
    var response api.File
    err := f.pacer.Call(func() (bool, error) {
        resp, err := f.srv.CallJSON(&opts, &request, &response)
        return f.shouldRetry(resp, err)
        resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
        return f.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return errors.Wrapf(err, "failed to delete %q", Name)

@@ -1132,7 +1132,7 @@ func (f *Fs) purge(ctx context.Context, bucket, directory string, oldOnly bool)
                continue
            }
            tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
            err = f.deleteByID(object.ID, object.Name)
            err = f.deleteByID(ctx, object.ID, object.Name)
            checkErr(err)
            tr.Done(err)
        }

@@ -1205,7 +1205,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
        fs.Debugf(src, "Can't copy - not same remote type")
        return nil, fs.ErrorCantCopy
    }
    destBucketID, err := f.getBucketID(dstBucket)
    destBucketID, err := f.getBucketID(ctx, dstBucket)
    if err != nil {
        return nil, err
    }

@@ -1221,8 +1221,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
    }
    var response api.FileInfo
    err = f.pacer.Call(func() (bool, error) {
        resp, err := f.srv.CallJSON(&opts, &request, &response)
        return f.shouldRetry(resp, err)
        resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
        return f.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return nil, err

@@ -1245,7 +1245,7 @@ func (f *Fs) Hashes() hash.Set {

// getDownloadAuthorization returns authorization token for downloading
// without account.
func (f *Fs) getDownloadAuthorization(bucket, remote string) (authorization string, err error) {
func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string) (authorization string, err error) {
    validDurationInSeconds := time.Duration(f.opt.DownloadAuthorizationDuration).Nanoseconds() / 1e9
    if validDurationInSeconds <= 0 || validDurationInSeconds > 604800 {
        return "", errors.New("--b2-download-auth-duration must be between 1 sec and 1 week")

@@ -1253,7 +1253,7 @@ func (f *Fs) getDownloadAuthorization(bucket, remote string) (authorization stri
    if !f.hasPermission("shareFiles") {
        return "", errors.New("sharing a file link requires the shareFiles permission")
    }
    bucketID, err := f.getBucketID(bucket)
    bucketID, err := f.getBucketID(ctx, bucket)
    if err != nil {
        return "", err
    }

@@ -1268,8 +1268,8 @@ func (f *Fs) getDownloadAuthorization(bucket, remote string) (authorization stri
    }
    var response api.GetDownloadAuthorizationResponse
    err = f.pacer.Call(func() (bool, error) {
        resp, err := f.srv.CallJSON(&opts, &request, &response)
        return f.shouldRetry(resp, err)
        resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
        return f.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return "", errors.Wrap(err, "failed to get download authorization")

@@ -1301,12 +1301,12 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
    }
    absPath := "/" + bucketPath
    link = RootURL + "/file/" + urlEncode(bucket) + absPath
    bucketType, err := f.getbucketType(bucket)
    bucketType, err := f.getbucketType(ctx, bucket)
    if err != nil {
        return "", err
    }
    if bucketType == "allPrivate" || bucketType == "snapshot" {
        AuthorizationToken, err := f.getDownloadAuthorization(bucket, remote)
        AuthorizationToken, err := f.getDownloadAuthorization(ctx, bucket, remote)
        if err != nil {
            return "", err
        }

@@ -1453,7 +1453,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// timeString returns modTime as the number of milliseconds
// elapsed since January 1, 1970 UTC as a decimal string.
func timeString(modTime time.Time) string {
    return strconv.FormatInt(modTime.UnixNano()/1E6, 10)
    return strconv.FormatInt(modTime.UnixNano()/1e6, 10)
}

// parseTimeString converts a decimal string number of milliseconds

@@ -1468,7 +1468,7 @@ func (o *Object) parseTimeString(timeString string) (err error) {
        fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
        return nil
    }
    o.modTime = time.Unix(unixMilliseconds/1E3, (unixMilliseconds%1E3)*1E6).UTC()
    o.modTime = time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC()
    return nil
}

@@ -1505,8 +1505,8 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
    }
    var response api.FileInfo
    err = o.fs.pacer.Call(func() (bool, error) {
        resp, err := o.fs.srv.CallJSON(&opts, &request, &response)
        return o.fs.shouldRetry(resp, err)
        resp, err := o.fs.srv.CallJSON(ctx, &opts, &request, &response)
        return o.fs.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return err

@@ -1604,8 +1604,8 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
    }
    var resp *http.Response
    err = o.fs.pacer.Call(func() (bool, error) {
        resp, err = o.fs.srv.Call(&opts)
        return o.fs.shouldRetry(resp, err)
        resp, err = o.fs.srv.Call(ctx, &opts)
        return o.fs.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return nil, errors.Wrap(err, "failed to open for download")

@@ -1701,7 +1701,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
            o.fs.putUploadBlock(buf)
            return err
        }
        return up.Stream(buf)
        return up.Stream(ctx, buf)
    } else if err == io.EOF || err == io.ErrUnexpectedEOF {
        fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
        defer o.fs.putUploadBlock(buf)

@@ -1715,7 +1715,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
        if err != nil {
            return err
        }
        return up.Upload()
        return up.Upload(ctx)
    }

    modTime := src.ModTime(ctx)

@@ -1729,7 +1729,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
    }

    // Get upload URL
    upload, err := o.fs.getUploadURL(bucket)
    upload, err := o.fs.getUploadURL(ctx, bucket)
    if err != nil {
        return err
    }

@@ -1807,8 +1807,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
    var response api.FileInfo
    // Don't retry, return a retry error instead
    err = o.fs.pacer.CallNoRetry(func() (bool, error) {
        resp, err := o.fs.srv.CallJSON(&opts, nil, &response)
        retry, err := o.fs.shouldRetry(resp, err)
        resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &response)
        retry, err := o.fs.shouldRetry(ctx, resp, err)
        // On retryable error clear UploadURL
        if retry {
            fs.Debugf(o, "Clearing upload URL because of error: %v", err)

@@ -1829,9 +1829,9 @@ func (o *Object) Remove(ctx context.Context) error {
        return errNotWithVersions
    }
    if o.fs.opt.HardDelete {
        return o.fs.deleteByID(o.id, bucketPath)
        return o.fs.deleteByID(ctx, o.id, bucketPath)
    }
    return o.fs.hide(bucket, bucketPath)
    return o.fs.hide(ctx, bucket, bucketPath)
}

// MimeType of an Object if known, "" otherwise
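The Update hunks show the interesting part of b2's upload path: it reads one chunk ahead and uses the resulting io.EOF / io.ErrUnexpectedEOF to decide between a single direct upload and the large-file path (up.Stream for unknown sizes, up.Upload for known ones). A stripped-down sketch of that probe, with the chunk size shrunk for the example and the upload functions as stubs (rclone's default b2 chunk size is larger and tunable with --b2-chunk-size):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// chunkSize is tiny here purely for the sketch.
const chunkSize = 4 * 1024

// put reads one chunk ahead: if the input ends inside the first chunk it
// is sent as a single direct upload, otherwise the large-file path
// streams the first chunk and everything after it.
func put(in io.Reader) error {
	buf := make([]byte, chunkSize)
	n, err := io.ReadFull(in, buf)
	switch {
	case err == io.EOF, err == io.ErrUnexpectedEOF:
		return directUpload(buf[:n]) // fits in one chunk
	case err != nil:
		return err
	default:
		return streamUpload(buf, in) // more data follows the first chunk
	}
}

func directUpload(b []byte) error {
	fmt.Println("direct upload:", len(b), "bytes")
	return nil
}

func streamUpload(first []byte, rest io.Reader) error {
	n, _ := io.Copy(io.Discard, rest)
	fmt.Println("large upload:", len(first), "byte first chunk +", n, "more")
	return nil
}

func main() {
	_ = put(bytes.NewReader(make([]byte, 100)))     // one chunk: direct
	_ = put(bytes.NewReader(make([]byte, 10*1024))) // several chunks: stream
}
```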
backend/b2/upload.go

@@ -105,7 +105,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
        Path: "/b2_start_large_file",
    }
    bucket, bucketPath := o.split()
    bucketID, err := f.getBucketID(bucket)
    bucketID, err := f.getBucketID(ctx, bucket)
    if err != nil {
        return nil, err
    }

@@ -125,8 +125,8 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
    }
    var response api.StartLargeFileResponse
    err = f.pacer.Call(func() (bool, error) {
        resp, err := f.srv.CallJSON(&opts, &request, &response)
        return f.shouldRetry(resp, err)
        resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
        return f.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return nil, err

@@ -150,7 +150,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
//
// This should be returned with returnUploadURL when finished
func (up *largeUpload) getUploadURL() (upload *api.GetUploadPartURLResponse, err error) {
func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
    up.uploadMu.Lock()
    defer up.uploadMu.Unlock()
    if len(up.uploads) == 0 {

@@ -162,8 +162,8 @@ func (up *largeUpload) getUploadURL() (upload *api.GetUploadPartURLResponse, err
        ID: up.id,
    }
    err := up.f.pacer.Call(func() (bool, error) {
        resp, err := up.f.srv.CallJSON(&opts, &request, &upload)
        return up.f.shouldRetry(resp, err)
        resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
        return up.f.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return nil, errors.Wrap(err, "failed to get upload URL")

@@ -192,12 +192,12 @@ func (up *largeUpload) clearUploadURL() {
}

// Transfer a chunk
func (up *largeUpload) transferChunk(part int64, body []byte) error {
func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
    err := up.f.pacer.Call(func() (bool, error) {
        fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))

        // Get upload URL
        upload, err := up.getUploadURL()
        upload, err := up.getUploadURL(ctx)
        if err != nil {
            return false, err
        }

@@ -241,8 +241,8 @@ func (up *largeUpload) transferChunk(part int64, body []byte) error {

        var response api.UploadPartResponse

        resp, err := up.f.srv.CallJSON(&opts, nil, &response)
        retry, err := up.f.shouldRetry(resp, err)
        resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
        retry, err := up.f.shouldRetry(ctx, resp, err)
        if err != nil {
            fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
        }

@@ -264,7 +264,7 @@ func (up *largeUpload) transferChunk(part int64, body []byte) error {
}

// finish closes off the large upload
func (up *largeUpload) finish() error {
func (up *largeUpload) finish(ctx context.Context) error {
    fs.Debugf(up.o, "Finishing large file upload with %d parts", up.parts)
    opts := rest.Opts{
        Method: "POST",

@@ -276,8 +276,8 @@ func (up *largeUpload) finish() error {
    }
    var response api.FileInfo
    err := up.f.pacer.Call(func() (bool, error) {
        resp, err := up.f.srv.CallJSON(&opts, &request, &response)
        return up.f.shouldRetry(resp, err)
        resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
        return up.f.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return err

@@ -286,7 +286,7 @@ func (up *largeUpload) finish() error {
}

// cancel aborts the large upload
func (up *largeUpload) cancel() error {
func (up *largeUpload) cancel(ctx context.Context) error {
    opts := rest.Opts{
        Method: "POST",
        Path:   "/b2_cancel_large_file",

@@ -296,18 +296,18 @@ func (up *largeUpload) cancel() error {
    }
    var response api.CancelLargeFileResponse
    err := up.f.pacer.Call(func() (bool, error) {
        resp, err := up.f.srv.CallJSON(&opts, &request, &response)
        return up.f.shouldRetry(resp, err)
        resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
        return up.f.shouldRetry(ctx, resp, err)
    })
    return err
}

func (up *largeUpload) managedTransferChunk(wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
func (up *largeUpload) managedTransferChunk(ctx context.Context, wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
    wg.Add(1)
    go func(part int64, buf []byte) {
        defer wg.Done()
        defer up.f.putUploadBlock(buf)
        err := up.transferChunk(part, buf)
        err := up.transferChunk(ctx, part, buf)
        if err != nil {
            select {
            case errs <- err:

@@ -317,7 +317,7 @@ func (up *largeUpload) managedTransferChunk(wg *sync.WaitGroup, errs chan error,
    }(part, buf)
}

func (up *largeUpload) finishOrCancelOnError(err error, errs chan error) error {
func (up *largeUpload) finishOrCancelOnError(ctx context.Context, err error, errs chan error) error {
    if err == nil {
        select {
        case err = <-errs:

@@ -326,19 +326,19 @@ func (up *largeUpload) finishOrCancelOnError(err error, errs chan error) error {
    }
    if err != nil {
        fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err)
        cancelErr := up.cancel()
        cancelErr := up.cancel(ctx)
        if cancelErr != nil {
            fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr)
        }
        return err
    }
    return up.finish()
    return up.finish(ctx)
}

// Stream uploads the chunks from the input, starting with a required initial
// chunk. Assumes the file size is unknown and will upload until the input
// reaches EOF.
func (up *largeUpload) Stream(initialUploadBlock []byte) (err error) {
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
    fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
    errs := make(chan error, 1)
    hasMoreParts := true

@@ -346,7 +346,7 @@ func (up *largeUpload) Stream(initialUploadBlock []byte) (err error) {

    // Transfer initial chunk
    up.size = int64(len(initialUploadBlock))
    up.managedTransferChunk(&wg, errs, 1, initialUploadBlock)
    up.managedTransferChunk(ctx, &wg, errs, 1, initialUploadBlock)

outer:
    for part := int64(2); hasMoreParts; part++ {

@@ -388,16 +388,16 @@ outer:
        }

        // Transfer the chunk
        up.managedTransferChunk(&wg, errs, part, buf)
        up.managedTransferChunk(ctx, &wg, errs, part, buf)
    }
    wg.Wait()
    up.sha1s = up.sha1s[:up.parts]

    return up.finishOrCancelOnError(err, errs)
    return up.finishOrCancelOnError(ctx, err, errs)
}

// Upload uploads the chunks from the input
func (up *largeUpload) Upload() error {
func (up *largeUpload) Upload(ctx context.Context) error {
    fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
    remaining := up.size
    errs := make(chan error, 1)

@@ -428,10 +428,10 @@ outer:
        }

        // Transfer the chunk
        up.managedTransferChunk(&wg, errs, part, buf)
        up.managedTransferChunk(ctx, &wg, errs, part, buf)
        remaining -= reqSize
    }
    wg.Wait()

    return up.finishOrCancelOnError(err, errs)
    return up.finishOrCancelOnError(ctx, err, errs)
}
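managedTransferChunk, Stream, Upload and finishOrCancelOnError together implement a small fan-out: each chunk is sent from its own goroutine, a sync.WaitGroup tracks completion, the first error is parked in a 1-buffered channel, and after the wait the upload is either committed or cancelled. A generic sketch of that shape (transferChunk, finishUpload and cancelUpload are stand-ins):

```go
package main

import (
	"context"
	"fmt"
	"sync"
)

func transferAll(ctx context.Context, parts [][]byte) error {
	var wg sync.WaitGroup
	errs := make(chan error, 1) // holds only the first error, never blocks
	for i, buf := range parts {
		wg.Add(1)
		go func(part int, buf []byte) {
			defer wg.Done()
			if err := transferChunk(ctx, part, buf); err != nil {
				select {
				case errs <- err: // first failure wins
				default: // a failure is already recorded
				}
			}
		}(i+1, buf)
	}
	wg.Wait()
	return finishOrCancelOnError(ctx, errs)
}

func finishOrCancelOnError(ctx context.Context, errs chan error) error {
	select {
	case err := <-errs:
		cancelUpload(ctx) // abandon the large file on any chunk error
		return err
	default:
		return finishUpload(ctx) // all chunks arrived: commit
	}
}

func transferChunk(ctx context.Context, part int, buf []byte) error {
	fmt.Printf("sent chunk %d (%d bytes)\n", part, len(buf))
	return nil
}

func finishUpload(ctx context.Context) error { fmt.Println("finished"); return nil }
func cancelUpload(ctx context.Context)       { fmt.Println("cancelled") }

func main() {
	_ = transferAll(context.Background(), [][]byte{make([]byte, 10), make([]byte, 10)})
}
```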
@@ -204,7 +204,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
		return nil, err
	}

-	found, err := f.listAll(directoryID, false, true, func(item *api.Item) bool {
+	found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
		if item.Name == leaf {
			info = item
			return true
@@ -352,7 +352,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
	// Find the leaf in pathID
-	found, err = f.listAll(pathID, true, false, func(item *api.Item) bool {
+	found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
		if item.Name == leaf {
			pathIDOut = item.ID
			return true
@@ -386,7 +386,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
		},
	}
	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, &mkdir, &info)
+		resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -408,7 +408,7 @@ type listAllFn func(*api.Item) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
-func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
+func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   "/folders/" + dirID + "/items",
@@ -423,7 +423,7 @@ OUTER:
		var result api.FolderItems
		var resp *http.Response
		err = f.pacer.Call(func() (bool, error) {
-			resp, err = f.srv.CallJSON(&opts, nil, &result)
+			resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
			return shouldRetry(resp, err)
		})
		if err != nil {
@@ -479,7 +479,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
		return nil, err
	}
	var iErr error
-	_, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
+	_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
		remote := path.Join(dir, info.Name)
		if info.Type == api.ItemTypeFolder {
			// cache the directory ID for later lookups
@@ -581,14 +581,14 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
}

// deleteObject removes an object by ID
-func (f *Fs) deleteObject(id string) error {
+func (f *Fs) deleteObject(ctx context.Context, id string) error {
	opts := rest.Opts{
		Method:     "DELETE",
		Path:       "/files/" + id,
		NoResponse: true,
	}
	return f.pacer.Call(func() (bool, error) {
-		resp, err := f.srv.Call(&opts)
+		resp, err := f.srv.Call(ctx, &opts)
		return shouldRetry(resp, err)
	})
}
@@ -619,7 +619,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
	opts.Parameters.Set("recursive", strconv.FormatBool(!check))
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.Call(&opts)
+		resp, err = f.srv.Call(ctx, &opts)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -692,7 +692,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
	var resp *http.Response
	var info *api.Item
	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, &copyFile, &info)
+		resp, err = f.srv.CallJSON(ctx, &opts, &copyFile, &info)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -715,7 +715,7 @@ func (f *Fs) Purge(ctx context.Context) error {
}

// move a file or folder
-func (f *Fs) move(endpoint, id, leaf, directoryID string) (info *api.Item, err error) {
+func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (info *api.Item, err error) {
	// Move the object
	opts := rest.Opts{
		Method: "PUT",
@@ -730,7 +730,7 @@ func (f *Fs) move(endpoint, id, leaf, directoryID string) (info *api.Item, err e
	}
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, &move, &info)
+		resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -762,7 +762,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
	}

	// Do the move
-	info, err := f.move("/files/", srcObj.id, leaf, directoryID)
+	info, err := f.move(ctx, "/files/", srcObj.id, leaf, directoryID)
	if err != nil {
		return nil, err
	}
@@ -845,7 +845,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
	}

	// Do the move
-	_, err = f.move("/folders/", srcID, leaf, directoryID)
+	_, err = f.move(ctx, "/folders/", srcID, leaf, directoryID)
	if err != nil {
		return err
	}
@@ -887,7 +887,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
	var info api.Item
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, &shareLink, &info)
+		resp, err = f.srv.CallJSON(ctx, &opts, &shareLink, &info)
		return shouldRetry(resp, err)
	})
	return info.SharedLink.URL, err
@@ -1006,7 +1006,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
	}
	var info *api.Item
	err := o.fs.pacer.Call(func() (bool, error) {
-		resp, err := o.fs.srv.CallJSON(&opts, &update, &info)
+		resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
		return shouldRetry(resp, err)
	})
	return info, err
@@ -1039,7 +1039,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
		Options: options,
	}
	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.Call(&opts)
+		resp, err = o.fs.srv.Call(ctx, &opts)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -1051,7 +1051,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// upload does a single non-multipart upload
//
// This is recommended for less than 50 MB of content
-func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
+func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
	upload := api.UploadFile{
		Name:              replaceReservedChars(leaf),
		ContentModifiedAt: api.Time(modTime),
@@ -1078,7 +1078,7 @@ func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Tim
		opts.Path = "/files/content"
	}
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-		resp, err = o.fs.srv.CallJSON(&opts, &upload, &result)
+		resp, err = o.fs.srv.CallJSON(ctx, &opts, &upload, &result)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -1111,16 +1111,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

	// Upload with simple or multipart
	if size <= int64(o.fs.opt.UploadCutoff) {
-		err = o.upload(in, leaf, directoryID, modTime)
+		err = o.upload(ctx, in, leaf, directoryID, modTime)
	} else {
-		err = o.uploadMultipart(in, leaf, directoryID, size, modTime)
+		err = o.uploadMultipart(ctx, in, leaf, directoryID, size, modTime)
	}
	return err
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
-	return o.fs.deleteObject(o.id)
+	return o.fs.deleteObject(ctx, o.id)
}

// ID returns the ID of the Object if known, or "" if not
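The recurring edit throughout this file is mechanical: ctx becomes the first parameter, and it is forwarded into the rest-client call inside the pacer closure, so every retried HTTP request is cancellable. A self-contained sketch of that pattern with plain net/http stand-ins (callJSON and the inline retry loop are illustrative, not rclone's real pacer or rest API):

```go
package sketch

import (
	"context"
	"net/http"
)

// callJSON stands in for a rest-client call whose signature gained a leading ctx.
func callJSON(ctx context.Context, method, url string) (*http.Response, error) {
	req, err := http.NewRequest(method, url, nil)
	if err != nil {
		return nil, err
	}
	return http.DefaultClient.Do(req.WithContext(ctx))
}

// pacedCall mimics the pacer.Call pattern: the closure captures ctx, so each
// retry issues a cancellable request and stops once ctx is done.
func pacedCall(ctx context.Context, retries int, url string) (err error) {
	for i := 0; i < retries; i++ {
		var resp *http.Response
		resp, err = callJSON(ctx, "GET", url)
		if resp != nil {
			resp.Body.Close()
		}
		if err == nil && resp.StatusCode < 500 { // crude retry heuristic for the sketch
			return nil
		}
		if ctx.Err() != nil {
			return ctx.Err()
		}
	}
	return err
}
```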
@@ -4,6 +4,7 @@ package box

import (
	"bytes"
+	"context"
	"crypto/sha1"
	"encoding/base64"
	"encoding/json"
@@ -22,7 +23,7 @@ import (
)

// createUploadSession creates an upload session for the object
-func (o *Object) createUploadSession(leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
+func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
	opts := rest.Opts{
		Method: "POST",
		Path:   "/files/upload_sessions",
@@ -41,7 +42,7 @@ func (o *Object) createUploadSession(leaf, directoryID string, size int64) (resp
	}
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.CallJSON(&opts, &request, &response)
+		resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, &response)
		return shouldRetry(resp, err)
	})
	return
@@ -53,7 +54,7 @@ func sha1Digest(digest []byte) string {
}

// uploadPart uploads a part in an upload session
-func (o *Object) uploadPart(SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
+func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
	chunkSize := int64(len(chunk))
	sha1sum := sha1.Sum(chunk)
	opts := rest.Opts{
@@ -70,7 +71,7 @@ func (o *Object) uploadPart(SessionID string, offset, totalSize int64, chunk []b
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		opts.Body = wrap(bytes.NewReader(chunk))
-		resp, err = o.fs.srv.CallJSON(&opts, nil, &response)
+		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &response)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -80,7 +81,7 @@ func (o *Object) uploadPart(SessionID string, offset, totalSize int64, chunk []b
}

// commitUpload finishes an upload session
-func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
+func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
	opts := rest.Opts{
		Method: "POST",
		Path:   "/files/upload_sessions/" + SessionID + "/commit",
@@ -104,7 +105,7 @@ func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.T
outer:
	for tries = 0; tries < maxTries; tries++ {
		err = o.fs.pacer.Call(func() (bool, error) {
-			resp, err = o.fs.srv.CallJSON(&opts, &request, nil)
+			resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
			if err != nil {
				return shouldRetry(resp, err)
			}
@@ -154,7 +155,7 @@ outer:
}

// abortUpload cancels an upload session
-func (o *Object) abortUpload(SessionID string) (err error) {
+func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error) {
	opts := rest.Opts{
		Method: "DELETE",
		Path:   "/files/upload_sessions/" + SessionID,
@@ -163,16 +164,16 @@ func (o *Object) abortUpload(SessionID string) (err error) {
	}
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.Call(&opts)
+		resp, err = o.fs.srv.Call(ctx, &opts)
		return shouldRetry(resp, err)
	})
	return err
}

// uploadMultipart uploads a file using multipart upload
-func (o *Object) uploadMultipart(in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
+func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
	// Create upload session
-	session, err := o.createUploadSession(leaf, directoryID, size)
+	session, err := o.createUploadSession(ctx, leaf, directoryID, size)
	if err != nil {
		return errors.Wrap(err, "multipart upload create session failed")
	}
@@ -183,7 +184,7 @@ func (o *Object) uploadMultipart(in io.Reader, leaf, directoryID string, size in
	defer func() {
		if err != nil {
			fs.Debugf(o, "Cancelling multipart upload: %v", err)
-			cancelErr := o.abortUpload(session.ID)
+			cancelErr := o.abortUpload(ctx, session.ID)
			if cancelErr != nil {
				fs.Logf(o, "Failed to cancel multipart upload: %v", err)
			}
@@ -235,7 +236,7 @@ outer:
			defer wg.Done()
			defer o.fs.uploadToken.Put()
			fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
-			partResponse, err := o.uploadPart(session.ID, position, size, buf, wrap)
+			partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap)
			if err != nil {
				err = errors.Wrap(err, "multipart upload failed to upload part")
				select {
@@ -263,7 +264,7 @@ outer:
	}

	// Finalise the upload session
-	result, err := o.commitUpload(session.ID, parts, modTime, hash.Sum(nil))
+	result, err := o.commitUpload(ctx, session.ID, parts, modTime, hash.Sum(nil))
	if err != nil {
		return errors.Wrap(err, "multipart upload failed to finalize")
	}
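The uploadMultipart hunks show a create/upload/commit lifecycle with a deferred abort on failure. A condensed sketch of that structure, with simplified stand-in helpers:

```go
package sketch

import "context"

type session struct{ ID string }

func create(ctx context.Context) (*session, error)      { return &session{ID: "s1"}, nil }
func abort(ctx context.Context, s *session) error       { return nil }
func uploadParts(ctx context.Context, s *session) error { return nil }
func commit(ctx context.Context, s *session) error      { return nil }

// uploadWithCleanup mirrors uploadMultipart above: create a session, and if
// any later step fails, abort it in a deferred block so the remote side does
// not accumulate half-finished sessions.
func uploadWithCleanup(ctx context.Context) (err error) {
	s, err := create(ctx)
	if err != nil {
		return err
	}
	defer func() {
		if err != nil {
			_ = abort(ctx, s) // note: this also fails if ctx itself was cancelled
		}
	}()
	if err = uploadParts(ctx, s); err != nil {
		return err
	}
	return commit(ctx, s)
}
```

One design caveat worth noting: because the same ctx is reused for the abort call, a cancelled context dooms the cleanup request too; some implementations use a short-lived fresh context for teardown instead.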
@@ -705,16 +705,16 @@ var (

// Test test infrastructure first!
func TestRandomSource(t *testing.T) {
-	source := newRandomSource(1E8)
-	sink := newRandomSource(1E8)
+	source := newRandomSource(1e8)
+	sink := newRandomSource(1e8)
	n, err := io.Copy(sink, source)
	assert.NoError(t, err)
-	assert.Equal(t, int64(1E8), n)
+	assert.Equal(t, int64(1e8), n)

-	source = newRandomSource(1E8)
+	source = newRandomSource(1e8)
	buf := make([]byte, 16)
	_, _ = source.Read(buf)
-	sink = newRandomSource(1E8)
+	sink = newRandomSource(1e8)
	_, err = io.Copy(sink, source)
	assert.Error(t, err, "Error in stream")
}
@@ -754,23 +754,23 @@ func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
}

func TestEncryptDecrypt1(t *testing.T) {
-	testEncryptDecrypt(t, 1, 1E7)
+	testEncryptDecrypt(t, 1, 1e7)
}

func TestEncryptDecrypt32(t *testing.T) {
-	testEncryptDecrypt(t, 32, 1E8)
+	testEncryptDecrypt(t, 32, 1e8)
}

func TestEncryptDecrypt4096(t *testing.T) {
-	testEncryptDecrypt(t, 4096, 1E8)
+	testEncryptDecrypt(t, 4096, 1e8)
}

func TestEncryptDecrypt65536(t *testing.T) {
-	testEncryptDecrypt(t, 65536, 1E8)
+	testEncryptDecrypt(t, 65536, 1e8)
}

func TestEncryptDecrypt65537(t *testing.T) {
-	testEncryptDecrypt(t, 65537, 1E8)
+	testEncryptDecrypt(t, 65537, 1e8)
}

var (
@@ -803,7 +803,7 @@ func TestEncryptData(t *testing.T) {
	} {
		c, err := newCipher(NameEncryptionStandard, "", "", true)
		assert.NoError(t, err)
-		c.cryptoRand = newRandomSource(1E8) // nodge the crypto rand generator
+		c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator

		// Check encode works
		buf := bytes.NewBuffer(test.in)
@@ -826,7 +826,7 @@ func TestEncryptData(t *testing.T) {
func TestNewEncrypter(t *testing.T) {
	c, err := newCipher(NameEncryptionStandard, "", "", true)
	assert.NoError(t, err)
-	c.cryptoRand = newRandomSource(1E8) // nodge the crypto rand generator
+	c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator

	z := &zeroes{}

@@ -853,7 +853,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
	fh, err := c.newEncrypter(in, nil)
	assert.NoError(t, err)

-	n, err := io.CopyN(ioutil.Discard, fh, 1E6)
+	n, err := io.CopyN(ioutil.Discard, fh, 1e6)
	assert.Equal(t, io.ErrUnexpectedEOF, err)
	assert.Equal(t, int64(32), n)
}
@@ -885,7 +885,7 @@ func (c *closeDetector) Close() error {
func TestNewDecrypter(t *testing.T) {
	c, err := newCipher(NameEncryptionStandard, "", "", true)
	assert.NoError(t, err)
-	c.cryptoRand = newRandomSource(1E8) // nodge the crypto rand generator
+	c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator

	cd := newCloseDetector(bytes.NewBuffer(file0))
	fh, err := c.newDecrypter(cd)
@@ -936,7 +936,7 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
	fh, err := c.newDecrypter(in)
	assert.NoError(t, err)

-	n, err := io.CopyN(ioutil.Discard, fh, 1E6)
+	n, err := io.CopyN(ioutil.Discard, fh, 1e6)
	assert.Equal(t, io.ErrUnexpectedEOF, err)
	assert.Equal(t, int64(16), n)
}
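These crypt test hunks are purely cosmetic: every `1E8`-style float literal is respelled with a lowercase exponent, presumably to satisfy a style check. Both forms denote exactly the same constant; lowercase `e` is simply the conventional Go spelling:

```go
package sketch

const (
	oldStyle = 1E8 // valid Go, but unconventional capital exponent
	newStyle = 1e8 // same value, conventional spelling
)
```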
@@ -156,6 +156,7 @@ func init() {
		Description: "Google Drive",
		NewFs:       NewFs,
		Config: func(name string, m configmap.Mapper) {
+			ctx := context.TODO()
			// Parse config into Options struct
			opt := new(Options)
			err := configstruct.Set(m, opt)
@@ -177,7 +178,7 @@ func init() {
					log.Fatalf("Failed to configure token: %v", err)
				}
			}
-			err = configTeamDrive(opt, m, name)
+			err = configTeamDrive(ctx, opt, m, name)
			if err != nil {
				log.Fatalf("Failed to configure team drive: %v", err)
			}
@@ -663,7 +664,7 @@ OUTER:
	for {
		var files *drive.FileList
		err = f.pacer.Call(func() (bool, error) {
-			files, err = list.Fields(googleapi.Field(fields)).Do()
+			files, err = list.Fields(googleapi.Field(fields)).Context(ctx).Do()
			return shouldRetry(err)
		})
		if err != nil {
@@ -778,7 +779,7 @@ func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, er
}

// Figure out if the user wants to use a team drive
-func configTeamDrive(opt *Options, m configmap.Mapper, name string) error {
+func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name string) error {
	// Stop if we are running non-interactive config
	if fs.Config.AutoConfirm {
		return nil
@@ -806,7 +807,7 @@ func configTeamDrive(opt *Options, m configmap.Mapper, name string) error {
	for {
		var teamDrives *drive.TeamDriveList
		err = newPacer(opt).Call(func() (bool, error) {
-			teamDrives, err = listTeamDrives.Do()
+			teamDrives, err = listTeamDrives.Context(ctx).Do()
			return shouldRetry(err)
		})
		if err != nil {
@@ -1734,7 +1735,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
		}
	} else {
		// Upload the file in chunks
-		info, err = f.Upload(in, size, srcMimeType, "", remote, createInfo)
+		info, err = f.Upload(ctx, in, size, srcMimeType, "", remote, createInfo)
		if err != nil {
			return nil, err
		}
@@ -1975,7 +1976,7 @@ func (f *Fs) Purge(ctx context.Context) error {
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) error {
	err := f.pacer.Call(func() (bool, error) {
-		err := f.svc.Files.EmptyTrash().Do()
+		err := f.svc.Files.EmptyTrash().Context(ctx).Do()
		return shouldRetry(err)
	})

@@ -1994,7 +1995,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	var about *drive.About
	var err error
	err = f.pacer.Call(func() (bool, error) {
-		about, err = f.svc.About.Get().Fields("storageQuota").Do()
+		about, err = f.svc.About.Get().Fields("storageQuota").Context(ctx).Do()
		return shouldRetry(err)
	})
	if err != nil {
@@ -2253,7 +2254,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
			}
		}
		fs.Debugf(f, "Checking for changes on remote")
-		startPageToken, err = f.changeNotifyRunner(notifyFunc, startPageToken)
+		startPageToken, err = f.changeNotifyRunner(ctx, notifyFunc, startPageToken)
		if err != nil {
			fs.Infof(f, "Change notify listener failure: %s", err)
		}
@@ -2275,7 +2276,7 @@ func (f *Fs) changeNotifyStartPageToken() (pageToken string, err error) {
	return startPageToken.StartPageToken, nil
}

-func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), startPageToken string) (newStartPageToken string, err error) {
+func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), startPageToken string) (newStartPageToken string, err error) {
	pageToken := startPageToken
	for {
		var changeList *drive.ChangeList
@@ -2291,7 +2292,7 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), startPage
			if f.isTeamDrive {
				changesCall.TeamDriveId(f.opt.TeamDriveID)
			}
-			changeList, err = changesCall.Do()
+			changeList, err = changesCall.Context(ctx).Do()
			return shouldRetry(err)
		})
		if err != nil {
@@ -2499,7 +2500,7 @@ func (o *baseObject) Storable() bool {

// httpResponse gets an http.Response object for the object
// using the url and method passed in
-func (o *baseObject) httpResponse(url, method string, options []fs.OpenOption) (req *http.Request, res *http.Response, err error) {
+func (o *baseObject) httpResponse(ctx context.Context, url, method string, options []fs.OpenOption) (req *http.Request, res *http.Response, err error) {
	if url == "" {
		return nil, nil, errors.New("forbidden to download - check sharing permission")
	}
@@ -2507,6 +2508,7 @@ func (o *baseObject) httpResponse(url, method string, options []fs.OpenOption) (
	if err != nil {
		return req, nil, err
	}
+	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
	fs.OpenOptionAddHTTPHeaders(req.Header, options)
	if o.bytes == 0 {
		// Don't supply range requests for 0 length objects as they always fail
@@ -2577,8 +2579,8 @@ func isGoogleError(err error, what string) bool {
}

// open a url for reading
-func (o *baseObject) open(url string, options ...fs.OpenOption) (in io.ReadCloser, err error) {
-	_, res, err := o.httpResponse(url, "GET", options)
+func (o *baseObject) open(ctx context.Context, url string, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+	_, res, err := o.httpResponse(ctx, url, "GET", options)
	if err != nil {
		if isGoogleError(err, "cannotDownloadAbusiveFile") {
			if o.fs.opt.AcknowledgeAbuse {
@@ -2589,7 +2591,7 @@ func (o *baseObject) open(url string, options ...fs.OpenOption) (in io.ReadClose
					url += "?"
				}
				url += "acknowledgeAbuse=true"
-				_, res, err = o.httpResponse(url, "GET", options)
+				_, res, err = o.httpResponse(ctx, url, "GET", options)
			} else {
				err = errors.Wrap(err, "Use the --drive-acknowledge-abuse flag to download this file")
			}
@@ -2618,7 +2620,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
			o.v2Download = false
		}
	}
-	return o.baseObject.open(o.url, options...)
+	return o.baseObject.open(ctx, o.url, options...)
}
func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	// Update the size with what we are reading as it can change from
@@ -2643,7 +2645,7 @@ func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in
	if offset != 0 {
		return nil, errors.New("partial downloads are not supported while exporting Google Documents")
	}
-	in, err = o.baseObject.open(o.url, options...)
+	in, err = o.baseObject.open(ctx, o.url, options...)
	if in != nil {
		in = &openDocumentFile{o: o, in: in}
	}
@@ -2678,7 +2680,7 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
	return ioutil.NopCloser(bytes.NewReader(data)), nil
}

-func (o *baseObject) update(updateInfo *drive.File, uploadMimeType string, in io.Reader,
+func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
	src fs.ObjectInfo) (info *drive.File, err error) {
	// Make the API request to upload metadata and file data.
	size := src.Size()
@@ -2696,7 +2698,7 @@ func (o *baseObject) update(updateInfo *drive.File, uploadMimeType string, in io
		return
	}
	// Upload the file in chunks
-	return o.fs.Upload(in, size, uploadMimeType, o.id, o.remote, updateInfo)
+	return o.fs.Upload(ctx, in, size, uploadMimeType, o.id, o.remote, updateInfo)
}

// Update the already existing object
@@ -2710,7 +2712,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		MimeType:     srcMimeType,
		ModifiedTime: src.ModTime(ctx).Format(timeFormatOut),
	}
-	info, err := o.baseObject.update(updateInfo, srcMimeType, in, src)
+	info, err := o.baseObject.update(ctx, updateInfo, srcMimeType, in, src)
	if err != nil {
		return err
	}
@@ -2747,7 +2749,7 @@ func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.Object
	}
	updateInfo.MimeType = importMimeType

-	info, err := o.baseObject.update(updateInfo, srcMimeType, in, src)
+	info, err := o.baseObject.update(ctx, updateInfo, srcMimeType, in, src)
	if err != nil {
		return err
	}
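The `req = req.WithContext(ctx)` lines added above are the pre-Go-1.13 way to bind a request to a context, as the in-code comments note. On Go 1.13+ the two calls collapse into one:

```go
package sketch

import (
	"context"
	"net/http"
)

// newRequest binds a request to ctx the way the diff does; on Go 1.13+ this
// is equivalent to http.NewRequestWithContext(ctx, method, url, nil).
func newRequest(ctx context.Context, method, url string) (*http.Request, error) {
	req, err := http.NewRequest(method, url, nil)
	if err != nil {
		return nil, err
	}
	return req.WithContext(ctx), nil
}
```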
@@ -11,6 +11,7 @@
package drive

import (
+	"context"
	"encoding/json"
	"fmt"
	"io"
@@ -50,7 +51,7 @@ type resumableUpload struct {
}

// Upload the io.Reader in of size bytes with contentType and info
-func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
+func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
	params := url.Values{
		"alt":        {"json"},
		"uploadType": {"resumable"},
@@ -81,6 +82,7 @@ func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string
		if err != nil {
			return false, err
		}
+		req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
		googleapi.Expand(req.URL, map[string]string{
			"fileId": fileID,
		})
@@ -106,12 +108,13 @@ func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string
		MediaType:     contentType,
		ContentLength: size,
	}
-	return rx.Upload()
+	return rx.Upload(ctx)
}

// Make an http.Request for the range passed in
-func (rx *resumableUpload) makeRequest(start int64, body io.ReadSeeker, reqSize int64) *http.Request {
+func (rx *resumableUpload) makeRequest(ctx context.Context, start int64, body io.ReadSeeker, reqSize int64) *http.Request {
	req, _ := http.NewRequest("POST", rx.URI, body)
+	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
	req.ContentLength = reqSize
	if reqSize != 0 {
		req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
@@ -129,8 +132,8 @@ var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)
// Query drive for the amount transferred so far
//
// If error is nil, then start should be valid
-func (rx *resumableUpload) transferStatus() (start int64, err error) {
-	req := rx.makeRequest(0, nil, 0)
+func (rx *resumableUpload) transferStatus(ctx context.Context) (start int64, err error) {
+	req := rx.makeRequest(ctx, 0, nil, 0)
	res, err := rx.f.client.Do(req)
	if err != nil {
		return 0, err
@@ -157,9 +160,9 @@ func (rx *resumableUpload) transferStatus() (start int64, err error) {
}

// Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
-func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
+func (rx *resumableUpload) transferChunk(ctx context.Context, start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
	_, _ = chunk.Seek(0, io.SeekStart)
-	req := rx.makeRequest(start, chunk, chunkSize)
+	req := rx.makeRequest(ctx, start, chunk, chunkSize)
	res, err := rx.f.client.Do(req)
	if err != nil {
		return 599, err
@@ -192,7 +195,7 @@ func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunk

// Upload uploads the chunks from the input
// It retries each chunk using the pacer and --low-level-retries
-func (rx *resumableUpload) Upload() (*drive.File, error) {
+func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
	start := int64(0)
	var StatusCode int
	var err error
@@ -207,7 +210,7 @@ func (rx *resumableUpload) Upload() (*drive.File, error) {
		// Transfer the chunk
		err = rx.f.pacer.Call(func() (bool, error) {
			fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
-			StatusCode, err = rx.transferChunk(start, chunk, reqSize)
+			StatusCode, err = rx.transferChunk(ctx, start, chunk, reqSize)
			again, err := shouldRetry(err)
			if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
				again = false
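A rough, self-contained condensation of the resumable-upload loop above, with ctx threaded through each chunk transfer; transferChunk here is a stand-in for the real method, and the status handling is a simplified reading of the Google resumable-upload protocol (308 for "resume incomplete", 200/201 for completion):

```go
package sketch

import (
	"context"
	"fmt"
)

// uploadResumable sends the file chunk by chunk; ctx rides along so a
// cancelled transfer stops before the next HTTP round trip.
func uploadResumable(ctx context.Context, total, chunkSize int64,
	transferChunk func(ctx context.Context, start, size int64) (int, error)) error {
	for start := int64(0); start < total; {
		size := chunkSize
		if total-start < size {
			size = total - start
		}
		status, err := transferChunk(ctx, start, size)
		if err != nil {
			return err
		}
		switch status {
		case 308: // resume incomplete: chunk stored, send the next one
			start += size
		case 200, 201: // upload finished
			return nil
		default:
			return fmt.Errorf("unexpected status %d", status)
		}
	}
	return nil
}
```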
@@ -32,7 +32,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {

var isAlphaNumeric = regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString

-func (f *Fs) getDownloadToken(url string) (*GetTokenResponse, error) {
+func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
	request := DownloadRequest{
		URL:    url,
		Single: 1,
@@ -44,7 +44,7 @@ func (f *Fs) getDownloadToken(url string) (*GetTokenResponse, error) {

	var token GetTokenResponse
	err := f.pacer.Call(func() (bool, error) {
-		resp, err := f.rest.CallJSON(&opts, &request, &token)
+		resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -72,7 +72,7 @@ func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntr

	var sharedFiles SharedFolderResponse
	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.rest.CallJSON(&opts, nil, &sharedFiles)
+		resp, err := f.rest.CallJSON(ctx, &opts, nil, &sharedFiles)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -88,7 +88,7 @@ func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntr
	return entries, nil
}

-func (f *Fs) listFiles(directoryID int) (filesList *FilesList, err error) {
+func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesList, err error) {
	// fs.Debugf(f, "Requesting files for dir `%s`", directoryID)
	request := ListFilesRequest{
		FolderID: directoryID,
@@ -101,7 +101,7 @@ func (f *Fs) listFiles(directoryID int) (filesList *FilesList, err error) {

	filesList = &FilesList{}
	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.rest.CallJSON(&opts, &request, filesList)
+		resp, err := f.rest.CallJSON(ctx, &opts, &request, filesList)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -111,7 +111,7 @@ func (f *Fs) listFiles(directoryID int) (filesList *FilesList, err error) {
	return filesList, nil
}

-func (f *Fs) listFolders(directoryID int) (foldersList *FoldersList, err error) {
+func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *FoldersList, err error) {
	// fs.Debugf(f, "Requesting folders for id `%s`", directoryID)

	request := ListFolderRequest{
@@ -125,7 +125,7 @@ func (f *Fs) listFolders(directoryID int) (foldersList *FoldersList, err error)

	foldersList = &FoldersList{}
	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.rest.CallJSON(&opts, &request, foldersList)
+		resp, err := f.rest.CallJSON(ctx, &opts, &request, foldersList)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -153,12 +153,12 @@ func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, er
		return nil, err
	}

-	files, err := f.listFiles(folderID)
+	files, err := f.listFiles(ctx, folderID)
	if err != nil {
		return nil, err
	}

-	folders, err := f.listFolders(folderID)
+	folders, err := f.listFolders(ctx, folderID)
	if err != nil {
		return nil, err
	}
@@ -205,7 +205,7 @@ func getRemote(dir, fileName string) string {
	return dir + "/" + fileName
}

-func (f *Fs) makeFolder(leaf string, folderID int) (response *MakeFolderResponse, err error) {
+func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (response *MakeFolderResponse, err error) {
	name := replaceReservedChars(leaf)
	// fs.Debugf(f, "Creating folder `%s` in id `%s`", name, directoryID)

@@ -221,7 +221,7 @@ func (f *Fs) makeFolder(leaf string, folderID int) (response *MakeFolderResponse

	response = &MakeFolderResponse{}
	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.rest.CallJSON(&opts, &request, response)
+		resp, err := f.rest.CallJSON(ctx, &opts, &request, response)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -233,7 +233,7 @@ func (f *Fs) makeFolder(leaf string, folderID int) (response *MakeFolderResponse
	return response, err
}

-func (f *Fs) removeFolder(name string, folderID int) (response *GenericOKResponse, err error) {
+func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (response *GenericOKResponse, err error) {
	// fs.Debugf(f, "Removing folder with id `%s`", directoryID)

	request := &RemoveFolderRequest{
@@ -248,7 +248,7 @@ func (f *Fs) removeFolder(name string, folderID int) (response *GenericOKRespons
	response = &GenericOKResponse{}
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.rest.CallJSON(&opts, request, response)
+		resp, err = f.rest.CallJSON(ctx, &opts, request, response)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -263,7 +263,7 @@ func (f *Fs) removeFolder(name string, folderID int) (response *GenericOKRespons
	return response, nil
}

-func (f *Fs) deleteFile(url string) (response *GenericOKResponse, err error) {
+func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKResponse, err error) {
	request := &RemoveFileRequest{
		Files: []RmFile{
			{url},
@@ -277,7 +277,7 @@ func (f *Fs) deleteFile(url string) (response *GenericOKResponse, err error) {

	response = &GenericOKResponse{}
	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.rest.CallJSON(&opts, request, response)
+		resp, err := f.rest.CallJSON(ctx, &opts, request, response)
		return shouldRetry(resp, err)
	})

@@ -290,7 +290,7 @@ func (f *Fs) deleteFile(url string) (response *GenericOKResponse, err error) {
	return response, nil
}

-func (f *Fs) getUploadNode() (response *GetUploadNodeResponse, err error) {
+func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse, err error) {
	// fs.Debugf(f, "Requesting Upload node")

	opts := rest.Opts{
@@ -301,7 +301,7 @@ func (f *Fs) getUploadNode() (response *GetUploadNodeResponse, err error) {

	response = &GetUploadNodeResponse{}
	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.rest.CallJSON(&opts, nil, response)
+		resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -313,7 +313,7 @@ func (f *Fs) getUploadNode() (response *GetUploadNodeResponse, err error) {
	return response, err
}

-func (f *Fs) uploadFile(in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
+func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
	// fs.Debugf(f, "Uploading File `%s`", fileName)

	fileName = replaceReservedChars(fileName)
@@ -343,7 +343,7 @@ func (f *Fs) uploadFile(in io.Reader, size int64, fileName, folderID, uploadID,
	}

	err = f.pacer.CallNoRetry(func() (bool, error) {
-		resp, err := f.rest.CallJSON(&opts, nil, nil)
+		resp, err := f.rest.CallJSON(ctx, &opts, nil, nil)
		return shouldRetry(resp, err)
	})

@@ -356,7 +356,7 @@ func (f *Fs) uploadFile(in io.Reader, size int64, fileName, folderID, uploadID,
	return response, err
}

-func (f *Fs) endUpload(uploadID string, nodeurl string) (response *EndFileUploadResponse, err error) {
+func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (response *EndFileUploadResponse, err error) {
	// fs.Debugf(f, "Ending File Upload `%s`", uploadID)

	if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
@@ -377,7 +377,7 @@ func (f *Fs) endUpload(uploadID string, nodeurl string) (response *EndFileUpload

	response = &EndFileUploadResponse{}
	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.rest.CallJSON(&opts, nil, response)
+		resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
		return shouldRetry(resp, err)
	})
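One detail worth noting in the hunks above: uploadFile goes through f.pacer.CallNoRetry rather than f.pacer.Call, presumably because replaying a non-idempotent upload body is not safe at this level. The ctx parameter flows in all the same, so cancelling the transfer aborts the in-flight HTTP request rather than a retry loop.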
@@ -74,7 +74,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
	if err != nil {
		return "", false, err
	}
-	folders, err := f.listFolders(folderID)
+	folders, err := f.listFolders(ctx, folderID)
	if err != nil {
		return "", false, err
	}
@@ -95,7 +95,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
	if err != nil {
		return "", err
	}
-	resp, err := f.makeFolder(leaf, folderID)
+	resp, err := f.makeFolder(ctx, leaf, folderID)
	if err != nil {
		return "", err
	}
@@ -251,7 +251,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	if err != nil {
		return nil, err
	}
-	files, err := f.listFiles(folderID)
+	files, err := f.listFiles(ctx, folderID)
	if err != nil {
		return nil, err
	}
@@ -298,13 +298,13 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
-	if size > int64(100E9) {
+	if size > int64(100e9) {
		return nil, errors.New("File too big, cant upload")
	} else if size == 0 {
		return nil, fs.ErrorCantUploadEmptyFiles
	}

-	nodeResponse, err := f.getUploadNode()
+	nodeResponse, err := f.getUploadNode(ctx)
	if err != nil {
		return nil, err
	}
@@ -314,12 +314,12 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
		return nil, err
	}

-	_, err = f.uploadFile(in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL)
+	_, err = f.uploadFile(ctx, in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL)
	if err != nil {
		return nil, err
	}

-	fileUploadResponse, err := f.endUpload(nodeResponse.ID, nodeResponse.URL)
+	fileUploadResponse, err := f.endUpload(ctx, nodeResponse.ID, nodeResponse.URL)
	if err != nil {
		return nil, err
	}
@@ -393,7 +393,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
		return err
	}

-	_, err = f.removeFolder(dir, folderID)
+	_, err = f.removeFolder(ctx, dir, folderID)
	if err != nil {
		return err
	}

@@ -75,7 +75,7 @@ func (o *Object) SetModTime(context.Context, time.Time) error {
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
	fs.FixRangeOption(options, int64(o.file.Size))
-	downloadToken, err := o.fs.getDownloadToken(o.file.URL)
+	downloadToken, err := o.fs.getDownloadToken(ctx, o.file.URL)

	if err != nil {
		return nil, err
@@ -89,7 +89,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
	}

	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.rest.Call(&opts)
+		resp, err = o.fs.rest.Call(ctx, &opts)
		return shouldRetry(resp, err)
	})

@@ -131,7 +131,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
func (o *Object) Remove(ctx context.Context) error {
	// fs.Debugf(f, "Removing file `%s` with url `%s`", o.file.Filename, o.file.URL)

-	_, err := o.fs.deleteFile(o.file.URL)
+	_, err := o.fs.deleteFile(ctx, o.file.URL)

	if err != nil {
		return err
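The putUnchecked hunk above shows that this backend's upload is a three-step protocol. A sketch mirroring that flow with simplified stand-in signatures:

```go
package sketch

import "context"

type node struct{ ID, URL string }

func getUploadNode(ctx context.Context) (*node, error)           { return &node{}, nil }
func uploadFile(ctx context.Context, n *node, name string) error { return nil }
func endUpload(ctx context.Context, n *node) error               { return nil }

// put mirrors putUnchecked: pick an upload node, send the content, finalise.
func put(ctx context.Context, name string) error {
	n, err := getUploadNode(ctx) // 1. ask the API which node takes the bytes
	if err != nil {
		return err
	}
	if err := uploadFile(ctx, n, name); err != nil { // 2. send the content
		return err
	}
	return endUpload(ctx, n) // 3. finalise and obtain the file metadata
}
```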
@@ -374,6 +374,7 @@ func (f *Fs) setRoot(root string) {

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	ctx := context.TODO()
	var oAuthClient *http.Client

	// Parse config into Options struct
@@ -438,7 +439,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	if f.rootBucket != "" && f.rootDirectory != "" {
		// Check to see if the object exists
		err = f.pacer.Call(func() (bool, error) {
-			_, err = f.svc.Objects.Get(f.rootBucket, f.rootDirectory).Do()
+			_, err = f.svc.Objects.Get(f.rootBucket, f.rootDirectory).Context(ctx).Do()
			return shouldRetry(err)
		})
		if err == nil {
@@ -457,7 +458,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) newObjectWithInfo(remote string, info *storage.Object) (fs.Object, error) {
+func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage.Object) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
@@ -465,7 +466,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *storage.Object) (fs.Object,
	if info != nil {
		o.setMetaData(info)
	} else {
-		err := o.readMetaData() // reads info and meta, returning an error
+		err := o.readMetaData(ctx) // reads info and meta, returning an error
		if err != nil {
			return nil, err
		}
@@ -476,7 +477,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *storage.Object) (fs.Object,
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
-	return f.newObjectWithInfo(remote, nil)
+	return f.newObjectWithInfo(ctx, remote, nil)
}

// listFn is called from list to handle an object.
@@ -504,7 +505,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
	for {
		var objects *storage.Objects
		err = f.pacer.Call(func() (bool, error) {
-			objects, err = list.Do()
+			objects, err = list.Context(ctx).Do()
			return shouldRetry(err)
		})
		if err != nil {
@@ -563,12 +564,12 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}

// Convert a list item into a DirEntry
-func (f *Fs) itemToDirEntry(remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) {
+func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) {
	if isDirectory {
		d := fs.NewDir(remote, time.Time{}).SetSize(int64(object.Size))
		return d, nil
	}
-	o, err := f.newObjectWithInfo(remote, object)
+	o, err := f.newObjectWithInfo(ctx, remote, object)
	if err != nil {
		return nil, err
	}
@@ -579,7 +580,7 @@ func (f *Fs) itemToDirEntry(remote string, object *storage.Object, isDirectory b
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
	// List the objects
	err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error {
-		entry, err := f.itemToDirEntry(remote, object, isDirectory)
+		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
		if err != nil {
			return err
		}
@@ -605,7 +606,7 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
	for {
		var buckets *storage.Buckets
		err = f.pacer.Call(func() (bool, error) {
-			buckets, err = listBuckets.Do()
+			buckets, err = listBuckets.Context(ctx).Do()
			return shouldRetry(err)
		})
		if err != nil {
@@ -664,7 +665,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
	list := walk.NewListRHelper(callback)
	listR := func(bucket, directory, prefix string, addBucket bool) error {
		return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *storage.Object, isDirectory bool) error {
-			entry, err := f.itemToDirEntry(remote, object, isDirectory)
+			entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
			if err != nil {
				return err
			}
@@ -731,7 +732,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
		// List something from the bucket to see if it exists. Doing it like this enables the use of a
		// service account that only has the "Storage Object Admin" role. See #2193 for details.
		err = f.pacer.Call(func() (bool, error) {
-			_, err = f.svc.Objects.List(bucket).MaxResults(1).Do()
+			_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
			return shouldRetry(err)
		})
		if err == nil {
@@ -766,7 +767,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
			if !f.opt.BucketPolicyOnly {
				insertBucket.PredefinedAcl(f.opt.BucketACL)
			}
-			_, err = insertBucket.Do()
+			_, err = insertBucket.Context(ctx).Do()
			return shouldRetry(err)
		})
	}, nil)
@@ -783,7 +784,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
	}
	return f.cache.Remove(bucket, func() error {
		return f.pacer.Call(func() (bool, error) {
-			err = f.svc.Buckets.Delete(bucket).Do()
+			err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
			return shouldRetry(err)
		})
	})
@@ -828,7 +829,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
		if !f.opt.BucketPolicyOnly {
			copyObject.DestinationPredefinedAcl(f.opt.ObjectACL)
		}
-		newObject, err = copyObject.Do()
+		newObject, err = copyObject.Context(ctx).Do()
		return shouldRetry(err)
	})
	if err != nil {
@@ -912,10 +913,10 @@ func (o *Object) setMetaData(info *storage.Object) {
}

// readObjectInfo reads the definition for an object
-func (o *Object) readObjectInfo() (object *storage.Object, err error) {
+func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) {
	bucket, bucketPath := o.split()
	err = o.fs.pacer.Call(func() (bool, error) {
-		object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Do()
+		object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
		return shouldRetry(err)
	})
	if err != nil {
@@ -932,11 +933,11 @@ func (o *Object) readObjectInfo() (object *storage.Object, err error) {
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
-func (o *Object) readMetaData() (err error) {
+func (o *Object) readMetaData(ctx context.Context) (err error) {
	if !o.modTime.IsZero() {
		return nil
	}
-	object, err := o.readObjectInfo()
+	object, err := o.readObjectInfo(ctx)
	if err != nil {
		return err
	}
@@ -949,7 +950,7 @@ func (o *Object) readMetaData() (err error) {
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
-	err := o.readMetaData()
+	err := o.readMetaData(ctx)
	if err != nil {
		// fs.Logf(o, "Failed to read metadata: %v", err)
		return time.Now()
@@ -967,7 +968,7 @@ func metadataFromModTime(modTime time.Time) map[string]string {
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
	// read the complete existing object first
-	object, err := o.readObjectInfo()
+	object, err := o.readObjectInfo(ctx)
	if err != nil {
		return err
	}
@@ -986,7 +987,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
		if !o.fs.opt.BucketPolicyOnly {
			copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
		}
-		newObject, err = copyObject.Do()
+		newObject, err = copyObject.Context(ctx).Do()
		return shouldRetry(err)
	})
	if err != nil {
@@ -1007,6 +1008,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
	if err != nil {
		return nil, err
	}
+	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
	fs.FixRangeOption(options, o.bytes)
	fs.OpenOptionAddHTTPHeaders(req.Header, options)
	var res *http.Response
@@ -1054,7 +1056,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		if !o.fs.opt.BucketPolicyOnly {
			insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
		}
-		newObject, err = insertObject.Do()
+		newObject, err = insertObject.Context(ctx).Do()
		return shouldRetry(err)
	})
	if err != nil {
@@ -1069,7 +1071,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
func (o *Object) Remove(ctx context.Context) (err error) {
	bucket, bucketPath := o.split()
	err = o.fs.pacer.Call(func() (bool, error) {
-		err = o.fs.svc.Objects.Delete(bucket, bucketPath).Do()
+		err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
		return shouldRetry(err)
	})
	return err
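The Google Cloud Storage hunks all use the same idiom: google.golang.org/api call builders accept a context via `.Context(ctx)` before `.Do()`, which is what makes each GCS call cancellable. A small sketch against the real storage/v1 client:

```go
package sketch

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

// objectSize fetches object metadata the same way the diff does: attach ctx
// to the call builder, then execute it with Do().
func objectSize(ctx context.Context, svc *storage.Service, bucket, name string) (uint64, error) {
	obj, err := svc.Objects.Get(bucket, name).Context(ctx).Do()
	if err != nil {
		return 0, err
	}
	return obj.Size, nil
}
```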
@@ -290,7 +290,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
}
|
||||
|
||||
// fetchEndpoint gets the openid endpoint named from the Google config
|
||||
func (f *Fs) fetchEndpoint(name string) (endpoint string, err error) {
|
||||
func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, err error) {
|
||||
// Get openID config without auth
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
@@ -298,7 +298,7 @@ func (f *Fs) fetchEndpoint(name string) (endpoint string, err error) {
|
||||
}
|
||||
var openIDconfig map[string]interface{}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.unAuth.CallJSON(&opts, nil, &openIDconfig)
|
||||
resp, err := f.unAuth.CallJSON(ctx, &opts, nil, &openIDconfig)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -316,7 +316,7 @@ func (f *Fs) fetchEndpoint(name string) (endpoint string, err error) {
|
||||
|
||||
// UserInfo fetches info about the current user with oauth2
|
||||
func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err error) {
|
||||
endpoint, err := f.fetchEndpoint("userinfo_endpoint")
|
||||
endpoint, err := f.fetchEndpoint(ctx, "userinfo_endpoint")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -327,7 +327,7 @@ func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err erro
|
||||
RootURL: endpoint,
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(&opts, nil, &userInfo)
resp, err := f.srv.CallJSON(ctx, &opts, nil, &userInfo)
return shouldRetry(resp, err)
})
if err != nil {
@@ -338,7 +338,7 @@ func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err erro

// Disconnect kills the token and refresh token
func (f *Fs) Disconnect(ctx context.Context) (err error) {
endpoint, err := f.fetchEndpoint("revocation_endpoint")
endpoint, err := f.fetchEndpoint(ctx, "revocation_endpoint")
if err != nil {
return err
}
@@ -358,7 +358,7 @@ func (f *Fs) Disconnect(ctx context.Context) (err error) {
}
var res interface{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(&opts, nil, &res)
resp, err := f.srv.CallJSON(ctx, &opts, nil, &res)
return shouldRetry(resp, err)
})
if err != nil {
@@ -423,7 +423,7 @@ func findID(name string) string {

// list the albums into an internal cache
// FIXME cache invalidation
func (f *Fs) listAlbums(shared bool) (all *albums, err error) {
func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err error) {
f.albumsMu.Lock()
defer f.albumsMu.Unlock()
all, ok := f.albums[shared]
@@ -445,7 +445,7 @@ func (f *Fs) listAlbums(shared bool) (all *albums, err error) {
var result api.ListAlbums
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, nil, &result)
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -482,7 +482,7 @@ type listFn func(remote string, object *api.MediaItem, isDirectory bool) error
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
func (f *Fs) list(filter api.SearchFilter, fn listFn) (err error) {
func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err error) {
opts := rest.Opts{
Method: "POST",
Path: "/mediaItems:search",
@@ -494,7 +494,7 @@ func (f *Fs) list(filter api.SearchFilter, fn listFn) (err error) {
var result api.MediaItems
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, &filter, &result)
resp, err = f.srv.CallJSON(ctx, &opts, &filter, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -543,7 +543,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, item *api.MediaI
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) {
// List the objects
err = f.list(filter, func(remote string, item *api.MediaItem, isDirectory bool) error {
err = f.list(ctx, filter, func(remote string, item *api.MediaItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, prefix+remote, item, isDirectory)
if err != nil {
return err
@@ -638,7 +638,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
var result api.Album
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, request, &result)
resp, err = f.srv.CallJSON(ctx, &opts, request, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -654,7 +654,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
func (f *Fs) getOrCreateAlbum(ctx context.Context, albumTitle string) (album *api.Album, err error) {
f.createMu.Lock()
defer f.createMu.Unlock()
albums, err := f.listAlbums(false)
albums, err := f.listAlbums(ctx, false)
if err != nil {
return nil, err
}
@@ -708,7 +708,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
return err
}
albumTitle := match[1]
allAlbums, err := f.listAlbums(false)
allAlbums, err := f.listAlbums(ctx, false)
if err != nil {
return err
}
@@ -773,7 +773,7 @@ func (o *Object) Size() int64 {
RootURL: o.downloadURL(),
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(&opts)
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -824,7 +824,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
var item api.MediaItem
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(&opts, nil, &item)
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &item)
return shouldRetry(resp, err)
})
if err != nil {
@@ -901,7 +901,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(&opts)
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -954,7 +954,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var token []byte
var resp *http.Response
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.Call(&opts)
resp, err = o.fs.srv.Call(ctx, &opts)
if err != nil {
return shouldRetry(resp, err)
}
@@ -986,7 +986,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
var result api.BatchCreateResponse
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(&opts, request, &result)
resp, err = o.fs.srv.CallJSON(ctx, &opts, request, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1029,7 +1029,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(&opts, &request, nil)
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
return shouldRetry(resp, err)
})
if err != nil {
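The hunks above all make the same mechanical change: rest.Client's Call, CallJSON and CallXML now take a context.Context as their first argument, so cancellation propagates into the HTTP request even when the call is wrapped in a pacer retry loop. A minimal sketch of the resulting shape (names mirror the diff; the Fs fields, path and types are simplified stand-ins, not rclone's full definitions):

func (f *Fs) userInfo(ctx context.Context) (userInfo map[string]string, err error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   "/userinfo", // hypothetical path, for illustration only
	}
	err = f.pacer.Call(func() (bool, error) {
		// ctx is now the first argument; cancelling it aborts the request
		// even if the pacer decides to retry.
		resp, err := f.srv.CallJSON(ctx, &opts, nil, &userInfo)
		return shouldRetry(resp, err)
	})
	return userInfo, err
}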
@@ -20,7 +20,7 @@ import (
// file pattern parsing
type lister interface {
listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error)
listAlbums(shared bool) (all *albums, err error)
listAlbums(ctx context.Context, shared bool) (all *albums, err error)
listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
dirTime() time.Time
}
@@ -296,7 +296,7 @@ func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.S
// is a prefix of another album, or actual files, or a combination of
// the two.
func albumsToEntries(ctx context.Context, f lister, shared bool, prefix string, albumPath string) (entries fs.DirEntries, err error) {
albums, err := f.listAlbums(shared)
albums, err := f.listAlbums(ctx, shared)
if err != nil {
return nil, err
}

@@ -44,7 +44,7 @@ func (f *testLister) listDir(ctx context.Context, prefix string, filter api.Sear
}

// mock listAlbums for testing
func (f *testLister) listAlbums(shared bool) (all *albums, err error) {
func (f *testLister) listAlbums(ctx context.Context, shared bool) (all *albums, err error) {
return f.albums, nil
}
@@ -124,6 +124,7 @@ func statusError(res *http.Response, err error) error {
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -162,6 +163,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// check to see if it points to a file
req, err := http.NewRequest("HEAD", u.String(), nil)
if err == nil {
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
addHeaders(req, opt)
res, err := noRedir.Do(req)
err = statusError(res, err)
@@ -237,7 +239,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
fs: f,
remote: remote,
}
err := o.stat()
err := o.stat(ctx)
if err != nil {
return nil, err
}
@@ -355,7 +357,7 @@ func (f *Fs) addHeaders(req *http.Request) {
}

// Read the directory passed in
func (f *Fs) readDir(dir string) (names []string, err error) {
func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error) {
URL := f.url(dir)
u, err := url.Parse(URL)
if err != nil {
@@ -369,6 +371,7 @@ func (f *Fs) readDir(dir string) (names []string, err error) {
if err != nil {
return nil, errors.Wrap(err, "readDir failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
f.addHeaders(req)
res, err := f.httpClient.Do(req)
if err == nil {
@@ -408,7 +411,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if !strings.HasSuffix(dir, "/") && dir != "" {
dir += "/"
}
names, err := f.readDir(dir)
names, err := f.readDir(ctx, dir)
if err != nil {
return nil, errors.Wrapf(err, "error listing %q", dir)
}
@@ -424,7 +427,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
fs: f,
remote: remote,
}
switch err = file.stat(); err {
switch err = file.stat(ctx); err {
case nil:
entries = append(entries, file)
case fs.ErrorNotAFile:
@@ -492,12 +495,13 @@ func (o *Object) url() string {
}

// stat updates the info field in the Object
func (o *Object) stat() error {
func (o *Object) stat(ctx context.Context) error {
url := o.url()
req, err := http.NewRequest("HEAD", url, nil)
if err != nil {
return errors.Wrap(err, "stat failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
o.fs.addHeaders(req)
res, err := o.fs.httpClient.Do(req)
if err == nil && res.StatusCode == http.StatusNotFound {
@@ -546,6 +550,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if err != nil {
return nil, errors.Wrap(err, "Open failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext

// Add optional headers
for k, v := range fs.OpenOptionHeaders(options) {
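The repeated `req = req.WithContext(ctx)` lines exist because this code still targets Go 1.12, where a request and its context are attached in two steps; the comments note that Go 1.13 collapses them into one standard-library call. Both equivalent forms, side by side:

// Go 1.12 and earlier: build the request, then attach the context.
req, err := http.NewRequest("HEAD", url, nil)
if err == nil {
	req = req.WithContext(ctx)
}

// Go 1.13 and later: one call does both.
req, err = http.NewRequestWithContext(ctx, "HEAD", url, nil)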
@@ -1,6 +1,7 @@
package hubic

import (
"context"
"net/http"
"time"

@@ -26,7 +27,7 @@ func newAuth(f *Fs) *auth {
func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
const retries = 10
for try := 1; try <= retries; try++ {
err = a.f.getCredentials()
err = a.f.getCredentials(context.TODO())
if err == nil {
break
}

@@ -7,6 +7,7 @@ package hubic
// to be revisited after some actual experience.

import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
@@ -115,11 +116,12 @@ func (f *Fs) String() string {
// getCredentials reads the OpenStack Credentials using the Hubic API
//
// The credentials are read into the Fs
func (f *Fs) getCredentials() (err error) {
func (f *Fs) getCredentials(ctx context.Context) (err error) {
req, err := http.NewRequest("GET", "https://api.hubic.com/1.0/account/credentials", nil)
if err != nil {
return err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
resp, err := f.client.Do(req)
if err != nil {
return err
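getCredentials gains a context parameter, but its caller in auth.go implements swift's auth hook, which has no context to hand over, so the call site passes context.TODO(). That is the convention for exactly this situation; a tiny sketch of the distinction (the wrapper name is invented for illustration):

// fetchNow is a hypothetical wrapper showing the convention: context.TODO()
// flags a call site that should eventually receive a real context from its
// caller, while context.Background() is reserved for genuine roots such as
// main() or tests.
func fetchNow(f *Fs) error {
	return f.getCredentials(context.TODO())
}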
@@ -77,6 +77,7 @@ func init() {
Description: "JottaCloud",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
ctx := context.TODO()
tokenString, ok := m.Get("token")
if ok && tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
@@ -88,7 +89,7 @@ func init() {
srv := rest.NewClient(fshttp.NewClient(fs.Config))
fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has its own Jottacloud API key which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
if config.Confirm() {
deviceRegistration, err := registerDevice(srv)
deviceRegistration, err := registerDevice(ctx, srv)
if err != nil {
log.Fatalf("Failed to register device: %v", err)
}
@@ -113,7 +114,7 @@ func init() {
username := config.ReadLine()
password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.")

token, err := doAuth(srv, username, password)
token, err := doAuth(ctx, srv, username, password)
if err != nil {
log.Fatalf("Failed to get oauth token: %s", err)
}
@@ -132,7 +133,7 @@ func init() {
srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)

device, mountpoint, err := setupMountpoint(srv, apiSrv)
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
if err != nil {
log.Fatalf("Failed to setup mountpoint: %s", err)
}
@@ -246,7 +247,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
}

// registerDevice registers a new device for use with the jottacloud API
func registerDevice(srv *rest.Client) (reg *api.DeviceRegistrationResponse, err error) {
func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegistrationResponse, err error) {
// random generator to generate random device names
seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
randonDeviceNamePartLength := 21
@@ -269,12 +270,12 @@ func registerDevice(srv *rest.Client) (reg *api.DeviceRegistrationResponse, err
}

var deviceRegistration *api.DeviceRegistrationResponse
_, err = srv.CallJSON(&opts, nil, &deviceRegistration)
_, err = srv.CallJSON(ctx, &opts, nil, &deviceRegistration)
return deviceRegistration, err
}

// doAuth runs the actual token request
func doAuth(srv *rest.Client, username, password string) (token oauth2.Token, err error) {
func doAuth(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
// prepare our token request with username and password
values := url.Values{}
values.Set("grant_type", "PASSWORD")
@@ -291,7 +292,7 @@ func doAuth(srv *rest.Client, username, password string) (token oauth2.Token, er

// do the first request
var jsonToken api.TokenJSON
resp, err := srv.CallJSON(&opts, nil, &jsonToken)
resp, err := srv.CallJSON(ctx, &opts, nil, &jsonToken)
if err != nil {
// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
if resp != nil {
@@ -303,7 +304,7 @@ func doAuth(srv *rest.Client, username, password string) (token oauth2.Token, er
authCode = strings.Replace(authCode, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
resp, err = srv.CallJSON(&opts, nil, &jsonToken)
resp, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
}
}
}
@@ -316,13 +317,13 @@ func doAuth(srv *rest.Client, username, password string) (token oauth2.Token, er
}

// setupMountpoint sets up a custom device and mountpoint if desired by the user
func setupMountpoint(srv *rest.Client, apiSrv *rest.Client) (device, mountpoint string, err error) {
cust, err := getCustomerInfo(apiSrv)
func setupMountpoint(ctx context.Context, srv *rest.Client, apiSrv *rest.Client) (device, mountpoint string, err error) {
cust, err := getCustomerInfo(ctx, apiSrv)
if err != nil {
return "", "", err
}

acc, err := getDriveInfo(srv, cust.Username)
acc, err := getDriveInfo(ctx, srv, cust.Username)
if err != nil {
return "", "", err
}
@@ -333,7 +334,7 @@ func setupMountpoint(srv *rest.Client, apiSrv *rest.Client) (device, mountpoint
fmt.Printf("Please select the device to use. Normally this will be Jotta\n")
device = config.Choose("Devices", deviceNames, nil, false)

dev, err := getDeviceInfo(srv, path.Join(cust.Username, device))
dev, err := getDeviceInfo(ctx, srv, path.Join(cust.Username, device))
if err != nil {
return "", "", err
}
@@ -351,13 +352,13 @@ func setupMountpoint(srv *rest.Client, apiSrv *rest.Client) (device, mountpoint
}

// getCustomerInfo queries general information about the account
func getCustomerInfo(srv *rest.Client) (info *api.CustomerInfo, err error) {
func getCustomerInfo(ctx context.Context, srv *rest.Client) (info *api.CustomerInfo, err error) {
opts := rest.Opts{
Method: "GET",
Path: "account/v1/customer",
}

_, err = srv.CallJSON(&opts, nil, &info)
_, err = srv.CallJSON(ctx, &opts, nil, &info)
if err != nil {
return nil, errors.Wrap(err, "couldn't get customer info")
}
@@ -366,13 +367,13 @@ func getCustomerInfo(srv *rest.Client) (info *api.CustomerInfo, err error) {
}

// getDriveInfo queries general information about the account and the available devices and mountpoints.
func getDriveInfo(srv *rest.Client, username string) (info *api.DriveInfo, err error) {
func getDriveInfo(ctx context.Context, srv *rest.Client, username string) (info *api.DriveInfo, err error) {
opts := rest.Opts{
Method: "GET",
Path: username,
}

_, err = srv.CallXML(&opts, nil, &info)
_, err = srv.CallXML(ctx, &opts, nil, &info)
if err != nil {
return nil, errors.Wrap(err, "couldn't get drive info")
}
@@ -381,13 +382,13 @@ func getDriveInfo(srv *rest.Client, username string) (info *api.DriveInfo, err e
}

// getDeviceInfo queries information about a jottacloud device
func getDeviceInfo(srv *rest.Client, path string) (info *api.JottaDevice, err error) {
func getDeviceInfo(ctx context.Context, srv *rest.Client, path string) (info *api.JottaDevice, err error) {
opts := rest.Opts{
Method: "GET",
Path: urlPathEscape(path),
}

_, err = srv.CallXML(&opts, nil, &info)
_, err = srv.CallXML(ctx, &opts, nil, &info)
if err != nil {
return nil, errors.Wrap(err, "couldn't get device info")
}
@@ -407,7 +408,7 @@ func (f *Fs) setEndpointURL() {
}

// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(path string) (info *api.JottaFile, err error) {
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.JottaFile, err error) {
opts := rest.Opts{
Method: "GET",
Path: f.filePath(path),
@@ -415,7 +416,7 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.JottaFile, err error) {
var result api.JottaFile
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(&opts, nil, &result)
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
return shouldRetry(resp, err)
})

@@ -492,6 +493,7 @@ func grantTypeFilter(req *http.Request) {

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -546,11 +548,11 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {

// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath("")
_, err := f.readMetaDataForPath(ctx, "")
return err
})

cust, err := getCustomerInfo(f.apiSrv)
cust, err := getCustomerInfo(ctx, f.apiSrv)
if err != nil {
return nil, err
}
@@ -582,7 +584,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *api.JottaFile) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.JottaFile) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -592,7 +594,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.JottaFile) (fs.Object, e
// Set info
err = o.setMetaData(info)
} else {
err = o.readMetaData(false) // reads info and meta, returning an error
err = o.readMetaData(ctx, false) // reads info and meta, returning an error
}
if err != nil {
return nil, err
@@ -603,11 +605,11 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.JottaFile) (fs.Object, e
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
return f.newObjectWithInfo(ctx, remote, nil)
}

// CreateDir makes a directory
func (f *Fs) CreateDir(path string) (jf *api.JottaFolder, err error) {
func (f *Fs) CreateDir(ctx context.Context, path string) (jf *api.JottaFolder, err error) {
// fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
var resp *http.Response
opts := rest.Opts{
@@ -619,7 +621,7 @@ func (f *Fs) CreateDir(path string) (jf *api.JottaFolder, err error) {
opts.Parameters.Set("mkDir", "true")

err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(&opts, nil, &jf)
resp, err = f.srv.CallXML(ctx, &opts, nil, &jf)
return shouldRetry(resp, err)
})
if err != nil {
@@ -648,7 +650,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
var resp *http.Response
var result api.JottaFolder
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(&opts, nil, &result)
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
return shouldRetry(resp, err)
})

@@ -682,7 +684,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
continue
}
remote := path.Join(dir, restoreReservedChars(item.Name))
o, err := f.newObjectWithInfo(remote, item)
o, err := f.newObjectWithInfo(ctx, remote, item)
if err != nil {
continue
}
@@ -696,7 +698,7 @@ type listFileDirFn func(fs.DirEntry) error

// List the objects and directories into entries, from a
// special kind of JottaFolder representing a FileDirList
func (f *Fs) listFileDir(remoteStartPath string, startFolder *api.JottaFolder, fn listFileDirFn) error {
func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolder *api.JottaFolder, fn listFileDirFn) error {
pathPrefix := "/" + f.filePathRaw("") // Non-escaped prefix of API paths to be cut off, to be left with the remote path including the remoteStartPath
pathPrefixLength := len(pathPrefix)
startPath := path.Join(pathPrefix, remoteStartPath) // Non-escaped API path up to and including remoteStartPath, to decide if it should be created as a new dir object
@@ -725,7 +727,7 @@ func (f *Fs) listFileDir(remoteStartPath string, startFolder *api.JottaFolder, f
continue
}
remoteFile := path.Join(remoteDir, restoreReservedChars(file.Name))
o, err := f.newObjectWithInfo(remoteFile, file)
o, err := f.newObjectWithInfo(ctx, remoteFile, file)
if err != nil {
return err
}
@@ -754,7 +756,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
var resp *http.Response
var result api.JottaFolder // Could be JottaFileDirList, but JottaFolder is close enough
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(&opts, nil, &result)
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -767,7 +769,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
return errors.Wrap(err, "couldn't list files")
}
list := walk.NewListRHelper(callback)
err = f.listFileDir(dir, &result, func(entry fs.DirEntry) error {
err = f.listFileDir(ctx, dir, &result, func(entry fs.DirEntry) error {
return list.Add(entry)
})
if err != nil {
@@ -821,7 +823,7 @@ func (f *Fs) mkParentDir(ctx context.Context, dirPath string) error {

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
_, err := f.CreateDir(dir)
_, err := f.CreateDir(ctx, dir)
return err
}

@@ -860,7 +862,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)

var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(&opts)
resp, err = f.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -888,7 +890,7 @@ func (f *Fs) Purge(ctx context.Context) error {
}

// copyOrMove copies or moves directories or files depending on the method parameter
func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err error) {
func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *api.JottaFile, err error) {
opts := rest.Opts{
Method: "POST",
Path: src,
@@ -899,7 +901,7 @@ func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err erro

var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(&opts, nil, &info)
resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -928,13 +930,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
if err != nil {
return nil, err
}
info, err := f.copyOrMove("cp", srcObj.filePath(), remote)
info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)

if err != nil {
return nil, errors.Wrap(err, "couldn't copy file")
}

return f.newObjectWithInfo(remote, info)
return f.newObjectWithInfo(ctx, remote, info)
//return f.newObjectWithInfo(remote, &result)
}

@@ -958,13 +960,13 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if err != nil {
return nil, err
}
info, err := f.copyOrMove("mv", srcObj.filePath(), remote)
info, err := f.copyOrMove(ctx, "mv", srcObj.filePath(), remote)

if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
}

return f.newObjectWithInfo(remote, info)
return f.newObjectWithInfo(ctx, remote, info)
//return f.newObjectWithInfo(remote, result)
}
@@ -1002,7 +1004,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return fs.ErrorDirExists
}

_, err = f.copyOrMove("mvDir", path.Join(f.endpointURL, replaceReservedChars(srcPath))+"/", dstRemote)
_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, replaceReservedChars(srcPath))+"/", dstRemote)

if err != nil {
return errors.Wrap(err, "couldn't move directory")
@@ -1027,7 +1029,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
var resp *http.Response
var result api.JottaFile
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(&opts, nil, &result)
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
return shouldRetry(resp, err)
})

@@ -1058,7 +1060,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er

// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
info, err := getDriveInfo(f.srv, f.user)
info, err := getDriveInfo(ctx, f.srv, f.user)
if err != nil {
return nil, err
}
@@ -1113,7 +1115,8 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
err := o.readMetaData(false)
ctx := context.TODO()
err := o.readMetaData(ctx, false)
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return 0
@@ -1137,11 +1140,11 @@ func (o *Object) setMetaData(info *api.JottaFile) (err error) {
}

// readMetaData reads and updates the metadata for an object
func (o *Object) readMetaData(force bool) (err error) {
func (o *Object) readMetaData(ctx context.Context, force bool) (err error) {
if o.hasMetaData && !force {
return nil
}
info, err := o.fs.readMetaDataForPath(o.remote)
info, err := o.fs.readMetaDataForPath(ctx, o.remote)
if err != nil {
return err
}
@@ -1156,7 +1159,7 @@ func (o *Object) readMetaData(force bool) (err error) {
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData(false)
err := o.readMetaData(ctx, false)
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
@@ -1188,7 +1191,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
opts.Parameters.Set("mode", "bin")

err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(&opts)
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1298,7 +1301,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// send it
var response api.AllocateFileResponse
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.apiSrv.CallJSON(&opts, &request, &response)
resp, err = o.fs.apiSrv.CallJSON(ctx, &opts, &request, &response)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1329,7 +1332,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}

// send the remaining bytes
resp, err = o.fs.apiSrv.CallJSON(&opts, nil, &result)
resp, err = o.fs.apiSrv.CallJSON(ctx, &opts, nil, &result)
if err != nil {
return err
}
@@ -1341,7 +1344,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
o.modTime = time.Unix(result.Modified/1000, 0)
} else {
// If the file state is COMPLETE we don't need to upload it because the file was already found, but we still need to update our metadata
return o.readMetaData(true)
return o.readMetaData(ctx, true)
}

return nil
@@ -1363,7 +1366,7 @@ func (o *Object) Remove(ctx context.Context) error {
}

return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallXML(&opts, nil, nil)
resp, err := o.fs.srv.CallXML(ctx, &opts, nil, nil)
return shouldRetry(resp, err)
})
}
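Condensed, the two-step login in doAuth above works like this (a sketch assembled from the hunks; the check for whether the failure actually signals that 2FA is required is elided):

resp, err := srv.CallJSON(ctx, &opts, nil, &jsonToken)
if err != nil && resp != nil {
	// The first request is expected to fail when 2FA is on; retry once
	// with the one-time code supplied in an extra header.
	authCode := config.ReadLine()                     // prompt the user
	authCode = strings.Replace(authCode, "-", "", -1) // keep the 6 digits
	opts.ExtraHeaders = map[string]string{"X-Jottacloud-Otp": authCode}
	resp, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
}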
backend/mailru/api/bin.go (new file, 107 lines)
@@ -0,0 +1,107 @@
package api

// BIN protocol constants
const (
BinContentType = "application/x-www-form-urlencoded"
TreeIDLength = 12
DunnoNodeIDLength = 16
)

// Operations in binary protocol
const (
OperationAddFile = 103 // 0x67
OperationRename = 105 // 0x69
OperationCreateFolder = 106 // 0x6A
OperationFolderList = 117 // 0x75
OperationSharedFoldersList = 121 // 0x79
// TODO investigate opcodes below
Operation154MaybeItemInfo = 154 // 0x9A
Operation102MaybeAbout = 102 // 0x66
Operation104MaybeDelete = 104 // 0x68
)

// CreateDir protocol constants
const (
MkdirResultOK = 0
MkdirResultSourceNotExists = 1
MkdirResultAlreadyExists = 4
MkdirResultExistsDifferentCase = 9
MkdirResultInvalidName = 10
MkdirResultFailed254 = 254
)

// Move result codes
const (
MoveResultOK = 0
MoveResultSourceNotExists = 1
MoveResultFailed002 = 2
MoveResultAlreadyExists = 4
MoveResultFailed005 = 5
MoveResultFailed254 = 254
)

// AddFile result codes
const (
AddResultOK = 0
AddResultError01 = 1
AddResultDunno04 = 4
AddResultWrongPath = 5
AddResultNoFreeSpace = 7
AddResultDunno09 = 9
AddResultInvalidName = 10
AddResultNotModified = 12
AddResultFailedA = 253
AddResultFailedB = 254
)

// List request options
const (
ListOptTotalSpace = 1
ListOptDelete = 2
ListOptFingerprint = 4
ListOptUnknown8 = 8
ListOptUnknown16 = 16
ListOptFolderSize = 32
ListOptUsedSpace = 64
ListOptUnknown128 = 128
ListOptUnknown256 = 256
)

// ListOptDefaults ...
const ListOptDefaults = ListOptUnknown128 | ListOptUnknown256 | ListOptFolderSize | ListOptTotalSpace | ListOptUsedSpace

// List parse flags
const (
ListParseDone = 0
ListParseReadItem = 1
ListParsePin = 2
ListParsePinUpper = 3
ListParseUnknown15 = 15
)

// List operation results
const (
ListResultOK = 0
ListResultNotExists = 1
ListResultDunno02 = 2
ListResultDunno03 = 3
ListResultAlreadyExists04 = 4
ListResultDunno05 = 5
ListResultDunno06 = 6
ListResultDunno07 = 7
ListResultDunno08 = 8
ListResultAlreadyExists09 = 9
ListResultDunno10 = 10
ListResultDunno11 = 11
ListResultDunno12 = 12
ListResultFailedB = 253
ListResultFailedA = 254
)

// Directory item types
const (
ListItemMountPoint = 0
ListItemFile = 1
ListItemFolder = 2
ListItemSharedFolder = 3
)
backend/mailru/api/helpers.go (new file, 225 lines)
@@ -0,0 +1,225 @@
package api

// BIN protocol helpers

import (
"bufio"
"bytes"
"encoding/binary"
"io"
"log"
"time"

"github.com/pkg/errors"
"github.com/rclone/rclone/lib/readers"
)

// protocol errors
var (
ErrorPrematureEOF = errors.New("Premature EOF")
ErrorInvalidLength = errors.New("Invalid length")
ErrorZeroTerminate = errors.New("String must end with zero")
)

// BinWriter is a binary protocol writer
type BinWriter struct {
b *bytes.Buffer // growing byte buffer
a []byte // temporary buffer for next varint
}

// NewBinWriter creates a binary protocol helper
func NewBinWriter() *BinWriter {
return &BinWriter{
b: new(bytes.Buffer),
a: make([]byte, binary.MaxVarintLen64),
}
}

// Bytes returns binary data
func (w *BinWriter) Bytes() []byte {
return w.b.Bytes()
}

// Reader returns io.Reader with binary data
func (w *BinWriter) Reader() io.Reader {
return bytes.NewReader(w.b.Bytes())
}

// WritePu16 writes a short as unsigned varint
func (w *BinWriter) WritePu16(val int) {
if val < 0 || val > 65535 {
log.Fatalf("Invalid UInt16 %v", val)
}
w.WritePu64(int64(val))
}

// WritePu32 writes a signed long as unsigned varint
func (w *BinWriter) WritePu32(val int64) {
if val < 0 || val > 4294967295 {
log.Fatalf("Invalid UInt32 %v", val)
}
w.WritePu64(val)
}

// WritePu64 writes a non-negative int64 as an unsigned varint
func (w *BinWriter) WritePu64(val int64) {
if val < 0 {
log.Fatalf("Invalid UInt64 %v", val)
}
w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
}

// WriteString writes a zero-terminated string
func (w *BinWriter) WriteString(str string) {
buf := []byte(str)
w.WritePu64(int64(len(buf) + 1))
w.b.Write(buf)
w.b.WriteByte(0)
}

// Write writes a byte buffer
func (w *BinWriter) Write(buf []byte) {
w.b.Write(buf)
}

// WriteWithLength writes a byte buffer prepended with its length as varint
func (w *BinWriter) WriteWithLength(buf []byte) {
w.WritePu64(int64(len(buf)))
w.b.Write(buf)
}

// BinReader is a binary protocol reader helper
type BinReader struct {
b *bufio.Reader
count *readers.CountingReader
err error // keeps the first error encountered
}

// NewBinReader creates a binary protocol reader helper
func NewBinReader(reader io.Reader) *BinReader {
r := &BinReader{}
r.count = readers.NewCountingReader(reader)
r.b = bufio.NewReader(r.count)
return r
}

// Count returns number of bytes read
func (r *BinReader) Count() uint64 {
return r.count.BytesRead()
}

// Error returns first encountered error or nil
func (r *BinReader) Error() error {
return r.err
}

// check() keeps the first error encountered in a stream
func (r *BinReader) check(err error) bool {
if err == nil {
return true
}
if r.err == nil {
// keep the first error
r.err = err
}
if err != io.EOF {
log.Fatalf("Error parsing response: %v", err)
}
return false
}

// ReadByteAsInt reads a single byte as an int, returns -1 for EOF or errors
func (r *BinReader) ReadByteAsInt() int {
if octet, err := r.b.ReadByte(); r.check(err) {
return int(octet)
}
return -1
}

// ReadByteAsShort reads a single byte as an int16, returns -1 for EOF or errors
func (r *BinReader) ReadByteAsShort() int16 {
if octet, err := r.b.ReadByte(); r.check(err) {
return int16(octet)
}
return -1
}

// ReadIntSpl reads two bytes as little-endian uint16, returns -1 for EOF or errors
func (r *BinReader) ReadIntSpl() int {
var val uint16
if r.check(binary.Read(r.b, binary.LittleEndian, &val)) {
return int(val)
}
return -1
}

// ReadULong returns uint64 equivalent of -1 for EOF or errors
func (r *BinReader) ReadULong() uint64 {
if val, err := binary.ReadUvarint(r.b); r.check(err) {
return val
}
return 0xffffffffffffffff
}

// ReadPu32 returns -1 for EOF or errors
func (r *BinReader) ReadPu32() int64 {
if val, err := binary.ReadUvarint(r.b); r.check(err) {
return int64(val)
}
return -1
}

// ReadNBytes reads given number of bytes, returns invalid data for EOF or errors
func (r *BinReader) ReadNBytes(len int) []byte {
buf := make([]byte, len)
n, err := r.b.Read(buf)
if r.check(err) {
return buf
}
if n != len {
r.check(ErrorPrematureEOF)
}
return buf
}

// ReadBytesByLength reads buffer length and its bytes
func (r *BinReader) ReadBytesByLength() []byte {
len := r.ReadPu32()
if len < 0 {
r.check(ErrorInvalidLength)
return []byte{}
}
return r.ReadNBytes(int(len))
}

// ReadString reads a zero-terminated string with length
func (r *BinReader) ReadString() string {
len := int(r.ReadPu32())
if len < 1 {
r.check(ErrorInvalidLength)
return ""
}
buf := make([]byte, len-1)
n, err := r.b.Read(buf)
if !r.check(err) {
return ""
}
if n != len-1 {
r.check(ErrorPrematureEOF)
return ""
}
zeroByte, err := r.b.ReadByte()
if !r.check(err) {
return ""
}
if zeroByte != 0 {
r.check(ErrorZeroTerminate)
return ""
}
return string(buf)
}

// ReadDate reads a Unix encoded time
func (r *BinReader) ReadDate() time.Time {
return time.Unix(r.ReadPu32(), 0)
}
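A round-trip sketch for the helpers above: compose a payload with BinWriter, then read it back with BinReader. It is written as if inside package api (so the unqualified names and the log import resolve); the field sequence itself is invented for illustration — the real request layouts live in the mailru backend proper. Note the reader latches the first error, so a single Error() check at the end suffices.

func exampleRoundTrip() {
	w := NewBinWriter()
	w.WritePu16(OperationFolderList) // opcode from bin.go, written as a varint
	w.WriteString("/some/folder")    // length-prefixed, zero-terminated
	w.WritePu32(ListOptDefaults)     // list options bitmask

	r := NewBinReader(w.Reader())
	opcode := r.ReadPu32() // varints come back in the same order
	folder := r.ReadString()
	opts := r.ReadPu32()
	if r.Error() != nil {
		log.Fatalf("parse failed: %v", r.Error())
	}
	_, _, _ = opcode, folder, opts
}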
backend/mailru/api/m1.go (new file, 248 lines)
@@ -0,0 +1,248 @@
package api

import (
"fmt"
)

// M1 protocol constants and structures
const (
APIServerURL = "https://cloud.mail.ru"
PublicLinkURL = "https://cloud.mail.ru/public/"
DispatchServerURL = "https://dispatcher.cloud.mail.ru"
OAuthURL = "https://o2.mail.ru/token"
OAuthClientID = "cloud-win"
)

// ServerErrorResponse represents erroneous API response.
type ServerErrorResponse struct {
Message string `json:"body"`
Time int64 `json:"time"`
Status int `json:"status"`
}

func (e *ServerErrorResponse) Error() string {
return fmt.Sprintf("server error %d (%s)", e.Status, e.Message)
}

// FileErrorResponse represents erroneous API response for a file
type FileErrorResponse struct {
Body struct {
Home struct {
Value string `json:"value"`
Error string `json:"error"`
} `json:"home"`
} `json:"body"`
Status int `json:"status"`
Account string `json:"email,omitempty"`
Time int64 `json:"time,omitempty"`
Message string // non-json, calculated field
}

func (e *FileErrorResponse) Error() string {
return fmt.Sprintf("file error %d (%s)", e.Status, e.Body.Home.Error)
}

// UserInfoResponse contains account metadata
type UserInfoResponse struct {
Body struct {
AccountType string `json:"account_type"`
AccountVerified bool `json:"account_verified"`
Cloud struct {
Beta struct {
Allowed bool `json:"allowed"`
Asked bool `json:"asked"`
} `json:"beta"`
Billing struct {
ActiveCostID string `json:"active_cost_id"`
ActiveRateID string `json:"active_rate_id"`
AutoProlong bool `json:"auto_prolong"`
Basequota int64 `json:"basequota"`
Enabled bool `json:"enabled"`
Expires int `json:"expires"`
Prolong bool `json:"prolong"`
Promocodes struct {
} `json:"promocodes"`
Subscription []interface{} `json:"subscription"`
Version string `json:"version"`
} `json:"billing"`
Bonuses struct {
CameraUpload bool `json:"camera_upload"`
Complete bool `json:"complete"`
Desktop bool `json:"desktop"`
Feedback bool `json:"feedback"`
Links bool `json:"links"`
Mobile bool `json:"mobile"`
Registration bool `json:"registration"`
} `json:"bonuses"`
Enable struct {
Sharing bool `json:"sharing"`
} `json:"enable"`
FileSizeLimit int64 `json:"file_size_limit"`
Space struct {
BytesTotal int64 `json:"bytes_total"`
BytesUsed int `json:"bytes_used"`
Overquota bool `json:"overquota"`
} `json:"space"`
} `json:"cloud"`
Cloudflags struct {
Exists bool `json:"exists"`
} `json:"cloudflags"`
Domain string `json:"domain"`
Login string `json:"login"`
Newbie bool `json:"newbie"`
UI struct {
ExpandLoader bool `json:"expand_loader"`
Kind string `json:"kind"`
Sidebar bool `json:"sidebar"`
Sort struct {
Order string `json:"order"`
Type string `json:"type"`
} `json:"sort"`
Thumbs bool `json:"thumbs"`
} `json:"ui"`
} `json:"body"`
Email string `json:"email"`
Status int `json:"status"`
Time int64 `json:"time"`
}

// ListItem ...
type ListItem struct {
Count struct {
Folders int `json:"folders"`
Files int `json:"files"`
} `json:"count,omitempty"`
Kind string `json:"kind"`
Type string `json:"type"`
Name string `json:"name"`
Home string `json:"home"`
Size int64 `json:"size"`
Mtime int64 `json:"mtime,omitempty"`
Hash string `json:"hash,omitempty"`
VirusScan string `json:"virus_scan,omitempty"`
Tree string `json:"tree,omitempty"`
Grev int `json:"grev,omitempty"`
Rev int `json:"rev,omitempty"`
}

// ItemInfoResponse ...
type ItemInfoResponse struct {
Email string `json:"email"`
Body ListItem `json:"body"`
Time int64 `json:"time"`
Status int `json:"status"`
}

// FolderInfoResponse ...
type FolderInfoResponse struct {
Body struct {
Count struct {
Folders int `json:"folders"`
Files int `json:"files"`
} `json:"count"`
Tree string `json:"tree"`
Name string `json:"name"`
Grev int `json:"grev"`
Size int64 `json:"size"`
Sort struct {
Order string `json:"order"`
Type string `json:"type"`
} `json:"sort"`
Kind string `json:"kind"`
Rev int `json:"rev"`
Type string `json:"type"`
Home string `json:"home"`
List []ListItem `json:"list"`
} `json:"body,omitempty"`
Time int64 `json:"time"`
Status int `json:"status"`
Email string `json:"email"`
}

// ShardInfoResponse ...
type ShardInfoResponse struct {
Email string `json:"email"`
Body struct {
Video []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"video"`
ViewDirect []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"view_direct"`
WeblinkView []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_view"`
WeblinkVideo []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_video"`
WeblinkGet []struct {
Count int `json:"count"`
URL string `json:"url"`
} `json:"weblink_get"`
Stock []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"stock"`
WeblinkThumbnails []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_thumbnails"`
PublicUpload []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"public_upload"`
Auth []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"auth"`
Web []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"web"`
View []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"view"`
Upload []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"upload"`
Get []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"get"`
Thumbnails []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"thumbnails"`
} `json:"body"`
Time int64 `json:"time"`
Status int `json:"status"`
}

// CleanupResponse ...
type CleanupResponse struct {
Email string `json:"email"`
Time int64 `json:"time"`
StatusStr string `json:"status"`
}

// GenericResponse ...
type GenericResponse struct {
Email string `json:"email"`
Time int64 `json:"time"`
Status int `json:"status"`
// ignore other fields
}

// GenericBodyResponse ...
type GenericBodyResponse struct {
Email string `json:"email"`
Body string `json:"body"`
Time int64 `json:"time"`
Status int `json:"status"`
}
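The two error shapes above implement the error interface, so a decoded response can be promoted straight to an error value: a file-level failure carries the failing path and reason under body.home, while a flat server error carries a plain string body. A small sketch of decoding a file-level failure, written as if inside package api and assuming encoding/json is imported (the JSON sample is fabricated):

func exampleDecodeError() {
	payload := []byte(`{"body":{"home":{"value":"/f.txt","error":"not_exists"}},"status":404}`)
	var fileErr FileErrorResponse
	if err := json.Unmarshal(payload, &fileErr); err == nil && fileErr.Body.Home.Error != "" {
		fmt.Println(fileErr.Error()) // prints: file error 404 (not_exists)
	}
}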
backend/mailru/mailru.go (new file, 2378 lines; diff suppressed because it is too large)
backend/mailru/mailru_test.go (new file, 18 lines)
@@ -0,0 +1,18 @@
// Test Mailru filesystem interface
package mailru_test

import (
"testing"

"github.com/rclone/rclone/backend/mailru"
"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestMailru:",
NilObject: (*mailru.Object)(nil),
SkipBadWindowsCharacters: true,
})
}
backend/mailru/mrhash/mrhash.go (new file, 134 lines)
@@ -0,0 +1,134 @@
// Package mrhash implements the mailru hash, which is a modified SHA1.
// If file size is less than or equal to the SHA1 digest size (20 bytes),
// its hash is simply its data right-padded with zero bytes.
// Hash sum of a larger file is computed as a SHA1 sum of the file data
// bytes concatenated with a decimal representation of the data length.
package mrhash

import (
"crypto/sha1"
"encoding"
"encoding/hex"
"errors"
"hash"
"strconv"
)

const (
// BlockSize of the checksum in bytes.
BlockSize = sha1.BlockSize
// Size of the checksum in bytes.
Size = sha1.Size
startString = "mrCloud"
hashError = "hash function returned error"
)

// Global errors
var (
ErrorInvalidHash = errors.New("invalid hash")
)

type digest struct {
total int // bytes written into hash so far
sha hash.Hash // underlying SHA1
small []byte // small content
}

// New returns a new hash.Hash computing the Mailru checksum.
func New() hash.Hash {
d := &digest{}
d.Reset()
return d
}

// Write writes len(p) bytes from p to the underlying data stream. It returns
// the number of bytes written from p (0 <= n <= len(p)) and any error
// encountered that caused the write to stop early. Write must return a non-nil
// error if it returns n < len(p). Write must not modify the slice data, even
// temporarily.
//
// Implementations must not retain p.
func (d *digest) Write(p []byte) (n int, err error) {
n, err = d.sha.Write(p)
if err != nil {
panic(hashError)
}
d.total += n
if d.total <= Size {
d.small = append(d.small, p...)
}
return n, nil
}

// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (d *digest) Sum(b []byte) []byte {
// If content is small, return it padded to Size
if d.total <= Size {
padded := make([]byte, Size)
copy(padded, d.small)
return append(b, padded...)
}
endString := strconv.Itoa(d.total)
copy, err := cloneSHA1(d.sha)
if err == nil {
_, err = copy.Write([]byte(endString))
}
if err != nil {
panic(hashError)
}
return copy.Sum(b)
}

// cloneSHA1 clones state of SHA1 hash
func cloneSHA1(orig hash.Hash) (clone hash.Hash, err error) {
state, err := orig.(encoding.BinaryMarshaler).MarshalBinary()
if err != nil {
return nil, err
}
clone = sha1.New()
err = clone.(encoding.BinaryUnmarshaler).UnmarshalBinary(state)
return
}

// Reset resets the Hash to its initial state.
func (d *digest) Reset() {
d.sha = sha1.New()
_, _ = d.sha.Write([]byte(startString))
d.total = 0
}

// Size returns the number of bytes Sum will return.
func (d *digest) Size() int {
return Size
}

// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (d *digest) BlockSize() int {
return BlockSize
}

// Sum returns the Mailru checksum of the data.
func Sum(data []byte) []byte {
var d digest
d.Reset()
_, _ = d.Write(data)
return d.Sum(nil)
}

// DecodeString converts a string to the Mailru hash
func DecodeString(s string) ([]byte, error) {
b, err := hex.DecodeString(s)
if err != nil || len(b) != Size {
return nil, ErrorInvalidHash
}
return b, nil
}

// must implement this interface
var (
_ hash.Hash = (*digest)(nil)
)
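The two regimes described in the package comment are easy to see through the public Sum helper; the expected digests below are the n=1 and n=21 vectors from mrhash_test.go that follows. A sketch assuming bytes, encoding/hex, fmt and the mrhash package are imported:

func exampleSums() {
	small := mrhash.Sum([]byte("A")) // 1 byte <= 20: zero-padded data, no SHA1
	fmt.Println(hex.EncodeToString(small))
	// Output: 4100000000000000000000000000000000000000

	big := mrhash.Sum(bytes.Repeat([]byte("A"), 21)) // 21 bytes > 20: SHA1 path
	fmt.Println(hex.EncodeToString(big))
	// Output: eb1d05e78a18691a5aa196a6c2b60cd40b5faafb
}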
backend/mailru/mrhash/mrhash_test.go (new file, 81 lines)
@@ -0,0 +1,81 @@
package mrhash_test

import (
"encoding/hex"
"fmt"
"testing"

"github.com/rclone/rclone/backend/mailru/mrhash"
"github.com/stretchr/testify/assert"
)

func testChunk(t *testing.T, chunk int) {
data := make([]byte, chunk)
for i := 0; i < chunk; i++ {
data[i] = 'A'
}
for _, test := range []struct {
n int
want string
}{
{0, "0000000000000000000000000000000000000000"},
{1, "4100000000000000000000000000000000000000"},
{2, "4141000000000000000000000000000000000000"},
{19, "4141414141414141414141414141414141414100"},
{20, "4141414141414141414141414141414141414141"},
{21, "eb1d05e78a18691a5aa196a6c2b60cd40b5faafb"},
{22, "037e6d960601118a0639afbeff30fe716c66ed2d"},
{4096, "45a16aa192502b010280fb5b44274c601a91fd9f"},
{4194303, "fa019d5bd26498cf6abe35e0d61801bf19bf704b"},
{4194304, "5ed0e07aa6ea5c1beb9402b4d807258f27d40773"},
{4194305, "67bd0b9247db92e0e7d7e29a0947a50fedcb5452"},
{8388607, "41a8e2eb044c2e242971b5445d7be2a13fc0dd84"},
{8388608, "267a970917c624c11fe624276ec60233a66dc2c0"},
{8388609, "37b60b308d553d2732aefb62b3ea88f74acfa13f"},
} {
d := mrhash.New()
var toWrite int
for toWrite = test.n; toWrite >= chunk; toWrite -= chunk {
n, err := d.Write(data)
assert.Nil(t, err)
assert.Equal(t, chunk, n)
}
n, err := d.Write(data[:toWrite])
assert.Nil(t, err)
assert.Equal(t, toWrite, n)
got1 := hex.EncodeToString(d.Sum(nil))
assert.Equal(t, test.want, got1, fmt.Sprintf("when testing length %d", n))
got2 := hex.EncodeToString(d.Sum(nil))
assert.Equal(t, test.want, got2, fmt.Sprintf("when testing length %d (2nd sum)", n))
}
}

func TestHashChunk16M(t *testing.T) { testChunk(t, 16*1024*1024) }
func TestHashChunk8M(t *testing.T) { testChunk(t, 8*1024*1024) }
func TestHashChunk4M(t *testing.T) { testChunk(t, 4*1024*1024) }
func TestHashChunk2M(t *testing.T) { testChunk(t, 2*1024*1024) }
func TestHashChunk1M(t *testing.T) { testChunk(t, 1*1024*1024) }
func TestHashChunk64k(t *testing.T) { testChunk(t, 64*1024) }
func TestHashChunk32k(t *testing.T) { testChunk(t, 32*1024) }
func TestHashChunk2048(t *testing.T) { testChunk(t, 2048) }
func TestHashChunk2047(t *testing.T) { testChunk(t, 2047) }

func TestSumCalledTwice(t *testing.T) {
d := mrhash.New()
assert.NotPanics(t, func() { d.Sum(nil) })
d.Reset()
assert.NotPanics(t, func() { d.Sum(nil) })
assert.NotPanics(t, func() { d.Sum(nil) })
_, _ = d.Write([]byte{1})
assert.NotPanics(t, func() { d.Sum(nil) })
}

func TestSize(t *testing.T) {
d := mrhash.New()
assert.Equal(t, 20, d.Size())
}

func TestBlockSize(t *testing.T) {
d := mrhash.New()
assert.Equal(t, 64, d.BlockSize())
}
@@ -72,6 +72,7 @@ func init() {
 		Description: "Microsoft OneDrive",
 		NewFs:       NewFs,
 		Config: func(name string, m configmap.Mapper) {
+			ctx := context.TODO()
 			err := oauthutil.Config("onedrive", name, m, oauthConfig)
 			if err != nil {
 				log.Fatalf("Failed to configure token: %v", err)
@@ -143,7 +144,7 @@ func init() {
 			}

 			sites := siteResponse{}
-			_, err := srv.CallJSON(&opts, nil, &sites)
+			_, err := srv.CallJSON(ctx, &opts, nil, &sites)
 			if err != nil {
 				log.Fatalf("Failed to query available sites: %v", err)
 			}
@@ -172,7 +173,7 @@ func init() {
 			// query Microsoft Graph
 			if finalDriveID == "" {
 				drives := drivesResponse{}
-				_, err := srv.CallJSON(&opts, nil, &drives)
+				_, err := srv.CallJSON(ctx, &opts, nil, &drives)
 				if err != nil {
 					log.Fatalf("Failed to query available drives: %v", err)
 				}
@@ -194,7 +195,7 @@ func init() {
 				RootURL: graphURL,
 				Path:    "/drives/" + finalDriveID + "/root"}
 			var rootItem api.Item
-			_, err = srv.CallJSON(&opts, nil, &rootItem)
+			_, err = srv.CallJSON(ctx, &opts, nil, &rootItem)
 			if err != nil {
 				log.Fatalf("Failed to query root for drive %s: %v", finalDriveID, err)
 			}
@@ -343,10 +344,10 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
 // instead of simply using `drives/driveID/root:/itemPath` because it works for
 // "shared with me" folders in OneDrive Personal (See #2536, #2778)
 // This path pattern comes from https://github.com/OneDrive/onedrive-api-docs/issues/908#issuecomment-417488480
-func (f *Fs) readMetaDataForPathRelativeToID(normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
+func (f *Fs) readMetaDataForPathRelativeToID(ctx context.Context, normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
 	opts := newOptsCall(normalizedID, "GET", ":/"+withTrailingColon(rest.URLPathEscape(replaceReservedChars(relPath))))
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &info)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
 		return shouldRetry(resp, err)
 	})

@@ -371,7 +372,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
 		}
 	}
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &info)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
 		return shouldRetry(resp, err)
 	})
 	return info, resp, err
@@ -426,7 +427,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
 		}
 	}

-	return f.readMetaDataForPathRelativeToID(baseNormalizedID, relPath)
+	return f.readMetaDataForPathRelativeToID(ctx, baseNormalizedID, relPath)
 }

 // errorHandler parses a non 2xx error response into an error
@@ -592,7 +593,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
 	if !ok {
 		return "", false, errors.New("couldn't find parent ID")
 	}
-	info, resp, err := f.readMetaDataForPathRelativeToID(pathID, leaf)
+	info, resp, err := f.readMetaDataForPathRelativeToID(ctx, pathID, leaf)
 	if err != nil {
 		if resp != nil && resp.StatusCode == http.StatusNotFound {
 			return "", false, nil
@@ -619,7 +620,7 @@ func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, e
 		ConflictBehavior: "fail",
 	}
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, &mkdir, &info)
+		resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -642,7 +643,7 @@ type listAllFn func(*api.Item) bool
 // Lists the directory required calling the user function on each item found
 //
 // If the user fn ever returns true then it early exits with found = true
-func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
+func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
 	// Top parameter asks for bigger pages of data
 	// https://dev.onedrive.com/odata/optional-query-parameters.htm
 	opts := newOptsCall(dirID, "GET", "/children?$top=1000")
@@ -651,7 +652,7 @@ OUTER:
 		var result api.ListChildrenResponse
 		var resp *http.Response
 		err = f.pacer.Call(func() (bool, error) {
-			resp, err = f.srv.CallJSON(&opts, nil, &result)
+			resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
 			return shouldRetry(resp, err)
 		})
 		if err != nil {
@@ -709,7 +710,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		return nil, err
 	}
 	var iErr error
-	_, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
+	_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
 		if !f.opt.ExposeOneNoteFiles && info.GetPackageType() == api.PackageTypeOneNote {
 			fs.Debugf(info.Name, "OneNote file not shown in directory listing")
 			return false
@@ -793,12 +794,12 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 }

 // deleteObject removes an object by ID
-func (f *Fs) deleteObject(id string) error {
+func (f *Fs) deleteObject(ctx context.Context, id string) error {
 	opts := newOptsCall(id, "DELETE", "")
 	opts.NoResponse = true

 	return f.pacer.Call(func() (bool, error) {
-		resp, err := f.srv.Call(&opts)
+		resp, err := f.srv.Call(ctx, &opts)
 		return shouldRetry(resp, err)
 	})
 }
@@ -821,7 +822,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 	}
 	if check {
 		// check to see if there are any items
-		found, err := f.listAll(rootID, false, false, func(item *api.Item) bool {
+		found, err := f.listAll(ctx, rootID, false, false, func(item *api.Item) bool {
 			return true
 		})
 		if err != nil {
@@ -831,7 +832,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 			return fs.ErrorDirectoryNotEmpty
 		}
 	}
-	err = f.deleteObject(rootID)
+	err = f.deleteObject(ctx, rootID)
 	if err != nil {
 		return err
 	}
@@ -941,7 +942,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, &copyReq, nil)
+		resp, err = f.srv.CallJSON(ctx, &opts, &copyReq, nil)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -1029,7 +1030,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	var resp *http.Response
 	var info api.Item
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, &move, &info)
+		resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -1122,7 +1123,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	}

 	// Get timestamps of src so they can be preserved
-	srcInfo, _, err := srcFs.readMetaDataForPathRelativeToID(srcID, "")
+	srcInfo, _, err := srcFs.readMetaDataForPathRelativeToID(ctx, srcID, "")
 	if err != nil {
 		return err
 	}
@@ -1144,7 +1145,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	var resp *http.Response
 	var info api.Item
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, &move, &info)
+		resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -1170,7 +1171,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 	}
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &drive)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &drive)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -1210,7 +1211,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
 	var resp *http.Response
 	var result api.CreateShareLinkResponse
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, &share, &result)
+		resp, err = f.srv.CallJSON(ctx, &opts, &share, &result)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -1370,7 +1371,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
 	}
 	var info *api.Item
 	err := o.fs.pacer.Call(func() (bool, error) {
-		resp, err := o.fs.srv.CallJSON(&opts, &update, &info)
+		resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
 		return shouldRetry(resp, err)
 	})
 	return info, err
@@ -1405,7 +1406,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	opts.Options = options

 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.Call(&opts)
+		resp, err = o.fs.srv.Call(ctx, &opts)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -1441,7 +1442,7 @@ func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (re
 	createRequest.Item.FileSystemInfo.LastModifiedDateTime = api.Timestamp(modTime)
 	var resp *http.Response
 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.CallJSON(&opts, &createRequest, &response)
+		resp, err = o.fs.srv.CallJSON(ctx, &opts, &createRequest, &response)
 		if apiErr, ok := err.(*api.Error); ok {
 			if apiErr.ErrorInfo.Code == "nameAlreadyExists" {
 				// Make the error more user-friendly
@@ -1454,7 +1455,7 @@ func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (re
 }

 // uploadFragment uploads a part
-func (o *Object) uploadFragment(url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64) (info *api.Item, err error) {
+func (o *Object) uploadFragment(ctx context.Context, url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64) (info *api.Item, err error) {
 	opts := rest.Opts{
 		Method:  "PUT",
 		RootURL: url,
@@ -1467,7 +1468,7 @@ func (o *Object) uploadFragment(url string, start int64, totalSize int64, chunk
 	var body []byte
 	err = o.fs.pacer.Call(func() (bool, error) {
 		_, _ = chunk.Seek(0, io.SeekStart)
-		resp, err = o.fs.srv.Call(&opts)
+		resp, err = o.fs.srv.Call(ctx, &opts)
 		if err != nil {
 			return shouldRetry(resp, err)
 		}
@@ -1487,7 +1488,7 @@ func (o *Object) uploadFragment(url string, start int64, totalSize int64, chunk
 }

 // cancelUploadSession cancels an upload session
-func (o *Object) cancelUploadSession(url string) (err error) {
+func (o *Object) cancelUploadSession(ctx context.Context, url string) (err error) {
 	opts := rest.Opts{
 		Method:     "DELETE",
 		RootURL:    url,
@@ -1495,7 +1496,7 @@ func (o *Object) cancelUploadSession(url string) (err error) {
 	}
 	var resp *http.Response
 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.Call(&opts)
+		resp, err = o.fs.srv.Call(ctx, &opts)
 		return shouldRetry(resp, err)
 	})
 	return
@@ -1517,7 +1518,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
 			}

 			fs.Debugf(o, "Cancelling multipart upload")
-			cancelErr := o.cancelUploadSession(uploadURL)
+			cancelErr := o.cancelUploadSession(ctx, uploadURL)
 			if cancelErr != nil {
 				fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
 			}
@@ -1553,7 +1554,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
 		}
 		seg := readers.NewRepeatableReader(io.LimitReader(in, n))
 		fs.Debugf(o, "Uploading segment %d/%d size %d", position, size, n)
-		info, err = o.uploadFragment(uploadURL, position, size, seg, n)
+		info, err = o.uploadFragment(ctx, uploadURL, position, size, seg, n)
 		if err != nil {
 			return nil, err
 		}
@@ -1594,7 +1595,7 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
 	}

 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.CallJSON(&opts, nil, &info)
+		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &info)
 		if apiErr, ok := err.(*api.Error); ok {
 			if apiErr.ErrorInfo.Code == "nameAlreadyExists" {
 				// Make the error more user-friendly
@@ -1646,7 +1647,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

 // Remove an object
 func (o *Object) Remove(ctx context.Context) error {
-	return o.fs.deleteObject(o.id)
+	return o.fs.deleteObject(ctx, o.id)
 }

 // MimeType of an Object if known, "" otherwise
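Every hunk in this file follows the same mechanical pattern: a context.Context is threaded from the public method, through the pacer's retry closure, into the rest client call, so that cancelling the context aborts both the in-flight HTTP request and any further retries. The pacer and rest types are rclone-internal; the sketch below reproduces only the shape of the pattern using the standard library, and its retry count and backoff are placeholders rather than rclone's actual behaviour:

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// callWithRetry shows the shape of the pacer.Call pattern: the context is
// checked on every attempt and attached to every request, so cancellation
// stops the loop before the next request is sent.
func callWithRetry(ctx context.Context, url string, attempts int) (*http.Response, error) {
	var lastErr error
	for i := 0; i < attempts; i++ {
		if err := ctx.Err(); err != nil {
			return nil, err // stop retrying once the context is cancelled
		}
		req, err := http.NewRequest("GET", url, nil)
		if err != nil {
			return nil, err // malformed URL: not retryable
		}
		resp, err := http.DefaultClient.Do(req.WithContext(ctx))
		if err == nil && resp.StatusCode < 500 {
			return resp, nil // success, or a non-retryable client error
		}
		if err == nil {
			resp.Body.Close() // 5xx: discard this response and retry
			err = fmt.Errorf("server error: %s", resp.Status)
		}
		lastErr = err
		time.Sleep(100 * time.Millisecond) // crude stand-in for the pacer's backoff
	}
	return nil, lastErr
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	_, err := callWithRetry(ctx, "https://example.com", 3)
	fmt.Println(err)
}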
@@ -161,7 +161,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 			Method: "POST",
 			Path:   "/session/login.json",
 		}
-		resp, err = f.srv.CallJSON(&opts, &account, &f.session)
+		resp, err = f.srv.CallJSON(ctx, &opts, &account, &f.session)
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -246,7 +246,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 }

 // deleteObject removes an object by ID
-func (f *Fs) deleteObject(id string) error {
+func (f *Fs) deleteObject(ctx context.Context, id string) error {
 	return f.pacer.Call(func() (bool, error) {
 		removeDirData := removeFolder{SessionID: f.session.SessionID, FolderID: id}
 		opts := rest.Opts{
@@ -254,7 +254,7 @@ func (f *Fs) deleteObject(id string) error {
 			NoResponse: true,
 			Path:       "/folder/remove.json",
 		}
-		resp, err := f.srv.CallJSON(&opts, &removeDirData, nil)
+		resp, err := f.srv.CallJSON(ctx, &opts, &removeDirData, nil)
 		return f.shouldRetry(resp, err)
 	})
 }
@@ -275,14 +275,14 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 	if err != nil {
 		return err
 	}
-	item, err := f.readMetaDataForFolderID(rootID)
+	item, err := f.readMetaDataForFolderID(ctx, rootID)
 	if err != nil {
 		return err
 	}
 	if check && len(item.Files) != 0 {
 		return errors.New("folder not empty")
 	}
-	err = f.deleteObject(rootID)
+	err = f.deleteObject(ctx, rootID)
 	if err != nil {
 		return err
 	}
@@ -353,7 +353,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 			Method: "POST",
 			Path:   "/file/move_copy.json",
 		}
-		resp, err = f.srv.CallJSON(&opts, &copyFileData, &response)
+		resp, err = f.srv.CallJSON(ctx, &opts, &copyFileData, &response)
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -410,7 +410,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 			Method: "POST",
 			Path:   "/file/move_copy.json",
 		}
-		resp, err = f.srv.CallJSON(&opts, &copyFileData, &response)
+		resp, err = f.srv.CallJSON(ctx, &opts, &copyFileData, &response)
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -509,7 +509,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 			Method: "POST",
 			Path:   "/folder/move_copy.json",
 		}
-		resp, err = f.srv.CallJSON(&opts, &moveFolderData, &response)
+		resp, err = f.srv.CallJSON(ctx, &opts, &moveFolderData, &response)
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -589,14 +589,14 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
 }

 // readMetaDataForPath reads the metadata from the path
-func (f *Fs) readMetaDataForFolderID(id string) (info *FolderList, err error) {
+func (f *Fs) readMetaDataForFolderID(ctx context.Context, id string) (info *FolderList, err error) {
 	var resp *http.Response
 	opts := rest.Opts{
 		Method: "GET",
 		Path:   "/folder/list.json/" + f.session.SessionID + "/" + id,
 	}
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &info)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -641,7 +641,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 			Method: "POST",
 			Path:   "/upload/create_file.json",
 		}
-		resp, err = o.fs.srv.CallJSON(&opts, &createFileData, &response)
+		resp, err = o.fs.srv.CallJSON(ctx, &opts, &createFileData, &response)
 		return o.fs.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -694,7 +694,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 			Method: "POST",
 			Path:   "/folder.json",
 		}
-		resp, err = f.srv.CallJSON(&opts, &createDirData, &response)
+		resp, err = f.srv.CallJSON(ctx, &opts, &createDirData, &response)
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -722,7 +722,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
 			Method: "GET",
 			Path:   "/folder/list.json/" + f.session.SessionID + "/" + pathID,
 		}
-		resp, err = f.srv.CallJSON(&opts, nil, &folderList)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &folderList)
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -769,7 +769,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	}
 	folderList := FolderList{}
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &folderList)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &folderList)
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -853,7 +853,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	}
 	update := modTimeFile{SessionID: o.fs.session.SessionID, FileID: o.id, FileModificationTime: strconv.FormatInt(modTime.Unix(), 10)}
 	err := o.fs.pacer.Call(func() (bool, error) {
-		resp, err := o.fs.srv.CallJSON(&opts, &update, nil)
+		resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, nil)
 		return o.fs.shouldRetry(resp, err)
 	})

@@ -873,7 +873,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	}
 	var resp *http.Response
 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.Call(&opts)
+		resp, err = o.fs.srv.Call(ctx, &opts)
 		return o.fs.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -892,7 +892,7 @@ func (o *Object) Remove(ctx context.Context) error {
 			NoResponse: true,
 			Path:       "/file.json/" + o.fs.session.SessionID + "/" + o.id,
 		}
-		resp, err := o.fs.srv.Call(&opts)
+		resp, err := o.fs.srv.Call(ctx, &opts)
 		return o.fs.shouldRetry(resp, err)
 	})
 }
@@ -920,7 +920,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			Method: "POST",
 			Path:   "/upload/open_file_upload.json",
 		}
-		resp, err := o.fs.srv.CallJSON(&opts, &openUploadData, &openResponse)
+		resp, err := o.fs.srv.CallJSON(ctx, &opts, &openUploadData, &openResponse)
 		return o.fs.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -966,7 +966,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			MultipartFileName: o.remote, // ..name of the file for the attached file

 		}
-		resp, err = o.fs.srv.CallJSON(&opts, nil, &reply)
+		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &reply)
 		return o.fs.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -989,7 +989,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			Method: "POST",
 			Path:   "/upload/close_file_upload.json",
 		}
-		resp, err = o.fs.srv.CallJSON(&opts, &closeUploadData, &closeResponse)
+		resp, err = o.fs.srv.CallJSON(ctx, &opts, &closeUploadData, &closeResponse)
 		return o.fs.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -1015,7 +1015,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			NoResponse: true,
 			Path:       "/file/access.json",
 		}
-		resp, err = o.fs.srv.CallJSON(&opts, &update, nil)
+		resp, err = o.fs.srv.CallJSON(ctx, &opts, &update, nil)
 		return o.fs.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -1040,7 +1040,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 		Method: "GET",
 		Path:   "/folder/itembyname.json/" + o.fs.session.SessionID + "/" + directoryID + "?name=" + url.QueryEscape(replaceReservedChars(leaf)),
 	}
-	resp, err = o.fs.srv.CallJSON(&opts, nil, &folderList)
+	resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &folderList)
 	return o.fs.shouldRetry(resp, err)
 })
 if err != nil {
@@ -201,7 +201,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
 		return nil, err
 	}

-	found, err := f.listAll(directoryID, false, true, func(item *api.Item) bool {
+	found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
 		if item.Name == leaf {
 			info = item
 			return true
@@ -334,7 +334,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 // FindLeaf finds a directory of name leaf in the folder with ID pathID
 func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
 	// Find the leaf in pathID
-	found, err = f.listAll(pathID, true, false, func(item *api.Item) bool {
+	found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
 		if item.Name == leaf {
 			pathIDOut = item.ID
 			return true
@@ -357,7 +357,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 	opts.Parameters.Set("name", replaceReservedChars(leaf))
 	opts.Parameters.Set("folderid", dirIDtoNumber(pathID))
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &result)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
 		err = result.Error.Update(err)
 		return shouldRetry(resp, err)
 	})
@@ -400,7 +400,7 @@ type listAllFn func(*api.Item) bool
 // Lists the directory required calling the user function on each item found
 //
 // If the user fn ever returns true then it early exits with found = true
-func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
+func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
 	opts := rest.Opts{
 		Method: "GET",
 		Path:   "/listfolder",
@@ -412,7 +412,7 @@ func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn list
 	var result api.ItemResult
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &result)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
 		err = result.Error.Update(err)
 		return shouldRetry(resp, err)
 	})
@@ -458,7 +458,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		return nil, err
 	}
 	var iErr error
-	_, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
+	_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
 		remote := path.Join(dir, info.Name)
 		if info.IsFolder {
 			// cache the directory ID for later lookups
@@ -563,7 +563,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 	var resp *http.Response
 	var result api.ItemResult
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &result)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
 		err = result.Error.Update(err)
 		return shouldRetry(resp, err)
 	})
@@ -628,7 +628,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	var resp *http.Response
 	var result api.ItemResult
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &result)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
 		err = result.Error.Update(err)
 		return shouldRetry(resp, err)
 	})
@@ -666,7 +666,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 	var resp *http.Response
 	var result api.Error
 	return f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &result)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
 		err = result.Update(err)
 		return shouldRetry(resp, err)
 	})
@@ -706,7 +706,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	var resp *http.Response
 	var result api.ItemResult
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &result)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
 		err = result.Error.Update(err)
 		return shouldRetry(resp, err)
 	})
@@ -803,7 +803,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	var resp *http.Response
 	var result api.ItemResult
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &result)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
 		err = result.Error.Update(err)
 		return shouldRetry(resp, err)
 	})
@@ -830,7 +830,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 	var resp *http.Response
 	var q api.UserInfo
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &q)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &q)
 		err = q.Error.Update(err)
 		return shouldRetry(resp, err)
 	})
@@ -881,7 +881,7 @@ func (o *Object) getHashes(ctx context.Context) (err error) {
 	}
 	opts.Parameters.Set("fileid", fileIDtoNumber(o.id))
 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.CallJSON(&opts, nil, &result)
+		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
 		err = result.Error.Update(err)
 		return shouldRetry(resp, err)
 	})
@@ -984,7 +984,7 @@ func (o *Object) Storable() bool {
 }

 // downloadURL fetches the download link
-func (o *Object) downloadURL() (URL string, err error) {
+func (o *Object) downloadURL(ctx context.Context) (URL string, err error) {
 	if o.id == "" {
 		return "", errors.New("can't download - no id")
 	}
@@ -1000,7 +1000,7 @@ func (o *Object) downloadURL() (URL string, err error) {
 	}
 	opts.Parameters.Set("fileid", fileIDtoNumber(o.id))
 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.CallJSON(&opts, nil, &result)
+		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
 		err = result.Error.Update(err)
 		return shouldRetry(resp, err)
 	})
@@ -1016,7 +1016,7 @@ func (o *Object) downloadURL() (URL string, err error) {

 // Open an object for read
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
-	url, err := o.downloadURL()
+	url, err := o.downloadURL(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -1027,7 +1027,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		Options: options,
 	}
 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.Call(&opts)
+		resp, err = o.fs.srv.Call(ctx, &opts)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -1104,7 +1104,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	}

 	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-		resp, err = o.fs.srv.CallJSON(&opts, nil, &result)
+		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
 		err = result.Error.Update(err)
 		return shouldRetry(resp, err)
 	})
@@ -1134,7 +1134,7 @@ func (o *Object) Remove(ctx context.Context) error {
 	var result api.ItemResult
 	opts.Parameters.Set("fileid", fileIDtoNumber(o.id))
 	return o.fs.pacer.Call(func() (bool, error) {
-		resp, err := o.fs.srv.CallJSON(&opts, nil, &result)
+		resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &result)
 		err = result.Error.Update(err)
 		return shouldRetry(resp, err)
 	})
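Alongside the ctx plumbing, these hunks preserve the result.Error.Update(err) idiom: the HTTP call can succeed at the transport level while the JSON body still carries an application-level error, and Update folds that into err before shouldRetry inspects it. The api.ItemResult type is rclone-internal; a hypothetical envelope type sketching the same idea:

package main

import (
	"errors"
	"fmt"
)

// apiError is a hypothetical stand-in for an error payload that a
// service returns inside an HTTP 200 response body.
type apiError struct {
	Result int    `json:"result"`
	Msg    string `json:"error"`
}

// Update folds the API-level error into err, keeping any existing
// transport error if one is already set - the same shape as the
// result.Error.Update(err) calls in the diff above.
func (e *apiError) Update(err error) error {
	if err != nil {
		return err // the transport error wins
	}
	if e.Result != 0 {
		return errors.New(e.Msg)
	}
	return nil
}

func main() {
	res := &apiError{Result: 2005, Msg: "directory does not exist"}
	err := res.Update(nil) // no transport error, but the body says it failed
	fmt.Println(err)
}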
@@ -200,7 +200,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, directoriesOn
 	}

 	lcLeaf := strings.ToLower(leaf)
-	found, err := f.listAll(directoryID, directoriesOnly, filesOnly, func(item *api.Item) bool {
+	found, err := f.listAll(ctx, directoryID, directoriesOnly, filesOnly, func(item *api.Item) bool {
 		if strings.ToLower(item.Name) == lcLeaf {
 			info = item
 			return true
@@ -361,7 +361,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 // FindLeaf finds a directory of name leaf in the folder with ID pathID
 func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
 	// Find the leaf in pathID
-	found, err = f.listAll(pathID, true, false, func(item *api.Item) bool {
+	found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
 		if item.Name == leaf {
 			pathIDOut = item.ID
 			return true
@@ -386,7 +386,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 		},
 	}
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &info)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -411,7 +411,7 @@ type listAllFn func(*api.Item) bool
 // Lists the directory required calling the user function on each item found
 //
 // If the user fn ever returns true then it early exits with found = true
-func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
+func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
 	opts := rest.Opts{
 		Method: "GET",
 		Path:   "/folder/list",
@@ -423,7 +423,7 @@ func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn list
 	var result api.FolderListResponse
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &result)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -475,7 +475,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		return nil, err
 	}
 	var iErr error
-	_, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
+	_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
 		remote := path.Join(dir, info.Name)
 		if info.Type == api.ItemTypeFolder {
 			// cache the directory ID for later lookups
@@ -589,7 +589,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {

 	// need to check if empty as it will delete recursively by default
 	if check {
-		found, err := f.listAll(rootID, false, false, func(item *api.Item) bool {
+		found, err := f.listAll(ctx, rootID, false, false, func(item *api.Item) bool {
 			return true
 		})
 		if err != nil {
@@ -611,7 +611,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 	var resp *http.Response
 	var result api.Response
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &result)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -690,7 +690,7 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
 	var resp *http.Response
 	var result api.Response
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &result)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -860,7 +860,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 		Parameters: f.baseParams(),
 	}
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &info)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -992,7 +992,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		Options: options,
 	}
 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.Call(&opts)
+		resp, err = o.fs.srv.Call(ctx, &opts)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -1036,7 +1036,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		},
 	}
 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.CallJSON(&opts, nil, &info)
+		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &info)
 		if err != nil {
 			return shouldRetry(resp, err)
 		}
@@ -1096,7 +1096,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	}
 	var result api.Response
 	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-		resp, err = o.fs.srv.CallJSON(&opts, nil, &result)
+		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -1138,7 +1138,7 @@ func (f *Fs) renameLeaf(ctx context.Context, isFile bool, id string, newLeaf str
 	var resp *http.Response
 	var result api.Response
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &result)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -1163,7 +1163,7 @@ func (f *Fs) remove(ctx context.Context, id string) (err error) {
 	var resp *http.Response
 	var result api.Response
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &result)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -289,6 +289,7 @@ func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID
 	if err != nil {
 		return false, err
 	}
+	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
 	req.Header.Set("tus-resumable", "1.0.0")
 	req.Header.Set("upload-length", strconv.FormatInt(size, 10))
 	b64name := base64.StdEncoding.EncodeToString([]byte(name))
@@ -354,7 +355,7 @@ func (f *Fs) sendUpload(ctx context.Context, location string, size int64, in io.
 func (f *Fs) transferChunk(ctx context.Context, location string, start int64, chunk io.ReadSeeker, chunkSize int64) (fileID int64, err error) {
 	// defer log.Trace(f, "location=%v, start=%v, chunkSize=%v", location, start, chunkSize)("fileID=%v, err=%v", fileID, &err)
 	_, _ = chunk.Seek(0, io.SeekStart)
-	req, err := f.makeUploadPatchRequest(location, chunk, start, chunkSize)
+	req, err := f.makeUploadPatchRequest(ctx, location, chunk, start, chunkSize)
 	if err != nil {
 		return 0, err
 	}
@@ -379,11 +380,12 @@ func (f *Fs) transferChunk(ctx context.Context, location string, start int64, ch
 	return fileID, nil
 }

-func (f *Fs) makeUploadPatchRequest(location string, in io.Reader, offset, length int64) (*http.Request, error) {
+func (f *Fs) makeUploadPatchRequest(ctx context.Context, location string, in io.Reader, offset, length int64) (*http.Request, error) {
 	req, err := http.NewRequest("PATCH", location, in)
 	if err != nil {
 		return nil, err
 	}
+	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
 	req.Header.Set("tus-resumable", "1.0.0")
 	req.Header.Set("upload-offset", strconv.FormatInt(offset, 10))
 	req.Header.Set("content-length", strconv.FormatInt(length, 10))
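The comment repeated through these hunks, "go1.13 can use NewRequestWithContext", marks the transitional idiom: on the Go versions still supported here, a request is built first and the context attached afterwards with WithContext. The two forms are equivalent; a small sketch (the tus header is taken from the diff above, the URL and body are illustrative):

package main

import (
	"context"
	"fmt"
	"net/http"
	"strings"
)

func buildPatch(ctx context.Context, location, body string) (*http.Request, error) {
	// Pre-go1.13 form, as used in the diff: build the request, then
	// attach the context.
	req, err := http.NewRequest("PATCH", location, strings.NewReader(body))
	if err != nil {
		return nil, err
	}
	req = req.WithContext(ctx)

	// On go1.13+ the two steps collapse into one, as the comment in the
	// diff anticipates:
	//   req, err := http.NewRequestWithContext(ctx, "PATCH", location, strings.NewReader(body))

	req.Header.Set("tus-resumable", "1.0.0") // tus resumable-upload header
	return req, nil
}

func main() {
	req, err := buildPatch(context.Background(), "https://example.com/upload/123", "chunk")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(req.Method, req.Header.Get("tus-resumable"))
}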
@@ -223,7 +223,11 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	var resp *http.Response
 	headers := fs.OpenOptionHeaders(options)
 	err = o.fs.pacer.Call(func() (bool, error) {
-		req, _ := http.NewRequest(http.MethodGet, storageURL, nil)
+		req, err := http.NewRequest(http.MethodGet, storageURL, nil)
+		if err != nil {
+			return shouldRetry(err)
+		}
+		req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
 		req.Header.Set("User-Agent", o.fs.client.UserAgent)

 		// merge headers with extra headers
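Besides attaching the context, this hunk stops discarding the error from http.NewRequest. With req, _ := a malformed storage URL leaves req nil, and the very next Header.Set call panics; checking the error instead surfaces it to the retry logic. A minimal illustration of the failure mode being fixed:

package main

import (
	"fmt"
	"net/http"
)

func open(storageURL string) error {
	// Before the change this read req, _ := http.NewRequest(...): a bad
	// URL would yield req == nil and panic on the Header.Set below.
	req, err := http.NewRequest(http.MethodGet, storageURL, nil)
	if err != nil {
		return err // reported to the caller instead of panicking
	}
	req.Header.Set("User-Agent", "example")
	return nil
}

func main() {
	fmt.Println(open("://not a url")) // the URL parse error is printed, no panic
}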
@@ -1942,7 +1942,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	if err != nil {
 		return errors.Wrap(err, "s3 upload: new request")
 	}
-	httpReq = httpReq.WithContext(ctx)
+	httpReq = httpReq.WithContext(ctx) // go1.13 can use NewRequestWithContext

 	// set the headers we signed and the length
 	httpReq.Header = headers
@@ -3,6 +3,7 @@ package odrvcookie

 import (
 	"bytes"
+	"context"
 	"encoding/xml"
 	"html/template"
 	"net/http"
@@ -91,8 +92,8 @@ func New(pUser, pPass, pEndpoint string) CookieAuth {

 // Cookies creates a CookieResponse. It fetches the auth token and then
 // retrieves the Cookies
-func (ca *CookieAuth) Cookies() (*CookieResponse, error) {
-	tokenResp, err := ca.getSPToken()
+func (ca *CookieAuth) Cookies(ctx context.Context) (*CookieResponse, error) {
+	tokenResp, err := ca.getSPToken(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -140,7 +141,7 @@ func (ca *CookieAuth) getSPCookie(conf *SuccessResponse) (*CookieResponse, error
 	return &cookieResponse, nil
 }

-func (ca *CookieAuth) getSPToken() (conf *SuccessResponse, err error) {
+func (ca *CookieAuth) getSPToken(ctx context.Context) (conf *SuccessResponse, err error) {
 	reqData := map[string]interface{}{
 		"Username": ca.user,
 		"Password": ca.pass,
@@ -160,6 +161,7 @@ func (ca *CookieAuth) getSPToken() (conf *SuccessResponse, err error) {
 	if err != nil {
 		return nil, err
 	}
+	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext

 	client := fshttp.NewClient(fs.Config)
 	resp, err := client.Do(req)
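Cookies and getSPToken now accept a context, and the webdav caller below hands that same context to the closure that renews the SharePoint cookies every 12 hours. The odrvcookie.NewRenew helper is rclone-internal and its exact semantics are not shown here; a hypothetical ticker-based renewer with a similar shape, which stops once the captured context is cancelled:

package main

import (
	"context"
	"fmt"
	"time"
)

// renewEvery is a hypothetical stand-in for a renew helper: it runs fn
// on every tick until the captured context is cancelled.
func renewEvery(ctx context.Context, every time.Duration, fn func()) {
	go func() {
		ticker := time.NewTicker(every)
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				fn()
			}
		}
	}()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	renewEvery(ctx, 50*time.Millisecond, func() { fmt.Println("renewing cookies") })
	time.Sleep(120 * time.Millisecond)
	cancel()
	time.Sleep(60 * time.Millisecond) // the renewer goroutine has exited
}

One design point worth noting: a context captured this way lives as long as the renewal loop, so passing a short-lived request context into such a closure would stop renewals early; a long-lived or background context is the safer choice there.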
@@ -205,7 +205,7 @@ func itemIsDir(item *api.Response) bool {
 }

 // readMetaDataForPath reads the metadata from the path
-func (f *Fs) readMetaDataForPath(path string, depth string) (info *api.Prop, err error) {
+func (f *Fs) readMetaDataForPath(ctx context.Context, path string, depth string) (info *api.Prop, err error) {
 	// FIXME how do we read back additional properties?
 	opts := rest.Opts{
 		Method: "PROPFIND",
@@ -221,7 +221,7 @@ func (f *Fs) readMetaDataForPath(path string, depth string) (info *api.Prop, err
 	var result api.Multistatus
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallXML(&opts, nil, &result)
+		resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
 		return f.shouldRetry(resp, err)
 	})
 	if apiErr, ok := err.(*api.Error); ok {
@@ -229,7 +229,7 @@ func (f *Fs) readMetaDataForPath(path string, depth string) (info *api.Prop, err
 		switch apiErr.StatusCode {
 		case http.StatusNotFound:
 			if f.retryWithZeroDepth && depth != "0" {
-				return f.readMetaDataForPath(path, "0")
+				return f.readMetaDataForPath(ctx, path, "0")
 			}
 			return nil, fs.ErrorObjectNotFound
 		case http.StatusMovedPermanently, http.StatusFound, http.StatusSeeOther:
@@ -353,7 +353,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		}
 	}
 	f.srv.SetErrorHandler(errorHandler)
-	err = f.setQuirks(opt.Vendor)
+	err = f.setQuirks(ctx, opt.Vendor)
 	if err != nil {
 		return nil, err
 	}
@@ -424,7 +424,7 @@ func (f *Fs) fetchAndSetBearerToken() error {
 }

 // setQuirks adjusts the Fs for the vendor passed in
-func (f *Fs) setQuirks(vendor string) error {
+func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
 	switch vendor {
 	case "owncloud":
 		f.canStream = true
@@ -440,13 +440,13 @@ func (f *Fs) setQuirks(vendor string) error {
 		// They have to be set instead of BasicAuth
 		f.srv.RemoveHeader("Authorization") // We don't need this Header if using cookies
 		spCk := odrvcookie.New(f.opt.User, f.opt.Pass, f.endpointURL)
-		spCookies, err := spCk.Cookies()
+		spCookies, err := spCk.Cookies(ctx)
 		if err != nil {
 			return err
 		}

 		odrvcookie.NewRenew(12*time.Hour, func() {
-			spCookies, err := spCk.Cookies()
+			spCookies, err := spCk.Cookies(ctx)
 			if err != nil {
 				fs.Errorf("could not renew cookies: %s", err.Error())
 				return
@@ -477,7 +477,7 @@ func (f *Fs) setQuirks(vendor string) error {
 // Return an Object from a path
 //
 // If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) newObjectWithInfo(remote string, info *api.Prop) (fs.Object, error) {
+func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Prop) (fs.Object, error) {
 	o := &Object{
 		fs:     f,
 		remote: remote,
@@ -487,7 +487,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.Prop) (fs.Object, error)
 		// Set info
 		err = o.setMetaData(info)
 	} else {
-		err = o.readMetaData() // reads info and meta, returning an error
+		err = o.readMetaData(ctx) // reads info and meta, returning an error
 	}
 	if err != nil {
 		return nil, err
@@ -498,7 +498,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.Prop) (fs.Object, error)
 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
 func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
-	return f.newObjectWithInfo(remote, nil)
+	return f.newObjectWithInfo(ctx, remote, nil)
 }

 // Read the normal props, plus the checksums
@@ -528,7 +528,7 @@ type listAllFn func(string, bool, *api.Prop) bool
 // Lists the directory required calling the user function on each item found
 //
 // If the user fn ever returns true then it early exits with found = true
-func (f *Fs) listAll(dir string, directoriesOnly bool, filesOnly bool, depth string, fn listAllFn) (found bool, err error) {
+func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, filesOnly bool, depth string, fn listAllFn) (found bool, err error) {
 	opts := rest.Opts{
 		Method: "PROPFIND",
 		Path:   f.dirPath(dir), // FIXME Should not start with /
@@ -542,7 +542,7 @@ func (f *Fs) listAll(dir string, directoriesOnly bool, filesOnly bool, depth str
 	var result api.Multistatus
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallXML(&opts, nil, &result)
+		resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -550,7 +550,7 @@ func (f *Fs) listAll(dir string, directoriesOnly bool, filesOnly bool, depth str
 		// does not exist
 		if apiErr.StatusCode == http.StatusNotFound {
 			if f.retryWithZeroDepth && depth != "0" {
-				return f.listAll(dir, directoriesOnly, filesOnly, "0", fn)
+				return f.listAll(ctx, dir, directoriesOnly, filesOnly, "0", fn)
 			}
 			return found, fs.ErrorDirNotFound
 		}
@@ -625,14 +625,14 @@ func (f *Fs) listAll(dir string, directoriesOnly bool, filesOnly bool, depth str
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 	var iErr error
-	_, err = f.listAll(dir, false, false, defaultDepth, func(remote string, isDir bool, info *api.Prop) bool {
+	_, err = f.listAll(ctx, dir, false, false, defaultDepth, func(remote string, isDir bool, info *api.Prop) bool {
 		if isDir {
 			d := fs.NewDir(remote, time.Time(info.Modified))
 			// .SetID(info.ID)
 			// FIXME more info from dir? can set size, items?
 			entries = append(entries, d)
 		} else {
-			o, err := f.newObjectWithInfo(remote, info)
+			o, err := f.newObjectWithInfo(ctx, remote, info)
 			if err != nil {
 				iErr = err
 				return true
@@ -696,7 +696,7 @@ func (f *Fs) mkParentDir(ctx context.Context, dirPath string) error {
 }

 // low level mkdir, only makes the directory, doesn't attempt to create parents
-func (f *Fs) _mkdir(dirPath string) error {
+func (f *Fs) _mkdir(ctx context.Context, dirPath string) error {
 	// We assume the root is already created
 	if dirPath == "" {
 		return nil
@@ -711,7 +711,7 @@ func (f *Fs) _mkdir(dirPath string) error {
 		NoResponse: true,
 	}
 	err := f.pacer.Call(func() (bool, error) {
-		resp, err := f.srv.Call(&opts)
+		resp, err := f.srv.Call(ctx, &opts)
 		return f.shouldRetry(resp, err)
 	})
 	if apiErr, ok := err.(*api.Error); ok {
@@ -727,13 +727,13 @@ func (f *Fs) _mkdir(dirPath string) error {
 // mkdir makes the directory and parents using native paths
 func (f *Fs) mkdir(ctx context.Context, dirPath string) error {
 	// defer log.Trace(dirPath, "")("")
-	err := f._mkdir(dirPath)
+	err := f._mkdir(ctx, dirPath)
 	if apiErr, ok := err.(*api.Error); ok {
 		// parent does not exist so create it first then try again
 		if apiErr.StatusCode == http.StatusConflict {
 			err = f.mkParentDir(ctx, dirPath)
 			if err == nil {
-				err = f._mkdir(dirPath)
+				err = f._mkdir(ctx, dirPath)
 			}
 		}
 	}
@@ -749,17 +749,17 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 // dirNotEmpty returns true if the directory exists and is not Empty
 //
 // if the directory does not exist then err will be ErrorDirNotFound
-func (f *Fs) dirNotEmpty(dir string) (found bool, err error) {
-	return f.listAll(dir, false, false, defaultDepth, func(remote string, isDir bool, info *api.Prop) bool {
+func (f *Fs) dirNotEmpty(ctx context.Context, dir string) (found bool, err error) {
+	return f.listAll(ctx, dir, false, false, defaultDepth, func(remote string, isDir bool, info *api.Prop) bool {
 		return true
 	})
 }

 // purgeCheck removes the root directory, if check is set then it
 // refuses to do so if it has anything in
-func (f *Fs) purgeCheck(dir string, check bool) error {
+func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 	if check {
-		notEmpty, err := f.dirNotEmpty(dir)
+		notEmpty, err := f.dirNotEmpty(ctx, dir)
 		if err != nil {
 			return err
 		}
@@ -775,7 +775,7 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
 	var resp *http.Response
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallXML(&opts, nil, nil)
+		resp, err = f.srv.CallXML(ctx, &opts, nil, nil)
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -789,7 +789,7 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
 //
 // Returns an error if it isn't empty
 func (f *Fs) Rmdir(ctx context.Context, dir string) error {
-	return f.purgeCheck(dir, true)
+	return f.purgeCheck(ctx, dir, true)
 }

 // Precision return the precision of this Fs
@@ -835,10 +835,10 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
 		},
 	}
 	if f.useOCMtime {
-		opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime(ctx).UnixNano())/1E9)
+		opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime(ctx).UnixNano())/1e9)
 	}
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.Call(&opts)
+		resp, err = f.srv.Call(ctx, &opts)
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -870,7 +870,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 // deleting all the files quicker than just running Remove() on the
 // result of List()
 func (f *Fs) Purge(ctx context.Context) error {
-	return f.purgeCheck("", false)
+	return f.purgeCheck(ctx, "", false)
 }

 // Move src to this remote using server side move operations.
@@ -904,7 +904,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	dstPath := f.filePath(dstRemote)

 	// Check if destination exists
-	_, err := f.dirNotEmpty(dstRemote)
+	_, err := f.dirNotEmpty(ctx, dstRemote)
 	if err == nil {
 		return fs.ErrorDirExists
 	}
@@ -934,7 +934,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 		},
 	}
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.Call(&opts)
+		resp, err = f.srv.Call(ctx, &opts)
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -975,7 +975,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	var resp *http.Response
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallXML(&opts, nil, &q)
+		resp, err = f.srv.CallXML(ctx, &opts, nil, &q)
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -1028,7 +1028,8 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {

 // Size returns the size of an object in bytes
 func (o *Object) Size() int64 {
-	err := o.readMetaData()
+	ctx := context.TODO()
+	err := o.readMetaData(ctx)
 	if err != nil {
 		fs.Logf(o, "Failed to read metadata: %v", err)
 		return 0
@@ -1052,11 +1053,11 @@ func (o *Object) setMetaData(info *api.Prop) (err error) {
 // readMetaData gets the metadata if it hasn't already been fetched
 //
 // it also sets the info
-func (o *Object) readMetaData() (err error) {
+func (o *Object) readMetaData(ctx context.Context) (err error) {
 	if o.hasMetaData {
 		return nil
 	}
-	info, err := o.fs.readMetaDataForPath(o.remote, defaultDepth)
+	info, err := o.fs.readMetaDataForPath(ctx, o.remote, defaultDepth)
 	if err != nil {
 		return err
 	}
@@ -1068,7 +1069,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
 func (o *Object) ModTime(ctx context.Context) time.Time {
-	err := o.readMetaData()
+	err := o.readMetaData(ctx)
 	if err != nil {
 		fs.Logf(o, "Failed to read metadata: %v", err)
 		return time.Now()
@@ -1095,7 +1096,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		Options: options,
 	}
 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.Call(&opts)
+		resp, err = o.fs.srv.Call(ctx, &opts)
 		return o.fs.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -1128,7 +1129,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	if o.fs.useOCMtime || o.fs.hasChecksums {
 		opts.ExtraHeaders = map[string]string{}
 		if o.fs.useOCMtime {
-			opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime(ctx).UnixNano())/1E9)
+			opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime(ctx).UnixNano())/1e9)
 		}
 		if o.fs.hasChecksums {
 			// Set an upload checksum - prefer SHA1
@@ -1143,7 +1144,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		}
 	}
 	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-		resp, err = o.fs.srv.Call(&opts)
+		resp, err = o.fs.srv.Call(ctx, &opts)
 		return o.fs.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -1159,7 +1160,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	}
 	// read metadata from remote
 	o.hasMetaData = false
-	return o.readMetaData()
+	return o.readMetaData(ctx)
}

 // Remove an object
@@ -1170,7 +1171,7 @@ func (o *Object) Remove(ctx context.Context) error {
 		NoResponse: true,
 	}
 	return o.fs.pacer.Call(func() (bool, error) {
-		resp, err := o.fs.srv.Call(&opts)
+		resp, err := o.fs.srv.Call(ctx, &opts)
 		return o.fs.shouldRetry(resp, err)
 	})
 }
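These webdav hunks also show the other half of the migration: methods such as Size() implement an interface whose signature cannot yet carry a context, so they bridge with context.TODO(), a deliberate marker that a real caller context should eventually be plumbed through. In sketch form (fetchSize is a made-up stand-in for the metadata call):

package main

import (
	"context"
	"fmt"
)

// fetchSize stands in for a metadata read that now requires a context.
func fetchSize(ctx context.Context, remote string) (int64, error) {
	_ = ctx // would be passed down to the HTTP layer
	return 42, nil
}

// Size has a fixed, context-free signature (it implements an interface),
// so it bridges with context.TODO() - a visible placeholder rather than
// a silently dropped context.
func Size(remote string) int64 {
	ctx := context.TODO()
	n, err := fetchSize(ctx, remote)
	if err != nil {
		return 0
	}
	return n
}

func main() {
	fmt.Println(Size("file.txt"))
}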
@@ -200,7 +200,7 @@ func (f *Fs) dirPath(file string) string {
 	return path.Join(f.diskRoot, file) + "/"
 }

-func (f *Fs) readMetaDataForPath(path string, options *api.ResourceInfoRequestOptions) (*api.ResourceInfoResponse, error) {
+func (f *Fs) readMetaDataForPath(ctx context.Context, path string, options *api.ResourceInfoRequestOptions) (*api.ResourceInfoResponse, error) {
 	opts := rest.Opts{
 		Method: "GET",
 		Path:   "/resources",
@@ -226,7 +226,7 @@ func (f *Fs) readMetaDataForPath(path string, options *api.ResourceInfoRequestOp
 	var info api.ResourceInfoResponse
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &info)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
 		return shouldRetry(resp, err)
 	})

@@ -239,6 +239,7 @@ func (f *Fs) readMetaDataForPath(path string, options *api.ResourceInfoRequestOp

 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	ctx := context.TODO()
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
@@ -284,7 +285,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	//request object meta info
 	// Check to see if the object exists and is a file
 	//request object meta info
-	if info, err := f.readMetaDataForPath(f.diskRoot, &api.ResourceInfoRequestOptions{}); err != nil {
+	if info, err := f.readMetaDataForPath(ctx, f.diskRoot, &api.ResourceInfoRequestOptions{}); err != nil {

 	} else {
 		if info.ResourceType == "file" {
@@ -301,7 +302,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 }

 // Convert a list item into a DirEntry
-func (f *Fs) itemToDirEntry(remote string, object *api.ResourceInfoResponse) (fs.DirEntry, error) {
+func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.ResourceInfoResponse) (fs.DirEntry, error) {
 	switch object.ResourceType {
 	case "dir":
 		t, err := time.Parse(time.RFC3339Nano, object.Modified)
@@ -311,7 +312,7 @@ func (f *Fs) itemToDirEntry(remote string, object *api.ResourceInfoResponse) (fs
 		d := fs.NewDir(remote, t).SetSize(object.Size)
 		return d, nil
 	case "file":
-		o, err := f.newObjectWithInfo(remote, object)
+		o, err := f.newObjectWithInfo(ctx, remote, object)
 		if err != nil {
 			return nil, err
 		}
@@ -343,7 +344,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 			Limit:  limit,
 			Offset: offset,
 		}
-		info, err := f.readMetaDataForPath(root, opts)
+		info, err := f.readMetaDataForPath(ctx, root, opts)

 		if err != nil {
 			if apiErr, ok := err.(*api.ErrorResponse); ok {
@@ -360,7 +361,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		//list all subdirs
 		for _, element := range info.Embedded.Items {
 			remote := path.Join(dir, element.Name)
-			entry, err := f.itemToDirEntry(remote, &element)
+			entry, err := f.itemToDirEntry(ctx, remote, &element)
 			if err != nil {
 				return nil, err
 			}
@@ -386,7 +387,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 // Return an Object from a path
 //
 // If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) newObjectWithInfo(remote string, info *api.ResourceInfoResponse) (fs.Object, error) {
+func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.ResourceInfoResponse) (fs.Object, error) {
 	o := &Object{
 		fs:     f,
 		remote: remote,
@@ -395,7 +396,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.ResourceInfoResponse) (f
 	if info != nil {
 		err = o.setMetaData(info)
 	} else {
-		err = o.readMetaData()
+		err = o.readMetaData(ctx)
 		if apiErr, ok := err.(*api.ErrorResponse); ok {
 			// does not exist
 			if apiErr.ErrorName == "DiskNotFoundError" {
@@ -412,7 +413,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.ResourceInfoResponse) (f
 // NewObject finds the Object at remote. If it can't be found it
 // returns the error fs.ErrorObjectNotFound.
 func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
-	return f.newObjectWithInfo(remote, nil)
+	return f.newObjectWithInfo(ctx, remote, nil)
 }

 // Creates from the parameters passed in a half finished Object which
@@ -446,7 +447,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
 }

 // CreateDir makes a directory
-func (f *Fs) CreateDir(path string) (err error) {
+func (f *Fs) CreateDir(ctx context.Context, path string) (err error) {
 	//fmt.Printf("CreateDir: %s\n", path)

 	var resp *http.Response
@@ -460,7 +461,7 @@ func (f *Fs) CreateDir(path string) (err error) {
 	opts.Parameters.Set("path", path)

 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.Call(&opts)
+		resp, err = f.srv.Call(ctx, &opts)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -474,7 +475,7 @@ func (f *Fs) CreateDir(path string) (err error) {
 // This really needs improvement and especially proper error checking
 // but Yandex does not publish a List of possible errors and when they're
 // expected to occur.
-func (f *Fs) mkDirs(path string) (err error) {
+func (f *Fs) mkDirs(ctx context.Context, path string) (err error) {
 	//trim filename from path
 	//dirString := strings.TrimSuffix(path, filepath.Base(path))
 	//trim "disk:" from path
@@ -483,7 +484,7 @@ func (f *Fs) mkDirs(path string) (err error) {
 		return nil
 	}

-	if err = f.CreateDir(dirString); err != nil {
+	if err = f.CreateDir(ctx, dirString); err != nil {
 		if apiErr, ok := err.(*api.ErrorResponse); ok {
 			// allready exists
 			if apiErr.ErrorName != "DiskPathPointsToExistentDirectoryError" {
@@ -493,7 +494,7 @@ func (f *Fs) mkDirs(path string) (err error) {
 	for _, element := range dirs {
 		if element != "" {
 			mkdirpath += element + "/" //path separator /
-			if err = f.CreateDir(mkdirpath); err != nil {
+			if err = f.CreateDir(ctx, mkdirpath); err != nil {
 				// ignore errors while creating dirs
 			}
 		}
@@ -505,7 +506,7 @@ func (f *Fs) mkDirs(path string) (err error) {
 	return err
 }

-func (f *Fs) mkParentDirs(resPath string) error {
+func (f *Fs) mkParentDirs(ctx context.Context, resPath string) error {
 	// defer log.Trace(dirPath, "")("")
 	// chop off trailing / if it exists
 	if strings.HasSuffix(resPath, "/") {
@@ -515,17 +516,17 @@ func (f *Fs) mkParentDirs(resPath string) error {
 	if parent == "." {
 		parent = ""
 	}
-	return f.mkDirs(parent)
+	return f.mkDirs(ctx, parent)
 }

 // Mkdir creates the container if it doesn't exist
 func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 	path := f.filePath(dir)
-	return f.mkDirs(path)
+	return f.mkDirs(ctx, path)
 }

 // waitForJob waits for the job with status in url to complete
-func (f *Fs) waitForJob(location string) (err error) {
+func (f *Fs) waitForJob(ctx context.Context, location string) (err error) {
 	opts := rest.Opts{
 		RootURL: location,
 		Method:  "GET",
@@ -535,7 +536,7 @@ func (f *Fs) waitForJob(location string) (err error) {
 	var resp *http.Response
 	var body []byte
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.Call(&opts)
+		resp, err = f.srv.Call(ctx, &opts)
 		if err != nil {
 			return fserrors.ShouldRetry(err), err
 		}
@@ -564,7 +565,7 @@ func (f *Fs) waitForJob(location string) (err error) {
 	return errors.Errorf("async operation didn't complete after %v", fs.Config.Timeout)
 }

-func (f *Fs) delete(path string, hardDelete bool) (err error) {
+func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) (err error) {
 	opts := rest.Opts{
 		Method: "DELETE",
 		Path:   "/resources",
@@ -577,7 +578,7 @@ func (f *Fs) delete(path string, hardDelete bool) (err error) {
 	var resp *http.Response
 	var body []byte
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.Call(&opts)
+		resp, err = f.srv.Call(ctx, &opts)
 		if err != nil {
 			return fserrors.ShouldRetry(err), err
 		}
@@ -595,19 +596,19 @@ func (f *Fs) delete(path string, hardDelete bool) (err error) {
 		if err != nil {
 			return errors.Wrapf(err, "async info result not JSON: %q", body)
 		}
-		return f.waitForJob(info.HRef)
+		return f.waitForJob(ctx, info.HRef)
 	}
 	return nil
 }

 // purgeCheck remotes the root directory, if check is set then it
 // refuses to do so if it has anything in
-func (f *Fs) purgeCheck(dir string, check bool) error {
+func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 	root := f.filePath(dir)
 	if check {
 		//to comply with rclone logic we check if the directory is empty before delete.
 		//send request to get list of objects in this directory.
-		info, err := f.readMetaDataForPath(root, &api.ResourceInfoRequestOptions{})
+		info, err := f.readMetaDataForPath(ctx, root, &api.ResourceInfoRequestOptions{})
 		if err != nil {
 			return errors.Wrap(err, "rmdir failed")
 		}
@@ -616,14 +617,14 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
 		}
 	}
 	//delete directory
-	return f.delete(root, false)
+	return f.delete(ctx, root, false)
 }

 // Rmdir deletes the container
 //
 // Returns an error if it isn't empty
 func (f *Fs) Rmdir(ctx context.Context, dir string) error {
-	return f.purgeCheck(dir, true)
+	return f.purgeCheck(ctx, dir, true)
 }

 // Purge deletes all the files and the container
@@ -632,11 +633,11 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 // deleting all the files quicker than just running Remove() on the
 // result of List()
 func (f *Fs) Purge(ctx context.Context) error {
-	return f.purgeCheck("", false)
+	return f.purgeCheck(ctx, "", false)
 }

 // copyOrMoves copies or moves directories or files depending on the method parameter
-func (f *Fs) copyOrMove(method, src, dst string, overwrite bool) (err error) {
+func (f *Fs) copyOrMove(ctx context.Context, method, src, dst string, overwrite bool) (err error) {
 	opts := rest.Opts{
 		Method: "POST",
 		Path:   "/resources/" + method,
@@ -650,7 +651,7 @@ func (f *Fs) copyOrMove(method, src, dst string, overwrite bool) (err error) {
 	var resp *http.Response
 	var body []byte
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.Call(&opts)
+		resp, err = f.srv.Call(ctx, &opts)
 		if err != nil {
 			return fserrors.ShouldRetry(err), err
 		}
@@ -668,7 +669,7 @@ func (f *Fs) copyOrMove(method, src, dst string, overwrite bool) (err error) {
 		if err != nil {
 			return errors.Wrapf(err, "async info result not JSON: %q", body)
 		}
-		return f.waitForJob(info.HRef)
+		return f.waitForJob(ctx, info.HRef)
 	}
 	return nil
 }
@@ -690,11 +691,11 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}

 	dstPath := f.filePath(remote)
-	err := f.mkParentDirs(dstPath)
+	err := f.mkParentDirs(ctx, dstPath)
 	if err != nil {
 		return nil, err
 	}
-	err = f.copyOrMove("copy", srcObj.filePath(), dstPath, false)
+	err = f.copyOrMove(ctx, "copy", srcObj.filePath(), dstPath, false)

 	if err != nil {
 		return nil, errors.Wrap(err, "couldn't copy file")
@@ -720,11 +721,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}

 	dstPath := f.filePath(remote)
-	err := f.mkParentDirs(dstPath)
+	err := f.mkParentDirs(ctx, dstPath)
 	if err != nil {
 		return nil, err
 	}
-	err = f.copyOrMove("move", srcObj.filePath(), dstPath, false)
+	err = f.copyOrMove(ctx, "move", srcObj.filePath(), dstPath, false)

 	if err != nil {
 		return nil, errors.Wrap(err, "couldn't move file")
@@ -758,12 +759,12 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 		return errors.New("can't move root directory")
 	}

-	err := f.mkParentDirs(dstPath)
+	err := f.mkParentDirs(ctx, dstPath)
 	if err != nil {
 		return err
 	}

-	_, err = f.readMetaDataForPath(dstPath, &api.ResourceInfoRequestOptions{})
+	_, err = f.readMetaDataForPath(ctx, dstPath, &api.ResourceInfoRequestOptions{})
 	if apiErr, ok := err.(*api.ErrorResponse); ok {
 		// does not exist
 		if apiErr.ErrorName == "DiskNotFoundError" {
@@ -775,7 +776,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 		return fs.ErrorDirExists
 	}

-	err = f.copyOrMove("move", srcPath, dstPath, false)
+	err = f.copyOrMove(ctx, "move", srcPath, dstPath, false)

 	if err != nil {
 		return errors.Wrap(err, "couldn't move directory")
@@ -802,7 +803,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er

 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.Call(&opts)
+		resp, err = f.srv.Call(ctx, &opts)
 		return shouldRetry(resp, err)
 	})

@@ -819,7 +820,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
 		return "", errors.Wrap(err, "couldn't create public link")
 	}

-	info, err := f.readMetaDataForPath(f.filePath(remote), &api.ResourceInfoRequestOptions{})
+	info, err := f.readMetaDataForPath(ctx, f.filePath(remote), &api.ResourceInfoRequestOptions{})
 	if err != nil {
 		return "", err
 	}
@@ -840,7 +841,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
 	}

 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.Call(&opts)
+		resp, err = f.srv.Call(ctx, &opts)
 		return shouldRetry(resp, err)
 	})
 	return err
@@ -857,7 +858,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	var info api.DiskInfo
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &info)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
 		return shouldRetry(resp, err)
 	})

@@ -924,11 +925,11 @@ func (o *Object) setMetaData(info *api.ResourceInfoResponse) (err error) {
 }

 // readMetaData reads ands sets the new metadata for a storage.Object
-func (o *Object) readMetaData() (err error) {
+func (o *Object) readMetaData(ctx context.Context) (err error) {
 	if o.hasMetaData {
 		return nil
 	}
-	info, err := o.fs.readMetaDataForPath(o.filePath(), &api.ResourceInfoRequestOptions{})
+	info, err := o.fs.readMetaDataForPath(ctx, o.filePath(), &api.ResourceInfoRequestOptions{})
 	if err != nil {
 		return err
 	}
@@ -943,7 +944,7 @@ func (o *Object) readMetaData() (err error) {
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
 func (o *Object) ModTime(ctx context.Context) time.Time {
-	err := o.readMetaData()
+	err := o.readMetaData(ctx)
 	if err != nil {
 		fs.Logf(o, "Failed to read metadata: %v", err)
 		return time.Now()
@@ -953,7 +954,8 @@ func (o *Object) ModTime(ctx context.Context) time.Time {

 // Size returns the size of an object in bytes
 func (o *Object) Size() int64 {
-	err := o.readMetaData()
+	ctx := context.TODO()
+	err := o.readMetaData(ctx)
 	if err != nil {
 		fs.Logf(o, "Failed to read metadata: %v", err)
 		return 0
@@ -974,7 +976,7 @@ func (o *Object) Storable() bool {
 	return true
 }

-func (o *Object) setCustomProperty(property string, value string) (err error) {
+func (o *Object) setCustomProperty(ctx context.Context, property string, value string) (err error) {
 	var resp *http.Response
 	opts := rest.Opts{
 		Method: "PATCH",
@@ -990,7 +992,7 @@ func (o *Object) setCustomProperty(property string, value string) (err error) {
 	cpr := api.CustomPropertyResponse{CustomProperties: rcm}

 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.CallJSON(&opts, &cpr, nil)
+		resp, err = o.fs.srv.CallJSON(ctx, &opts, &cpr, nil)
 		return shouldRetry(resp, err)
 	})
 	return err
@@ -1001,7 +1003,7 @@ func (o *Object) setCustomProperty(property string, value string) (err error) {
 // Commits the datastore
 func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	// set custom_property 'rclone_modified' of object to modTime
-	err := o.setCustomProperty("rclone_modified", modTime.Format(time.RFC3339Nano))
+	err := o.setCustomProperty(ctx, "rclone_modified", modTime.Format(time.RFC3339Nano))
 	if err != nil {
 		return err
 	}
@@ -1023,7 +1025,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	opts.Parameters.Set("path", o.filePath())

 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.CallJSON(&opts, nil, &dl)
+		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &dl)
 		return shouldRetry(resp, err)
 	})

@@ -1038,7 +1040,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		Options: options,
 	}
 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.Call(&opts)
+		resp, err = o.fs.srv.Call(ctx, &opts)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -1047,7 +1049,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	return resp.Body, err
 }

-func (o *Object) upload(in io.Reader, overwrite bool, mimeType string) (err error) {
+func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, mimeType string) (err error) {
 	// prepare upload
 	var resp *http.Response
 	var ur api.AsyncInfo
@@ -1061,7 +1063,7 @@ func (o *Object) upload(in io.Reader, overwrite bool, mimeType string) (err erro
 	opts.Parameters.Set("overwrite", strconv.FormatBool(overwrite))

 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.CallJSON(&opts, nil, &ur)
+		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &ur)
 		return shouldRetry(resp, err)
 	})

@@ -1079,7 +1081,7 @@ func (o *Object) upload(in io.Reader, overwrite bool, mimeType string) (err erro
 	}

 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.Call(&opts)
+		resp, err = o.fs.srv.Call(ctx, &opts)
 		return shouldRetry(resp, err)
 	})

@@ -1097,13 +1099,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	remote := o.filePath()

 	//create full path to file before upload.
-	err := o.fs.mkParentDirs(remote)
+	err := o.fs.mkParentDirs(ctx, remote)
 	if err != nil {
 		return err
 	}

 	//upload file
-	err = o.upload(in1, true, fs.MimeType(ctx, src))
+	err = o.upload(ctx, in1, true, fs.MimeType(ctx, src))
 	if err != nil {
 		return err
 	}
@@ -1120,7 +1122,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

 // Remove an object
 func (o *Object) Remove(ctx context.Context) error {
-	return o.fs.delete(o.filePath(), false)
+	return o.fs.delete(ctx, o.filePath(), false)
 }

 // MimeType of an Object if known, "" otherwise
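Two bridging spots stand out in the backend hunks above: NewFs and Size manufacture a context.TODO() because their signatures cannot (yet) carry one, while every interface method that already receives a ctx simply forwards it. A hedged illustration of that boundary pattern (fetchSize and Size below are stand-ins, not rclone code):

```go
package main

import (
	"context"
	"fmt"
)

// fetchSize stands in for readMetaData(ctx): everything below the
// bridge takes an explicit context.
func fetchSize(ctx context.Context) (int64, error) {
	if err := ctx.Err(); err != nil {
		return 0, err
	}
	return 42, nil
}

// Size has a fixed, context-free signature (like fs.Object's Size), so
// it makes a placeholder context at the boundary; context.TODO marks
// the call site as still awaiting real plumbing.
func Size() int64 {
	ctx := context.TODO()
	n, err := fetchSize(ctx)
	if err != nil {
		return 0
	}
	return n
}

func main() { fmt.Println(Size()) }
```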
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 """
 This is a tool to decrypt file names in rclone logs.

@@ -43,13 +43,13 @@ def map_log_file(crypt_map, log_file):
     """
     with open(log_file) as fd:
         for line in fd:
-            for cipher, plain in crypt_map.iteritems():
+            for cipher, plain in crypt_map.items():
                 line = line.replace(cipher, plain)
             sys.stdout.write(line)

 def main():
     if len(sys.argv) < 3:
-        print "Syntax: %s <crypt-mapping-file> <log-file>" % sys.argv[0]
+        print("Syntax: %s <crypt-mapping-file> <log-file>" % sys.argv[0])
         raise SystemExit(1)
     mapping_file, log_file = sys.argv[1:]
     crypt_map = read_crypt_map(mapping_file)
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
 """
 Make backend documentation
 """
@@ -52,9 +52,9 @@ if __name__ == "__main__":
     for backend in find_backends():
         try:
             alter_doc(backend)
-        except Exception, e:
-            print "Failed adding docs for %s backend: %s" % (backend, e)
+        except Exception as e:
+            print("Failed adding docs for %s backend: %s" % (backend, e))
             failed += 1
         else:
             success += 1
-    print "Added docs for %d backends with %d failures" % (success, failed)
+    print("Added docs for %d backends with %d failures" % (success, failed))
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python3
 """
 Generate a markdown changelog for the rclone project
 """
@@ -99,10 +99,11 @@ def process_log(log):

 def main():
     if len(sys.argv) != 3:
-        print >>sys.stderr, "Syntax: %s vX.XX vX.XY" % sys.argv[0]
+        print("Syntax: %s vX.XX vX.XY" % sys.argv[0], file=sys.stderr)
         sys.exit(1)
     version, next_version = sys.argv[1], sys.argv[2]
     log = subprocess.check_output(["git", "log", '''--pretty=format:%H|%an|%aI|%s'''] + [version+".."+next_version])
+    log = log.decode("utf-8")
     by_category = process_log(log)

     # Output backends first so remaining in by_category are core items
@@ -112,7 +113,7 @@ def main():
     out("local", title="Local")
     out("cache", title="Cache")
     out("crypt", title="Crypt")
-    backend_names = sorted(x for x in by_category.keys() if x in backends)
+    backend_names = sorted(x for x in list(by_category.keys()) if x in backends)
     for backend_name in backend_names:
         if backend_name in backend_titles:
             backend_title = backend_titles[backend_name]
@@ -123,7 +124,7 @@ def main():
     # Split remaining in by_category into new features and fixes
     new_features = defaultdict(list)
     bugfixes = defaultdict(list)
-    for name, messages in by_category.iteritems():
+    for name, messages in by_category.items():
         for message in messages:
             if IS_FIX_RE.search(message):
                 bugfixes[name].append(message)
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
 """
 Make single page versions of the documentation for release and
 conversion into man pages etc.
@@ -41,6 +41,7 @@ docs = [
     "hubic.md",
     "jottacloud.md",
     "koofr.md",
+    "mailru.md",
     "mega.md",
     "azureblob.md",
     "onedrive.md",
@@ -118,8 +119,8 @@ def check_docs(docpath):
     docs_set = set(docs)
     if files == docs_set:
         return
-    print "Files on disk but not in docs variable: %s" % ", ".join(files - docs_set)
-    print "Files in docs variable but not on disk: %s" % ", ".join(docs_set - files)
+    print("Files on disk but not in docs variable: %s" % ", ".join(files - docs_set))
+    print("Files in docs variable but not on disk: %s" % ", ".join(docs_set - files))
     raise ValueError("Missing files")

 def read_command(command):
@@ -142,7 +143,7 @@ def read_commands(docpath):

 def main():
     check_docs(docpath)
-    command_docs = read_commands(docpath)
+    command_docs = read_commands(docpath).replace("\\", "\\\\") # escape \ so we can use command_docs in re.sub
     with open(outfile, "w") as out:
         out.write("""\
 %% rclone(1) User Manual
@@ -156,7 +157,7 @@ def main():
         if doc == "docs.md":
             contents = re.sub(r"The main rclone commands.*?for the full list.", command_docs, contents, 0, re.S)
         out.write(contents)
-    print "Written '%s'" % outfile
+    print("Written '%s'" % outfile)

 if __name__ == "__main__":
     main()
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 """
 Update the authors.md file with the authors from the git log
 """
@@ -23,13 +23,14 @@ def add_email(name, email):
     """
     adds the email passed in to the end of authors.md
     """
-    print "Adding %s <%s>" % (name, email)
+    print("Adding %s <%s>" % (name, email))
     with open(AUTHORS, "a+") as fd:
-        print >>fd, " * %s <%s>" % (name, email)
+        print(" * %s <%s>" % (name, email), file=fd)
     subprocess.check_call(["git", "commit", "-m", "Add %s to contributors" % name, AUTHORS])

 def main():
     out = subprocess.check_output(["git", "log", '--reverse', '--format=%an|%ae', "master"])
+    out = out.decode("utf-8")

     previous = load()
     for line in out.split("\n"):
cmd/cmd.go
@@ -174,7 +174,11 @@ func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs
 	// If file exists then srcFileName != "", however if the file
 	// doesn't exist then we assume it is a directory...
 	if srcFileName != "" {
-		dstRemote, dstFileName = fspath.Split(dstRemote)
+		var err error
+		dstRemote, dstFileName, err = fspath.Split(dstRemote)
+		if err != nil {
+			log.Fatalf("Parsing %q failed: %v", args[1], err)
+		}
 		if dstRemote == "" {
 			dstRemote = "."
 		}
@@ -197,7 +201,10 @@ func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs

 // NewFsDstFile creates a new dst fs with a destination file name from the arguments
 func NewFsDstFile(args []string) (fdst fs.Fs, dstFileName string) {
-	dstRemote, dstFileName := fspath.Split(args[0])
+	dstRemote, dstFileName, err := fspath.Split(args[0])
+	if err != nil {
+		log.Fatalf("Parsing %q failed: %v", args[0], err)
+	}
 	if dstRemote == "" {
 		dstRemote = "."
 	}
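fspath.Split now reports invalid remote syntax instead of silently splitting, so both call sites above grow an error branch ending in log.Fatalf. A rough stand-in for the new three-return shape (the real parser lives in fs/fspath; this toy version only splits on the last slash and its validity check is invented):

```go
package main

import (
	"fmt"
	"log"
	"strings"
)

// split is a toy version of fspath.Split's new signature: parent, leaf
// and an error for input it refuses to parse.
func split(remote string) (parent, leaf string, err error) {
	if strings.ContainsRune(remote, '\x00') {
		return "", "", fmt.Errorf("invalid character in %q", remote)
	}
	i := strings.LastIndex(remote, "/")
	if i < 0 {
		return "", remote, nil
	}
	return remote[:i+1], remote[i+1:], nil
}

func main() {
	parent, leaf, err := split("remote:dir/file.txt")
	if err != nil {
		log.Fatalf("Parsing failed: %v", err) // mirrors the diff's error handling
	}
	fmt.Println(parent, leaf)
}
```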
@@ -267,8 +267,8 @@ func (fsys *FS) Statfs(path string, stat *fuse.Statfs_t) (errc int) {
 	stat.Blocks = fsBlocks  // Total data blocks in file system.
 	stat.Bfree = fsBlocks   // Free blocks in file system.
 	stat.Bavail = fsBlocks  // Free blocks in file system if you're not root.
-	stat.Files = 1E9        // Total files in file system.
-	stat.Ffree = 1E9        // Free files in file system.
+	stat.Files = 1e9        // Total files in file system.
+	stat.Ffree = 1e9        // Free files in file system.
 	stat.Bsize = blockSize  // Block size
 	stat.Namemax = 255      // Maximum file name length?
 	stat.Frsize = blockSize // Fragment size, smallest addressable data size in the file system.
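The 1E9/1e9 edits repeated through these hunks are purely cosmetic: both spellings denote the same numeric literal in Go, and the lowercase exponent is the lint-preferred form. A two-line demonstration:

```go
package main

import "fmt"

func main() {
	fmt.Println(1E9 == 1e9) // true: identical constants, different spelling
}
```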
@@ -4,12 +4,18 @@ import (
 	"context"

 	"github.com/rclone/rclone/cmd"
+	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/operations"
 	"github.com/spf13/cobra"
 )

+var (
+	autoFilename = false
+)
+
 func init() {
 	cmd.Root.AddCommand(commandDefintion)
+	commandDefintion.Flags().BoolVarP(&autoFilename, "auto-filename", "a", autoFilename, "Get the file name from the url and use it for destination file path")
 }

 var commandDefintion = &cobra.Command{
@@ -18,13 +24,22 @@ var commandDefintion = &cobra.Command{
 	Long: `
 Download urls content and copy it to destination
 without saving it in tmp storage.
+
+Setting --auto-filename flag will cause retrieving file name from url and using it in destination path.
 `,
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(2, 2, command, args)
-		fsdst, dstFileName := cmd.NewFsDstFile(args[1:])
+
+		var dstFileName string
+		var fsdst fs.Fs
+		if autoFilename {
+			fsdst = cmd.NewFsDir(args[1:])
+		} else {
+			fsdst, dstFileName = cmd.NewFsDstFile(args[1:])
+		}
+
 		cmd.Run(true, true, command, func() error {
-			_, err := operations.CopyURL(context.Background(), fsdst, dstFileName, args[0])
+			_, err := operations.CopyURL(context.Background(), fsdst, dstFileName, args[0], autoFilename)
 			return err
 		})
 	},
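With --auto-filename set, copyurl treats the destination as a directory and asks the operations layer to derive the leaf name from the URL. The derivation below is only a guess at the idea, not rclone's code (operations.CopyURL owns the real logic; nameFromURL is hypothetical):

```go
package main

import (
	"fmt"
	"net/url"
	"path"
)

// nameFromURL extracts a plausible file name from a URL's path
// component, which is what an --auto-filename style option needs.
func nameFromURL(rawURL string) (string, error) {
	u, err := url.Parse(rawURL)
	if err != nil {
		return "", err
	}
	name := path.Base(u.Path)
	if name == "." || name == "/" {
		return "", fmt.Errorf("no file name in %q", rawURL)
	}
	return name, nil
}

func main() {
	name, err := nameFromURL("https://example.com/files/report.pdf")
	fmt.Println(name, err) // report.pdf <nil>
}
```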
cmd/help.go
@@ -47,10 +47,14 @@ __rclone_custom_func() {
             __rclone_init_completion -n : || return
         fi
         if [[ $cur != *:* ]]; then
+            local ifs=$IFS
+            IFS=$'\n'
+            local remotes=($(command rclone listremotes))
+            IFS=$ifs
             local remote
-            while IFS= read -r remote; do
+            for remote in "${remotes[@]}"; do
                 [[ $remote != $cur* ]] || COMPREPLY+=("$remote")
-            done < <(command rclone listremotes)
+            done
             if [[ ${COMPREPLY[@]} ]]; then
                 local paths=("$cur"*)
                 [[ ! -f ${paths[0]} ]] || COMPREPLY+=("${paths[@]}")
@@ -62,14 +66,18 @@ __rclone_custom_func() {
         else
             local prefix=
         fi
+        local ifs=$IFS
+        IFS=$'\n'
+        local lines=($(rclone lsf "${cur%%:*}:$prefix" 2>/dev/null))
+        IFS=$ifs
         local line
-        while IFS= read -r line; do
+        for line in "${lines[@]}"; do
             local reply=${prefix:+$prefix/}$line
             [[ $reply != $path* ]] || COMPREPLY+=("$reply")
-        done < <(rclone lsf "${cur%%:*}:$prefix" 2>/dev/null)
-        [[ ! ${COMPREPLY[@]} ]] || compopt -o filenames
+        done
+        [[ ! ${COMPREPLY[@]} || $(type -t compopt) != builtin ]] || compopt -o filenames
         fi
-        [[ ! ${COMPREPLY[@]} ]] || compopt -o nospace
+        [[ ! ${COMPREPLY[@]} || $(type -t compopt) != builtin ]] || compopt -o nospace
     fi
 }
`
@@ -58,7 +58,7 @@ a bit of go code for each one.
 `,
 	Hidden: true,
 	Run: func(command *cobra.Command, args []string) {
-		cmd.CheckArgs(1, 1E6, command, args)
+		cmd.CheckArgs(1, 1e6, command, args)
 		for i := range args {
 			f := cmd.NewFsDir(args[i : i+1])
 			cmd.Run(false, false, command, func() error {
@@ -58,8 +58,8 @@ func (f *FS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.Sta
 	resp.Blocks = fsBlocks // Total data blocks in file system.
 	resp.Bfree = fsBlocks  // Free blocks in file system.
 	resp.Bavail = fsBlocks // Free blocks in file system if you're not root.
-	resp.Files = 1E9       // Total files in file system.
-	resp.Ffree = 1E9       // Free files in file system.
+	resp.Files = 1e9       // Total files in file system.
+	resp.Ffree = 1e9       // Free files in file system.
 	resp.Bsize = blockSize // Block size
 	resp.Namelen = 255     // Maximum file name length?
 	resp.Frsize = blockSize // Fragment size, smallest addressable data size in the file system.
@@ -16,7 +16,7 @@ import (

 var (
 	// Flags
-	iterations   = flag.Int("n", 1E6, "Iterations to try")
+	iterations   = flag.Int("n", 1e6, "Iterations to try")
 	maxBlockSize = flag.Int("b", 1024*1024, "Max block size to read")
 )

@@ -17,7 +17,7 @@ import (

 var (
 	// Flags
-	iterations   = flag.Int("n", 1E6, "Iterations to try")
+	iterations   = flag.Int("n", 1e6, "Iterations to try")
 	maxBlockSize = flag.Int("b", 1024*1024, "Max block size to read")
 	simultaneous = flag.Int("transfers", 16, "Number of simultaneous files to open")
 	seeksPerFile = flag.Int("seeks", 8, "Seeks per file")
cmd/rc/rc.go
@@ -69,13 +69,14 @@ rclone rc server, eg:

 Use "rclone rc" to see a list of all possible commands.`,
 	Run: func(command *cobra.Command, args []string) {
-		cmd.CheckArgs(0, 1E9, command, args)
+		cmd.CheckArgs(0, 1e9, command, args)
 		cmd.Run(false, false, command, func() error {
+			ctx := context.Background()
 			parseFlags()
 			if len(args) == 0 {
-				return list()
+				return list(ctx)
 			}
-			return run(args)
+			return run(ctx, args)
 		})
 	},
 }
@@ -110,7 +111,7 @@ func setAlternateFlag(flagName string, output *string) {
 // do a call from (path, in) to (out, err).
 //
 // if err is set, out may be a valid error return or it may be nil
-func doCall(path string, in rc.Params) (out rc.Params, err error) {
+func doCall(ctx context.Context, path string, in rc.Params) (out rc.Params, err error) {
 	// If loopback set, short circuit HTTP request
 	if loopback {
 		call := rc.Calls.Get(path)
@@ -141,6 +142,7 @@ func doCall(path string, in rc.Params) (out rc.Params, err error) {
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to make request")
 	}
+	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext

 	req.Header.Set("Content-Type", "application/json")
 	if authUser != "" || authPass != "" {
@@ -182,7 +184,7 @@ func doCall(path string, in rc.Params) (out rc.Params, err error) {
 }

 // Run the remote control command passed in
-func run(args []string) (err error) {
+func run(ctx context.Context, args []string) (err error) {
 	path := strings.Trim(args[0], "/")

 	// parse input
@@ -208,7 +210,7 @@ func run(args []string) (err error) {
 	}

 	// Do the call
-	out, callErr := doCall(path, in)
+	out, callErr := doCall(ctx, path, in)

 	// Write the JSON blob to stdout if required
 	if out != nil && !noOutput {
@@ -222,8 +224,8 @@ func run(args []string) (err error) {
 }

 // List the available commands to stdout
-func list() error {
-	list, err := doCall("rc/list", nil)
+func list(ctx context.Context) error {
+	list, err := doCall(ctx, "rc/list", nil)
 	if err != nil {
 		return errors.Wrap(err, "failed to list")
 	}
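The rc client now threads a context into its HTTP request via req.WithContext; the diff's comment notes that go1.13's http.NewRequestWithContext collapses the two steps. A minimal sketch of both forms (the localhost URL is just an example, rclone's default rc port):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
)

func main() {
	ctx := context.Background()

	// Pre-go1.13 form, as used in the diff:
	req, err := http.NewRequest("POST", "http://localhost:5572/rc/list", nil)
	if err != nil {
		panic(err)
	}
	req = req.WithContext(ctx)

	// go1.13+ equivalent mentioned in the diff's comment:
	req2, err := http.NewRequestWithContext(ctx, "POST", "http://localhost:5572/rc/list", nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(req.URL, req2.URL)
}
```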
@@ -1,7 +1,4 @@
 // Package restic serves a remote suitable for use with restic
-
-// +build go1.9
-
 package restic

 import (
@@ -1,5 +1,3 @@
-// +build go1.9
-
 package restic

 import (
@@ -1,5 +1,3 @@
-// +build go1.9
-
 package restic

 import (
@@ -1,8 +1,6 @@
 // Serve restic tests set up a server and run the integration tests
 // for restic against it.

-// +build go1.9
-
 package restic

 import (
@@ -1,11 +0,0 @@
-// Build for unsupported platforms to stop go complaining
-// about "no buildable Go source files "
-
-// +build !go1.9
-
-package restic
-
-import "github.com/spf13/cobra"
-
-// Command definition is nil to show not implemented
-var Command *cobra.Command = nil
@@ -1,5 +1,3 @@
-// +build go1.9
-
 package restic

 import (
@@ -1,10 +1,9 @@
-// +build go1.9
-
 package restic

 import (
 	"net"
 	"os"
+	"time"
 )

 // Addr implements net.Addr for stdin/stdout.
@@ -52,3 +51,23 @@ func (s *StdioConn) LocalAddr() net.Addr {
 func (s *StdioConn) RemoteAddr() net.Addr {
 	return Addr{}
 }
+
+// SetDeadline sets the read/write deadline.
+func (s *StdioConn) SetDeadline(t time.Time) error {
+	err1 := s.stdin.SetReadDeadline(t)
+	err2 := s.stdout.SetWriteDeadline(t)
+	if err1 != nil {
+		return err1
+	}
+	return err2
+}
+
+// SetReadDeadline sets the read/write deadline.
+func (s *StdioConn) SetReadDeadline(t time.Time) error {
+	return s.stdin.SetReadDeadline(t)
+}
+
+// SetWriteDeadline sets the read/write deadline.
+func (s *StdioConn) SetWriteDeadline(t time.Time) error {
+	return s.stdout.SetWriteDeadline(t)
+}
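Dropping the go1.9/go1.10 build-tag split folds the deadline methods straight into stdio_conn.go: net.Conn requires SetDeadline, SetReadDeadline and SetWriteDeadline, and with a newer Go floor os.File supports real deadlines, so the pre-go1.10 no-op fallback (deleted below) becomes dead weight. A compile-time sketch of the interface obligation (fakeConn is hypothetical, not restic's StdioConn):

```go
package main

import (
	"fmt"
	"net"
	"time"
)

// fakeConn stands in for a stdin/stdout-backed connection: the three
// deadline methods below are exactly what net.Conn demands, which is
// why the diff moves them into the main file.
type fakeConn struct{ net.Conn }

func (c fakeConn) SetDeadline(t time.Time) error      { return nil }
func (c fakeConn) SetReadDeadline(t time.Time) error  { return nil }
func (c fakeConn) SetWriteDeadline(t time.Time) error { return nil }

// Compile-time proof that fakeConn satisfies net.Conn.
var _ net.Conn = fakeConn{}

func main() { fmt.Println("fakeConn implements net.Conn") }
```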
@@ -1,27 +0,0 @@
-//+build go1.10
-
-// Deadline setting for go1.10+
-
-package restic
-
-import "time"
-
-// SetDeadline sets the read/write deadline.
-func (s *StdioConn) SetDeadline(t time.Time) error {
-	err1 := s.stdin.SetReadDeadline(t)
-	err2 := s.stdout.SetWriteDeadline(t)
-	if err1 != nil {
-		return err1
-	}
-	return err2
-}
-
-// SetReadDeadline sets the read/write deadline.
-func (s *StdioConn) SetReadDeadline(t time.Time) error {
-	return s.stdin.SetReadDeadline(t)
-}
-
-// SetWriteDeadline sets the read/write deadline.
-func (s *StdioConn) SetWriteDeadline(t time.Time) error {
-	return s.stdout.SetWriteDeadline(t)
-}
@@ -1,22 +0,0 @@
-//+build go1.9,!go1.10
-
-// Fallback deadline setting for pre go1.10
-
-package restic
-
-import "time"
-
-// SetDeadline sets the read/write deadline.
-func (s *StdioConn) SetDeadline(t time.Time) error {
-	return nil
-}
-
-// SetReadDeadline sets the read/write deadline.
-func (s *StdioConn) SetReadDeadline(t time.Time) error {
-	return nil
-}
-
-// SetWriteDeadline sets the read/write deadline.
-func (s *StdioConn) SetWriteDeadline(t time.Time) error {
-	return nil
-}
@@ -1,5 +1,4 @@
-//+build go1.9
-
 // Package webdav implements a WebDAV server backed by rclone VFS
 package webdav

 import (
@@ -3,7 +3,7 @@
 //
 // We skip tests on platforms with troublesome character mappings

-//+build !windows,!darwin,go1.9
+//+build !windows,!darwin

 package webdav

@@ -1,11 +0,0 @@
-// Build for webdav for unsupported platforms to stop go complaining
-// about "no buildable Go source files "
-
-// +build !go1.9
-
-package webdav
-
-import "github.com/spf13/cobra"
-
-// Command definition is nil to show not implemented
-var Command *cobra.Command = nil
@@ -30,6 +30,7 @@ Rclone is a command line program to sync files and directories to and from:
 * {{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}}
 * {{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}}
 * {{< provider name="Koofr" home="https://koofr.eu/" config="/koofr/" >}}
+* {{< provider name="Mail.ru Cloud" home="https://cloud.mail.ru/" config="/mailru/" >}}
 * {{< provider name="Memset Memstore" home="https://www.memset.com/cloud/storage/" config="/swift/" >}}
 * {{< provider name="Mega" home="https://mega.nz/" config="/mega/" >}}
 * {{< provider name="Microsoft Azure Blob Storage" home="https://azure.microsoft.com/en-us/services/storage/blobs/" config="/azureblob/" >}}
@@ -284,3 +284,5 @@ Contributors
 * Patrick Wang <mail6543210@yahoo.com.tw>
 * Cenk Alti <cenkalti@gmail.com>
 * Andreas Chlupka <andy@chlupka.com>
+* Alfonso Montero <amontero@tinet.org>
+* Ivan Andreev <ivandeex@gmail.com>
@@ -1,5 +1,5 @@
 ---
-date: 2019-09-08T16:48:33+01:00
+date: 2019-08-26T15:19:45+01:00
 title: "rclone"
 slug: rclone
 url: /commands/rclone/
@@ -1,5 +1,5 @@
 ---
-date: 2019-09-08T16:48:33+01:00
+date: 2019-08-26T15:19:45+01:00
 title: "rclone about"
 slug: rclone_about
 url: /commands/rclone_about/
@@ -1,5 +1,5 @@
 ---
-date: 2019-09-08T16:48:33+01:00
+date: 2019-08-26T15:19:45+01:00
 title: "rclone authorize"
 slug: rclone_authorize
 url: /commands/rclone_authorize/
@@ -1,5 +1,5 @@
 ---
-date: 2019-09-08T16:48:33+01:00
+date: 2019-08-26T15:19:45+01:00
 title: "rclone cachestats"
 slug: rclone_cachestats
 url: /commands/rclone_cachestats/
@@ -1,5 +1,5 @@
 ---
-date: 2019-09-08T16:48:33+01:00
+date: 2019-08-26T15:19:45+01:00
 title: "rclone cat"
 slug: rclone_cat
 url: /commands/rclone_cat/
@@ -1,5 +1,5 @@
 ---
-date: 2019-09-08T16:48:33+01:00
+date: 2019-08-26T15:19:45+01:00
 title: "rclone check"
 slug: rclone_check
 url: /commands/rclone_check/
@@ -1,5 +1,5 @@
 ---
-date: 2019-09-08T16:48:33+01:00
+date: 2019-08-26T15:19:45+01:00
 title: "rclone cleanup"
 slug: rclone_cleanup
 url: /commands/rclone_cleanup/
@@ -1,5 +1,5 @@
 ---
-date: 2019-09-08T16:48:33+01:00
+date: 2019-08-26T15:19:45+01:00
 title: "rclone config"
 slug: rclone_config
 url: /commands/rclone_config/
@@ -1,5 +1,5 @@
 ---
-date: 2019-09-08T16:48:33+01:00
+date: 2019-08-26T15:19:45+01:00
 title: "rclone config create"
 slug: rclone_config_create
 url: /commands/rclone_config_create/
@@ -1,5 +1,5 @@
 ---
-date: 2019-09-08T16:48:33+01:00
+date: 2019-08-26T15:19:45+01:00
 title: "rclone config delete"
 slug: rclone_config_delete
 url: /commands/rclone_config_delete/
@@ -1,5 +1,5 @@
 ---
-date: 2019-09-08T16:48:33+01:00
+date: 2019-08-26T15:19:45+01:00
 title: "rclone config disconnect"
 slug: rclone_config_disconnect
 url: /commands/rclone_config_disconnect/
@@ -1,5 +1,5 @@
 ---
-date: 2019-09-08T16:48:33+01:00
+date: 2019-08-26T15:19:45+01:00
 title: "rclone config dump"
 slug: rclone_config_dump
 url: /commands/rclone_config_dump/
@@ -1,5 +1,5 @@
 ---
-date: 2019-09-08T16:48:33+01:00
+date: 2019-08-26T15:19:45+01:00
 title: "rclone config edit"
 slug: rclone_config_edit
 url: /commands/rclone_config_edit/
@@ -1,5 +1,5 @@
 ---
-date: 2019-09-08T16:48:33+01:00
+date: 2019-08-26T15:19:45+01:00
 title: "rclone config file"
 slug: rclone_config_file
 url: /commands/rclone_config_file/
@@ -1,5 +1,5 @@
 ---
-date: 2019-09-08T16:48:33+01:00
+date: 2019-08-26T15:19:45+01:00
 title: "rclone config password"
 slug: rclone_config_password
 url: /commands/rclone_config_password/
@@ -1,5 +1,5 @@
 ---
-date: 2019-09-08T16:48:33+01:00
+date: 2019-08-26T15:19:45+01:00
 title: "rclone config providers"
 slug: rclone_config_providers
 url: /commands/rclone_config_providers/
@@ -1,5 +1,5 @@
 ---
-date: 2019-09-08T16:48:33+01:00
+date: 2019-08-26T15:19:45+01:00
 title: "rclone config reconnect"
 slug: rclone_config_reconnect
 url: /commands/rclone_config_reconnect/
@@ -1,5 +1,5 @@
 ---
-date: 2019-09-08T16:48:33+01:00
+date: 2019-08-26T15:19:45+01:00
 title: "rclone config show"
 slug: rclone_config_show
 url: /commands/rclone_config_show/
@@ -1,5 +1,5 @@
 ---
-date: 2019-09-08T16:48:33+01:00
+date: 2019-08-26T15:19:45+01:00
 title: "rclone config update"
 slug: rclone_config_update
 url: /commands/rclone_config_update/
@@ -1,5 +1,5 @@
 ---
-date: 2019-09-08T16:48:33+01:00
+date: 2019-08-26T15:19:45+01:00
 title: "rclone config userinfo"
 slug: rclone_config_userinfo
 url: /commands/rclone_config_userinfo/
@@ -1,5 +1,5 @@
 ---
-date: 2019-09-08T16:48:33+01:00
+date: 2019-08-26T15:19:45+01:00
 title: "rclone copy"
 slug: rclone_copy
 url: /commands/rclone_copy/
@@ -1,5 +1,5 @@
 ---
-date: 2019-09-08T16:48:33+01:00
+date: 2019-08-26T15:19:45+01:00
 title: "rclone copyto"
 slug: rclone_copyto
 url: /commands/rclone_copyto/
@@ -1,5 +1,5 @@
 ---
-date: 2019-09-08T16:48:33+01:00
+date: 2019-08-26T15:19:45+01:00
 title: "rclone copyurl"
 slug: rclone_copyurl
 url: /commands/rclone_copyurl/
@@ -22,7 +22,8 @@ rclone copyurl https://example.com dest:path [flags]
 ### Options

 ```
-  -h, --help   help for copyurl
+  -a, --auto-filename   Get the file name from the url and use it for destination file path
+  -h, --help            help for copyurl
 ```

 See the [global flags page](/flags/) for global options not listed here.
@@ -1,5 +1,5 @@
 ---
-date: 2019-09-08T16:48:33+01:00
+date: 2019-08-26T15:19:45+01:00
 title: "rclone cryptcheck"
 slug: rclone_cryptcheck
 url: /commands/rclone_cryptcheck/
@@ -1,5 +1,5 @@
 ---
-date: 2019-09-08T16:48:33+01:00
+date: 2019-08-26T15:19:45+01:00
 title: "rclone cryptdecode"
 slug: rclone_cryptdecode
 url: /commands/rclone_cryptdecode/
Some files were not shown because too many files have changed in this diff.