Mirror of https://github.com/rclone/rclone.git (synced 2025-12-06 00:03:32 +00:00)

Compare commits: zoho-clien ... fix-no-rem (303 commits)
Commits in this compare (SHA1 only; 303 commits):

e3a77f218b 80bccacd83 3349b055f5 bef0c23e00 84201ed891 04608428bf 6aaa06d7be e53bad5353
f5397246eb b8b73f2656 96b67ce0ec e2beeffd76 30b949642d 92b3518c78 062919e08c 654f5309b0
318fa4472b 5104e24153 9d87a5192d 29f967dba3 1f846c18d4 41f561bf26 df60e6323c 58006a925a
ee2fac1855 2188fe38e5 b5f8f0973b 85b8ba9469 04a1f673f0 0574ebf44a 22e86ce335 c9fce20249
5b6f637461 07f2f3a62e 6dc190ec93 71f75a1d95 1b44035e45 054b467f32 23da913d03 c0cda087a8
1773717a47 04308dcaa1 06f27384dd 82f1f7d2c4 6555d3eb33 03229cf394 f572bf7829 f593558dc2
08040a57b0 2fa7a3c0fb 798d1293df 75c417ad93 5ee646f264 4a4aca4da7 2e4b65f888 77cda6773c
dbc5167281 635d1e10ae 296ceadda6 7ae2891252 99caf79ffe 095cf9e4be e57553930f f122808d86
94dbfa4ea6 6f2e525821 119bddc10b 28e9fd45cc 326f3b35ff ce83228cb2 732bc08ced 6ef7178ee4
9ff6f48d74 532af77fd1 ab7dfe0c87 e489a101f6 35a86193b7 2833941da8 9e6c23d9af 8bef972262
0a968818f6 c2ac353183 773da395fb 9e8cd6bff9 5d2e327b6f 77221d7528 1971c1ef87 7e7dbe16c2
002d323c94 4ad62ec016 95ee14bb2c 88aabd1f71 34627c5c7e e33303df94 665eceaec3 ba09ee18bb
62bf63d36f f38c262471 5db88fed2b 316e65589b 4401d180aa 9ccd870267 16d1da2c1e 00a0ee1899
b78c9a65fa ef3c350686 742af80972 08a2df51be 2925e1384c 2ec0c8d45f 98579608ec a1a41aa0c1
f8d56bebaf 5d799431a7 8f23cae1c0 964088affa f4068d406b 7511b6f4f1 e618ea83dd 34dc257c55
4cacf5d30c 0537791d14 4b1d28550a d27c35ee4a ffec0d4f03 89daa9efd1 ee502a757f 386acaa110
efdee3a5fe 5d85e6bc9c 4a9469a3dc f8884a7200 2a40f00077 9799fdbae2 492504a601 0c03a7fead
7afb4487ef b9d0ed4f5c baa4c039a0 31a8211afa 3544e09e95 b456be4303 3e96752079 4a5cbf2a19
dcd4edc9f5 7f5e347d94 040677ab5b 6366d3dfc5 60d376c323 7b1ca716bf d8711cf7f9 cd69f9e6e8
a737ff21af ad9aa693a3 964c3e0732 a46a3c0811 60dcafe04d 813bf029d4 f2d3264054 23a0d4a1e6
b96ebfc40b 3fe2aaf96c c163e6b250 c1492cfa28 38a8071a58 8c68a76a4a e7b736f8ca cb30a8c80e
629a3eeca2 f52ae75a51 9d5c5bf7ab 53573b4a09 3622e064f5 6d28ea7ab5 b9fd02039b 1a41c930f3
ddb7eb6e0a c114695a66 fcba51557f 9393225a1d 3d3ff61f74 d98f192425 54771e4402 dc286529bc
7dc7c021db fe1aa13069 5fa8e7d957 9db7c51eaa 3859fe2f52 0caf417779 9eab258ffb 7df57cd625
1fd9b483c8 93353c431b 886dfd23e2 116a8021bb 9e2fbe0f1a 6d65d116df edaeb51ea9 6e2e2d9eb2
20e15e52a9 d0f8b4f479 58d82a5c73 c0c74003f2 60bc7a079a 20c5ca08fb fc57648b75 8c5c91e68f
9dd39e8524 9c9186183d 2ccf416e83 5577c7b760 f6dbb98a1d d042f3194f 524cd327e6 b8c1cf7451
0fa68bda02 1378bfee63 d6870473a1 12cd322643 1406b6c3c9 088a83872d cb46092883 a2cd5d8fa3
1fe2460e38 ef5c212f9b 268a7ff7b8 b47d6001a9 a4c4ddf052 4cc2a7f342 c72d2c67ed 9deab5a563
da5b0cb611 0187bc494a 2bdbf00fa3 9ee3ad70e9 ce182adf46 97fc3b9046 e59acd16c6 acfd7e2403
f47893873d b9a015e5b9 d72d9e591a df451e1e70 d9959b0271 f2c0f82fc6 f76c6cc893 5e95877840
8b491f7f3d aea8776a43 c387eb8c09 a12b2746b4 3dbef2b2fd f111e0eaf8 96207f342c e25ac4dcf0
28f6efe955 f17d7c0012 3761cf68b4 71554c1371 8a46dd1b57 3b21857097 a10fbf16ea f4750928ee
657be2ace5 feaaca4987 ebd9462ea6 6b9e4f939d 687a3b1832 25d5ed763c 5e038a5e1e 4b4e531846
89e8fb4818 b9bf91c510 40b58d59ad 4fbb50422c 8d847a4e94 e3e08a48cb ff6868900d aab076029f
294f090361 301e1ad982 3cf6ea848b bb0b6432ae 46078d391f 849bf20598 e91f2e342a 713f8f357d
83368998be 4013bc4a4c 32925dae1f 6cc70997ba d260e3824e a5bd26395e 6fa74340a0 4d8ef7bca7
6a9ae32012 a7fd65bf2d 1fed2d910c c95b580478 2be310cd6e 02a5d350f9 18cd2064ec
.github/ISSUE_TEMPLATE/Bug.md (vendored, 40 lines changed)
```
@@ -5,19 +5,31 @@ about: Report a problem with rclone

<!--

Welcome :-) We understand you are having a problem with rclone; we want to help you with that!
We understand you are having a problem with rclone; we want to help you with that!

If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:
**STOP and READ**
**YOUR POST WILL BE REMOVED IF IT IS LOW QUALITY**:
Please show the effort you've put in to solving the problem and please be specific.
People are volunteering their time to help! Low effort posts are not likely to get good answers!

If you think you might have found a bug, try to replicate it with the latest beta (or stable).
The update instructions are available at https://rclone.org/commands/rclone_selfupdate/

If you can still replicate it or just got a question then please use the rclone forum:

https://forum.rclone.org/

instead of filing an issue for a quick response.
for a quick response instead of filing an issue on this repo.

If you think you might have found a bug, please can you try to replicate it with the latest beta?
If nothing else helps, then please fill in the info below which helps us help you.

https://beta.rclone.org/

If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)
**DO NOT REDACT** any information except passwords/keys/personal info.

You should use 3 backticks to begin and end your paste to make it readable.

Make sure to include a log obtained with '-vv'.

You can also use '-vv --log-file bug.log' and a service such as https://pastebin.com or https://gist.github.com/

Thank you

@@ -25,6 +37,10 @@ The Rclone Developers

-->

#### The associated forum post URL from `https://forum.rclone.org`


#### What is the problem you are having with rclone?


@@ -37,7 +53,7 @@ The Rclone Developers


#### Which cloud storage system are you using? (e.g. Google Drive)
#### Which cloud storage system are you using? (e.g. Google Drive)


@@ -48,3 +64,11 @@ The Rclone Developers

#### A log from the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)


<!--- Please keep the note below for others who read your bug report. -->

#### How to use GitHub

* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
* Subscribe to receive notifications on status change and new comments.
```
.github/ISSUE_TEMPLATE/Feature.md (vendored, 23 lines changed)
```
@@ -7,12 +7,16 @@ about: Suggest a new feature or enhancement for rclone

Welcome :-)

So you've got an idea to improve rclone? We love that! You'll be glad to hear we've incorporated hundreds of ideas from contributors already.
So you've got an idea to improve rclone? We love that!
You'll be glad to hear we've incorporated hundreds of ideas from contributors already.

Here is a checklist of things to do:
Probably the latest beta (or stable) release has your feature, so try to update your rclone.
The update instructions are available at https://rclone.org/commands/rclone_selfupdate/

1. Please search the old issues first for your idea and +1 or comment on an existing issue if possible.
2. Discuss on the forum first: https://forum.rclone.org/
If it still isn't there, here is a checklist of things to do:

1. Search the old issues for your idea and +1 or comment on an existing issue if possible.
2. Discuss on the forum: https://forum.rclone.org/
3. Make a feature request issue (this is the right place!).
4. Be prepared to get involved making the feature :-)

@@ -22,6 +26,9 @@ The Rclone Developers

-->

#### The associated forum post URL from `https://forum.rclone.org`


#### What is your current rclone version (output from `rclone version`)?


@@ -34,3 +41,11 @@ The Rclone Developers

#### How do you think rclone should be changed to solve that?



<!--- Please keep the note below for others who read your feature request. -->

#### How to use GitHub

* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
* Subscribe to receive notifications on status change and new comments.
```
.github/workflows/build.yml (vendored, 157 lines changed)
```
@@ -12,9 +12,15 @@ on:
    tags:
      - '*'
  pull_request:
  workflow_dispatch:
    inputs:
      manual:
        required: true
        default: true

jobs:
  build:
    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
    timeout-minutes: 60
    strategy:
      fail-fast: false
```
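The `workflow_dispatch` trigger added above lets the build be started by hand. As a sketch of how that could be exercised with the GitHub CLI (the repository and input value shown here are illustrative, not taken from the diff):

```sh
# kick off a manual build, setting the workflow's "manual" input
gh workflow run build.yml --repo rclone/rclone -f manual=true
```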
```
@@ -30,6 +36,7 @@ jobs:
          check: true
          quicktest: true
          racequicktest: true
          librclonetest: true
          deploy: true

        - job_name: mac_amd64

@@ -187,6 +194,14 @@ jobs:
          make racequicktest
        if: matrix.racequicktest

      - name: Run librclone tests
        shell: bash
        run: |
          make -C librclone/ctest test
          make -C librclone/ctest clean
          librclone/python/test_rclone.py
        if: matrix.librclonetest

      - name: Code quality test
        shell: bash
        run: |

@@ -213,50 +228,110 @@ jobs:
        # Deploy binaries if enabled in config && not a PR && not a fork
        if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'

  xgo:
    timeout-minutes: 60
    name: "xgo cross compile"
    runs-on: ubuntu-latest
  android:
    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
    timeout-minutes: 30
    name: "android-all"
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

    steps:
      # Upgrade together with NDK version
      - name: Set up Go 1.14
        uses: actions/setup-go@v1
        with:
          go-version: 1.14

      - name: Checkout
        uses: actions/checkout@v1
        with:
          # Checkout into a fixed path to avoid import path problems on go < 1.11
          path: ./src/github.com/rclone/rclone
      # Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
      - name: Force NDK version
        run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;21.4.7075529" | grep -v = || true

      - name: Set environment variables
        shell: bash
        run: |
          echo 'GOPATH=${{ runner.workspace }}' >> $GITHUB_ENV
          echo '${{ runner.workspace }}/bin' >> $GITHUB_PATH
      - name: Go module cache
        uses: actions/cache@v2
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-

      - name: Cross-compile rclone
        run: |
          docker pull billziss/xgo-cgofuse
          GO111MODULE=off go get -v github.com/karalabe/xgo # don't add to go.mod
          # xgo \
          #     -image=billziss/xgo-cgofuse \
          #     -targets=darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
          #     -tags cmount \
          #     -dest build \
          #     .
          xgo \
              -image=billziss/xgo-cgofuse \
              -targets=android/*,ios/* \
              -dest build \
              .
      - name: Set global environment variables
        shell: bash
        run: |
          echo "VERSION=$(make version)" >> $GITHUB_ENV

      - name: Build rclone
        shell: bash
        run: |
          make
      - name: build native rclone
        run: |
          make

      - name: Upload artifacts
        run: |
          make ci_upload
        env:
          RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
        # Upload artifacts if not a PR && not a fork
        if: github.head_ref == '' && github.repository == 'rclone/rclone'
      - name: install gomobile
        run: |
          go get golang.org/x/mobile/cmd/gobind
          go get golang.org/x/mobile/cmd/gomobile
          env PATH=$PATH:~/go/bin gomobile init

      - name: arm-v7a gomobile build
        run: env PATH=$PATH:~/go/bin gomobile bind -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile

      - name: arm-v7a Set environment variables
        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=arm' >> $GITHUB_ENV
          echo 'GOARM=7' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
      - name: arm-v7a build
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-armv7a .

      - name: arm64-v8a Set environment variables
        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=arm64' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

      - name: arm64-v8a build
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-armv8a .

      - name: x86 Set environment variables
        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=386' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

      - name: x86 build
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-x86 .

      - name: x64 Set environment variables
        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=amd64' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

      - name: x64 build
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-x64 .

      - name: Upload artifacts
        run: |
          make ci_upload
        env:
          RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
        # Upload artifacts if not a PR && not a fork
        if: github.head_ref == '' && github.repository == 'rclone/rclone'
```
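The repeated `>> $GITHUB_ENV` and `>> $GITHUB_PATH` writes above rely on a standard GitHub Actions mechanism rather than anything rclone-specific; a minimal sketch of the pattern:

```sh
# $GITHUB_ENV names a file; each KEY=value line appended to it becomes an
# environment variable in all *subsequent* steps of the same job.
echo "CGO_ENABLED=1" >> "$GITHUB_ENV"
# $GITHUB_PATH works the same way for PATH entries.
echo "$HOME/go/bin" >> "$GITHUB_PATH"
```

This is why each "Set environment variables" step can configure the NDK compiler for the `go build` step that follows it.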
```
@@ -7,6 +7,7 @@ on:

jobs:
  build:
    if: github.repository == 'rclone/rclone'
    runs-on: ubuntu-latest
    name: Build image job
    steps:

@@ -6,6 +6,7 @@ on:

jobs:
  build:
    if: github.repository == 'rclone/rclone'
    runs-on: ubuntu-latest
    name: Build image job
    steps:
```
.gitignore (vendored, 3 lines changed)
```
@@ -1,6 +1,7 @@
*~
_junk/
rclone
rclone.exe
build
docs/public
rclone.iml
@@ -10,3 +11,5 @@ rclone.iml
*.log
*.iml
fuzz-build.zip
*.orig
*.rej
```
```
@@ -33,10 +33,11 @@ page](https://github.com/rclone/rclone).

Now in your terminal

    go get -u github.com/rclone/rclone
    cd $GOPATH/src/github.com/rclone/rclone
    git clone https://github.com/rclone/rclone.git
    cd rclone
    git remote rename origin upstream
    git remote add origin git@github.com:YOURUSER/rclone.git
    go build

Make a branch to add your new feature
```
MANUAL.html (generated, 1289 lines changed): file diff suppressed because it is too large.

MANUAL.txt (generated, 1867 lines changed): file diff suppressed because it is too large.
Makefile (8 lines changed)
```
@@ -119,7 +119,7 @@ doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
rclone.1: MANUAL.md
	pandoc -s --from markdown-smart --to man MANUAL.md -o rclone.1

MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs
MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs rcdocs
	./bin/make_manual.py

MANUAL.html: MANUAL.md

@@ -187,10 +187,10 @@ upload_github:
	./bin/upload-github $(TAG)

cross: doc
	go run bin/cross-compile.go -release current $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
	go run bin/cross-compile.go -release current $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)

beta:
	go run bin/cross-compile.go $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
	go run bin/cross-compile.go $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
	rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
	@echo Beta release ready at https://pub.rclone.org/$(TAG)/

@@ -198,7 +198,7 @@ log_since_last_release:
	git log $(LAST_TAG)..

compile_all:
	go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
	go run bin/cross-compile.go -compile-only $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)

ci_upload:
	sudo chown -R $$USER build
```
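The new `$(BUILD_FLAGS)` variable is simply forwarded to `bin/cross-compile.go`, so extra arguments can be injected per invocation. A hypothetical example (the `-exclude` value is illustrative, not taken from the diff):

```sh
# hypothetical: pass extra cross-compile.go arguments for one beta build
make beta BUILD_FLAGS='-exclude "^windows/"'
```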
```
@@ -62,6 +62,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
  * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
  * Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
  * Seafile [:page_facing_up:](https://rclone.org/seafile/)
  * SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
  * SFTP [:page_facing_up:](https://rclone.org/sftp/)
  * StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
  * SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)

@@ -87,7 +88,6 @@ Please see [the full list of all storage providers and their features](https://r
  * Optional large file chunking ([Chunker](https://rclone.org/chunker/))
  * Optional transparent compression ([Compress](https://rclone.org/compress/))
  * Optional encryption ([Crypt](https://rclone.org/crypt/))
  * Optional cache ([Cache](https://rclone.org/cache/))
  * Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
  * Multi-threaded downloads to local disk
  * Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna
```
RELEASE.md (18 lines changed)
````
@@ -76,6 +76,24 @@ Now

The rclone docker image should autobuild via GitHub actions. If it doesn't
or needs to be updated then rebuild like this.

See: https://github.com/ilteoood/docker_buildx/issues/19
See: https://github.com/ilteoood/docker_buildx/blob/master/scripts/install_buildx.sh

```
git co v1.54.1
docker pull golang
export DOCKER_CLI_EXPERIMENTAL=enabled
docker buildx create --name actions_builder --use
docker run --rm --privileged docker/binfmt:820fdd95a9972a5308930a2bdfb8573dd4447ad3
docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
SUPPORTED_PLATFORMS=$(docker buildx inspect --bootstrap | grep 'Platforms:*.*' | cut -d : -f2,3)
echo "Supported platforms: $SUPPORTED_PLATFORMS"
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
docker buildx stop actions_builder
```

### Old build for linux/amd64 only

```
docker pull golang
docker build --rm --ulimit memlock=67108864 -t rclone/rclone:1.52.0 -t rclone/rclone:1.52 -t rclone/rclone:1 -t rclone/rclone:latest .
```
````
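After a multi-arch push like the one above, the resulting manifest can be checked with buildx's imagetools subcommand (the tag shown is the one from the snippet):

```sh
# list the platforms actually published under the tag
docker buildx imagetools inspect rclone/rclone:1.54.1
```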
```
@@ -11,6 +11,7 @@ import (
	_ "github.com/rclone/rclone/backend/local" // pull in test backend
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configfile"
	"github.com/stretchr/testify/require"
)

@@ -19,7 +20,7 @@ var (
)

func prepare(t *testing.T, root string) {
	config.LoadConfig(context.Background())
	configfile.Install()

	// Configure the remote
	config.FileSet(remoteName, "type", "alias")

@@ -41,6 +41,7 @@ import (
	_ "github.com/rclone/rclone/backend/swift"
	_ "github.com/rclone/rclone/backend/tardigrade"
	_ "github.com/rclone/rclone/backend/union"
	_ "github.com/rclone/rclone/backend/uptobox"
	_ "github.com/rclone/rclone/backend/webdav"
	_ "github.com/rclone/rclone/backend/yandex"
	_ "github.com/rclone/rclone/backend/zoho"
```
```
@@ -16,7 +16,6 @@ import (
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
	"path"
	"strings"

@@ -70,11 +69,10 @@ func init() {
		Prefix:      "acd",
		Description: "Amazon Drive",
		NewFs:       NewFs,
		Config: func(ctx context.Context, name string, m configmap.Mapper) {
			err := oauthutil.Config(ctx, "amazon cloud drive", name, m, acdConfig, nil)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			}
		Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
			return oauthutil.ConfigOut("", &oauthutil.Options{
				OAuth2Config: acdConfig,
			})
		},
		Options: append(oauthutil.SharedOptions, []fs.Option{{
			Name: "checkpoint",

@@ -83,16 +81,16 @@ func init() {
			Advanced: true,
		}, {
			Name: "upload_wait_per_gb",
			Help: `Additional time per GB to wait after a failed complete upload to see if it appears.
			Help: `Additional time per GiB to wait after a failed complete upload to see if it appears.

Sometimes Amazon Drive gives an error when a file has been fully
uploaded but the file appears anyway after a little while. This
happens sometimes for files over 1GB in size and nearly every time for
files bigger than 10GB. This parameter controls the time rclone waits
happens sometimes for files over 1 GiB in size and nearly every time for
files bigger than 10 GiB. This parameter controls the time rclone waits
for the file to appear.

The default value for this parameter is 3 minutes per GB, so by
default it will wait 3 minutes for every GB uploaded to see if the
The default value for this parameter is 3 minutes per GiB, so by
default it will wait 3 minutes for every GiB uploaded to see if the
file appears.

You can disable this feature by setting it to 0. This may cause

@@ -112,7 +110,7 @@ in this situation.`,

Files this size or more will be downloaded via their "tempLink". This
is to work around a problem with Amazon Drive which blocks downloads
of files bigger than about 10GB. The default for this is 9GB which
of files bigger than about 10 GiB. The default for this is 9 GiB which
shouldn't need to be changed.

To download files above this threshold, rclone requests a "tempLink"

@@ -205,7 +203,10 @@ var retryErrorCodes = []int{

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	if resp != nil {
		if resp.StatusCode == 401 {
			f.tokenRenewer.Invalidate()

@@ -280,7 +281,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

	// Renew the token in the background
	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
		_, err := f.getRootInfo()
		_, err := f.getRootInfo(ctx)
		return err
	})

@@ -288,14 +289,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		_, resp, err = f.c.Account.GetEndpoints()
		return f.shouldRetry(resp, err)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "failed to get endpoints")
	}

	// Get rootID
	rootInfo, err := f.getRootInfo()
	rootInfo, err := f.getRootInfo(ctx)
	if err != nil || rootInfo.Id == nil {
		return nil, errors.Wrap(err, "failed to get root")
	}

@@ -337,11 +338,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}

// getRootInfo gets the root folder info
func (f *Fs) getRootInfo() (rootInfo *acd.Folder, err error) {
func (f *Fs) getRootInfo(ctx context.Context) (rootInfo *acd.Folder, err error) {
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		rootInfo, resp, err = f.c.Nodes.GetRoot()
		return f.shouldRetry(resp, err)
		return f.shouldRetry(ctx, resp, err)
	})
	return rootInfo, err
}

@@ -380,7 +381,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
	var subFolder *acd.Folder
	err = f.pacer.Call(func() (bool, error) {
		subFolder, resp, err = folder.GetFolder(f.opt.Enc.FromStandardName(leaf))
		return f.shouldRetry(resp, err)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		if err == acd.ErrorNodeNotFound {

@@ -407,7 +408,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
	var info *acd.Folder
	err = f.pacer.Call(func() (bool, error) {
		info, resp, err = folder.CreateFolder(f.opt.Enc.FromStandardName(leaf))
		return f.shouldRetry(resp, err)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		//fmt.Printf("...Error %v\n", err)

@@ -428,7 +429,7 @@ type listAllFn func(*acd.Node) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(ctx context.Context, dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
	query := "parents:" + dirID
	if directoriesOnly {
		query += " AND kind:" + folderKind

@@ -449,7 +450,7 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
		var resp *http.Response
		err = f.pacer.CallNoRetry(func() (bool, error) {
			nodes, resp, err = f.c.Nodes.GetNodes(&opts)
			return f.shouldRetry(resp, err)
			return f.shouldRetry(ctx, resp, err)
		})
		if err != nil {
			return false, err

@@ -508,7 +509,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
	var iErr error
	for tries := 1; tries <= maxTries; tries++ {
		entries = nil
		_, err = f.listAll(directoryID, "", false, false, func(node *acd.Node) bool {
		_, err = f.listAll(ctx, directoryID, "", false, false, func(node *acd.Node) bool {
			remote := path.Join(dir, *node.Name)
			switch *node.Kind {
			case folderKind:

@@ -667,7 +668,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
		if ok {
			return false, nil
		}
		return f.shouldRetry(resp, err)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err

@@ -708,7 +709,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
	if err != nil {
		return nil, err
	}
	err = f.moveNode(srcObj.remote, dstLeaf, dstDirectoryID, srcObj.info, srcLeaf, srcDirectoryID, false)
	err = f.moveNode(ctx, srcObj.remote, dstLeaf, dstDirectoryID, srcObj.info, srcLeaf, srcDirectoryID, false)
	if err != nil {
		return nil, err
	}

@@ -803,7 +804,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
	var jsonStr string
	err = srcFs.pacer.Call(func() (bool, error) {
		jsonStr, err = srcInfo.GetMetadata()
		return srcFs.shouldRetry(nil, err)
		return srcFs.shouldRetry(ctx, nil, err)
	})
	if err != nil {
		fs.Debugf(src, "DirMove error: error reading src metadata: %v", err)

@@ -815,7 +816,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
		return err
	}

	err = f.moveNode(srcPath, dstLeaf, dstDirectoryID, srcInfo, srcLeaf, srcDirectoryID, true)
	err = f.moveNode(ctx, srcPath, dstLeaf, dstDirectoryID, srcInfo, srcLeaf, srcDirectoryID, true)
	if err != nil {
		return err
	}

@@ -840,7 +841,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
	if check {
		// check directory is empty
		empty := true
		_, err = f.listAll(rootID, "", false, false, func(node *acd.Node) bool {
		_, err = f.listAll(ctx, rootID, "", false, false, func(node *acd.Node) bool {
			switch *node.Kind {
			case folderKind:
				empty = false

@@ -865,7 +866,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = node.Trash()
		return f.shouldRetry(resp, err)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return err

@@ -987,7 +988,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
	var info *acd.File
	err = o.fs.pacer.Call(func() (bool, error) {
		info, resp, err = folder.GetFile(o.fs.opt.Enc.FromStandardName(leaf))
		return o.fs.shouldRetry(resp, err)
		return o.fs.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		if err == acd.ErrorNodeNotFound {

@@ -1044,7 +1045,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
		} else {
			in, resp, err = file.OpenTempURLHeaders(o.fs.noAuthClient, headers)
		}
		return o.fs.shouldRetry(resp, err)
		return o.fs.shouldRetry(ctx, resp, err)
	})
	return in, err
}

@@ -1067,7 +1068,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		if ok {
			return false, nil
		}
		return o.fs.shouldRetry(resp, err)
		return o.fs.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return err

@@ -1077,70 +1078,70 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}

// Remove a node
func (f *Fs) removeNode(info *acd.Node) error {
func (f *Fs) removeNode(ctx context.Context, info *acd.Node) error {
	var resp *http.Response
	var err error
	err = f.pacer.Call(func() (bool, error) {
		resp, err = info.Trash()
		return f.shouldRetry(resp, err)
		return f.shouldRetry(ctx, resp, err)
	})
	return err
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	return o.fs.removeNode(o.info)
	return o.fs.removeNode(ctx, o.info)
}

// Restore a node
func (f *Fs) restoreNode(info *acd.Node) (newInfo *acd.Node, err error) {
func (f *Fs) restoreNode(ctx context.Context, info *acd.Node) (newInfo *acd.Node, err error) {
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		newInfo, resp, err = info.Restore()
		return f.shouldRetry(resp, err)
		return f.shouldRetry(ctx, resp, err)
	})
	return newInfo, err
}

// Changes name of given node
func (f *Fs) renameNode(info *acd.Node, newName string) (newInfo *acd.Node, err error) {
func (f *Fs) renameNode(ctx context.Context, info *acd.Node, newName string) (newInfo *acd.Node, err error) {
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		newInfo, resp, err = info.Rename(f.opt.Enc.FromStandardName(newName))
		return f.shouldRetry(resp, err)
		return f.shouldRetry(ctx, resp, err)
	})
	return newInfo, err
}

// Replaces one parent with another, effectively moving the file. Leaves other
// parents untouched. ReplaceParent cannot be used when the file is trashed.
func (f *Fs) replaceParent(info *acd.Node, oldParentID string, newParentID string) error {
func (f *Fs) replaceParent(ctx context.Context, info *acd.Node, oldParentID string, newParentID string) error {
	return f.pacer.Call(func() (bool, error) {
		resp, err := info.ReplaceParent(oldParentID, newParentID)
		return f.shouldRetry(resp, err)
		return f.shouldRetry(ctx, resp, err)
	})
}

// Adds one additional parent to object.
func (f *Fs) addParent(info *acd.Node, newParentID string) error {
func (f *Fs) addParent(ctx context.Context, info *acd.Node, newParentID string) error {
	return f.pacer.Call(func() (bool, error) {
		resp, err := info.AddParent(newParentID)
		return f.shouldRetry(resp, err)
		return f.shouldRetry(ctx, resp, err)
	})
}

// Remove given parent from object, leaving the other possible
// parents untouched. Object can end up having no parents.
func (f *Fs) removeParent(info *acd.Node, parentID string) error {
func (f *Fs) removeParent(ctx context.Context, info *acd.Node, parentID string) error {
	return f.pacer.Call(func() (bool, error) {
		resp, err := info.RemoveParent(parentID)
		return f.shouldRetry(resp, err)
		return f.shouldRetry(ctx, resp, err)
	})
}

// moveNode moves the node given from the srcLeaf,srcDirectoryID to
// the dstLeaf,dstDirectoryID
func (f *Fs) moveNode(name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, srcLeaf, srcDirectoryID string, useDirErrorMsgs bool) (err error) {
func (f *Fs) moveNode(ctx context.Context, name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, srcLeaf, srcDirectoryID string, useDirErrorMsgs bool) (err error) {
	// fs.Debugf(name, "moveNode dst(%q,%s) <- src(%q,%s)", dstLeaf, dstDirectoryID, srcLeaf, srcDirectoryID)
	cantMove := fs.ErrorCantMove
	if useDirErrorMsgs {

@@ -1154,7 +1155,7 @@ func (f *Fs) moveNode(name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, s

	if srcLeaf != dstLeaf {
		// fs.Debugf(name, "renaming")
		_, err = f.renameNode(srcInfo, dstLeaf)
		_, err = f.renameNode(ctx, srcInfo, dstLeaf)
		if err != nil {
			fs.Debugf(name, "Move: quick path rename failed: %v", err)
			goto OnConflict

@@ -1162,7 +1163,7 @@ func (f *Fs) moveNode(name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, s
	}
	if srcDirectoryID != dstDirectoryID {
		// fs.Debugf(name, "trying parent replace: %s -> %s", oldParentID, newParentID)
		err = f.replaceParent(srcInfo, srcDirectoryID, dstDirectoryID)
		err = f.replaceParent(ctx, srcInfo, srcDirectoryID, dstDirectoryID)
		if err != nil {
			fs.Debugf(name, "Move: quick path parent replace failed: %v", err)
			return err

@@ -1175,13 +1176,13 @@ OnConflict:
	fs.Debugf(name, "Could not directly rename file, presumably because there was a file with the same name already. Instead, the file will now be trashed where such operations do not cause errors. It will be restored to the correct parent after. If any of the subsequent calls fails, the rename/move will be in an invalid state.")

	// fs.Debugf(name, "Trashing file")
	err = f.removeNode(srcInfo)
	err = f.removeNode(ctx, srcInfo)
	if err != nil {
		fs.Debugf(name, "Move: remove node failed: %v", err)
		return err
	}
	// fs.Debugf(name, "Renaming file")
	_, err = f.renameNode(srcInfo, dstLeaf)
	_, err = f.renameNode(ctx, srcInfo, dstLeaf)
	if err != nil {
		fs.Debugf(name, "Move: rename node failed: %v", err)
		return err

@@ -1189,19 +1190,19 @@ OnConflict:
	// note: replacing parent is forbidden by API, modifying them individually is
	// okay though
	// fs.Debugf(name, "Adding target parent")
	err = f.addParent(srcInfo, dstDirectoryID)
	err = f.addParent(ctx, srcInfo, dstDirectoryID)
	if err != nil {
		fs.Debugf(name, "Move: addParent failed: %v", err)
		return err
	}
	// fs.Debugf(name, "removing original parent")
	err = f.removeParent(srcInfo, srcDirectoryID)
	err = f.removeParent(ctx, srcInfo, srcDirectoryID)
	if err != nil {
		fs.Debugf(name, "Move: removeParent failed: %v", err)
		return err
	}
	// fs.Debugf(name, "Restoring")
	_, err = f.restoreNode(srcInfo)
	_, err = f.restoreNode(ctx, srcInfo)
	if err != nil {
		fs.Debugf(name, "Move: restoreNode node failed: %v", err)
		return err
```
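The mechanical change running through this backend is that every retry predicate now takes the request context, so cancellation stops the pacer's retry loop instead of retrying into a dead context. A minimal, self-contained sketch of the pattern, using the error-classification helpers from rclone's fserrors package (the receiver is dropped here for brevity):

```go
import (
	"context"
	"net/http"

	"github.com/rclone/rclone/fs/fserrors"
)

// Illustrative set of retryable HTTP statuses.
var retryErrorCodes = []int{429, 500, 502, 503, 504}

// shouldRetry reports whether a call should be retried, refusing to
// retry at all once ctx has been cancelled or has timed out.
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	// ContextError also rewrites err to the context's error so the
	// caller sees the real cause of the failure.
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	// Otherwise classify the error / HTTP status as retryable or not.
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
```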
```
@@ -47,8 +47,8 @@ const (
	timeFormatIn          = time.RFC3339
	timeFormatOut         = "2006-01-02T15:04:05.000000000Z07:00"
	storageDefaultBaseURL = "blob.core.windows.net"
	defaultChunkSize      = 4 * fs.MebiByte
	maxChunkSize          = 100 * fs.MebiByte
	defaultChunkSize      = 4 * fs.Mebi
	maxChunkSize          = 100 * fs.Mebi
	uploadConcurrency     = 4
	defaultAccessTier     = azblob.AccessTierNone
	maxTryTimeout         = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)

@@ -129,11 +129,11 @@ msi_client_id, or msi_mi_res_id parameters.`,
			Advanced: true,
		}, {
			Name: "upload_cutoff",
			Help: "Cutoff for switching to chunked upload (<= 256MB). (Deprecated)",
			Help: "Cutoff for switching to chunked upload (<= 256 MiB). (Deprecated)",
			Advanced: true,
		}, {
			Name: "chunk_size",
			Help: `Upload chunk size (<= 100MB).
			Help: `Upload chunk size (<= 100 MiB).

Note that this is stored in memory and there may be up to
"--transfers" chunks stored at once in memory.`,

@@ -217,6 +217,23 @@ This option controls how often unused buffers will be removed from the pool.`,
				encoder.EncodeDel |
				encoder.EncodeBackSlash |
				encoder.EncodeRightPeriod),
		}, {
			Name:    "public_access",
			Help:    "Public access level of a container: blob, container.",
			Default: string(azblob.PublicAccessNone),
			Examples: []fs.OptionExample{
				{
					Value: string(azblob.PublicAccessNone),
					Help:  "The container and its blobs can be accessed only with an authorized request. It's a default value",
				}, {
					Value: string(azblob.PublicAccessBlob),
					Help:  "Blob data within this container can be read via anonymous request.",
				}, {
					Value: string(azblob.PublicAccessContainer),
					Help:  "Allow full public read access for container and blob data.",
				},
			},
			Advanced: true,
		}},
	})
}

@@ -241,6 +258,7 @@ type Options struct {
	MemoryPoolFlushTime fs.Duration          `config:"memory_pool_flush_time"`
	MemoryPoolUseMmap   bool                 `config:"memory_pool_use_mmap"`
	Enc                 encoder.MultiEncoder `config:"encoding"`
	PublicAccess        string               `config:"public_access"`
}

// Fs represents a remote azure server

@@ -262,6 +280,7 @@ type Fs struct {
	imdsPacer    *fs.Pacer               // Same but for IMDS
	uploadToken  *pacer.TokenDispenser   // control concurrency
	pool         *pool.Pool              // memory pool
	publicAccess azblob.PublicAccessType // Container Public Access Level
}

// Object describes an azure object

@@ -335,6 +354,19 @@ func validateAccessTier(tier string) bool {
	}
}

// validatePublicAccess checks if azureblob supports use supplied public access level
func validatePublicAccess(publicAccess string) bool {
	switch publicAccess {
	case string(azblob.PublicAccessNone),
		string(azblob.PublicAccessBlob),
		string(azblob.PublicAccessContainer):
		// valid cases
		return true
	default:
		return false
	}
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	401, // Unauthorized (e.g. "Token has expired")

@@ -347,7 +379,10 @@ var retryErrorCodes = []int{

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetry(err error) (bool, error) {
func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	// FIXME interpret special errors - more to do here
	if storageErr, ok := err.(azblob.StorageError); ok {
		switch storageErr.ServiceCode() {

@@ -369,7 +404,7 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
}

func checkUploadChunkSize(cs fs.SizeSuffix) error {
	const minChunkSize = fs.Byte
	const minChunkSize = fs.SizeSuffixBase
	if cs < minChunkSize {
		return errors.Errorf("%s is less than %s", cs, minChunkSize)
	}

@@ -499,6 +534,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
			string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
	}

	if !validatePublicAccess((opt.PublicAccess)) {
		return nil, errors.Errorf("Azure Blob: Supported public access level are %s and %s",
			string(azblob.PublicAccessBlob), string(azblob.PublicAccessContainer))
	}

	ci := fs.GetConfig(ctx)
	f := &Fs{
		name: name,

@@ -517,6 +557,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
			opt.MemoryPoolUseMmap,
		),
	}
	f.publicAccess = azblob.PublicAccessType(opt.PublicAccess)
	f.imdsPacer.SetRetries(5) // per IMDS documentation
	f.setRoot(root)
	f.features = (&fs.Features{

@@ -578,7 +619,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		// Retry as specified by the documentation:
		// https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token#retry-guidance
		token, err = GetMSIToken(ctx, userMSI)
		return f.shouldRetry(err)
		return f.shouldRetry(ctx, err)
	})

	if err != nil {

@@ -594,7 +635,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		var refreshedToken adal.Token
		err := f.imdsPacer.Call(func() (bool, error) {
			refreshedToken, err = GetMSIToken(ctx, userMSI)
			return f.shouldRetry(err)
			return f.shouldRetry(ctx, err)
		})
		if err != nil {
			// Failed to refresh.

@@ -803,7 +844,7 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
	err := f.pacer.Call(func() (bool, error) {
		var err error
		response, err = f.cntURL(container).ListBlobsHierarchySegment(ctx, marker, delimiter, options)
		return f.shouldRetry(err)
		return f.shouldRetry(ctx, err)
	})

	if err != nil {

@@ -1029,7 +1070,7 @@ func (f *Fs) listContainersToFn(fn listContainerFn) error {
	err := f.pacer.Call(func() (bool, error) {
		var err error
		response, err = f.svcURL.ListContainersSegment(ctx, marker, params)
		return f.shouldRetry(err)
		return f.shouldRetry(ctx, err)
	})
	if err != nil {
		return err

@@ -1081,7 +1122,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
	}
	// now try to create the container
	return f.pacer.Call(func() (bool, error) {
		_, err := f.cntURL(container).Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
		_, err := f.cntURL(container).Create(ctx, azblob.Metadata{}, f.publicAccess)
		if err != nil {
			if storageErr, ok := err.(azblob.StorageError); ok {
				switch storageErr.ServiceCode() {

@@ -1098,7 +1139,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
				}
			}
		}
		return f.shouldRetry(err)
		return f.shouldRetry(ctx, err)
	})
	}, nil)
}

@@ -1136,10 +1177,10 @@ func (f *Fs) deleteContainer(ctx context.Context, container string) error {
				return false, fs.ErrorDirNotFound
			}

			return f.shouldRetry(err)
			return f.shouldRetry(ctx, err)
		}

		return f.shouldRetry(err)
		return f.shouldRetry(ctx, err)
	})
	})
}

@@ -1212,7 +1253,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

	err = f.pacer.Call(func() (bool, error) {
		startCopy, err = dstBlobURL.StartCopyFromURL(ctx, *source, nil, azblob.ModifiedAccessConditions{}, options, azblob.AccessTierType(f.opt.AccessTier), nil)
		return f.shouldRetry(err)
		return f.shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, err

@@ -1373,7 +1414,7 @@ func (o *Object) readMetaData() (err error) {
	var blobProperties *azblob.BlobGetPropertiesResponse
	err = o.fs.pacer.Call(func() (bool, error) {
		blobProperties, err = blob.GetProperties(ctx, options, azblob.ClientProvidedKeyOptions{})
		return o.fs.shouldRetry(err)
		return o.fs.shouldRetry(ctx, err)
	})
	if err != nil {
		// On directories - GetProperties does not work and current SDK does not populate service code correctly hence check regular http response as well

@@ -1408,7 +1449,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	blob := o.getBlobReference()
	err := o.fs.pacer.Call(func() (bool, error) {
		_, err := blob.SetMetadata(ctx, o.meta, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
		return o.fs.shouldRetry(err)
		return o.fs.shouldRetry(ctx, err)
	})
	if err != nil {
		return err

@@ -1451,7 +1492,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
	var downloadResponse *azblob.DownloadResponse
	err = o.fs.pacer.Call(func() (bool, error) {
		downloadResponse, err = blob.Download(ctx, offset, count, ac, false, azblob.ClientProvidedKeyOptions{})
		return o.fs.shouldRetry(err)
		return o.fs.shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "failed to open for download")

@@ -1592,7 +1633,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		// Stream contents of the reader object to the given blob URL
		blockBlobURL := blob.ToBlockBlobURL()
		_, err = azblob.UploadStreamToBlockBlob(ctx, in, blockBlobURL, putBlobOptions)
		return o.fs.shouldRetry(err)
		return o.fs.shouldRetry(ctx, err)
	})
	if err != nil {
		return err

@@ -1620,7 +1661,7 @@ func (o *Object) Remove(ctx context.Context) error {
	ac := azblob.BlobAccessConditions{}
	return o.fs.pacer.Call(func() (bool, error) {
		_, err := blob.Delete(ctx, snapShotOptions, ac)
		return o.fs.shouldRetry(err)
		return o.fs.shouldRetry(ctx, err)
	})
}

@@ -1649,7 +1690,7 @@ func (o *Object) SetTier(tier string) error {
	ctx := context.Background()
	err := o.fs.pacer.Call(func() (bool, error) {
		_, err := blob.SetTier(ctx, desiredAccessTier, azblob.LeaseAccessConditions{})
		return o.fs.shouldRetry(err)
		return o.fs.shouldRetry(ctx, err)
	})

	if err != nil {
```
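A sketch of how the new azureblob `public_access` option might be used once released (the remote name, account, and key are placeholders; the option name and values come from the diff above):

```sh
# hypothetical remote: containers created through it get blob-level public read
rclone config create myblob azureblob account ACCOUNT key KEY public_access blob
rclone mkdir myblob:public-container
```

With `public_access blob`, `makeContainer` passes `f.publicAccess` instead of the previously hard-coded `azblob.PublicAccessNone`, so blobs in new containers are readable by anonymous request.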
```
@@ -2,12 +2,11 @@ package api

import (
	"fmt"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/lib/version"
)

// Error describes a B2 error response

@@ -63,16 +62,17 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
	return nil
}

const versionFormat = "-v2006-01-02-150405.000"

// HasVersion returns true if it looks like the passed filename has a timestamp on it.
//
// Note that the passed filename's timestamp may still be invalid even if this
// function returns true.
func HasVersion(remote string) bool {
	return version.Match(remote)
}

// AddVersion adds the timestamp as a version string into the filename passed in.
func (t Timestamp) AddVersion(remote string) string {
	ext := path.Ext(remote)
	base := remote[:len(remote)-len(ext)]
	s := time.Time(t).Format(versionFormat)
	// Replace the '.' with a '-'
	s = strings.Replace(s, ".", "-", -1)
	return base + s + ext
	return version.Add(remote, time.Time(t))
}

// RemoveVersion removes the timestamp from a filename as a version string.

@@ -80,24 +80,9 @@ func (t Timestamp) AddVersion(remote string) string {
// It returns the new file name and a timestamp, or the old filename
// and a zero timestamp.
func RemoveVersion(remote string) (t Timestamp, newRemote string) {
	newRemote = remote
	ext := path.Ext(remote)
	base := remote[:len(remote)-len(ext)]
	if len(base) < len(versionFormat) {
		return
	}
	versionStart := len(base) - len(versionFormat)
	// Check it ends in -xxx
	if base[len(base)-4] != '-' {
		return
	}
	// Replace with .xxx for parsing
	base = base[:len(base)-4] + "." + base[len(base)-3:]
	newT, err := time.Parse(versionFormat, base[versionStart:])
	if err != nil {
		return
	}
	return Timestamp(newT), base[:versionStart] + ext
	time, newRemote := version.Remove(remote)
	t = Timestamp(time)
	return
}

// IsZero returns true if the timestamp is uninitialized
```
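This refactor delegates the filename-version logic to `github.com/rclone/rclone/lib/version`. A sketch of the behaviour the B2 wrappers now forward to, with expected values inferred from the diff and the tests that follow:

```go
import (
	"time"

	"github.com/rclone/rclone/lib/version"
)

func example() {
	t := time.Date(2001, 2, 3, 4, 5, 6, 123000000, time.UTC)
	name := version.Add("potato.txt", t) // "potato-v2001-02-03-040506-123.txt"
	when, plain := version.Remove(name)  // when == t (millisecond precision), plain == "potato.txt"
	_ = version.Match(name)              // true when the name carries a version stamp
	_, _, _ = name, when, plain
}
```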
```
@@ -13,7 +13,6 @@ import (
var (
	emptyT api.Timestamp
	t0     = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123456789Z"))
	t0r    = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123000000Z"))
	t1     = api.Timestamp(fstest.Time("2001-02-03T04:05:06.123000000Z"))
)

@@ -36,40 +35,6 @@ func TestTimestampUnmarshalJSON(t *testing.T) {
	assert.Equal(t, (time.Time)(t1), (time.Time)(tActual))
}

func TestTimestampAddVersion(t *testing.T) {
	for _, test := range []struct {
		t        api.Timestamp
		in       string
		expected string
	}{
		{t0, "potato.txt", "potato-v1970-01-01-010101-123.txt"},
		{t1, "potato", "potato-v2001-02-03-040506-123"},
		{t1, "", "-v2001-02-03-040506-123"},
	} {
		actual := test.t.AddVersion(test.in)
		assert.Equal(t, test.expected, actual, test.in)
	}
}

func TestTimestampRemoveVersion(t *testing.T) {
	for _, test := range []struct {
		in             string
		expectedT      api.Timestamp
		expectedRemote string
	}{
		{"potato.txt", emptyT, "potato.txt"},
		{"potato-v1970-01-01-010101-123.txt", t0r, "potato.txt"},
		{"potato-v2001-02-03-040506-123", t1, "potato"},
		{"-v2001-02-03-040506-123", t1, ""},
		{"potato-v2A01-02-03-040506-123", emptyT, "potato-v2A01-02-03-040506-123"},
		{"potato-v2001-02-03-040506=123", emptyT, "potato-v2001-02-03-040506=123"},
	} {
		actualT, actualRemote := api.RemoveVersion(test.in)
		assert.Equal(t, test.expectedT, actualT, test.in)
		assert.Equal(t, test.expectedRemote, actualRemote, test.in)
	}
}

func TestTimestampIsZero(t *testing.T) {
	assert.True(t, emptyT.IsZero())
	assert.False(t, t0.IsZero())
```
@@ -54,10 +54,10 @@ const (
 	decayConstant = 1 // bigger for slower decay, exponential
 	maxParts      = 10000
 	maxVersions   = 100 // maximum number of versions we search in --b2-versions mode
-	minChunkSize        = 5 * fs.MebiByte
-	defaultChunkSize    = 96 * fs.MebiByte
-	defaultUploadCutoff = 200 * fs.MebiByte
-	largeFileCopyCutoff = 4 * fs.GibiByte // 5E9 is the max
+	minChunkSize        = 5 * fs.Mebi
+	defaultChunkSize    = 96 * fs.Mebi
+	defaultUploadCutoff = 200 * fs.Mebi
+	largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
 	memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
 	memoryPoolUseMmap   = false
 )
@@ -116,7 +116,7 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration

 Files above this size will be uploaded in chunks of "--b2-chunk-size".

-This value should be set no larger than 4.657GiB (== 5GB).`,
+This value should be set no larger than 4.657 GiB (== 5 GB).`,
 			Default:  defaultUploadCutoff,
 			Advanced: true,
 		}, {
@@ -126,7 +126,7 @@ This value should be set no larger than 4.657GiB (== 5GB).`,
 Any files larger than this that need to be server-side copied will be
 copied in chunks of this size.

-The minimum is 0 and the maximum is 4.6GB.`,
+The minimum is 0 and the maximum is 4.6 GiB.`,
 			Default:  largeFileCopyCutoff,
 			Advanced: true,
 		}, {
@@ -305,7 +305,10 @@ var retryErrorCodes = []int{

 // shouldRetryNoAuth returns a boolean as to whether this resp and err
 // deserve to be retried. It returns the err as a convenience
-func (f *Fs) shouldRetryNoReauth(resp *http.Response, err error) (bool, error) {
+func (f *Fs) shouldRetryNoReauth(ctx context.Context, resp *http.Response, err error) (bool, error) {
+	if fserrors.ContextError(ctx, &err) {
+		return false, err
+	}
 	// For 429 or 503 errors look at the Retry-After: header and
 	// set the retry appropriately, starting with a minimum of 1
 	// second if it isn't set.
@@ -336,7 +339,7 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
 		}
 		return true, err
 	}
-	return f.shouldRetryNoReauth(resp, err)
+	return f.shouldRetryNoReauth(ctx, resp, err)
 }

 // errorHandler parses a non 2xx error response into an error
@@ -504,7 +507,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
 	}
 	err := f.pacer.Call(func() (bool, error) {
 		resp, err := f.srv.CallJSON(ctx, &opts, nil, &f.info)
-		return f.shouldRetryNoReauth(resp, err)
+		return f.shouldRetryNoReauth(ctx, resp, err)
 	})
 	if err != nil {
 		return errors.Wrap(err, "failed to authenticate")
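The recurring change in these hunks is threading ctx into the retry predicate so a cancelled or timed-out context stops the pacer immediately instead of being retried. A minimal sketch of the pattern, assuming only the fserrors.ContextError helper used above (callRemote is a hypothetical stand-in for any HTTP call made inside a pacer):

	// shouldRetry aborts retries as soon as the context is done.
	func shouldRetry(ctx context.Context, err error) (bool, error) {
		// ContextError reports true (and rewrites err) if ctx is cancelled
		if fserrors.ContextError(ctx, &err) {
			return false, err // never retry a cancelled call
		}
		return err != nil, err // the real code is more selective about which errors retry
	}

	err := f.pacer.Call(func() (bool, error) {
		err := callRemote(ctx) // hypothetical request
		return shouldRetry(ctx, err)
	})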
@@ -1350,7 +1353,7 @@ func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string
 	}
 	var request = api.GetDownloadAuthorizationRequest{
 		BucketID:               bucketID,
-		FileNamePrefix:         f.opt.Enc.FromStandardPath(path.Join(f.root, remote)),
+		FileNamePrefix:         f.opt.Enc.FromStandardPath(path.Join(f.rootDirectory, remote)),
 		ValidDurationInSeconds: validDurationInSeconds,
 	}
 	var response api.GetDownloadAuthorizationResponse
@@ -1741,6 +1744,13 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
 		ContentType: resp.Header.Get("Content-Type"),
 		Info:        Info,
 	}
+	// When reading files from B2 via cloudflare using
+	// --b2-download-url cloudflare strips the Content-Length
+	// headers (presumably so it can inject stuff) so use the old
+	// length read from the listing.
+	if info.Size < 0 {
+		info.Size = o.size
+	}
 	return resp, info, nil
 }
@@ -230,14 +230,14 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
 	//
 	// The number of bytes in the file being uploaded. Note that
 	// this header is required; you cannot leave it out and just
-	// use chunked encoding. The minimum size of every part but
-	// the last one is 100MB.
+	// use chunked encoding.  The minimum size of every part but
+	// the last one is 100 MB (100,000,000 bytes)
 	//
 	// X-Bz-Content-Sha1
 	//
 	// The SHA1 checksum of the this part of the file. B2 will
 	// check this when the part is uploaded, to make sure that the
-	// data arrived correctly. The same SHA1 checksum must be
+	// data arrived correctly.  The same SHA1 checksum must be
 	// passed to b2_finish_large_file.
 	opts := rest.Opts{
 		Method: "POST",
@@ -36,13 +36,13 @@ func (t *Time) UnmarshalJSON(data []byte) error {

 // Error is returned from box when things go wrong
 type Error struct {
-	Type        string `json:"type"`
-	Status      int    `json:"status"`
-	Code        string `json:"code"`
-	ContextInfo json.RawMessage
-	HelpURL     string `json:"help_url"`
-	Message     string `json:"message"`
-	RequestID   string `json:"request_id"`
+	Type        string          `json:"type"`
+	Status      int             `json:"status"`
+	Code        string          `json:"code"`
+	ContextInfo json.RawMessage `json:"context_info"`
+	HelpURL     string          `json:"help_url"`
+	Message     string          `json:"message"`
+	RequestID   string          `json:"request_id"`
 }

 // Error returns a string for the error and satisfies the error interface
@@ -132,6 +132,38 @@ type UploadFile struct {
 	ContentModifiedAt Time `json:"content_modified_at"`
 }

+// PreUploadCheck is the request for upload preflight check
+type PreUploadCheck struct {
+	Name   string `json:"name"`
+	Parent Parent `json:"parent"`
+	Size   *int64 `json:"size,omitempty"`
+}
+
+// PreUploadCheckResponse is the response from upload preflight check
+// if successful
+type PreUploadCheckResponse struct {
+	UploadToken string `json:"upload_token"`
+	UploadURL   string `json:"upload_url"`
+}
+
+// PreUploadCheckConflict is returned in the ContextInfo error field
+// from PreUploadCheck when the error code is "item_name_in_use"
+type PreUploadCheckConflict struct {
+	Conflicts struct {
+		Type        string `json:"type"`
+		ID          string `json:"id"`
+		FileVersion struct {
+			Type string `json:"type"`
+			ID   string `json:"id"`
+			Sha1 string `json:"sha1"`
+		} `json:"file_version"`
+		SequenceID string `json:"sequence_id"`
+		Etag       string `json:"etag"`
+		Sha1       string `json:"sha1"`
+		Name       string `json:"name"`
+	} `json:"conflicts"`
+}
+
 // UpdateFileModTime is used in Update File Info
 type UpdateFileModTime struct {
 	ContentModifiedAt Time `json:"content_modified_at"`
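Note how Error.ContextInfo stays a json.RawMessage: the payload shape depends on the error code, so decoding is deferred until the caller knows what to expect. A sketch of the second-stage decode, assuming an *api.Error already returned from the client (decodeConflict is a hypothetical helper, not from the diff):

	// decodeConflict extracts the conflict details that Box attaches to
	// an "item_name_in_use" error; other codes carry different payloads.
	func decodeConflict(apiErr *api.Error) (*api.PreUploadCheckConflict, error) {
		if apiErr.Code != "item_name_in_use" {
			return nil, fmt.Errorf("unexpected error code %q", apiErr.Code)
		}
		var conflict api.PreUploadCheckConflict
		if err := json.Unmarshal(apiErr.ContextInfo, &conflict); err != nil {
			return nil, err
		}
		return &conflict, nil
	}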
@@ -17,7 +17,6 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
-	"log"
 	"net/http"
 	"net/url"
 	"path"
@@ -84,7 +83,7 @@ func init() {
 		Name:        "box",
 		Description: "Box",
 		NewFs:       NewFs,
-		Config: func(ctx context.Context, name string, m configmap.Mapper) {
+		Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
 			jsonFile, ok := m.Get("box_config_file")
 			boxSubType, boxSubTypeOk := m.Get("box_sub_type")
 			boxAccessToken, boxAccessTokenOk := m.Get("access_token")
@@ -93,15 +92,15 @@ func init() {
 			if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
 				err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
 				if err != nil {
-					log.Fatalf("Failed to configure token with jwt authentication: %v", err)
+					return nil, errors.Wrap(err, "failed to configure token with jwt authentication")
 				}
 				// Else, if not using an access token, use oauth2
 			} else if boxAccessToken == "" || !boxAccessTokenOk {
-				err = oauthutil.Config(ctx, "box", name, m, oauthConfig, nil)
-				if err != nil {
-					log.Fatalf("Failed to configure token with oauth authentication: %v", err)
-				}
+				return oauthutil.ConfigOut("", &oauthutil.Options{
+					OAuth2Config: oauthConfig,
+				})
 			}
+			return nil, nil
 		},
 		Options: append(oauthutil.SharedOptions, []fs.Option{{
 			Name: "root_folder_id",
@@ -126,7 +125,7 @@ func init() {
 			}},
 		}, {
 			Name:     "upload_cutoff",
-			Help:     "Cutoff for switching to multipart upload (>= 50MB).",
+			Help:     "Cutoff for switching to multipart upload (>= 50 MiB).",
 			Default:  fs.SizeSuffix(defaultUploadCutoff),
 			Advanced: true,
 		}, {
@@ -157,15 +156,15 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
 	jsonFile = env.ShellExpand(jsonFile)
 	boxConfig, err := getBoxConfig(jsonFile)
 	if err != nil {
-		log.Fatalf("Failed to configure token: %v", err)
+		return errors.Wrap(err, "get box config")
 	}
 	privateKey, err := getDecryptedPrivateKey(boxConfig)
 	if err != nil {
-		log.Fatalf("Failed to configure token: %v", err)
+		return errors.Wrap(err, "get decrypted private key")
 	}
 	claims, err := getClaims(boxConfig, boxSubType)
 	if err != nil {
-		log.Fatalf("Failed to configure token: %v", err)
+		return errors.Wrap(err, "get claims")
 	}
 	signingHeaders := getSigningHeaders(boxConfig)
 	queryParams := getQueryParams(boxConfig)
@@ -317,10 +316,13 @@ var retryErrorCodes = []int{

 // shouldRetry returns a boolean as to whether this resp and err
 // deserve to be retried. It returns the err as a convenience
-func shouldRetry(resp *http.Response, err error) (bool, error) {
+func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
+	if fserrors.ContextError(ctx, &err) {
+		return false, err
+	}
 	authRetry := false

-	if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
+	if resp != nil && resp.StatusCode == 401 && strings.Contains(resp.Header.Get("Www-Authenticate"), "expired_token") {
 		authRetry = true
 		fs.Debugf(nil, "Should retry: %v", err)
 	}
@@ -548,7 +550,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 	}
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		//fmt.Printf("...Error %v\n", err)
@@ -585,7 +587,7 @@ OUTER:
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return found, errors.Wrap(err, "couldn't list files")
@@ -683,22 +685,80 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
 	return o, leaf, directoryID, nil
 }

+// preUploadCheck checks to see if a file can be uploaded
+//
+// It returns "", nil if the file is good to go
+// It returns "ID", nil if the file must be updated
+func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (ID string, err error) {
+	check := api.PreUploadCheck{
+		Name: f.opt.Enc.FromStandardName(leaf),
+		Parent: api.Parent{
+			ID: directoryID,
+		},
+	}
+	if size >= 0 {
+		check.Size = &size
+	}
+	opts := rest.Opts{
+		Method: "OPTIONS",
+		Path:   "/files/content/",
+	}
+	var result api.PreUploadCheckResponse
+	var resp *http.Response
+	err = f.pacer.Call(func() (bool, error) {
+		resp, err = f.srv.CallJSON(ctx, &opts, &check, &result)
+		return shouldRetry(ctx, resp, err)
+	})
+	if err != nil {
+		if apiErr, ok := err.(*api.Error); ok && apiErr.Code == "item_name_in_use" {
+			var conflict api.PreUploadCheckConflict
+			err = json.Unmarshal(apiErr.ContextInfo, &conflict)
+			if err != nil {
+				return "", errors.Wrap(err, "pre-upload check: JSON decode failed")
+			}
+			if conflict.Conflicts.Type != api.ItemTypeFile {
+				return "", errors.Wrap(err, "pre-upload check: can't overwrite non file with file")
+			}
+			return conflict.Conflicts.ID, nil
+		}
+		return "", errors.Wrap(err, "pre-upload check")
+	}
+	return "", nil
+}
+
 // Put the object
 //
 // Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil)
-	switch err {
-	case nil:
-		return existingObj, existingObj.Update(ctx, in, src, options...)
-	case fs.ErrorObjectNotFound:
-		// Not found so create it
-		return f.PutUnchecked(ctx, in, src)
-	default:
+	// If directory doesn't exist, file doesn't exist so can upload
+	remote := src.Remote()
+	leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
+	if err != nil {
+		if err == fs.ErrorDirNotFound {
+			return f.PutUnchecked(ctx, in, src, options...)
+		}
 		return nil, err
 	}
+
+	// Preflight check the upload, which returns the ID if the
+	// object already exists
+	ID, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
+	if err != nil {
+		return nil, err
+	}
+	if ID == "" {
+		return f.PutUnchecked(ctx, in, src, options...)
+	}
+
+	// If object exists then create a skeleton one with just id
+	o := &Object{
+		fs:     f,
+		remote: remote,
+		id:     ID,
+	}
+	return o, o.Update(ctx, in, src, options...)
 }

 // PutStream uploads to the remote path with the modTime given of indeterminate size
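The preflight check turns Box's OPTIONS /files/content/ endpoint into a three-way decision before any bytes are sent. A condensed, illustrative sketch of the control flow Put now follows (decideUpload is a hypothetical helper summarising the hunk, not rclone code):

	// decideUpload mirrors the three outcomes of the preflight check:
	// hard failure (e.g. the name clashes with a folder), create new,
	// or update an existing file by its ID.
	func decideUpload(id string, err error) string {
		switch {
		case err != nil:
			return "abort: " + err.Error()
		case id == "":
			return "create new file (PutUnchecked)"
		default:
			return "update existing file " + id
		}
	}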
@@ -740,7 +800,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) error {
 	}
 	return f.pacer.Call(func() (bool, error) {
 		resp, err := f.srv.Call(ctx, &opts)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 }

@@ -767,7 +827,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.Call(ctx, &opts)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return errors.Wrap(err, "rmdir failed")
@@ -839,7 +899,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	var info *api.Item
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &copyFile, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, err
@@ -877,7 +937,7 @@ func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, err
@@ -895,7 +955,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &user)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to read user info")
@@ -1008,7 +1068,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &shareLink, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	return info.SharedLink.URL, err
 }
@@ -1026,7 +1086,7 @@ func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
 	}
 	return f.pacer.Call(func() (bool, error) {
 		resp, err := f.srv.Call(ctx, &opts)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 }

@@ -1048,7 +1108,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return errors.Wrap(err, "couldn't list trash")
@@ -1182,7 +1242,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
 	var info *api.Item
 	err := o.fs.pacer.Call(func() (bool, error) {
 		resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	return info, err
 }
@@ -1215,7 +1275,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	}
 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.Call(ctx, &opts)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, err
@@ -1225,7 +1285,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

 // upload does a single non-multipart upload
 //
-// This is recommended for less than 50 MB of content
+// This is recommended for less than 50 MiB of content
 func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time, options ...fs.OpenOption) (err error) {
 	upload := api.UploadFile{
 		Name: o.fs.opt.Enc.FromStandardName(leaf),
@@ -1255,7 +1315,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
 	}
 	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
 		resp, err = o.fs.srv.CallJSON(ctx, &opts, &upload, &result)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return err
@@ -44,7 +44,7 @@ func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID stri
 	var resp *http.Response
 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, &response)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	return
 }
@@ -74,7 +74,7 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
 	err = o.fs.pacer.Call(func() (bool, error) {
 		opts.Body = wrap(bytes.NewReader(chunk))
 		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &response)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, err
@@ -109,10 +109,10 @@ outer:
 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
 		if err != nil {
-			return shouldRetry(resp, err)
+			return shouldRetry(ctx, resp, err)
 		}
 		body, err = rest.ReadBody(resp)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	delay := defaultDelay
 	var why string
@@ -167,7 +167,7 @@ func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error)
 	var resp *http.Response
 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.Call(ctx, &opts)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	return err
 }
backend/cache/cache.go
@@ -98,14 +98,14 @@ changed, any downloaded chunks will be invalid and cache-chunk-path
 will need to be cleared or unexpected EOF errors will occur.`,
 			Default: DefCacheChunkSize,
 			Examples: []fs.OptionExample{{
-				Value: "1m",
-				Help:  "1MB",
+				Value: "1M",
+				Help:  "1 MiB",
 			}, {
 				Value: "5M",
-				Help:  "5 MB",
+				Help:  "5 MiB",
 			}, {
 				Value: "10M",
-				Help:  "10 MB",
+				Help:  "10 MiB",
 			}},
 		}, {
 			Name: "info_age",
@@ -132,13 +132,13 @@ oldest chunks until it goes under this value.`,
 			Default: DefCacheTotalChunkSize,
 			Examples: []fs.OptionExample{{
 				Value: "500M",
-				Help:  "500 MB",
+				Help:  "500 MiB",
 			}, {
 				Value: "1G",
-				Help:  "1 GB",
+				Help:  "1 GiB",
 			}, {
 				Value: "10G",
-				Help:  "10 GB",
+				Help:  "10 GiB",
 			}},
 		}, {
 			Name: "db_path",
@@ -339,8 +339,14 @@ func parseRootPath(path string) (string, error) {
 	return strings.Trim(path, "/"), nil
 }

+var warnDeprecated sync.Once
+
 // NewFs constructs an Fs from the path, container:path
 func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
+	warnDeprecated.Do(func() {
+		fs.Logf(nil, "WARNING: Cache backend is deprecated and may be removed in future. Please use VFS instead.")
+	})
+
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
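The deprecation notice uses sync.Once so the warning prints a single time per process no matter how many cache remotes are constructed. The same pattern in isolation (a sketch, not rclone code; assumes only the standard sync and log packages):

	var warnOnce sync.Once

	// newThing emits a one-time deprecation warning, then carries on.
	func newThing() {
		warnOnce.Do(func() {
			log.Println("WARNING: this backend is deprecated")
		})
		// normal construction continues here
	}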
backend/cache/cache_internal_test.go
@@ -836,7 +836,7 @@ func newRun() *run {
 	if uploadDir == "" {
 		r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
 		if err != nil {
-			log.Fatalf("Failed to create temp dir: %v", err)
+			panic(fmt.Sprintf("Failed to create temp dir: %v", err))
 		}
 	} else {
 		r.tmpUploadDir = uploadDir
@@ -892,7 +892,7 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 		m.Set("type", "cache")
 		m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
 	} else {
-		remoteType := config.FileGet(remote, "type", "")
+		remoteType := config.FileGet(remote, "type")
 		if remoteType == "" {
 			t.Skipf("skipped due to invalid remote type for %v", remote)
 			return nil, nil
@@ -903,14 +903,14 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 			m.Set("password", cryptPassword1)
 			m.Set("password2", cryptPassword2)
 		}
-		remoteRemote := config.FileGet(remote, "remote", "")
+		remoteRemote := config.FileGet(remote, "remote")
 		if remoteRemote == "" {
 			t.Skipf("skipped due to invalid remote wrapper for %v", remote)
 			return nil, nil
 		}
 		remoteRemoteParts := strings.Split(remoteRemote, ":")
 		remoteWrapping := remoteRemoteParts[0]
-		remoteType := config.FileGet(remoteWrapping, "type", "")
+		remoteType := config.FileGet(remoteWrapping, "type")
 		if remoteType != "cache" {
 			t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
 			return nil, nil
@@ -155,7 +155,7 @@ Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
 	}, {
 		Name:     "chunk_size",
 		Advanced: false,
-		Default:  fs.SizeSuffix(2147483648), // 2GB
+		Default:  fs.SizeSuffix(2147483648), // 2 GiB
 		Help:     `Files larger than chunk size will be split in chunks.`,
 	}, {
 		Name: "name_format",
@@ -277,13 +277,10 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 		return nil, errors.New("can't point remote at itself - check the value of the remote setting")
 	}

-	baseName, basePath, err := fspath.Parse(remote)
+	baseName, basePath, err := fspath.SplitFs(remote)
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
 	}
-	if baseName != "" {
-		baseName += ":"
-	}
 	// Look for a file first
 	remotePath := fspath.JoinRootPath(basePath, rpath)
 	baseFs, err := cache.Get(ctx, baseName+remotePath)
@@ -1451,7 +1448,7 @@ func (c *chunkingReader) dummyRead(in io.Reader, size int64) error {
 		c.accountBytes(size)
 		return nil
 	}
-	const bufLen = 1048576 // 1MB
+	const bufLen = 1048576 // 1 MiB
 	buf := make([]byte, bufLen)
 	for size > 0 {
 		n := size
@@ -33,7 +33,7 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
 		fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{
 			ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
 			Path:    fmt.Sprintf("chunker-upload-%dk", kilobytes),
-			Size:    int64(kilobytes) * int64(fs.KibiByte),
+			Size:    int64(kilobytes) * int64(fs.Kibi),
 		})
 	})
 }
@@ -36,7 +36,7 @@ import (
 // Globals
 const (
 	initialChunkSize = 262144  // Initial and max sizes of chunks when reading parts of the file. Currently
-	maxChunkSize     = 8388608 // at 256KB and 8 MB.
+	maxChunkSize     = 8388608 // at 256 KiB and 8 MiB.

 	bufferSize      = 8388608
 	heuristicBytes  = 1048576
@@ -53,7 +53,7 @@ const (
 	Gzip = 2
 )

-var nameRegexp = regexp.MustCompile("^(.+?)\\.([A-Za-z0-9+_]{11})$")
+var nameRegexp = regexp.MustCompile("^(.+?)\\.([A-Za-z0-9-_]{11})$")

 // Register with Fs
 func init() {
@@ -12,12 +12,14 @@ import (
 	"strconv"
 	"strings"
 	"sync"
+	"time"
 	"unicode/utf8"

 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/crypt/pkcs7"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
+	"github.com/rclone/rclone/lib/version"
 	"github.com/rfjakob/eme"
 	"golang.org/x/crypto/nacl/secretbox"
 	"golang.org/x/crypto/scrypt"
@@ -442,11 +444,32 @@ func (c *Cipher) encryptFileName(in string) string {
 		if !c.dirNameEncrypt && i != (len(segments)-1) {
 			continue
 		}
+
+		// Strip version string so that only the non-versioned part
+		// of the file name gets encrypted/obfuscated
+		hasVersion := false
+		var t time.Time
+		if i == (len(segments)-1) && version.Match(segments[i]) {
+			var s string
+			t, s = version.Remove(segments[i])
+			// version.Remove can fail, in which case it returns segments[i]
+			if s != segments[i] {
+				segments[i] = s
+				hasVersion = true
+			}
+		}
+
 		if c.mode == NameEncryptionStandard {
 			segments[i] = c.encryptSegment(segments[i])
 		} else {
 			segments[i] = c.obfuscateSegment(segments[i])
 		}
+
+		// Add back a version to the encrypted/obfuscated
+		// file name, if we stripped it off earlier
+		if hasVersion {
+			segments[i] = version.Add(segments[i], t)
+		}
 	}
 	return strings.Join(segments, "/")
 }
@@ -477,6 +500,21 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
 		if !c.dirNameEncrypt && i != (len(segments)-1) {
 			continue
 		}
+
+		// Strip version string so that only the non-versioned part
+		// of the file name gets decrypted/deobfuscated
+		hasVersion := false
+		var t time.Time
+		if i == (len(segments)-1) && version.Match(segments[i]) {
+			var s string
+			t, s = version.Remove(segments[i])
+			// version.Remove can fail, in which case it returns segments[i]
+			if s != segments[i] {
+				segments[i] = s
+				hasVersion = true
+			}
+		}
+
 		if c.mode == NameEncryptionStandard {
 			segments[i], err = c.decryptSegment(segments[i])
 		} else {
@@ -486,6 +524,12 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
 		if err != nil {
 			return "", err
 		}
+
+		// Add back a version to the decrypted/deobfuscated
+		// file name, if we stripped it off earlier
+		if hasVersion {
+			segments[i] = version.Add(segments[i], t)
+		}
 	}
 	return strings.Join(segments, "/"), nil
 }
@@ -494,10 +538,18 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
 func (c *Cipher) DecryptFileName(in string) (string, error) {
 	if c.mode == NameEncryptionOff {
 		remainingLength := len(in) - len(encryptedSuffix)
-		if remainingLength > 0 && strings.HasSuffix(in, encryptedSuffix) {
-			return in[:remainingLength], nil
+		if remainingLength == 0 || !strings.HasSuffix(in, encryptedSuffix) {
+			return "", ErrorNotAnEncryptedFile
 		}
-		return "", ErrorNotAnEncryptedFile
+		decrypted := in[:remainingLength]
+		if version.Match(decrypted) {
+			_, unversioned := version.Remove(decrypted)
+			if unversioned == "" {
+				return "", ErrorNotAnEncryptedFile
+			}
+		}
+		// Leave the version string on, if it was there
+		return decrypted, nil
 	}
 	return c.decryptFileName(in)
 }
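The effect of the two hunks above is that --b2-versions style suffixes survive a crypt remote: the version is peeled off, the bare name is encrypted, and the version is re-attached in clear text. A sketch of the invariant (encryptVersioned is a hypothetical helper standing in for either cipher mode; only version.Match/Remove/Add from the diff are assumed):

	// encryptVersioned keeps "-vYYYY-MM-DD-HHMMSS-SSS" outside the ciphertext.
	func encryptVersioned(name string, encryptSegment func(string) string) string {
		if !version.Match(name) {
			return encryptSegment(name)
		}
		t, bare := version.Remove(name)
		if bare == name { // Remove can fail; fall back to plain encryption
			return encryptSegment(name)
		}
		return version.Add(encryptSegment(bare), t)
	}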
@@ -160,22 +160,29 @@ func TestEncryptFileName(t *testing.T) {
 	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
 	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
 	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
+	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
+	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
 	// Standard mode with directory name encryption off
 	c, _ = newCipher(NameEncryptionStandard, "", "", false)
 	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
 	assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
 	assert.Equal(t, "1/12/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
+	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
+	assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
 	// Now off mode
 	c, _ = newCipher(NameEncryptionOff, "", "", true)
 	assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
 	// Obfuscation mode
 	c, _ = newCipher(NameEncryptionObfuscated, "", "", true)
 	assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
+	assert.Equal(t, "49.6/99.23/150.890/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
+	assert.Equal(t, "49.6/99.23/150.890/162.uryyB-v2001-02-03-040506-123.GKG", c.EncryptFileName("1/12/123/hello-v2001-02-03-040506-123.txt"))
 	assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
 	assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
 	// Obfuscation mode with directory name encryption off
 	c, _ = newCipher(NameEncryptionObfuscated, "", "", false)
 	assert.Equal(t, "1/12/123/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
+	assert.Equal(t, "1/12/123/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
 	assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
 	assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
 }
@@ -194,14 +201,19 @@ func TestDecryptFileName(t *testing.T) {
 		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
 		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
 		{NameEncryptionStandard, false, "1/12/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
+		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", "1-v2001-02-03-040506-123", nil},
 		{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
 		{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
 		{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
+		{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
+		{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
+		{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
 		{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
 		{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
 		{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
 		{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
 		{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
+		{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
 	} {
 		c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt)
 		actual, actualErr := c.DecryptFileName(test.in)
@@ -404,13 +404,16 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to read destination hash")
 	}
-	if srcHash != "" && dstHash != "" && srcHash != dstHash {
-		// remove object
-		err = o.Remove(ctx)
-		if err != nil {
-			fs.Errorf(o, "Failed to remove corrupted object: %v", err)
+	if srcHash != "" && dstHash != "" {
+		if srcHash != dstHash {
+			// remove object
+			err = o.Remove(ctx)
+			if err != nil {
+				fs.Errorf(o, "Failed to remove corrupted object: %v", err)
+			}
+			return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
 		}
-		return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
+		fs.Debugf(src, "%v = %s OK", ht, srcHash)
 	}
 }
@@ -14,7 +14,6 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
-	"log"
 	"mime"
 	"net/http"
 	"path"
@@ -68,8 +67,8 @@ const (
 	defaultScope = "drive"
 	// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
 	// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
-	minChunkSize     = 256 * fs.KibiByte
-	defaultChunkSize = 8 * fs.MebiByte
+	minChunkSize     = 256 * fs.Kibi
+	defaultChunkSize = 8 * fs.Mebi
 	partialFields    = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks"
 	listRGrouping    = 50   // number of IDs to search at once when using ListR
 	listRInputBuffer = 1000 // size of input buffer when using ListR
@@ -183,32 +182,64 @@ func init() {
 		Description: "Google Drive",
 		NewFs:       NewFs,
 		CommandHelp: commandHelp,
-		Config: func(ctx context.Context, name string, m configmap.Mapper) {
+		Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
 			// Parse config into Options struct
 			opt := new(Options)
 			err := configstruct.Set(m, opt)
 			if err != nil {
-				fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
-				return
+				return nil, errors.Wrap(err, "couldn't parse config into struct")
 			}

-			// Fill in the scopes
-			driveConfig.Scopes = driveScopes(opt.Scope)
-			// Set the root_folder_id if using drive.appfolder
-			if driveScopesContainsAppFolder(driveConfig.Scopes) {
-				m.Set("root_folder_id", "appDataFolder")
-			}
+			switch config.State {
+			case "":
+				// Fill in the scopes
+				driveConfig.Scopes = driveScopes(opt.Scope)

-			if opt.ServiceAccountFile == "" {
-				err = oauthutil.Config(ctx, "drive", name, m, driveConfig, nil)
-				if err != nil {
-					log.Fatalf("Failed to configure token: %v", err)
+				// Set the root_folder_id if using drive.appfolder
+				if driveScopesContainsAppFolder(driveConfig.Scopes) {
+					m.Set("root_folder_id", "appDataFolder")
 				}
+
+				if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" {
+					return oauthutil.ConfigOut("teamdrive", &oauthutil.Options{
+						OAuth2Config: driveConfig,
+					})
+				}
+				return fs.ConfigGoto("teamdrive")
+			case "teamdrive":
+				if opt.TeamDriveID == "" {
+					return fs.ConfigConfirm("teamdrive_ok", false, "config_change_team_drive", "Configure this as a Shared Drive (Team Drive)?\n")
+				}
+				return fs.ConfigConfirm("teamdrive_ok", false, "config_change_team_drive", fmt.Sprintf("Change current Shared Drive (Team Drive) ID %q?\n", opt.TeamDriveID))
+			case "teamdrive_ok":
+				if config.Result == "false" {
+					m.Set("team_drive", "")
+					return nil, nil
+				}
+				f, err := newFs(ctx, name, "", m)
+				if err != nil {
+					return nil, errors.Wrap(err, "failed to make Fs to list Shared Drives")
+				}
+				teamDrives, err := f.listTeamDrives(ctx)
+				if err != nil {
+					return nil, err
+				}
+				if len(teamDrives) == 0 {
+					return fs.ConfigError("", "No Shared Drives found in your account")
+				}
+				return fs.ConfigChoose("teamdrive_final", "config_team_drive", "Shared Drive", len(teamDrives), func(i int) (string, string) {
+					teamDrive := teamDrives[i]
+					return teamDrive.Id, teamDrive.Name
+				})
+			case "teamdrive_final":
+				driveID := config.Result
+				m.Set("team_drive", driveID)
+				m.Set("root_folder_id", "")
+				opt.TeamDriveID = driveID
+				opt.RootFolderID = ""
+				return nil, nil
 			}
-			err = configTeamDrive(ctx, opt, m, name)
-			if err != nil {
-				log.Fatalf("Failed to configure Shared Drive: %v", err)
-			}
+			return nil, fmt.Errorf("unknown state %q", config.State)
 		},
 		Options: append(driveOAuthOptions(), []fs.Option{{
 			Name: "scope",
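The rewritten Config callback is a resumable state machine: each return either ends the flow (nil, nil), asks the UI a question tagged with the state to resume in, or jumps straight to another state. A skeletal sketch of the contract, using only the fs helpers that appear in the hunk (the states and prompt below are invented for illustration):

	func configExample(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
		switch config.State {
		case "":
			// First call: jump to a named state unconditionally
			return fs.ConfigGoto("ask")
		case "ask":
			// Ask a yes/no question; the answer comes back in config.Result
			return fs.ConfigConfirm("ask_done", false, "config_example", "Enable the feature?\n")
		case "ask_done":
			if config.Result == "true" {
				m.Set("feature", "on")
			}
			return nil, nil // done
		}
		return nil, fmt.Errorf("unknown state %q", config.State)
	}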
@@ -467,7 +498,7 @@ See: https://github.com/rclone/rclone/issues/3631
 			Default: false,
 			Help: `Make upload limit errors be fatal

-At the time of writing it is only possible to upload 750GB of data to
+At the time of writing it is only possible to upload 750 GiB of data to
 Google Drive a day (this is an undocumented limit). When this limit is
 reached Google Drive produces a slightly different error message. When
 this flag is set it causes these errors to be fatal. These will stop
@@ -484,7 +515,7 @@ See: https://github.com/rclone/rclone/issues/3857
 			Default: false,
 			Help: `Make download limit errors be fatal

-At the time of writing it is only possible to download 10TB of data from
+At the time of writing it is only possible to download 10 TiB of data from
 Google Drive a day (this is an undocumented limit). When this limit is
 reached Google Drive produces a slightly different error message. When
 this flag is set it causes these errors to be fatal. These will stop
@@ -522,7 +553,7 @@ If this flag is set then rclone will ignore shortcut files completely.
 	} {
 		for mimeType, extension := range m {
 			if err := mime.AddExtensionType(extension, mimeType); err != nil {
-				log.Fatalf("Failed to register MIME type %q: %v", mimeType, err)
+				fs.Errorf("Failed to register MIME type %q: %v", mimeType, err)
 			}
 		}
 	}
@@ -590,13 +621,13 @@ type Fs struct {
 }

 type baseObject struct {
-	fs           *Fs    // what this object is part of
-	remote       string // The remote path
-	id           string // Drive Id of this object
-	modifiedDate string // RFC3339 time it was last modified
-	mimeType     string // The object MIME type
-	bytes        int64  // size of the object
-	parents      int    // number of parents
+	fs           *Fs      // what this object is part of
+	remote       string   // The remote path
+	id           string   // Drive Id of this object
+	modifiedDate string   // RFC3339 time it was last modified
+	mimeType     string   // The object MIME type
+	bytes        int64    // size of the object
+	parents      []string // IDs of the parent directories
 }
 type documentObject struct {
 	baseObject
@@ -641,7 +672,10 @@ func (f *Fs) Features() *fs.Features {
 }

 // shouldRetry determines whether a given err rates being retried
-func (f *Fs) shouldRetry(err error) (bool, error) {
+func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
+	if fserrors.ContextError(ctx, &err) {
+		return false, err
+	}
 	if err == nil {
 		return false, nil
 	}
@@ -695,20 +729,20 @@ func containsString(slice []string, s string) bool {
 }

 // getFile returns drive.File for the ID passed and fields passed in
-func (f *Fs) getFile(ID string, fields googleapi.Field) (info *drive.File, err error) {
+func (f *Fs) getFile(ctx context.Context, ID string, fields googleapi.Field) (info *drive.File, err error) {
 	err = f.pacer.Call(func() (bool, error) {
 		info, err = f.svc.Files.Get(ID).
 			Fields(fields).
 			SupportsAllDrives(true).
-			Do()
-		return f.shouldRetry(err)
+			Context(ctx).Do()
+		return f.shouldRetry(ctx, err)
 	})
 	return info, err
 }

 // getRootID returns the canonical ID for the "root" ID
-func (f *Fs) getRootID() (string, error) {
-	info, err := f.getFile("root", "id")
+func (f *Fs) getRootID(ctx context.Context) (string, error) {
+	info, err := f.getFile(ctx, "root", "id")
 	if err != nil {
 		return "", errors.Wrap(err, "couldn't find root directory ID")
 	}
@@ -814,7 +848,7 @@ OUTER:
 	var files *drive.FileList
 	err = f.pacer.Call(func() (bool, error) {
 		files, err = list.Fields(googleapi.Field(fields)).Context(ctx).Do()
-		return f.shouldRetry(err)
+		return f.shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return false, errors.Wrap(err, "couldn't list directory")
@@ -837,7 +871,7 @@ OUTER:
 		if filesOnly && item.ShortcutDetails.TargetMimeType == driveFolderType {
 			continue
 		}
-		item, err = f.resolveShortcut(item)
+		item, err = f.resolveShortcut(ctx, item)
 		if err != nil {
 			return false, errors.Wrap(err, "list")
 		}
@@ -855,7 +889,7 @@ OUTER:
 		if !found {
 			continue
 		}
-		_, exportName, _, _ := f.findExportFormat(item)
+		_, exportName, _, _ := f.findExportFormat(ctx, item)
 		if exportName == "" || exportName != title {
 			continue
 		}
@@ -946,48 +980,6 @@ func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, er
 	return
 }

-// Figure out if the user wants to use a team drive
-func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name string) error {
-	ci := fs.GetConfig(ctx)
-
-	// Stop if we are running non-interactive config
-	if ci.AutoConfirm {
-		return nil
-	}
-	if opt.TeamDriveID == "" {
-		fmt.Printf("Configure this as a Shared Drive (Team Drive)?\n")
-	} else {
-		fmt.Printf("Change current Shared Drive (Team Drive) ID %q?\n", opt.TeamDriveID)
-	}
-	if !config.Confirm(false) {
-		return nil
-	}
-	f, err := newFs(ctx, name, "", m)
-	if err != nil {
-		return errors.Wrap(err, "failed to make Fs to list Shared Drives")
-	}
-	fmt.Printf("Fetching Shared Drive list...\n")
-	teamDrives, err := f.listTeamDrives(ctx)
-	if err != nil {
-		return err
-	}
-	if len(teamDrives) == 0 {
-		fmt.Printf("No Shared Drives found in your account")
-		return nil
-	}
-	var driveIDs, driveNames []string
-	for _, teamDrive := range teamDrives {
-		driveIDs = append(driveIDs, teamDrive.Id)
-		driveNames = append(driveNames, teamDrive.Name)
-	}
-	driveID := config.Choose("Enter a Shared Drive ID", driveIDs, driveNames, true)
-	m.Set("team_drive", driveID)
-	m.Set("root_folder_id", "")
-	opt.TeamDriveID = driveID
-	opt.RootFolderID = ""
-	return nil
-}
-
 // getClient makes an http client according to the options
 func getClient(ctx context.Context, opt *Options) *http.Client {
 	t := fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
@@ -1155,7 +1147,7 @@ func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, e
 		f.rootFolderID = f.opt.TeamDriveID
 	} else {
 		// otherwise look up the actual root ID
-		rootID, err := f.getRootID()
+		rootID, err := f.getRootID(ctx)
 		if err != nil {
 			if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
 				// 404 means that this scope does not have permission to get the
@@ -1166,7 +1158,7 @@ func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, e
 			}
 		}
 		f.rootFolderID = rootID
-		fs.Debugf(f, "root_folder_id = %q - save this in the config to speed up startup", rootID)
+		fs.Debugf(f, "'root_folder_id = %s' - save this in the config to speed up startup", rootID)
 	}

 	f.dirCache = dircache.New(f.root, f.rootFolderID, f)
@@ -1236,7 +1228,7 @@ func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
 		modifiedDate: modifiedDate,
 		mimeType:     info.MimeType,
 		bytes:        size,
-		parents:      len(info.Parents),
+		parents:      info.Parents,
 	}
 }

@@ -1328,26 +1320,26 @@ func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMim
 // newObjectWithInfo creates an fs.Object for any drive.File
 //
 // When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
-func (f *Fs) newObjectWithInfo(remote string, info *drive.File) (fs.Object, error) {
+func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.File) (fs.Object, error) {
 	// If item has MD5 sum or a length it is a file stored on drive
 	if info.Md5Checksum != "" || info.Size > 0 {
 		return f.newRegularObject(remote, info), nil
 	}

-	extension, exportName, exportMimeType, isDocument := f.findExportFormat(info)
-	return f.newObjectWithExportInfo(remote, info, extension, exportName, exportMimeType, isDocument)
+	extension, exportName, exportMimeType, isDocument := f.findExportFormat(ctx, info)
+	return f.newObjectWithExportInfo(ctx, remote, info, extension, exportName, exportMimeType, isDocument)
 }

 // newObjectWithExportInfo creates an fs.Object for any drive.File and the result of findExportFormat
 //
 // When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
 func (f *Fs) newObjectWithExportInfo(
-	remote string, info *drive.File,
+	ctx context.Context, remote string, info *drive.File,
 	extension, exportName, exportMimeType string, isDocument bool) (o fs.Object, err error) {
 	// Note that resolveShortcut will have been called already if
 	// we are being called from a listing. However the drive.Item
 	// will have been resolved so this will do nothing.
-	info, err = f.resolveShortcut(info)
+	info, err = f.resolveShortcut(ctx, info)
 	if err != nil {
 		return nil, errors.Wrap(err, "new object")
 	}
@@ -1395,7 +1387,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 	}

 	remote = remote[:len(remote)-len(extension)]
-	obj, err := f.newObjectWithExportInfo(remote, info, extension, exportName, exportMimeType, isDocument)
+	obj, err := f.newObjectWithExportInfo(ctx, remote, info, extension, exportName, exportMimeType, isDocument)
 	switch {
 	case err != nil:
 		return nil, err
@@ -1412,7 +1404,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
 	pathID = actualID(pathID)
 	found, err = f.list(ctx, []string{pathID}, leaf, true, false, f.opt.TrashedOnly, false, func(item *drive.File) bool {
 		if !f.opt.SkipGdocs {
-			_, exportName, _, isDocument := f.findExportFormat(item)
+			_, exportName, _, isDocument := f.findExportFormat(ctx, item)
 			if exportName == leaf {
 				pathIDOut = item.Id
 				return true
@@ -1447,8 +1439,8 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 		info, err = f.svc.Files.Create(createInfo).
 			Fields("id").
 			SupportsAllDrives(true).
-			Do()
-		return f.shouldRetry(err)
+			Context(ctx).Do()
+		return f.shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return "", err
@@ -1483,15 +1475,15 @@ func linkTemplate(mt string) *template.Template {
 	})
 	return _linkTemplates[mt]
 }
-func (f *Fs) fetchFormats() {
+func (f *Fs) fetchFormats(ctx context.Context) {
 	fetchFormatsOnce.Do(func() {
 		var about *drive.About
 		var err error
 		err = f.pacer.Call(func() (bool, error) {
 			about, err = f.svc.About.Get().
 				Fields("exportFormats,importFormats").
-				Do()
-			return f.shouldRetry(err)
+				Context(ctx).Do()
+			return f.shouldRetry(ctx, err)
 		})
 		if err != nil {
 			fs.Errorf(f, "Failed to get Drive exportFormats and importFormats: %v", err)
@@ -1508,8 +1500,8 @@ func (f *Fs) fetchFormats() {
 // if necessary.
 //
 // if the fetch fails then it will not export any drive formats
-func (f *Fs) exportFormats() map[string][]string {
-	f.fetchFormats()
+func (f *Fs) exportFormats(ctx context.Context) map[string][]string {
+	f.fetchFormats(ctx)
 	return _exportFormats
 }

@@ -1517,8 +1509,8 @@ func (f *Fs) exportFormats() map[string][]string {
 // if necessary.
 //
 // if the fetch fails then it will not import any drive formats
-func (f *Fs) importFormats() map[string][]string {
-	f.fetchFormats()
+func (f *Fs) importFormats(ctx context.Context) map[string][]string {
+	f.fetchFormats(ctx)
 	return _importFormats
 }

@@ -1527,9 +1519,9 @@ func (f *Fs) importFormats() map[string][]string {
 //
 // Look through the exportExtensions and find the first format that can be
 // converted. If none found then return ("", "", false)
-func (f *Fs) findExportFormatByMimeType(itemMimeType string) (
+func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string) (
 	extension, mimeType string, isDocument bool) {
-	exportMimeTypes, isDocument := f.exportFormats()[itemMimeType]
+	exportMimeTypes, isDocument := f.exportFormats(ctx)[itemMimeType]
 	if isDocument {
 		for _, _extension := range f.exportExtensions {
 			_mimeType := mime.TypeByExtension(_extension)
@@ -1556,8 +1548,8 @@ func (f *Fs) findExportFormatByMimeType(itemMimeType string) (
 //
 // Look through the exportExtensions and find the first format that can be
 // converted. If none found then return ("", "", "", false)
-func (f *Fs) findExportFormat(item *drive.File) (extension, filename, mimeType string, isDocument bool) {
-	extension, mimeType, isDocument = f.findExportFormatByMimeType(item.MimeType)
+func (f *Fs) findExportFormat(ctx context.Context, item *drive.File) (extension, filename, mimeType string, isDocument bool) {
+	extension, mimeType, isDocument = f.findExportFormatByMimeType(ctx, item.MimeType)
 	if extension != "" {
 		filename = item.Name + extension
 	}
@@ -1569,9 +1561,9 @@ func (f *Fs) findExportFormat(item *drive.File) (extension, filename, mimeType s
 // MIME type is returned
 //
 // When no match is found "" is returned.
-func (f *Fs) findImportFormat(mimeType string) string {
+func (f *Fs) findImportFormat(ctx context.Context, mimeType string) string {
 	mimeType = fixMimeType(mimeType)
-	ifs := f.importFormats()
+	ifs := f.importFormats(ctx)
 	for _, mt := range f.importMimeTypes {
 		if mt == mimeType {
 			importMimeTypes := ifs[mimeType]
@@ -1604,7 +1596,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e

 	var iErr error
 	_, err = f.list(ctx, []string{directoryID}, "", false, false, f.opt.TrashedOnly, false, func(item *drive.File) bool {
-		entry, err := f.itemToDirEntry(path.Join(dir, item.Name), item)
+		entry, err := f.itemToDirEntry(ctx, path.Join(dir, item.Name), item)
 		if err != nil {
 			iErr = err
 			return true
@@ -1717,7 +1709,7 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
 			}
 		}
 		remote := path.Join(paths[i], item.Name)
-		entry, err := f.itemToDirEntry(remote, item)
+		entry, err := f.itemToDirEntry(ctx, remote, item)
 		if err != nil {
 			iErr = err
 			return true
@@ -1982,7 +1974,7 @@ func isShortcut(item *drive.File) bool {
 // Note that we assume shortcuts can't point to shortcuts. Google
 // drive web interface doesn't offer the option to create a shortcut
 // to a shortcut. The documentation is silent on the issue.
-func (f *Fs) resolveShortcut(item *drive.File) (newItem *drive.File, err error) {
+func (f *Fs) resolveShortcut(ctx context.Context, item *drive.File) (newItem *drive.File, err error) {
 	if f.opt.SkipShortcuts || item.MimeType != shortcutMimeType {
 		return item, nil
 	}
@@ -1990,7 +1982,7 @@ func (f *Fs) resolveShortcut(item *drive.File) (newItem *drive.File, err error)
 		fs.Errorf(nil, "Expecting shortcutDetails in %v", item)
 		return item, nil
 	}
-	newItem, err = f.getFile(item.ShortcutDetails.TargetId, f.fileFields)
+	newItem, err = f.getFile(ctx, item.ShortcutDetails.TargetId, f.fileFields)
 	if err != nil {
 		if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
 			// 404 means dangling shortcut, so just return the shortcut with the mime type mangled
@@ -2012,18 +2004,21 @@ func (f *Fs) resolveShortcut(item *drive.File) (newItem *drive.File, err error)
 // itemToDirEntry converts a drive.File to an fs.DirEntry.
 // When the drive.File cannot be represented as an fs.DirEntry
 // (nil, nil) is returned.
-func (f *Fs) itemToDirEntry(remote string, item *drive.File) (entry fs.DirEntry, err error) {
+func (f *Fs) itemToDirEntry(ctx context.Context, remote string, item *drive.File) (entry fs.DirEntry, err error) {
 	switch {
 	case item.MimeType == driveFolderType:
 		// cache the directory ID for later lookups
 		f.dirCache.Put(remote, item.Id)
 		when, _ := time.Parse(timeFormatIn, item.ModifiedTime)
 		d := fs.NewDir(remote, when).SetID(item.Id)
+		if len(item.Parents) > 0 {
+			d.SetParentID(item.Parents[0])
+		}
 		return d, nil
 	case f.opt.AuthOwnerOnly && !isAuthOwned(item):
 		// ignore object
 	default:
-		entry, err = f.newObjectWithInfo(remote, item)
+		entry, err = f.newObjectWithInfo(ctx, remote, item)
 		if err == fs.ErrorObjectNotFound {
 			return nil, nil
 		}
@@ -2090,12 +2085,12 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
 	importMimeType := ""

 	if f.importMimeTypes != nil && !f.opt.SkipGdocs {
-		importMimeType = f.findImportFormat(srcMimeType)
+		importMimeType = f.findImportFormat(ctx, srcMimeType)

 		if isInternalMimeType(importMimeType) {
 			remote = remote[:len(remote)-len(srcExt)]

-			exportExt, _, _ = f.findExportFormatByMimeType(importMimeType)
+			exportExt, _, _ = f.findExportFormatByMimeType(ctx, importMimeType)
 			if exportExt == "" {
 				return nil, errors.Errorf("No export format found for %q", importMimeType)
 			}
@@ -2125,8 +2120,8 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
 			Fields(partialFields).
 			SupportsAllDrives(true).
 			KeepRevisionForever(f.opt.KeepRevisionForever).
-			Do()
-		return f.shouldRetry(err)
+			Context(ctx).Do()
+		return f.shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return nil, err
@@ -2138,7 +2133,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
 			return nil, err
 		}
 	}
-	return f.newObjectWithInfo(remote, info)
+	return f.newObjectWithInfo(ctx, remote, info)
 }

 // MergeDirs merges the contents of all the directories passed
@@ -2180,8 +2175,8 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
 				AddParents(dstDir.ID()).
 				Fields("").
 				SupportsAllDrives(true).
-				Do()
-			return f.shouldRetry(err)
+				Context(ctx).Do()
+			return f.shouldRetry(ctx, err)
 		})
 		if err != nil {
 			return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.Name, srcDir)
@@ -2214,14 +2209,14 @@ func (f *Fs) delete(ctx context.Context, id string, useTrash bool) error {
 			_, err = f.svc.Files.Update(id, &info).
 				Fields("").
 				SupportsAllDrives(true).
-				Do()
+				Context(ctx).Do()
 		} else {
 			err = f.svc.Files.Delete(id).
 				Fields("").
 				SupportsAllDrives(true).
-				Do()
+				Context(ctx).Do()
 		}
-		return f.shouldRetry(err)
+		return f.shouldRetry(ctx, err)
 	})
 }

@@ -2334,11 +2329,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

 	if isDoc {
 		// preserve the description on copy for docs
-		info, err := f.getFile(actualID(srcObj.id), "description")
+		info, err := f.getFile(ctx, actualID(srcObj.id), "description")
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to read description for Google Doc")
+			fs.Errorf(srcObj, "Failed to read description for Google Doc: %v", err)
+		} else {
+			createInfo.Description = info.Description
 		}
-		createInfo.Description = info.Description
 	} else {
 		// don't overwrite the description on copy for files
 		// this should work for docs but it doesn't - it is probably a bug in Google Drive
@@ -2354,13 +2350,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
Fields(partialFields).
|
||||
SupportsAllDrives(true).
|
||||
KeepRevisionForever(f.opt.KeepRevisionForever).
|
||||
Do()
|
||||
return f.shouldRetry(err)
|
||||
Context(ctx).Do()
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
newObject, err := f.newObjectWithInfo(remote, info)
|
||||
newObject, err := f.newObjectWithInfo(ctx, remote, info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -2454,7 +2450,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
err := f.svc.Files.EmptyTrash().Context(ctx).Do()
|
||||
return f.shouldRetry(err)
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
|
||||
if err != nil {
@@ -2472,7 +2468,7 @@ func (f *Fs) teamDriveOK(ctx context.Context) (err error) {
var td *drive.Drive
err = f.pacer.Call(func() (bool, error) {
td, err = f.svc.Drives.Get(f.opt.TeamDriveID).Fields("name,id,capabilities,createdTime,restrictions").Context(ctx).Do()
return f.shouldRetry(err)
return f.shouldRetry(ctx, err)
})
if err != nil {
return errors.Wrap(err, "failed to get Shared Drive info")
@@ -2495,7 +2491,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var err error
err = f.pacer.Call(func() (bool, error) {
about, err = f.svc.About.Get().Fields("storageQuota").Context(ctx).Do()
return f.shouldRetry(err)
return f.shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get Drive storageQuota")
@@ -2567,14 +2563,14 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
AddParents(dstParents).
Fields(partialFields).
SupportsAllDrives(true).
Do()
return f.shouldRetry(err)
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}

return f.newObjectWithInfo(remote, info)
return f.newObjectWithInfo(ctx, remote, info)
}

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
@@ -2604,8 +2600,8 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
_, err = f.svc.Permissions.Create(id, permission).
Fields("").
SupportsAllDrives(true).
Do()
return f.shouldRetry(err)
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return "", err
@@ -2647,8 +2643,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
AddParents(dstDirectoryID).
Fields("").
SupportsAllDrives(true).
Do()
return f.shouldRetry(err)
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return err
@@ -2666,7 +2662,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
go func() {
// get the StartPageToken early so all changes from now on get processed
startPageToken, err := f.changeNotifyStartPageToken()
startPageToken, err := f.changeNotifyStartPageToken(ctx)
if err != nil {
fs.Infof(f, "Failed to get StartPageToken: %s", err)
}
@@ -2691,7 +2687,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
}
case <-tickerC:
if startPageToken == "" {
startPageToken, err = f.changeNotifyStartPageToken()
startPageToken, err = f.changeNotifyStartPageToken(ctx)
if err != nil {
fs.Infof(f, "Failed to get StartPageToken: %s", err)
continue
@@ -2706,15 +2702,15 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
}
}()
}
func (f *Fs) changeNotifyStartPageToken() (pageToken string, err error) {
func (f *Fs) changeNotifyStartPageToken(ctx context.Context) (pageToken string, err error) {
var startPageToken *drive.StartPageToken
err = f.pacer.Call(func() (bool, error) {
changes := f.svc.Changes.GetStartPageToken().SupportsAllDrives(true)
if f.isTeamDrive {
changes.DriveId(f.opt.TeamDriveID)
}
startPageToken, err = changes.Do()
return f.shouldRetry(err)
startPageToken, err = changes.Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return
@@ -2743,7 +2739,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
changesCall.Spaces("appDataFolder")
}
changeList, err = changesCall.Context(ctx).Do()
return f.shouldRetry(err)
return f.shouldRetry(ctx, err)
})
if err != nil {
return
@@ -2939,8 +2935,8 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(dstFs.opt.KeepRevisionForever).
Do()
return dstFs.shouldRetry(err)
Context(ctx).Do()
return dstFs.shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "shortcut creation failed")
@@ -2948,24 +2944,24 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
if isDir {
return nil, nil
}
return dstFs.newObjectWithInfo(dstPath, info)
return dstFs.newObjectWithInfo(ctx, dstPath, info)
}

// List all team drives
func (f *Fs) listTeamDrives(ctx context.Context) (drives []*drive.TeamDrive, err error) {
drives = []*drive.TeamDrive{}
listTeamDrives := f.svc.Teamdrives.List().PageSize(100)
func (f *Fs) listTeamDrives(ctx context.Context) (drives []*drive.Drive, err error) {
drives = []*drive.Drive{}
listTeamDrives := f.svc.Drives.List().PageSize(100)
var defaultFs Fs // default Fs with default Options
for {
var teamDrives *drive.TeamDriveList
var teamDrives *drive.DriveList
err = f.pacer.Call(func() (bool, error) {
teamDrives, err = listTeamDrives.Context(ctx).Do()
return defaultFs.shouldRetry(err)
return defaultFs.shouldRetry(ctx, err)
})
if err != nil {
return drives, errors.Wrap(err, "listing Team Drives failed")
}
drives = append(drives, teamDrives.TeamDrives...)
drives = append(drives, teamDrives.Drives...)
if teamDrives.NextPageToken == "" {
break
}
@@ -3002,8 +2998,8 @@ func (f *Fs) unTrash(ctx context.Context, dir string, directoryID string, recurs
_, err := f.svc.Files.Update(item.Id, &update).
SupportsAllDrives(true).
Fields("trashed").
Do()
return f.shouldRetry(err)
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
err = errors.Wrap(err, "failed to restore")
@@ -3045,7 +3041,7 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras

// copy file with id to dest
func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
info, err := f.getFile(id, f.fileFields)
info, err := f.getFile(ctx, id, f.fileFields)
if err != nil {
return errors.Wrap(err, "couldn't find id")
}
@@ -3053,7 +3049,7 @@ func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
return errors.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
}
info.Name = f.opt.Enc.ToStandardName(info.Name)
o, err := f.newObjectWithInfo(info.Name, info)
o, err := f.newObjectWithInfo(ctx, info.Name, info)
if err != nil {
return err
}
@@ -3062,7 +3058,7 @@ func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
return err
}
if destLeaf == "" {
destLeaf = info.Name
destLeaf = path.Base(o.Remote())
}
if destDir == "" {
destDir = "."
@@ -3354,7 +3350,7 @@ func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (

found, err := f.list(ctx, []string{directoryID}, leaf, false, false, f.opt.TrashedOnly, false, func(item *drive.File) bool {
if !f.opt.SkipGdocs {
extension, exportName, exportMimeType, isDocument = f.findExportFormat(item)
extension, exportName, exportMimeType, isDocument = f.findExportFormat(ctx, item)
if exportName == leaf {
info = item
return true
@@ -3405,8 +3401,8 @@ func (o *baseObject) SetModTime(ctx context.Context, modTime time.Time) error {
info, err = o.fs.svc.Files.Update(actualID(o.id), updateInfo).
Fields(partialFields).
SupportsAllDrives(true).
Do()
return o.fs.shouldRetry(err)
Context(ctx).Do()
return o.fs.shouldRetry(ctx, err)
})
if err != nil {
return err
@@ -3444,7 +3440,7 @@ func (o *baseObject) httpResponse(ctx context.Context, url, method string, optio
_ = res.Body.Close() // ignore error
}
}
return o.fs.shouldRetry(err)
return o.fs.shouldRetry(ctx, err)
})
if err != nil {
return req, nil, err
@@ -3536,8 +3532,8 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
v2File, err = o.fs.v2Svc.Files.Get(actualID(o.id)).
Fields("downloadUrl").
SupportsAllDrives(true).
Do()
return o.fs.shouldRetry(err)
Context(ctx).Do()
return o.fs.shouldRetry(ctx, err)
})
if err == nil {
fs.Debugf(o, "Using v2 download: %v", v2File.DownloadUrl)
@@ -3617,8 +3613,8 @@ func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadM
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(o.fs.opt.KeepRevisionForever).
Do()
return o.fs.shouldRetry(err)
Context(ctx).Do()
return o.fs.shouldRetry(ctx, err)
})
return
}
@@ -3661,7 +3657,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return err
}
newO, err := o.fs.newObjectWithInfo(src.Remote(), info)
newO, err := o.fs.newObjectWithInfo(ctx, src.Remote(), info)
if err != nil {
return err
}
@@ -3685,7 +3681,7 @@ func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.Object
if o.fs.importMimeTypes == nil || o.fs.opt.SkipGdocs {
return errors.Errorf("can't update google document type without --drive-import-formats")
}
importMimeType = o.fs.findImportFormat(updateInfo.MimeType)
importMimeType = o.fs.findImportFormat(ctx, updateInfo.MimeType)
if importMimeType == "" {
return errors.Errorf("no import format found for %q", srcMimeType)
}
@@ -3702,7 +3698,7 @@ func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.Object
remote := src.Remote()
remote = remote[:len(remote)-o.extLen]

newO, err := o.fs.newObjectWithInfo(remote, info)
newO, err := o.fs.newObjectWithInfo(ctx, remote, info)
if err != nil {
return err
}
@@ -3722,7 +3718,7 @@ func (o *linkObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo

// Remove an object
func (o *baseObject) Remove(ctx context.Context) error {
if o.parents > 1 {
if len(o.parents) > 1 {
return errors.New("can't delete safely - has multiple parents")
}
return o.fs.delete(ctx, shortcutID(o.id), o.fs.opt.UseTrash)
@@ -3738,6 +3734,14 @@ func (o *baseObject) ID() string {
return o.id
}

// ParentID returns the ID of the Object parent if known, or "" if not
func (o *baseObject) ParentID() string {
if len(o.parents) > 0 {
return o.parents[0]
}
return ""
}

func (o *documentObject) ext() string {
return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
}
@@ -3798,10 +3802,13 @@ var (
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
_ fs.ParentIDer = (*Object)(nil)
_ fs.Object = (*documentObject)(nil)
_ fs.MimeTyper = (*documentObject)(nil)
_ fs.IDer = (*documentObject)(nil)
_ fs.ParentIDer = (*documentObject)(nil)
_ fs.Object = (*linkObject)(nil)
_ fs.MimeTyper = (*linkObject)(nil)
_ fs.IDer = (*linkObject)(nil)
_ fs.ParentIDer = (*linkObject)(nil)
)

@@ -111,6 +111,7 @@ func TestInternalParseExtensions(t *testing.T) {
}

func TestInternalFindExportFormat(t *testing.T) {
ctx := context.Background()
item := &drive.File{
Name: "file",
MimeType: "application/vnd.google-apps.document",
@@ -128,7 +129,7 @@ func TestInternalFindExportFormat(t *testing.T) {
} {
f := new(Fs)
f.exportExtensions = test.extensions
gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(item)
gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(ctx, item)
assert.Equal(t, test.wantExtension, gotExtension)
if test.wantExtension != "" {
assert.Equal(t, item.Name+gotExtension, gotFilename)

@@ -94,7 +94,7 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
defer googleapi.CloseBody(res)
err = googleapi.CheckResponse(res)
}
return f.shouldRetry(err)
return f.shouldRetry(ctx, err)
})
if err != nil {
return nil, err
@@ -202,7 +202,7 @@ func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
err = rx.f.pacer.Call(func() (bool, error) {
fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
StatusCode, err = rx.transferChunk(ctx, start, chunk, reqSize)
again, err := rx.f.shouldRetry(err)
again, err := rx.f.shouldRetry(ctx, err)
if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
again = false
err = nil

backend/dropbox/batcher.go (new file, 350 lines)
@@ -0,0 +1,350 @@
// This file contains the implementation of the sync batcher for uploads
//
// Dropbox rules say you can start as many batches as you want, but
// you may only have one batch being committed and must wait for the
// batch to be finished before committing another.

package dropbox

import (
"context"
"fmt"
"sync"
"time"

"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/async"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/atexit"
)

const (
maxBatchSize = 1000 // max size the batch can be
defaultTimeoutSync = 500 * time.Millisecond // kick off the batch if nothing added for this long (sync)
defaultTimeoutAsync = 10 * time.Second // kick off the batch if nothing added for this long (async)
defaultBatchSizeAsync = 100 // default batch size if async
)

// batcher holds info about the current items waiting for upload
type batcher struct {
f *Fs // Fs this batch is part of
mode string // configured batch mode
size int // maximum size for batch
timeout time.Duration // idle timeout for batch
async bool // whether we are using async batching
in chan batcherRequest // incoming items to batch
closed chan struct{} // close to indicate batcher shut down
atexit atexit.FnHandle // atexit handle
shutOnce sync.Once // make sure we shutdown once only
wg sync.WaitGroup // wait for shutdown
}

// batcherRequest holds an incoming request with a place for a reply
type batcherRequest struct {
commitInfo *files.UploadSessionFinishArg
result chan<- batcherResponse
}

// Return true if batcherRequest is the quit request
func (br *batcherRequest) isQuit() bool {
return br.commitInfo == nil
}

// Send this to get the engine to quit
var quitRequest = batcherRequest{}

// batcherResponse holds a response to be delivered to clients waiting
// for a batch to complete.
type batcherResponse struct {
err error
entry *files.FileMetadata
}

// newBatcher creates a new batcher structure
func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.Duration) (*batcher, error) {
// fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout)
if size > maxBatchSize || size < 0 {
return nil, errors.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
}

async := false

switch mode {
case "sync":
if size <= 0 {
ci := fs.GetConfig(ctx)
size = ci.Transfers
}
if timeout <= 0 {
timeout = defaultTimeoutSync
}
case "async":
if size <= 0 {
size = defaultBatchSizeAsync
}
if timeout <= 0 {
timeout = defaultTimeoutAsync
}
async = true
case "off":
size = 0
default:
return nil, errors.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
}

b := &batcher{
f: f,
mode: mode,
size: size,
timeout: timeout,
async: async,
in: make(chan batcherRequest, size),
closed: make(chan struct{}),
}
if b.Batching() {
b.atexit = atexit.Register(b.Shutdown)
b.wg.Add(1)
go b.commitLoop(context.Background())
}
return b, nil

}

// Batching returns true if batching is active
func (b *batcher) Batching() bool {
return b.size > 0
}

// finishBatch commits the batch, returning a batch status to poll or maybe complete
func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (batchStatus *files.UploadSessionFinishBatchLaunch, err error) {
var arg = &files.UploadSessionFinishBatchArg{
Entries: items,
}
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatch(arg)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
err = fserrors.NoRetryError(err)
return false, err
}
}
// after the first chunk is uploaded, we retry everything
return err != nil, err
})
if err != nil {
return nil, errors.Wrap(err, "batch commit failed")
}
return batchStatus, nil
}

// finishBatchJobStatus waits for the batch to complete returning completed entries
func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *files.UploadSessionFinishBatchLaunch) (complete *files.UploadSessionFinishBatchResult, err error) {
if launchBatchStatus.AsyncJobId == "" {
return nil, errors.New("wait for batch completion: empty job ID")
}
var batchStatus *files.UploadSessionFinishBatchJobStatus
sleepTime := 100 * time.Millisecond
const maxTries = 120
for try := 1; try <= maxTries; try++ {
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
AsyncJobId: launchBatchStatus.AsyncJobId,
})
return shouldRetry(ctx, err)
})
if err != nil {
fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d/%d", sleepTime, err, try, maxTries)
} else {
if batchStatus.Tag == "complete" {
return batchStatus.Complete, nil
}
fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d/%d", sleepTime, batchStatus.Tag, try, maxTries)
}
time.Sleep(sleepTime)
sleepTime *= 2
if sleepTime > time.Second {
sleepTime = time.Second
}
}
if err == nil {
err = errors.New("batch didn't complete")
}
return nil, errors.Wrapf(err, "wait for batch failed after %d tries", maxTries)
}
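The polling loop above doubles its sleep from 100 ms and caps it at one second, so the 120 tries give a worst case of roughly two minutes before giving up. The same capped exponential backoff, sketched standalone (poll() is a hypothetical completion check, not a Dropbox SDK call):

// illustrative capped exponential backoff mirroring finishBatchJobStatus
sleep := 100 * time.Millisecond
for try := 1; try <= 120; try++ {
    if poll() { // hypothetical: returns true when the async job is complete
        break
    }
    time.Sleep(sleep)
    sleep *= 2
    if sleep > time.Second {
        sleep = time.Second // cap the backoff at one second per try
    }
}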

// commit a batch
func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
// If commit fails then signal clients if sync
var signalled = b.async
defer func() {
if err != nil && !signalled {
// Signal to clients that there was an error
for _, result := range results {
result <- batcherResponse{err: err}
}
}
}()
desc := fmt.Sprintf("%s batch length %d starting with: %s", b.mode, len(items), items[0].Commit.Path)
fs.Debugf(b.f, "Committing %s", desc)

// finalise the batch getting either a result or a job id to poll
batchStatus, err := b.finishBatch(ctx, items)
if err != nil {
return err
}

// check whether batch is complete
var complete *files.UploadSessionFinishBatchResult
switch batchStatus.Tag {
case "async_job_id":
// wait for batch to complete
complete, err = b.finishBatchJobStatus(ctx, batchStatus)
if err != nil {
return err
}
case "complete":
complete = batchStatus.Complete
default:
return errors.Errorf("batch returned unknown status %q", batchStatus.Tag)
}

// Check we got the right number of entries
entries := complete.Entries
if len(entries) != len(results) {
return errors.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
}

// Report results to clients
var (
errorTag = ""
errorCount = 0
)
for i := range results {
item := entries[i]
resp := batcherResponse{}
if item.Tag == "success" {
resp.entry = item.Success
} else {
errorCount++
errorTag = item.Tag
if item.Failure != nil {
errorTag = item.Failure.Tag
if item.Failure.LookupFailed != nil {
errorTag += "/" + item.Failure.LookupFailed.Tag
}
if item.Failure.Path != nil {
errorTag += "/" + item.Failure.Path.Tag
}
if item.Failure.PropertiesError != nil {
errorTag += "/" + item.Failure.PropertiesError.Tag
}
}
resp.err = errors.Errorf("batch upload failed: %s", errorTag)
}
if !b.async {
results[i] <- resp
}
}
// Show signalled so no need to report error to clients from now on
signalled = true

// Report an error if any failed in the batch
if errorTag != "" {
return errors.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
}

fs.Debugf(b.f, "Committed %s", desc)
return nil
}

// commitLoop runs the commit engine in the background
func (b *batcher) commitLoop(ctx context.Context) {
var (
items []*files.UploadSessionFinishArg // current batch of uncommitted files
results []chan<- batcherResponse // current batch of clients awaiting results
idleTimer = time.NewTimer(b.timeout)
commit = func() {
err := b.commitBatch(ctx, items, results)
if err != nil {
fs.Errorf(b.f, "%s batch commit: failed to commit batch length %d: %v", b.mode, len(items), err)
}
items, results = nil, nil
}
)
defer b.wg.Done()
defer idleTimer.Stop()
idleTimer.Stop()

outer:
for {
select {
case req := <-b.in:
if req.isQuit() {
break outer
}
items = append(items, req.commitInfo)
results = append(results, req.result)
idleTimer.Stop()
if len(items) >= b.size {
commit()
} else {
idleTimer.Reset(b.timeout)
}
case <-idleTimer.C:
if len(items) > 0 {
fs.Debugf(b.f, "Batch idle for %v so committing", b.timeout)
commit()
}
}

}
// commit any remaining items
if len(items) > 0 {
commit()
}
}

// Shutdown finishes any pending batches then shuts everything down
//
// Can be called from atexit handler
func (b *batcher) Shutdown() {
b.shutOnce.Do(func() {
atexit.Unregister(b.atexit)
fs.Infof(b.f, "Committing uploads - please wait...")
// show that batcher is shutting down
close(b.closed)
// quit the commitLoop by sending a quitRequest message
//
// Note that we don't close b.in because that will
// cause write to closed channel in Commit when we are
// exiting due to a signal.
b.in <- quitRequest
b.wg.Wait()
})
}

// Commit commits the file using a batch call, first adding it to the
// batch and then waiting for the batch to complete in a synchronous
// way if async is not set.
func (b *batcher) Commit(ctx context.Context, commitInfo *files.UploadSessionFinishArg) (entry *files.FileMetadata, err error) {
select {
case <-b.closed:
return nil, fserrors.FatalError(errors.New("batcher is shutting down"))
default:
}
fs.Debugf(b.f, "Adding %q to batch", commitInfo.Commit.Path)
resp := make(chan batcherResponse, 1)
b.in <- batcherRequest{
commitInfo: commitInfo,
result: resp,
}
// If running async then don't wait for the result
if b.async {
return nil, nil
}
result := <-resp
return result.entry, result.err
}
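A caller-side sketch of how an upload path might use Commit (hedged - the real call site lives in the upload code elsewhere in this backend and differs in detail). In sync mode the call blocks until the whole batch lands and returns the file metadata; in async mode it returns immediately with a nil entry and the commit happens in the background:

// illustrative usage, assuming commitInfo is a prepared *files.UploadSessionFinishArg
entry, err := f.batcher.Commit(ctx, commitInfo)
if err != nil {
    return nil, err
}
if entry == nil {
    // async batch mode: metadata arrives later, nothing to report yet
}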
@@ -25,7 +25,6 @@ import (
"context"
"fmt"
"io"
"log"
"path"
"regexp"
"strings"
@@ -65,9 +64,9 @@ const (
// Upload chunk size - setting too small makes uploads slow.
// Chunks are buffered into memory for retries.
//
// Speed vs chunk size uploading a 1 GB file on 2017-11-22
// Speed vs chunk size uploading a 1 GiB file on 2017-11-22
//
// Chunk Size MB, Speed Mbyte/s, % of max
// Chunk Size MiB, Speed MiByte/s, % of max
// 1 1.364 11%
// 2 2.443 19%
// 4 4.288 33%
@@ -82,11 +81,11 @@ const (
// 96 12.302 95%
// 128 12.945 100%
//
// Choose 48MB which is 91% of Maximum speed. rclone by
// default does 4 transfers so this should use 4*48MB = 192MB
// Choose 48 MiB which is 91% of Maximum speed. rclone by
// default does 4 transfers so this should use 4*48 MiB = 192 MiB
// by default.
defaultChunkSize = 48 * fs.MebiByte
maxChunkSize = 150 * fs.MebiByte
defaultChunkSize = 48 * fs.Mebi
maxChunkSize = 150 * fs.Mebi
// Max length of filename parts: https://help.dropbox.com/installs-integrations/sync-uploads/files-not-syncing
maxFileNameLength = 255
)
@@ -99,8 +98,10 @@ var (
"files.content.write",
"files.content.read",
"sharing.write",
"account_info.read", // needed for About
// "file_requests.write",
// "members.read", // needed for impersonate - but causes app to need to be approved by Dropbox Team Admin during the flow
// "team_data.member"
},
// Endpoint: oauth2.Endpoint{
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
@@ -130,29 +131,26 @@ func getOauthConfig(m configmap.Mapper) *oauth2.Config {
}
// Make a copy of the config
config := *dropboxConfig
// Make a copy of the scopes with "members.read" appended
config.Scopes = append(config.Scopes, "members.read")
// Make a copy of the scopes with the extra scopes required appended
config.Scopes = append(config.Scopes, "members.read", "team_data.member")
return &config
}

// Register with Fs
func init() {
DbHashType = hash.RegisterHash("DropboxHash", 64, dbhash.New)
DbHashType = hash.RegisterHash("dropbox", "DropboxHash", 64, dbhash.New)
fs.Register(&fs.RegInfo{
Name: "dropbox",
Description: "Dropbox",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) {
opt := oauthutil.Options{
NoOffline: true,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: getOauthConfig(m),
NoOffline: true,
OAuth2Opts: []oauth2.AuthCodeOption{
oauth2.SetAuthURLParam("token_access_type", "offline"),
},
}
err := oauthutil.Config(ctx, "dropbox", name, m, getOauthConfig(m), &opt)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
})
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "chunk_size",
@@ -162,7 +160,7 @@ Any files larger than this will be uploaded in chunks of this size.

Note that chunks are buffered in memory (one at a time) so rclone can
deal with retries. Setting this larger will increase the speed
slightly (at most 10%% for 128MB in tests) at the cost of using more
slightly (at most 10%% for 128 MiB in tests) at the cost of using more
memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
Default: defaultChunkSize,
Advanced: true,
@@ -211,6 +209,63 @@ Note that we don't unmount the shared folder afterwards so the
shared folder.`,
Default: false,
Advanced: true,
}, {
Name: "batch_mode",
Help: `Upload file batching sync|async|off.

This sets the batch mode used by rclone.

For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)

This has 3 possible values

- off - no batching
- sync - batch uploads and check completion (default)
- async - batch upload and don't check completion

Rclone will close any outstanding batches when it exits which may make
a delay on quit.
`,
Default: "sync",
Advanced: true,
}, {
Name: "batch_size",
Help: `Max number of files in upload batch.

This sets the batch size of files to upload. It has to be less than 1000.

By default this is 0 which means rclone will calculate the batch size
depending on the setting of batch_mode.

- batch_mode: async - default batch_size is 100
- batch_mode: sync - default batch_size is the same as --transfers
- batch_mode: off - not in use

Rclone will close any outstanding batches when it exits which may make
a delay on quit.

Setting this is a great idea if you are uploading lots of small files
as it will make them a lot quicker. You can use --transfers 32 to
maximise throughput.
`,
Default: 0,
Advanced: true,
}, {
Name: "batch_timeout",
Help: `Max time to allow an idle upload batch before uploading

If an upload batch is idle for more than this long then it will be
uploaded.

The default for this is 0 which means rclone will choose a sensible
default based on the batch_mode in use.

- batch_mode: async - default batch_timeout is 10s
- batch_mode: sync - default batch_timeout is 500ms
- batch_mode: off - not in use
`,
Default: fs.Duration(0),
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -234,6 +289,10 @@ type Options struct {
Impersonate string `config:"impersonate"`
SharedFiles bool `config:"shared_files"`
SharedFolders bool `config:"shared_folders"`
BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"`
AsyncBatch bool `config:"async_batch"`
Enc encoder.MultiEncoder `config:"encoding"`
}

@@ -253,6 +312,7 @@ type Fs struct {
slashRootSlash string // root with "/" prefix and postfix, lowercase
pacer *fs.Pacer // To pace the API calls
ns string // The namespace we are using or "" for none
batcher *batcher // batch builder
}

// Object describes a dropbox object
@@ -268,8 +328,6 @@ type Object struct {
hash string // content_hash of the object
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
@@ -292,7 +350,10 @@ func (f *Fs) Features() *fs.Features {

// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(err error) (bool, error) {
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
if err == nil {
return false, err
}
@@ -307,7 +368,7 @@ func shouldRetry(err error) (bool, error) {
switch e := err.(type) {
case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 {
fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
fs.Logf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
}
return true, err
@@ -320,7 +381,7 @@ func shouldRetry(err error) (bool, error) {
}

func checkUploadChunkSize(cs fs.SizeSuffix) error {
const minChunkSize = fs.Byte
const minChunkSize = fs.SizeSuffixBase
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
@@ -377,6 +438,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout))
if err != nil {
return nil, err
}
cfg := dropbox.Config{
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
Client: oAuthClient, // maybe???
@@ -425,7 +490,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if f.root == "" {
return f, nil
}
_, err := f.findSharedFile(f.root)
_, err := f.findSharedFile(ctx, f.root)
f.root = ""
if err == nil {
return f, fs.ErrorIsFile
@@ -445,7 +510,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}

// root is not empty so we have to find the right shared folder if it exists
id, err := f.findSharedFolder(dir)
id, err := f.findSharedFolder(ctx, dir)
if err != nil {
// if we didn't find the specified shared folder we have to bail out here
return nil, err
@@ -453,7 +518,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// we found the specified shared folder so let's mount it
// this will add it to the users normal root namespace and allows us
// to actually perform operations on it using the normal api endpoints.
err = f.mountSharedFolder(id)
err = f.mountSharedFolder(ctx, id)
if err != nil {
switch e := err.(type) {
case sharing.MountFolderAPIError:
@@ -477,7 +542,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
var acc *users.FullAccount
err = f.pacer.Call(func() (bool, error) {
acc, err = f.users.GetCurrentAccount()
return shouldRetry(err)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "get current account failed")
@@ -495,7 +560,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.setRoot(root)

// See if the root is actually an object
_, err = f.getFileMetadata(f.slashRoot)
_, err = f.getFileMetadata(ctx, f.slashRoot)
if err == nil {
newRoot := path.Dir(f.root)
if newRoot == "." {
@@ -529,12 +594,12 @@ func (f *Fs) setRoot(root string) {
}

// getMetadata gets the metadata for a file or directory
func (f *Fs) getMetadata(objPath string) (entry files.IsMetadata, notFound bool, err error) {
func (f *Fs) getMetadata(ctx context.Context, objPath string) (entry files.IsMetadata, notFound bool, err error) {
err = f.pacer.Call(func() (bool, error) {
entry, err = f.srv.GetMetadata(&files.GetMetadataArg{
Path: f.opt.Enc.FromStandardPath(objPath),
})
return shouldRetry(err)
return shouldRetry(ctx, err)
})
if err != nil {
switch e := err.(type) {
@@ -549,8 +614,8 @@ func (f *Fs) getMetadata(objPath string) (entry files.IsMetadata, notFound bool,
}

// getFileMetadata gets the metadata for a file
func (f *Fs) getFileMetadata(filePath string) (fileInfo *files.FileMetadata, err error) {
entry, notFound, err := f.getMetadata(filePath)
func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (fileInfo *files.FileMetadata, err error) {
entry, notFound, err := f.getMetadata(ctx, filePath)
if err != nil {
return nil, err
}
@@ -565,8 +630,8 @@ func (f *Fs) getFileMetadata(filePath string) (fileInfo *files.FileMetadata, err
}

// getDirMetadata gets the metadata for a directory
func (f *Fs) getDirMetadata(dirPath string) (dirInfo *files.FolderMetadata, err error) {
entry, notFound, err := f.getMetadata(dirPath)
func (f *Fs) getDirMetadata(ctx context.Context, dirPath string) (dirInfo *files.FolderMetadata, err error) {
entry, notFound, err := f.getMetadata(ctx, dirPath)
if err != nil {
return nil, err
}
@@ -583,7 +648,7 @@ func (f *Fs) getDirMetadata(dirPath string) (dirInfo *files.FolderMetadata, err
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *files.FileMetadata) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *files.FileMetadata) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -592,7 +657,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *files.FileMetadata) (fs.Obje
if info != nil {
err = o.setMetadataFromEntry(info)
} else {
err = o.readEntryAndSetMetadata()
err = o.readEntryAndSetMetadata(ctx)
}
if err != nil {
return nil, err
@@ -604,14 +669,14 @@ func (f *Fs) newObjectWithInfo(remote string, info *files.FileMetadata) (fs.Obje
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if f.opt.SharedFiles {
return f.findSharedFile(remote)
return f.findSharedFile(ctx, remote)
}
return f.newObjectWithInfo(remote, nil)
return f.newObjectWithInfo(ctx, remote, nil)
}

// listSharedFolders lists all available shared folders mounted and not mounted
// we'll need the id later so we have to return them in original format
func (f *Fs) listSharedFolders() (entries fs.DirEntries, err error) {
func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err error) {
started := false
var res *sharing.ListFoldersResult
for {
@@ -621,7 +686,7 @@ func (f *Fs) listSharedFolders() (entries fs.DirEntries, err error) {
}
err := f.pacer.Call(func() (bool, error) {
res, err = f.sharing.ListFolders(&arg)
return shouldRetry(err)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
@@ -633,7 +698,7 @@ func (f *Fs) listSharedFolders() (entries fs.DirEntries, err error) {
}
err := f.pacer.Call(func() (bool, error) {
res, err = f.sharing.ListFoldersContinue(&arg)
return shouldRetry(err)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "list continue")
@@ -658,8 +723,8 @@ func (f *Fs) listSharedFolders() (entries fs.DirEntries, err error) {
// findSharedFolder finds the id for a given shared folder name
// somewhat annoyingly there is no endpoint to query a shared folder by its name
// so our only option is to iterate over all shared folders
func (f *Fs) findSharedFolder(name string) (id string, err error) {
entries, err := f.listSharedFolders()
func (f *Fs) findSharedFolder(ctx context.Context, name string) (id string, err error) {
entries, err := f.listSharedFolders(ctx)
if err != nil {
return "", err
}
@@ -672,20 +737,20 @@ func (f *Fs) findSharedFolder(name string) (id string, err error) {
}

// mountSharedFolder mounts a shared folder to the root namespace
func (f *Fs) mountSharedFolder(id string) error {
func (f *Fs) mountSharedFolder(ctx context.Context, id string) error {
arg := sharing.MountFolderArg{
SharedFolderId: id,
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.sharing.MountFolder(&arg)
return shouldRetry(err)
return shouldRetry(ctx, err)
})
return err
}

// listReceivedFiles lists shared files the user has access to (note this means individual
// files not files contained in shared folders)
func (f *Fs) listReceivedFiles() (entries fs.DirEntries, err error) {
func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err error) {
started := false
var res *sharing.ListFilesResult
for {
@@ -695,7 +760,7 @@ func (f *Fs) listReceivedFiles() (entries fs.DirEntries, err error) {
}
err := f.pacer.Call(func() (bool, error) {
res, err = f.sharing.ListReceivedFiles(&arg)
return shouldRetry(err)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
@@ -707,7 +772,7 @@ func (f *Fs) listReceivedFiles() (entries fs.DirEntries, err error) {
}
err := f.pacer.Call(func() (bool, error) {
res, err = f.sharing.ListReceivedFilesContinue(&arg)
return shouldRetry(err)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "list continue")
@@ -734,8 +799,8 @@ func (f *Fs) listReceivedFiles() (entries fs.DirEntries, err error) {
return entries, nil
}

func (f *Fs) findSharedFile(name string) (o *Object, err error) {
files, err := f.listReceivedFiles()
func (f *Fs) findSharedFile(ctx context.Context, name string) (o *Object, err error) {
files, err := f.listReceivedFiles(ctx)
if err != nil {
return nil, err
}
@@ -758,10 +823,10 @@ func (f *Fs) findSharedFile(name string) (o *Object, err error) {
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.opt.SharedFiles {
return f.listReceivedFiles()
return f.listReceivedFiles(ctx)
}
if f.opt.SharedFolders {
return f.listSharedFolders()
return f.listSharedFolders(ctx)
}

root := f.slashRoot
@@ -782,7 +847,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.ListFolder(&arg)
return shouldRetry(err)
return shouldRetry(ctx, err)
})
if err != nil {
switch e := err.(type) {
@@ -800,7 +865,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.ListFolderContinue(&arg)
return shouldRetry(err)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "list continue")
@@ -830,7 +895,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
d := fs.NewDir(remote, time.Now()).SetID(folderInfo.Id)
entries = append(entries, d)
} else if fileInfo != nil {
o, err := f.newObjectWithInfo(remote, fileInfo)
o, err := f.newObjectWithInfo(ctx, remote, fileInfo)
if err != nil {
return nil, err
}
@@ -879,7 +944,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
}

// check directory doesn't exist
_, err := f.getDirMetadata(root)
_, err := f.getDirMetadata(ctx, root)
if err == nil {
return nil // directory exists already
} else if err != fs.ErrorDirNotFound {
@@ -896,7 +961,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.CreateFolderV2(&arg2)
return shouldRetry(err)
return shouldRetry(ctx, err)
})
return err
}
@@ -913,7 +978,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)

if check {
// check directory exists
_, err = f.getDirMetadata(root)
_, err = f.getDirMetadata(ctx, root)
if err != nil {
return errors.Wrap(err, "Rmdir")
}
@@ -930,7 +995,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
var res *files.ListFolderResult
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.ListFolder(&arg)
return shouldRetry(err)
return shouldRetry(ctx, err)
})
if err != nil {
return errors.Wrap(err, "Rmdir")
@@ -943,7 +1008,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
// remove it
err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.DeleteV2(&files.DeleteArg{Path: root})
return shouldRetry(err)
return shouldRetry(ctx, err)
})
return err
}
@@ -996,7 +1061,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var result *files.RelocationResult
err = f.pacer.Call(func() (bool, error) {
result, err = f.srv.CopyV2(&arg)
return shouldRetry(err)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "copy failed")
@@ -1057,7 +1122,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
var result *files.RelocationResult
err = f.pacer.Call(func() (bool, error) {
result, err = f.srv.MoveV2(&arg)
return shouldRetry(err)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "move failed")
@@ -1081,17 +1146,34 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
createArg := sharing.CreateSharedLinkWithSettingsArg{
Path: absPath,
// FIXME this gives settings_error/not_authorized/.. errors
// and the expires setting isn't in the documentation so remove
// for now.
// Settings: &sharing.SharedLinkSettings{
// Expires: time.Now().Add(time.Duration(expire)).UTC().Round(time.Second),
// },
Settings: &sharing.SharedLinkSettings{
RequestedVisibility: &sharing.RequestedVisibility{
Tagged: dropbox.Tagged{Tag: sharing.RequestedVisibilityPublic},
},
Audience: &sharing.LinkAudience{
Tagged: dropbox.Tagged{Tag: sharing.LinkAudiencePublic},
},
Access: &sharing.RequestedLinkAccessLevel{
Tagged: dropbox.Tagged{Tag: sharing.RequestedLinkAccessLevelViewer},
},
},
}
if expire < fs.DurationOff {
expiryTime := time.Now().Add(time.Duration(expire)).UTC().Round(time.Second)
createArg.Settings.Expires = expiryTime
}
// FIXME note we can't set Settings for non enterprise dropbox
// because of https://github.com/dropbox/dropbox-sdk-go-unofficial/issues/75
// however this only goes wrong when we set Expires, so as a
// work-around remove Settings unless expire is set.
if expire == fs.DurationOff {
createArg.Settings = nil
}

var linkRes sharing.IsSharedLinkMetadata
err = f.pacer.Call(func() (bool, error) {
linkRes, err = f.sharing.CreateSharedLinkWithSettings(&createArg)
return shouldRetry(err)
return shouldRetry(ctx, err)
})

if err != nil && strings.Contains(err.Error(),
@@ -1104,7 +1186,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var listRes *sharing.ListSharedLinksResult
err = f.pacer.Call(func() (bool, error) {
listRes, err = f.sharing.ListSharedLinks(&listArg)
return shouldRetry(err)
return shouldRetry(ctx, err)
})
if err != nil {
return
@@ -1146,7 +1228,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
dstPath := path.Join(f.slashRoot, dstRemote)

// Check if destination exists
_, err := f.getDirMetadata(dstPath)
_, err := f.getDirMetadata(ctx, dstPath)
if err == nil {
return fs.ErrorDirExists
} else if err != fs.ErrorDirNotFound {
@@ -1165,7 +1247,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.MoveV2(&arg)
return shouldRetry(err)
return shouldRetry(ctx, err)
})
if err != nil {
return errors.Wrap(err, "MoveDir failed")
@@ -1179,7 +1261,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
var q *users.SpaceUsage
err = f.pacer.Call(func() (bool, error) {
q, err = f.users.GetSpaceUsage()
return shouldRetry(err)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, errors.Wrap(err, "about failed")
@@ -1210,7 +1292,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
go func() {
// get the StartCursor early so all changes from now on get processed
startCursor, err := f.changeNotifyCursor()
startCursor, err := f.changeNotifyCursor(ctx)
if err != nil {
fs.Infof(f, "Failed to get StartCursor: %s", err)
}
@@ -1235,7 +1317,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
}
case <-tickerC:
if startCursor == "" {
startCursor, err = f.changeNotifyCursor()
startCursor, err = f.changeNotifyCursor(ctx)
if err != nil {
fs.Infof(f, "Failed to get StartCursor: %s", err)
continue
@@ -1251,7 +1333,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
}()
}

func (f *Fs) changeNotifyCursor() (cursor string, err error) {
func (f *Fs) changeNotifyCursor(ctx context.Context) (cursor string, err error) {
var startCursor *files.ListFolderGetLatestCursorResult

err = f.pacer.Call(func() (bool, error) {
@@ -1266,7 +1348,7 @@ func (f *Fs) changeNotifyCursor() (cursor string, err error) {

startCursor, err = f.srv.ListFolderGetLatestCursor(&arg)

return shouldRetry(err)
return shouldRetry(ctx, err)
})
if err != nil {
return
@@ -1296,7 +1378,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
}

res, err = f.svc.ListFolderLongpoll(&args)
return shouldRetry(err)
return shouldRetry(ctx, err)
})
if err != nil {
return
@@ -1319,7 +1401,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
}
err = f.pacer.Call(func() (bool, error) {
changeList, err = f.srv.ListFolderContinue(&arg)
return shouldRetry(err)
return shouldRetry(ctx, err)
})
if err != nil {
return "", errors.Wrap(err, "list continue")
@@ -1331,13 +1413,13 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
switch info := entry.(type) {
case *files.FolderMetadata:
entryType = fs.EntryDirectory
entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
entryPath = strings.TrimPrefix(info.PathDisplay, f.slashRootSlash)
case *files.FileMetadata:
entryType = fs.EntryObject
entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
entryPath = strings.TrimPrefix(info.PathDisplay, f.slashRootSlash)
case *files.DeletedMetadata:
entryType = fs.EntryObject
entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
entryPath = strings.TrimPrefix(info.PathDisplay, f.slashRootSlash)
default:
fs.Errorf(entry, "dropbox ChangeNotify: ignoring unknown EntryType %T", entry)
continue
@@ -1359,6 +1441,13 @@ func (f *Fs) Hashes() hash.Set {
return hash.Set(DbHashType)
}

// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
f.batcher.Shutdown()
return nil
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Fs returns the parent Fs
|
||||
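The new Shutdown method gives the upload batcher a hook to flush before the backend goes away. Callers reach it through the optional fs.Shutdowner interface, which the var block later in this diff asserts. A brief illustration of how a caller might use it (the helper name is invented; fs.Shutdowner itself is the real interface this diff registers):

    import (
    	"context"

    	"github.com/rclone/rclone/fs"
    )

    // shutdownIfSupported flushes background work (for dropbox, any pending
    // batched uploads) if the backend implements the optional interface.
    func shutdownIfSupported(ctx context.Context, f fs.Fs) error {
    	if s, ok := f.(fs.Shutdowner); ok {
    		return s.Shutdown(ctx)
    	}
    	return nil
    }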
@@ -1392,7 +1481,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != DbHashType {
return "", hash.ErrUnsupported
}
-err := o.readMetaData()
+err := o.readMetaData(ctx)
if err != nil {
return "", errors.Wrap(err, "failed to read hash from metadata")
}
@@ -1416,17 +1505,17 @@ func (o *Object) setMetadataFromEntry(info *files.FileMetadata) error {
}

// Reads the entry for a file from dropbox
-func (o *Object) readEntry() (*files.FileMetadata, error) {
-return o.fs.getFileMetadata(o.remotePath())
+func (o *Object) readEntry(ctx context.Context) (*files.FileMetadata, error) {
+return o.fs.getFileMetadata(ctx, o.remotePath())
}

// Read entry if not set and set metadata from it
-func (o *Object) readEntryAndSetMetadata() error {
+func (o *Object) readEntryAndSetMetadata(ctx context.Context) error {
// Last resort set time from client
if !o.modTime.IsZero() {
return nil
}
-entry, err := o.readEntry()
+entry, err := o.readEntry(ctx)
if err != nil {
return err
}
@@ -1439,12 +1528,12 @@ func (o *Object) remotePath() string {
}

// readMetaData gets the info if it hasn't already been fetched
-func (o *Object) readMetaData() (err error) {
+func (o *Object) readMetaData(ctx context.Context) (err error) {
if !o.modTime.IsZero() {
return nil
}
// Last resort
-return o.readEntryAndSetMetadata()
+return o.readEntryAndSetMetadata(ctx)
}

// ModTime returns the modification time of the object
@@ -1452,7 +1541,7 @@ func (o *Object) readMetaData() (err error) {
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
-err := o.readMetaData()
+err := o.readMetaData(ctx)
if err != nil {
fs.Debugf(o, "Failed to read metadata: %v", err)
return time.Now()
@@ -1486,7 +1575,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
err = o.fs.pacer.Call(func() (bool, error) {
_, in, err = o.fs.sharing.GetSharedLinkFile(&arg)
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
@@ -1502,7 +1591,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
err = o.fs.pacer.Call(func() (bool, error) {
_, in, err = o.fs.srv.Download(&arg)
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})

switch e := err.(type) {
@@ -1518,97 +1607,83 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

// uploadChunked uploads the object in parts
//
-// Will work optimally if size is >= uploadChunkSize. If the size is either
-// unknown (i.e. -1) or smaller than uploadChunkSize, the method incurs an
-// avoidable request to the Dropbox API that does not carry payload.
-func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) {
-chunkSize := int64(o.fs.opt.ChunkSize)
-chunks := 0
-if size != -1 {
-chunks = int(size/chunkSize) + 1
-}
-in := readers.NewCountingReader(in0)
-buf := make([]byte, int(chunkSize))
-
-fmtChunk := func(cur int, last bool) {
-if chunks == 0 && last {
-fs.Debugf(o, "Streaming chunk %d/%d", cur, cur)
-} else if chunks == 0 {
-fs.Debugf(o, "Streaming chunk %d/unknown", cur)
-} else {
-fs.Debugf(o, "Uploading chunk %d/%d", cur, chunks)
-}
-}
-
-// write the first chunk
-fmtChunk(1, false)
+// Will introduce two additional network requests to start and finish the session.
+// If the size is unknown (i.e. -1) the method incurs one additional
+// request to the Dropbox API that does not carry a payload to close the append session.
+func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) {
+// start upload
var res *files.UploadSessionStartResult
-chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
err = o.fs.pacer.Call(func() (bool, error) {
-// seek to the start in case this is a retry
-if _, err = chunk.Seek(0, io.SeekStart); err != nil {
-return false, nil
-}
-res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, chunk)
-return shouldRetry(err)
+res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, nil)
+return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}

+chunkSize := int64(o.fs.opt.ChunkSize)
+chunks, remainder := size/chunkSize, size%chunkSize
+if remainder > 0 {
+chunks++
+}
+
+// write chunks
+in := readers.NewCountingReader(in0)
+buf := make([]byte, int(chunkSize))
cursor := files.UploadSessionCursor{
SessionId: res.SessionId,
Offset: 0,
}
-appendArg := files.UploadSessionAppendArg{
-Cursor: &cursor,
-Close: false,
-}
-
-// write more whole chunks (if any)
-currentChunk := 2
-for {
-if chunks > 0 && currentChunk >= chunks {
-// if the size is known, only upload full chunks. Remaining bytes are uploaded with
-// the UploadSessionFinish request.
-break
-} else if chunks == 0 && in.BytesRead()-cursor.Offset < uint64(chunkSize) {
-// if the size is unknown, upload as long as we can read full chunks from the reader.
-// The UploadSessionFinish request will not contain any payload.
-break
-}
+appendArg := files.UploadSessionAppendArg{Cursor: &cursor}
+for currentChunk := 1; ; currentChunk++ {
cursor.Offset = in.BytesRead()
-fmtChunk(currentChunk, false)
-chunk = readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
+
+if chunks < 0 {
+fs.Debugf(o, "Streaming chunk %d/unknown", currentChunk)
+} else {
+fs.Debugf(o, "Uploading chunk %d/%d", currentChunk, chunks)
+}
+
+chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
err = o.fs.pacer.Call(func() (bool, error) {
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
}
err = o.fs.srv.UploadSessionAppendV2(&appendArg, chunk)
-// after the first chunk is uploaded, we retry everything
+// after session is started, we retry everything
return err != nil, err
})
if err != nil {
return nil, err
}
-currentChunk++
+if appendArg.Close {
+break
+}
+
+if size > 0 {
+// if size is known, check if next chunk is final
+appendArg.Close = uint64(size)-in.BytesRead() <= uint64(chunkSize)
+} else {
+// if size is unknown, upload as long as we can read full chunks from the reader
+appendArg.Close = in.BytesRead()-cursor.Offset < uint64(chunkSize)
+}
}

-// write the remains
+// finish upload
cursor.Offset = in.BytesRead()
args := &files.UploadSessionFinishArg{
Cursor: &cursor,
Commit: commitInfo,
}
-fmtChunk(currentChunk, true)
-chunk = readers.NewRepeatableReaderBuffer(in, buf)
+// If we are batching then we should have written all the data now
+// store the commit info now for a batch commit
+if o.fs.batcher.Batching() {
+return o.fs.batcher.Commit(ctx, args)
+}
+
err = o.fs.pacer.Call(func() (bool, error) {
-// seek to the start in case this is a retry
-if _, err = chunk.Seek(0, io.SeekStart); err != nil {
-return false, nil
-}
-entry, err = o.fs.srv.UploadSessionFinish(args, chunk)
+entry, err = o.fs.srv.UploadSessionFinish(args, nil)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
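The rewritten uploadChunked drives Dropbox's three-phase upload session: a start call with no payload, an append loop that sets Close on the final chunk, and a finish call that also carries no payload (so it can be diverted to the batcher). A minimal self-contained sketch of that state machine over an invented stand-in API, not the real SDK:

    package main

    import (
    	"bytes"
    	"fmt"
    	"io"
    	"strings"
    )

    // uploadSession stands in for Dropbox's UploadSessionStart/AppendV2/Finish.
    type uploadSession struct{ data bytes.Buffer }

    func (s *uploadSession) Start() error { return nil } // no payload, like Start(arg, nil)
    func (s *uploadSession) Append(chunk []byte, close bool) error {
    	s.data.Write(chunk)
    	return nil
    }
    func (s *uploadSession) Finish() string { return s.data.String() } // no payload either

    func main() {
    	in := strings.NewReader("0123456789abcdef")
    	const chunkSize = 4
    	s := &uploadSession{}
    	_ = s.Start()
    	buf := make([]byte, chunkSize)
    	for closeSession := false; !closeSession; {
    		n, err := io.ReadFull(in, buf)
    		if err == io.ErrUnexpectedEOF || err == io.EOF {
    			// A short (possibly empty) read marks the final chunk; when the
    			// size is an exact multiple of chunkSize this final append is
    			// empty -- the extra payload-free request the new comment describes.
    			closeSession = true
    		}
    		_ = s.Append(buf[:n], closeSession)
    	}
    	fmt.Println(s.Finish()) // 0123456789abcdef
    }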
@@ -1675,17 +1750,26 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
size := src.Size()
var err error
var entry *files.FileMetadata
-if size > int64(o.fs.opt.ChunkSize) || size == -1 {
-entry, err = o.uploadChunked(in, commitInfo, size)
+if size > int64(o.fs.opt.ChunkSize) || size < 0 || o.fs.batcher.Batching() {
+entry, err = o.uploadChunked(ctx, in, commitInfo, size)
} else {
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
entry, err = o.fs.srv.Upload(commitInfo, in)
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
}
if err != nil {
return errors.Wrap(err, "upload failed")
}
+// If we haven't received data back from batch upload then fake it
+//
+// This will only happen if we are uploading async batches
+if entry == nil {
+o.bytes = size
+o.modTime = commitInfo.ClientModified
+o.hash = "" // we don't have this
+return nil
+}
return o.setMetadataFromEntry(entry)
}
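Update's dispatch now has three reasons to take the chunked path: the file is bigger than one chunk, the size is unknown, or batch mode is on (batched commits only work through upload sessions). A compact, self-contained restatement of just that decision rule (values illustrative only):

    package main

    import "fmt"

    // chooseUpload mirrors the branch above: chunk when the size exceeds the
    // chunk size, is unknown (negative), or when batching is enabled.
    func chooseUpload(size, chunkSize int64, batching bool) string {
    	if size > chunkSize || size < 0 || batching {
    		return "uploadChunked"
    	}
    	return "srv.Upload"
    }

    func main() {
    	fmt.Println(chooseUpload(10<<20, 48<<20, false)) // srv.Upload
    	fmt.Println(chooseUpload(-1, 48<<20, false))     // uploadChunked (streaming)
    	fmt.Println(chooseUpload(10<<20, 48<<20, true))  // uploadChunked (batch commit)
    }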
@@ -1698,7 +1782,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
_, err = o.fs.srv.DeleteV2(&files.DeleteArg{
Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),
})
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
return err
}
@@ -1713,6 +1797,7 @@ var (
_ fs.PublicLinker = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
+_ fs.Shutdowner = &Fs{}
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)

@@ -4,6 +4,7 @@ import (
"context"
"io"
"net/http"
+"net/url"
"regexp"
"strconv"
"strings"
@@ -28,7 +29,10 @@ var retryErrorCodes = []int{

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
-func shouldRetry(resp *http.Response, err error) (bool, error) {
+func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
+if fserrors.ContextError(ctx, &err) {
+return false, err
+}
// Detect this error which the integration tests provoke
// error HTTP error 403 (403 Forbidden) returned body: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
//
@@ -74,7 +78,7 @@ func (f *Fs) readFileInfo(ctx context.Context, url string) (*File, error) {
var file File
err := f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, &file)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't read file info")
@@ -87,6 +91,7 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
request := DownloadRequest{
URL: url,
Single: 1,
+Pass: f.opt.FilePassword,
}
opts := rest.Opts{
Method: "POST",
@@ -96,7 +101,7 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
var token GetTokenResponse
err := f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
@@ -115,16 +120,22 @@ func fileFromSharedFile(file *SharedFile) File {

func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntries, err error) {
opts := rest.Opts{
-Method: "GET",
-RootURL: "https://1fichier.com/dir/",
-Path: id,
-Parameters: map[string][]string{"json": {"1"}},
+Method: "GET",
+RootURL: "https://1fichier.com/dir/",
+Path: id,
+Parameters: map[string][]string{"json": {"1"}},
+ContentType: "application/x-www-form-urlencoded",
}
+if f.opt.FolderPassword != "" {
+opts.Method = "POST"
+opts.Parameters = nil
+opts.Body = strings.NewReader("json=1&pass=" + url.QueryEscape(f.opt.FolderPassword))
+}

var sharedFiles SharedFolderResponse
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, &sharedFiles)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
@@ -153,7 +164,7 @@ func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesLi
filesList = &FilesList{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, filesList)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
@@ -181,7 +192,7 @@ func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *Fol
foldersList = &FoldersList{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, foldersList)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list folders")
@@ -275,7 +286,7 @@ func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (respons
response = &MakeFolderResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, response)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create folder")
@@ -302,13 +313,13 @@ func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (respo
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rest.CallJSON(ctx, &opts, request, response)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't remove folder")
}
if response.Status != "OK" {
-return nil, errors.New("Can't remove non-empty dir")
+return nil, errors.Errorf("can't remove folder: %s", response.Message)
}

// fs.Debugf(f, "Removed Folder with id `%s`", directoryID)
@@ -331,7 +342,7 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
response = &GenericOKResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})

if err != nil {
@@ -358,7 +369,7 @@ func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename stri
response = &MoveFileResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})

if err != nil {
@@ -383,7 +394,7 @@ func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename stri
response = &CopyFileResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})

if err != nil {
@@ -393,6 +404,34 @@ func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename stri
return response, nil
}

+func (f *Fs) renameFile(ctx context.Context, url string, newName string) (response *RenameFileResponse, err error) {
+request := &RenameFileRequest{
+URLs: []RenameFileURL{
+{
+URL: url,
+Filename: newName,
+},
+},
+}
+
+opts := rest.Opts{
+Method: "POST",
+Path: "/file/rename.cgi",
+}
+
+response = &RenameFileResponse{}
+err = f.pacer.Call(func() (bool, error) {
+resp, err := f.rest.CallJSON(ctx, &opts, request, response)
+return shouldRetry(ctx, resp, err)
+})
+
+if err != nil {
+return nil, errors.Wrap(err, "couldn't rename file")
+}
+
+return response, nil
+}
+
func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse, err error) {
// fs.Debugf(f, "Requesting Upload node")

@@ -405,7 +444,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
response = &GetUploadNodeResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "didnt got an upload node")
@@ -448,7 +487,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,

err = f.pacer.CallNoRetry(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, nil)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})

if err != nil {
@@ -482,7 +521,7 @@ func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (re
response = &EndFileUploadResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})

if err != nil {

@@ -35,9 +35,7 @@ func init() {
fs.Register(&fs.RegInfo{
Name: "fichier",
Description: "1Fichier",
-Config: func(ctx context.Context, name string, config configmap.Mapper) {
-},
-NewFs: NewFs,
+NewFs: NewFs,
Options: []fs.Option{{
Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
Name: "api_key",
@@ -46,6 +44,18 @@ func init() {
Name: "shared_folder",
Required: false,
Advanced: true,
+}, {
+Help: "If you want to download a shared file that is password protected, add this parameter",
+Name: "file_password",
+Required: false,
+Advanced: true,
+IsPassword: true,
+}, {
+Help: "If you want to list the files in a shared folder that is password protected, add this parameter",
+Name: "folder_password",
+Required: false,
+Advanced: true,
+IsPassword: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -77,9 +87,11 @@ func init() {

// Options defines the configuration for this backend
type Options struct {
-APIKey string `config:"api_key"`
-SharedFolder string `config:"shared_folder"`
-Enc encoder.MultiEncoder `config:"encoding"`
+APIKey string `config:"api_key"`
+SharedFolder string `config:"shared_folder"`
+FilePassword string `config:"file_password"`
+FolderPassword string `config:"folder_password"`
+Enc encoder.MultiEncoder `config:"encoding"`
}

// Fs is the interface a cloud storage system must provide
@@ -348,8 +360,10 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
return nil, err
}

-if len(fileUploadResponse.Links) != 1 {
-return nil, errors.New("unexpected amount of files")
+if len(fileUploadResponse.Links) == 0 {
+return nil, errors.New("upload response not found")
+} else if len(fileUploadResponse.Links) > 1 {
+fs.Debugf(remote, "Multiple upload responses found, using the first")
}

link := fileUploadResponse.Links[0]
@@ -423,25 +437,45 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fs.ErrorCantMove
}

+// Find current directory ID
+_, currentDirectoryID, err := f.dirCache.FindPath(ctx, remote, false)
+if err != nil {
+return nil, err
+}
+
// Create temporary object
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
if err != nil {
return nil, err
}

-folderID, err := strconv.Atoi(directoryID)
-if err != nil {
-return nil, err
-}
-resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
-if err != nil {
-return nil, errors.Wrap(err, "couldn't move file")
-}
-if resp.Status != "OK" {
-return nil, errors.New("couldn't move file")
+// If it is in the correct directory, just rename it
+var url string
+if currentDirectoryID == directoryID {
+resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
+if err != nil {
+return nil, errors.Wrap(err, "couldn't rename file")
+}
+if resp.Status != "OK" {
+return nil, errors.Errorf("couldn't rename file: %s", resp.Message)
+}
+url = resp.URLs[0].URL
+} else {
+folderID, err := strconv.Atoi(directoryID)
+if err != nil {
+return nil, err
+}
+resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
+if err != nil {
+return nil, errors.Wrap(err, "couldn't move file")
+}
+if resp.Status != "OK" {
+return nil, errors.Errorf("couldn't move file: %s", resp.Message)
+}
+url = resp.URLs[0]
}

-file, err := f.readFileInfo(ctx, resp.URLs[0])
+file, err := f.readFileInfo(ctx, url)
if err != nil {
return nil, errors.New("couldn't read file data")
}
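Move now distinguishes a pure rename (destination parent unchanged) from a real move, calling the new rename.cgi endpoint instead of moving through a folder ID. A compact, self-contained restatement of the dispatch (names invented for illustration):

    package main

    import "fmt"

    // planMove mirrors the branch above: a file staying in the same parent
    // directory only needs a rename call; otherwise it needs a move call.
    func planMove(currentDirID, targetDirID, newLeaf string) string {
    	if currentDirID == targetDirID {
    		return "renameFile -> " + newLeaf
    	}
    	return "moveFile -> " + targetDirID + "/" + newLeaf
    }

    func main() {
    	fmt.Println(planMove("42", "42", "b.txt")) // renameFile -> b.txt
    	fmt.Println(planMove("42", "77", "b.txt")) // moveFile -> 77/b.txt
    }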
@@ -472,7 +506,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, errors.Wrap(err, "couldn't move file")
}
if resp.Status != "OK" {
-return nil, errors.New("couldn't move file")
+return nil, errors.Errorf("couldn't move file: %s", resp.Message)
}

file, err := f.readFileInfo(ctx, resp.URLs[0].ToURL)

@@ -94,7 +94,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo

err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.rest.Call(ctx, &opts)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})

if err != nil {

@@ -19,6 +19,7 @@ type ListFilesRequest struct {
type DownloadRequest struct {
URL string `json:"url"`
Single int `json:"single"`
+Pass string `json:"pass,omitempty"`
}

// RemoveFolderRequest is the request structure of the corresponding request
@@ -63,8 +64,9 @@ type MoveFileRequest struct {

// MoveFileResponse is the response structure of the corresponding request
type MoveFileResponse struct {
-Status string `json:"status"`
-URLs []string `json:"urls"`
+Status string `json:"status"`
+Message string `json:"message"`
+URLs []string `json:"urls"`
}

// CopyFileRequest is the request structure of the corresponding request
@@ -76,9 +78,10 @@ type CopyFileRequest struct {

// CopyFileResponse is the response structure of the corresponding request
type CopyFileResponse struct {
-Status string `json:"status"`
-Copied int `json:"copied"`
-URLs []FileCopy `json:"urls"`
+Status string `json:"status"`
+Message string `json:"message"`
+Copied int `json:"copied"`
+URLs []FileCopy `json:"urls"`
}

// FileCopy is used in the the CopyFileResponse
@@ -87,6 +90,30 @@ type FileCopy struct {
ToURL string `json:"to_url"`
}

+// RenameFileURL is the data structure to rename a single file
+type RenameFileURL struct {
+URL string `json:"url"`
+Filename string `json:"filename"`
+}
+
+// RenameFileRequest is the request structure of the corresponding request
+type RenameFileRequest struct {
+URLs []RenameFileURL `json:"urls"`
+Pretty int `json:"pretty"`
+}
+
+// RenameFileResponse is the response structure of the corresponding request
+type RenameFileResponse struct {
+Status string `json:"status"`
+Message string `json:"message"`
+Renamed int `json:"renamed"`
+URLs []struct {
+URL string `json:"url"`
+OldFilename string `json:"old_filename"`
+NewFilename string `json:"new_filename"`
+} `json:"urls"`
+}
+
// GetUploadNodeResponse is the response structure of the corresponding request
type GetUploadNodeResponse struct {
ID string `json:"id"`

@@ -5,6 +5,7 @@ package api

import (
"bytes"
+"encoding/json"
"fmt"
"reflect"
"strings"
@@ -51,6 +52,23 @@ func (t Time) String() string {
return time.Time(t).UTC().Format(timeFormatParameters)
}

+// Int represents an integer which can be represented in JSON as a
+// quoted integer or an integer.
+type Int int
+
+// MarshalJSON turns a Int into JSON
+func (i *Int) MarshalJSON() (out []byte, err error) {
+return json.Marshal((*int)(i))
+}
+
+// UnmarshalJSON turns JSON into a Int
+func (i *Int) UnmarshalJSON(data []byte) error {
+if len(data) >= 2 && data[0] == '"' && data[len(data)-1] == '"' {
+data = data[1 : len(data)-1]
+}
+return json.Unmarshal(data, (*int)(i))
+}
+
// Status return returned in all status responses
type Status struct {
Code string `json:"status"`
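The new api.Int type tolerates a server that sometimes quotes numeric JSON fields and sometimes doesn't, which is what breaks the old `json:"from,string"` tag. A self-contained demonstration of the same trick (type name reused for illustration only):

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // Int decodes from both `"from": 7` and `"from": "7"`.
    type Int int

    func (i *Int) UnmarshalJSON(data []byte) error {
    	// Strip surrounding quotes, if any, then decode as a plain int.
    	if len(data) >= 2 && data[0] == '"' && data[len(data)-1] == '"' {
    		data = data[1 : len(data)-1]
    	}
    	return json.Unmarshal(data, (*int)(i))
    }

    func main() {
    	var a, b struct {
    		From Int `json:"from"`
    	}
    	_ = json.Unmarshal([]byte(`{"from": 7}`), &a)
    	_ = json.Unmarshal([]byte(`{"from": "7"}`), &b)
    	fmt.Println(a.From, b.From) // 7 7
    }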
@@ -115,7 +133,7 @@ type GetFolderContentsResponse struct {
Total int `json:"total,string"`
Items []Item `json:"filelist"`
Folder Item `json:"folder"`
-From int `json:"from,string"`
+From Int `json:"from"`
//Count int `json:"count"`
Pid string `json:"pid"`
RefreshResult Status `json:"refreshresult"`

@@ -228,7 +228,10 @@ var retryStatusCodes = []struct {

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
-func (f *Fs) shouldRetry(resp *http.Response, err error, status api.OKError) (bool, error) {
+func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, status api.OKError) (bool, error) {
+if fserrors.ContextError(ctx, &err) {
+return false, err
+}
if err != nil {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
@@ -401,7 +404,7 @@ func (f *Fs) rpc(ctx context.Context, function string, p params, result api.OKEr
// Refresh the body each retry
opts.Body = strings.NewReader(data.Encode())
resp, err = f.srv.CallJSON(ctx, &opts, nil, result)
-return f.shouldRetry(resp, err, result)
+return f.shouldRetry(ctx, resp, err, result)
})
if err != nil {
return resp, err
@@ -1277,7 +1280,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &uploader)
-return o.fs.shouldRetry(resp, err, nil)
+return o.fs.shouldRetry(ctx, resp, err, nil)
})
if err != nil {
return errors.Wrap(err, "failed to upload")

@@ -21,6 +21,7 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
+"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
@@ -33,6 +34,12 @@ var (
currentUser = env.CurrentUser()
)

+const (
+minSleep = 10 * time.Millisecond
+maxSleep = 2 * time.Second
+decayConstant = 2 // bigger for slower decay, exponential
+)
+
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -104,6 +111,11 @@ given, rclone will empty the connection pool.
Set to 0 to keep connections indefinitely.
`,
Advanced: true,
+}, {
+Name: "close_timeout",
+Help: "Maximum time to wait for a response to close.",
+Default: fs.Duration(60 * time.Second),
+Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -132,6 +144,7 @@ type Options struct {
DisableEPSV bool `config:"disable_epsv"`
DisableMLSD bool `config:"disable_mlsd"`
IdleTimeout fs.Duration `config:"idle_timeout"`
+CloseTimeout fs.Duration `config:"close_timeout"`
Enc encoder.MultiEncoder `config:"encoding"`
}

@@ -151,6 +164,7 @@ type Fs struct {
drain *time.Timer // used to drain the pool when we stop using the connections
tokens *pacer.TokenDispenser
tlsConf *tls.Config
+pacer *fs.Pacer // pacer for FTP connections
}

// Object describes an FTP file
@@ -227,29 +241,41 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
return len(p), nil
}

-type dialCtx struct {
-f *Fs
-ctx context.Context
-}
-
-// dial a new connection with fshttp dialer
-func (d *dialCtx) dial(network, address string) (net.Conn, error) {
-conn, err := fshttp.NewDialer(d.ctx).Dial(network, address)
-if err != nil {
-return nil, err
-}
-if d.f.tlsConf != nil {
-conn = tls.Client(conn, d.f.tlsConf)
-}
-return conn, err
-}
+// shouldRetry returns a boolean as to whether this err deserve to be
+// retried. It returns the err as a convenience
+func shouldRetry(ctx context.Context, err error) (bool, error) {
+if fserrors.ContextError(ctx, &err) {
+return false, err
+}
+switch errX := err.(type) {
+case *textproto.Error:
+switch errX.Code {
+case ftp.StatusNotAvailable:
+return true, err
+}
+}
+return fserrors.ShouldRetry(err), err
+}

// Open a new connection to the FTP server.
-func (f *Fs) ftpConnection(ctx context.Context) (*ftp.ServerConn, error) {
+func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
fs.Debugf(f, "Connecting to FTP server")
-dCtx := dialCtx{f, ctx}
-ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dCtx.dial)}
-if f.opt.ExplicitTLS {
+
+// Make ftp library dial with fshttp dialer optionally using TLS
+dial := func(network, address string) (conn net.Conn, err error) {
+conn, err = fshttp.NewDialer(ctx).Dial(network, address)
+if f.tlsConf != nil && err == nil {
+conn = tls.Client(conn, f.tlsConf)
+}
+return
+}
+ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dial)}
+
+if f.opt.TLS {
+// Our dialer takes care of TLS but ftp library also needs tlsConf
+// as a trigger for sending PSBZ and PROT options to server.
+ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
+} else if f.opt.ExplicitTLS {
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
// Initial connection needs to be cleartext for explicit TLS
conn, err := fshttp.NewDialer(ctx).Dial("tcp", f.dialAddr)
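The FTP backend's new shouldRetry treats a 421 reply (ftp.StatusNotAvailable) as retryable and aborts on context cancellation, which is what lets the new pacer-wrapped dial/login loop recover from overloaded servers. A self-contained sketch of that classification, simplified to drop rclone's fserrors helpers:

    package main

    import (
    	"context"
    	"fmt"
    	"net/textproto"
    )

    // shouldRetry classifies an FTP error: stop on context cancellation,
    // retry on a 421 "service not available" reply, otherwise give up.
    // 421 corresponds to ftp.StatusNotAvailable in the ftp library.
    func shouldRetry(ctx context.Context, err error) (bool, error) {
    	if ctxErr := ctx.Err(); ctxErr != nil {
    		return false, ctxErr
    	}
    	if errX, ok := err.(*textproto.Error); ok && errX.Code == 421 {
    		return true, err
    	}
    	return false, err
    }

    func main() {
    	retry, _ := shouldRetry(context.Background(),
    		&textproto.Error{Code: 421, Msg: "service not available"})
    	fmt.Println(retry) // true
    }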
@@ -267,18 +293,22 @@ func (f *Fs) ftpConnection(ctx context.Context) (*ftp.ServerConn, error) {
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
}
-c, err := ftp.Dial(f.dialAddr, ftpConfig...)
+err = f.pacer.Call(func() (bool, error) {
+c, err = ftp.Dial(f.dialAddr, ftpConfig...)
+if err != nil {
+return shouldRetry(ctx, err)
+}
+err = c.Login(f.user, f.pass)
+if err != nil {
+_ = c.Quit()
+return shouldRetry(ctx, err)
+}
+return false, nil
+})
if err != nil {
-fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
-return nil, errors.Wrap(err, "ftpConnection Dial")
+err = errors.Wrapf(err, "failed to make FTP connection to %q", f.dialAddr)
}
-err = c.Login(f.user, f.pass)
-if err != nil {
-_ = c.Quit()
-fs.Errorf(f, "Error while Logging in into %s: %s", f.dialAddr, err)
-return nil, errors.Wrap(err, "ftpConnection Login")
-}
-return c, nil
+return c, err
}

// Get an FTP connection from the pool, or open a new one
@@ -411,6 +441,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
dialAddr: dialAddr,
tokens: pacer.NewTokenDispenser(opt.Concurrency),
tlsConf: tlsConfig,
+pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
@@ -931,8 +962,8 @@ func (f *ftpReadCloser) Close() error {
go func() {
errchan <- f.rc.Close()
}()
-// Wait for Close for up to 60 seconds
-timer := time.NewTimer(60 * time.Second)
+// Wait for Close for up to 60 seconds by default
+timer := time.NewTimer(time.Duration(f.f.opt.CloseTimeout))
select {
case err = <-errchan:
timer.Stop()
@@ -19,9 +19,9 @@ import (
"fmt"
"io"
"io/ioutil"
-"log"
"net/http"
"path"
+"strconv"
"strings"
"time"

@@ -51,10 +51,10 @@ import (
const (
rcloneClientID = "202264815644.apps.googleusercontent.com"
rcloneEncryptedClientSecret = "Uj7C9jGfb9gmeaV70Lh058cNkWvepr-Es9sBm0zdgil7JaOWF1VySw"
-timeFormatIn = time.RFC3339
-timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
-metaMtime = "mtime" // key to store mtime under in metadata
-listChunks = 1000 // chunk size to read directory listings
+timeFormat = time.RFC3339Nano
+metaMtime = "mtime" // key to store mtime in metadata
+metaMtimeGsutil = "goog-reserved-file-mtime" // key used by GSUtil to store mtime in metadata
+listChunks = 1000 // chunk size to read directory listings
minSleep = 10 * time.Millisecond
)

@@ -76,17 +76,16 @@ func init() {
Prefix: "gcs",
Description: "Google Cloud Storage (this is not Google Drive)",
NewFs: NewFs,
-Config: func(ctx context.Context, name string, m configmap.Mapper) {
+Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
saFile, _ := m.Get("service_account_file")
saCreds, _ := m.Get("service_account_credentials")
anonymous, _ := m.Get("anonymous")
if saFile != "" || saCreds != "" || anonymous == "true" {
-return
+return nil, nil
}
-err := oauthutil.Config(ctx, "google cloud storage", name, m, storageConfig, nil)
-if err != nil {
-log.Fatalf("Failed to configure token: %v", err)
-}
+return oauthutil.ConfigOut("", &oauthutil.Options{
+OAuth2Config: storageConfig,
+})
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "project_number",
@@ -329,7 +328,10 @@ func (f *Fs) Features() *fs.Features {
}

// shouldRetry determines whether a given err rates being retried
-func shouldRetry(err error) (again bool, errOut error) {
+func shouldRetry(ctx context.Context, err error) (again bool, errOut error) {
+if fserrors.ContextError(ctx, &err) {
+return false, err
+}
again = false
if err != nil {
if fserrors.ShouldRetry(err) {
@@ -455,7 +457,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
if err == nil {
newRoot := path.Dir(f.root)
@@ -521,7 +523,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
var objects *storage.Objects
err = f.pacer.Call(func() (bool, error) {
objects, err = list.Context(ctx).Do()
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
if err != nil {
if gErr, ok := err.(*googleapi.Error); ok {
@@ -624,7 +626,7 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
var buckets *storage.Buckets
err = f.pacer.Call(func() (bool, error) {
buckets, err = listBuckets.Context(ctx).Do()
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
@@ -750,7 +752,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
// service account that only has the "Storage Object Admin" role. See #2193 for details.
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
if err == nil {
// Bucket already exists
@@ -785,7 +787,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
insertBucket.PredefinedAcl(f.opt.BucketACL)
}
_, err = insertBucket.Context(ctx).Do()
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
}, nil)
}
@@ -802,7 +804,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
return f.cache.Remove(bucket, func() error {
return f.pacer.Call(func() (bool, error) {
err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
})
}
@@ -848,7 +850,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
for {
err = f.pacer.Call(func() (bool, error) {
rewriteResponse, err = rewriteRequest.Context(ctx).Do()
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
@@ -919,7 +921,7 @@ func (o *Object) setMetaData(info *storage.Object) {
// read mtime out of metadata if available
mtimeString, ok := info.Metadata[metaMtime]
if ok {
-modTime, err := time.Parse(timeFormatIn, mtimeString)
+modTime, err := time.Parse(timeFormat, mtimeString)
if err == nil {
o.modTime = modTime
return
@@ -927,8 +929,19 @@ func (o *Object) setMetaData(info *storage.Object) {
fs.Debugf(o, "Failed to read mtime from metadata: %s", err)
}

+// Fallback to GSUtil mtime
+mtimeGsutilString, ok := info.Metadata[metaMtimeGsutil]
+if ok {
+unixTimeSec, err := strconv.ParseInt(mtimeGsutilString, 10, 64)
+if err == nil {
+o.modTime = time.Unix(unixTimeSec, 0)
+return
+}
+fs.Debugf(o, "Failed to read GSUtil mtime from metadata: %s", err)
+}
+
// Fallback to the Updated time
-modTime, err := time.Parse(timeFormatIn, info.Updated)
+modTime, err := time.Parse(timeFormat, info.Updated)
if err != nil {
fs.Logf(o, "Bad time decode: %v", err)
} else {
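The new fallback reads gsutil's `goog-reserved-file-mtime` key, which stores integer Unix seconds rather than an RFC3339 string like rclone's own `mtime` key. A small self-contained parse demonstration:

    package main

    import (
    	"fmt"
    	"strconv"
    	"time"
    )

    func main() {
    	// gsutil stores mtime as integer Unix seconds in object metadata.
    	metadata := map[string]string{"goog-reserved-file-mtime": "1617182400"}
    	if s, ok := metadata["goog-reserved-file-mtime"]; ok {
    		if sec, err := strconv.ParseInt(s, 10, 64); err == nil {
    			// Prints the RFC3339 rendering of the stored Unix time.
    			fmt.Println(time.Unix(sec, 0).UTC().Format(time.RFC3339))
    		}
    	}
    }

Writing both keys on upload, as metadataFromModTime now does, keeps objects readable by both tools.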
@@ -941,7 +954,7 @@ func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, er
bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
if err != nil {
if gErr, ok := err.(*googleapi.Error); ok {
@@ -985,7 +998,8 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// Returns metadata for an object
func metadataFromModTime(modTime time.Time) map[string]string {
metadata := make(map[string]string, 1)
-metadata[metaMtime] = modTime.Format(timeFormatOut)
+metadata[metaMtime] = modTime.Format(timeFormat)
+metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
return metadata
}

@@ -997,11 +1011,11 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
return err
}
// Add the mtime to the existing metadata
-mtime := modTime.Format(timeFormatOut)
if object.Metadata == nil {
object.Metadata = make(map[string]string, 1)
}
-object.Metadata[metaMtime] = mtime
+object.Metadata[metaMtime] = modTime.Format(timeFormat)
+object.Metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
// Copy the object to itself to update the metadata
// Using PATCH requires too many permissions
bucket, bucketPath := o.split()
@@ -1012,7 +1026,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = copyObject.Context(ctx).Do()
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
if err != nil {
return err
@@ -1043,7 +1057,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
_ = res.Body.Close() // ignore error
}
}
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
@@ -1109,7 +1123,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = insertObject.Context(ctx).Do()
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
if err != nil {
return err
@@ -1124,7 +1138,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
-return shouldRetry(err)
+return shouldRetry(ctx, err)
})
return err
}

@@ -8,7 +8,6 @@ import (
"encoding/json"
"fmt"
"io"
-golog "log"
"net/http"
"net/url"
"path"
@@ -78,36 +77,36 @@ func init() {
Prefix: "gphotos",
Description: "Google Photos",
NewFs: NewFs,
-Config: func(ctx context.Context, name string, m configmap.Mapper) {
+Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
-fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
-return
+return nil, errors.Wrap(err, "couldn't parse config into struct")
}

-// Fill in the scopes
-if opt.ReadOnly {
-oauthConfig.Scopes[0] = scopeReadOnly
-} else {
-oauthConfig.Scopes[0] = scopeReadWrite
+switch config.State {
+case "":
+// Fill in the scopes
+if opt.ReadOnly {
+oauthConfig.Scopes[0] = scopeReadOnly
+} else {
+oauthConfig.Scopes[0] = scopeReadWrite
+}
+return oauthutil.ConfigOut("warning", &oauthutil.Options{
+OAuth2Config: oauthConfig,
+})
+case "warning":
+// Warn the user as required by google photos integration
+return fs.ConfigConfirm("warning_done", true, "config_warning", `Warning
+
+IMPORTANT: All media items uploaded to Google Photos with rclone
+are stored in full resolution at original quality. These uploads
+will count towards storage in your Google Account.`)
+case "warning_done":
+return nil, nil
}

-// Do the oauth
-err = oauthutil.Config(ctx, "google photos", name, m, oauthConfig, nil)
-if err != nil {
-golog.Fatalf("Failed to configure token: %v", err)
-}
-
-// Warn the user
-fmt.Print(`
-*** IMPORTANT: All media items uploaded to Google Photos with rclone
-*** are stored in full resolution at original quality. These uploads
-*** will count towards storage in your Google Account.
-
-`)
-
+return nil, fmt.Errorf("unknown state %q", config.State)
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "read_only",
@@ -240,7 +239,10 @@ var retryErrorCodes = []int{

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
-func shouldRetry(resp *http.Response, err error) (bool, error) {
+func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
+if fserrors.ContextError(ctx, &err) {
+return false, err
+}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

@@ -329,7 +331,7 @@ func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, e
var openIDconfig map[string]interface{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.unAuth.CallJSON(ctx, &opts, nil, &openIDconfig)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return "", errors.Wrap(err, "couldn't read openID config")
@@ -358,7 +360,7 @@ func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err erro
}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &userInfo)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't read user info")
@@ -389,7 +391,7 @@ func (f *Fs) Disconnect(ctx context.Context) (err error) {
var res interface{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &res)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't revoke token")
@@ -476,7 +478,7 @@ func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err erro
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list albums")
@@ -531,7 +533,7 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &filter, &result)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't list files")
@@ -675,7 +677,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, request, &result)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create album")
@@ -810,7 +812,7 @@ func (o *Object) Size() int64 {
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
fs.Debugf(o, "Reading size failed: %v", err)
@@ -861,7 +863,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &item)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't get media item")
@@ -938,7 +940,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
@@ -993,10 +995,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
if err != nil {
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
}
token, err = rest.ReadBody(resp)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't upload file")
@@ -1024,7 +1026,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var result api.BatchCreateResponse
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, request, &result)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to create media item")
@@ -1069,7 +1071,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
-return shouldRetry(resp, err)
+return shouldRetry(ctx, resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't delete item from album")

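The Google Photos Config hunk above is the clearest example of the new config flow seen across these backends: instead of blocking prompts and log.Fatalf, Config is a state machine driven by config.State, returning the next state so rclone can run configuration over the rc API as well as the terminal. A loose self-contained sketch of the pattern (state names invented, not rclone's actual plumbing):

    package main

    import "fmt"

    // configStep models a state-machine config flow: each call consumes a
    // state and names the next one, until "" means done.
    func configStep(state string) (next string, prompt string) {
    	switch state {
    	case "":
    		return "warning", "do oauth, then show warning"
    	case "warning":
    		return "warning_done", "confirm the storage warning"
    	case "warning_done":
    		return "", "done"
    	}
    	return "", "unknown state: " + state
    }

    func main() {
    	for state := ""; ; {
    		next, prompt := configStep(state)
    		fmt.Println(prompt)
    		if next == "" {
    			break
    		}
    		state = next
    	}
    }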
@@ -109,7 +109,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
dirname := path.Dir(realpath)
|
||||
fs.Debugf(o.fs, "update [%s]", realpath)
|
||||
|
||||
err := o.fs.client.MkdirAll(dirname, 755)
|
||||
err := o.fs.client.MkdirAll(dirname, 0755)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/configfile"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
@@ -47,7 +47,7 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
|
||||
ts := httptest.NewServer(handler)
|
||||
|
||||
// Configure the remote
|
||||
config.LoadConfig(context.Background())
|
||||
configfile.Install()
|
||||
// fs.Config.LogLevel = fs.LogLevelDebug
|
||||
// fs.Config.DumpHeaders = true
|
||||
// fs.Config.DumpBodies = true
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"strings"
	"time"

@@ -56,11 +55,10 @@ func init() {
		Name: "hubic",
		Description: "Hubic",
		NewFs: NewFs,
		Config: func(ctx context.Context, name string, m configmap.Mapper) {
			err := oauthutil.Config(ctx, "hubic", name, m, oauthConfig, nil)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			}
		Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
			return oauthutil.ConfigOut("", &oauthutil.Options{
				OAuth2Config: oauthConfig,
			})
		},
		Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
	})

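Hubic is the simplest case of the new configuration protocol these commits introduce: instead of a Config callback that talks to the terminal (and could log.Fatalf the whole process away), a backend now receives an fs.ConfigIn and returns an *fs.ConfigOut describing the next step. A hypothetical minimal backend using the same helpers would look like this (the backend and option names are invented; the fs.Config* helpers are the ones used throughout this diff):

	// Config implements a two-step flow: ask for a token, store it, finish.
	func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
		switch config.State {
		case "":
			// First call: prompt, naming the state to resume in.
			return fs.ConfigInput("token_done", "config_token", "Paste your access token")
		case "token_done":
			// Second call: config.Result carries the user's answer.
			m.Set("token", config.Result)
			return nil, nil // nil, nil ends the flow
		}
		return nil, fmt.Errorf("unknown state %q", config.State)
	}
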
@@ -10,7 +10,6 @@ import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"math/rand"
	"net/http"
	"net/url"

@@ -49,37 +48,29 @@ const (
	rootURL = "https://jfs.jottacloud.com/jfs/"
	apiURL = "https://api.jottacloud.com/"
	baseURL = "https://www.jottacloud.com/"
	defaultTokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
	cachePrefix = "rclone-jcmd5-"
	configDevice = "device"
	configMountpoint = "mountpoint"
	configTokenURL = "tokenURL"
	configClientID = "client_id"
	configClientSecret = "client_secret"
	configUsername = "username"
	configVersion = 1

	v1tokenURL = "https://api.jottacloud.com/auth/v1/token"
	v1registerURL = "https://api.jottacloud.com/auth/v1/register"
	v1ClientID = "nibfk8biu12ju7hpqomr8b1e40"
	v1EncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
	v1configVersion = 0
	defaultTokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
	defaultClientID = "jottacli"

	legacyTokenURL = "https://api.jottacloud.com/auth/v1/token"
	legacyRegisterURL = "https://api.jottacloud.com/auth/v1/register"
	legacyClientID = "nibfk8biu12ju7hpqomr8b1e40"
	legacyEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
	legacyConfigVersion = 0

	teliaCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
	teliaCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
	teliaCloudClientID = "desktop"
)

var (
	// Description of how to auth for this app for a personal account
	oauthConfig = &oauth2.Config{
		Endpoint: oauth2.Endpoint{
			AuthURL: defaultTokenURL,
			TokenURL: defaultTokenURL,
		},
		RedirectURL: oauthutil.RedirectLocalhostURL,
	}
)

// Register with Fs
func init() {
	// needs to be done early so we can use oauth during config
@@ -87,42 +78,7 @@ func init() {
		Name: "jottacloud",
		Description: "Jottacloud",
		NewFs: NewFs,
		Config: func(ctx context.Context, name string, m configmap.Mapper) {
			refresh := false
			if version, ok := m.Get("configVersion"); ok {
				ver, err := strconv.Atoi(version)
				if err != nil {
					log.Fatalf("Failed to parse config version - corrupted config")
				}
				refresh = (ver != configVersion) && (ver != v1configVersion)
			}

			if refresh {
				fmt.Printf("Config outdated - refreshing\n")
			} else {
				tokenString, ok := m.Get("token")
				if ok && tokenString != "" {
					fmt.Printf("Already have a token - refresh?\n")
					if !config.Confirm(false) {
						return
					}
				}
			}

			fmt.Printf("Choose authentication type:\n" +
				"1: Standard authentication - use this if you're a normal Jottacloud user.\n" +
				"2: Legacy authentication - this is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n" +
				"3: Telia Cloud authentication - use this if you are using Telia Cloud.\n")

			switch config.ChooseNumber("Your choice", 1, 3) {
			case 1:
				v2config(ctx, name, m)
			case 2:
				v1config(ctx, name, m)
			case 3:
				teliaCloudConfig(ctx, name, m)
			}
		},
		Config: Config,
		Options: []fs.Option{{
			Name: "md5_memory_limit",
			Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",

@@ -157,6 +113,183 @@ func init() {
	})
}

// Config runs the backend configuration protocol
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
	switch config.State {
	case "":
		return fs.ConfigChooseFixed("auth_type_done", "config_type", `Authentication type`, []fs.OptionExample{{
			Value: "standard",
			Help: "Standard authentication - use this if you're a normal Jottacloud user.",
		}, {
			Value: "legacy",
			Help: "Legacy authentication - this is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
		}, {
			Value: "telia",
			Help: "Telia Cloud authentication - use this if you are using Telia Cloud.",
		}})
	case "auth_type_done":
		// Jump to next state according to config chosen
		return fs.ConfigGoto(config.Result)
	case "standard": // configure a jottacloud backend using the modern JottaCli token based authentication
		m.Set("configVersion", fmt.Sprint(configVersion))
		return fs.ConfigInput("standard_token", "config_login_token", "Personal login token.\n\nGenerate here: https://www.jottacloud.com/web/secure")
	case "standard_token":
		loginToken := config.Result
		m.Set(configClientID, defaultClientID)
		m.Set(configClientSecret, "")

		srv := rest.NewClient(fshttp.NewClient(ctx))
		token, tokenEndpoint, err := doTokenAuth(ctx, srv, loginToken)
		if err != nil {
			return nil, errors.Wrap(err, "failed to get oauth token")
		}
		m.Set(configTokenURL, tokenEndpoint)
		err = oauthutil.PutToken(name, m, &token, true)
		if err != nil {
			return nil, errors.Wrap(err, "error while saving token")
		}
		return fs.ConfigGoto("choose_device")
	case "legacy": // configure a jottacloud backend using legacy authentication
		m.Set("configVersion", fmt.Sprint(legacyConfigVersion))
		return fs.ConfigConfirm("legacy_api", false, "config_machine_specific", `Do you want to create a machine specific API key?

Rclone has its own Jottacloud API key which works fine as long as one
only uses rclone on a single machine. When you want to use rclone with
this account on more than one machine it's recommended to create a
machine specific API key. These keys can NOT be shared between
machines.`)
	case "legacy_api":
		srv := rest.NewClient(fshttp.NewClient(ctx))
		if config.Result == "true" {
			deviceRegistration, err := registerDevice(ctx, srv)
			if err != nil {
				return nil, errors.Wrap(err, "failed to register device")
			}
			m.Set(configClientID, deviceRegistration.ClientID)
			m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
			fs.Debugf(nil, "Got clientID %q and clientSecret %q", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
		}
		return fs.ConfigInput("legacy_username", "config_username", "Username (e-mail address)")
	case "legacy_username":
		m.Set(configUsername, config.Result)
		return fs.ConfigPassword("legacy_password", "config_password", "Password (only used in setup, will not be stored)")
	case "legacy_password":
		m.Set("password", config.Result)
		m.Set("auth_code", "")
		return fs.ConfigGoto("legacy_do_auth")
	case "legacy_auth_code":
		authCode := strings.Replace(config.Result, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
		m.Set("auth_code", authCode)
		return fs.ConfigGoto("legacy_do_auth")
	case "legacy_do_auth":
		username, _ := m.Get(configUsername)
		password, _ := m.Get("password")
		password = obscure.MustReveal(password)
		authCode, _ := m.Get("auth_code")

		srv := rest.NewClient(fshttp.NewClient(ctx))
		clientID, ok := m.Get(configClientID)
		if !ok {
			clientID = legacyClientID
		}
		clientSecret, ok := m.Get(configClientSecret)
		if !ok {
			clientSecret = legacyEncryptedClientSecret
		}

		oauthConfig := &oauth2.Config{
			Endpoint: oauth2.Endpoint{
				AuthURL: legacyTokenURL,
			},
			ClientID: clientID,
			ClientSecret: obscure.MustReveal(clientSecret),
		}
		token, err := doLegacyAuth(ctx, srv, oauthConfig, username, password, authCode)
		if err == errAuthCodeRequired {
			return fs.ConfigInput("legacy_auth_code", "config_auth_code", "Verification Code\nThis account uses 2 factor authentication; you will receive a verification code via SMS.")
		}
		m.Set("password", "")
		m.Set("auth_code", "")
		if err != nil {
			return nil, errors.Wrap(err, "failed to get oauth token")
		}
		err = oauthutil.PutToken(name, m, &token, true)
		if err != nil {
			return nil, errors.Wrap(err, "error while saving token")
		}
		return fs.ConfigGoto("choose_device")
	case "telia": // telia cloud config
		m.Set("configVersion", fmt.Sprint(configVersion))
		m.Set(configClientID, teliaCloudClientID)
		m.Set(configTokenURL, teliaCloudTokenURL)
		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
			OAuth2Config: &oauth2.Config{
				Endpoint: oauth2.Endpoint{
					AuthURL: teliaCloudAuthURL,
					TokenURL: teliaCloudTokenURL,
				},
				ClientID: teliaCloudClientID,
				Scopes: []string{"openid", "jotta-default", "offline_access"},
				RedirectURL: oauthutil.RedirectLocalhostURL,
			},
		})
	case "choose_device":
		return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", "Use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?")
	case "choose_device_query":
		if config.Result != "true" {
			m.Set(configDevice, "")
			m.Set(configMountpoint, "")
			return fs.ConfigGoto("end")
		}
		oAuthClient, _, err := getOAuthClient(ctx, name, m)
		if err != nil {
			return nil, err
		}
		srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
		apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)

		cust, err := getCustomerInfo(ctx, apiSrv)
		if err != nil {
			return nil, err
		}
		m.Set(configUsername, cust.Username)

		acc, err := getDriveInfo(ctx, srv, cust.Username)
		if err != nil {
			return nil, err
		}
		return fs.ConfigChoose("choose_device_result", "config_device", `Please select the device to use. Normally this will be Jotta`, len(acc.Devices), func(i int) (string, string) {
			return acc.Devices[i].Name, ""
		})
	case "choose_device_result":
		device := config.Result
		m.Set(configDevice, device)

		oAuthClient, _, err := getOAuthClient(ctx, name, m)
		if err != nil {
			return nil, err
		}
		srv := rest.NewClient(oAuthClient).SetRoot(rootURL)

		username, _ := m.Get(configUsername)
		dev, err := getDeviceInfo(ctx, srv, path.Join(username, device))
		if err != nil {
			return nil, err
		}
		return fs.ConfigChoose("choose_device_mountpoint", "config_mountpoint", `Please select the mountpoint to use. Normally this will be Archive.`, len(dev.MountPoints), func(i int) (string, string) {
			return dev.MountPoints[i].Name, ""
		})
	case "choose_device_mountpoint":
		mountpoint := config.Result
		m.Set(configMountpoint, mountpoint)
		return fs.ConfigGoto("end")
	case "end":
		// All the config flows end up here in case we need to carry on with something
		return nil, nil
	}
	return nil, fmt.Errorf("unknown state %q", config.State)
}

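The Config function above never blocks on user input; each case returns a question plus the name of the state to resume in, and config.Result carries the previous answer back. Assuming fs.ConfigOut exposes the next state and the question to ask (an assumption - only ConfigIn's State and Result fields are visible in this diff), a driver would walk it roughly like this:

	// Hypothetical driver loop - the fields on ConfigOut and the askUser
	// helper are assumptions for illustration only.
	in := fs.ConfigIn{State: "", Result: ""}
	for {
		out, err := Config(ctx, name, m, in)
		if err != nil {
			return err
		}
		if out == nil {
			return nil // flow finished
		}
		answer := askUser(out) // present the question, collect an answer
		in = fs.ConfigIn{State: out.State, Result: answer}
	}
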
// Options defines the configuration for this backend
type Options struct {
	Device string `config:"device"`

@@ -217,10 +350,21 @@ func (f *Fs) Features() *fs.Features {
	return f.features
}

// parsePath parses a box 'url'
func parsePath(path string) (root string) {
	root = strings.Trim(path, "/")
	return
// joinPath joins two path/url elements
//
// Does not perform clean on the result like path.Join does,
// which breaks urls by changing prefix "https://" into "https:/".
func joinPath(base string, rel string) string {
	if rel == "" {
		return base
	}
	if strings.HasSuffix(base, "/") {
		return base + strings.TrimPrefix(rel, "/")
	}
	if strings.HasPrefix(rel, "/") {
		return strings.TrimSuffix(base, "/") + rel
	}
	return base + "/" + rel
}

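joinPath exists because path.Join runs path.Clean on its result, and Clean collapses the double slash after a URL scheme. A quick demonstration:

	package main

	import (
		"fmt"
		"path"
	)

	func main() {
		// path.Clean collapses "//" after the scheme - exactly the
		// breakage the comment on joinPath describes.
		fmt.Println(path.Join("https://www.jottacloud.com/", "s/abc"))
		// Output: https:/www.jottacloud.com/s/abc
	}
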
// retryErrorCodes is a slice of error codes that we will retry

@@ -235,114 +379,13 @@ var retryErrorCodes = []int{

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

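fserrors.ContextError is what turns a cancelled context into a non-retryable error here. Its implementation is not part of this diff; a stand-in consistent with how it is called above (pointer to err, boolean result) would be:

	// contextError is a sketch of the contract shouldRetry relies on:
	// once ctx is done, report true and make err reflect the cancellation.
	func contextError(ctx context.Context, perr *error) bool {
		if ctxErr := ctx.Err(); ctxErr != nil {
			if *perr == nil {
				*perr = ctxErr
			}
			return true
		}
		return false
	}
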
func teliaCloudConfig(ctx context.Context, name string, m configmap.Mapper) {
	teliaCloudOauthConfig := &oauth2.Config{
		Endpoint: oauth2.Endpoint{
			AuthURL: teliaCloudAuthURL,
			TokenURL: teliaCloudTokenURL,
		},
		ClientID: teliaCloudClientID,
		Scopes: []string{"openid", "jotta-default", "offline_access"},
		RedirectURL: oauthutil.RedirectLocalhostURL,
	}

	err := oauthutil.Config(ctx, "jottacloud", name, m, teliaCloudOauthConfig, nil)
	if err != nil {
		log.Fatalf("Failed to configure token: %v", err)
		return
	}

	fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
	if config.Confirm(false) {
		oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, teliaCloudOauthConfig)
		if err != nil {
			log.Fatalf("Failed to load oAuthClient: %s", err)
		}

		srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
		apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)

		device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
		if err != nil {
			log.Fatalf("Failed to setup mountpoint: %s", err)
		}
		m.Set(configDevice, device)
		m.Set(configMountpoint, mountpoint)
	}

	m.Set("configVersion", strconv.Itoa(configVersion))
	m.Set(configClientID, teliaCloudClientID)
	m.Set(configTokenURL, teliaCloudTokenURL)
}

// v1config configures a jottacloud backend using legacy authentication
func v1config(ctx context.Context, name string, m configmap.Mapper) {
	srv := rest.NewClient(fshttp.NewClient(ctx))

	fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has its own Jottacloud API key which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
	if config.Confirm(false) {
		deviceRegistration, err := registerDevice(ctx, srv)
		if err != nil {
			log.Fatalf("Failed to register device: %v", err)
		}

		m.Set(configClientID, deviceRegistration.ClientID)
		m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
		fs.Debugf(nil, "Got clientID '%s' and clientSecret '%s'", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
	}

	clientID, ok := m.Get(configClientID)
	if !ok {
		clientID = v1ClientID
	}
	clientSecret, ok := m.Get(configClientSecret)
	if !ok {
		clientSecret = v1EncryptedClientSecret
	}
	oauthConfig.ClientID = clientID
	oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)

	oauthConfig.Endpoint.AuthURL = v1tokenURL
	oauthConfig.Endpoint.TokenURL = v1tokenURL

	fmt.Printf("Username> ")
	username := config.ReadLine()
	password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.")

	token, err := doAuthV1(ctx, srv, username, password)
	if err != nil {
		log.Fatalf("Failed to get oauth token: %s", err)
	}
	err = oauthutil.PutToken(name, m, &token, true)
	if err != nil {
		log.Fatalf("Error while saving token: %s", err)
	}

	fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
	if config.Confirm(false) {
		oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
		if err != nil {
			log.Fatalf("Failed to load oAuthClient: %s", err)
		}

		srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
		apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)

		device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
		if err != nil {
			log.Fatalf("Failed to setup mountpoint: %s", err)
		}
		m.Set(configDevice, device)
		m.Set(configMountpoint, mountpoint)
	}

	m.Set("configVersion", strconv.Itoa(v1configVersion))
}

// registerDevice registers a new device for use with the jottacloud API
func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegistrationResponse, err error) {
	// random generator to generate random device names

@@ -361,7 +404,7 @@ func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegis

	opts := rest.Opts{
		Method: "POST",
		RootURL: v1registerURL,
		RootURL: legacyRegisterURL,
		ContentType: "application/x-www-form-urlencoded",
		ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
		Parameters: values,

@@ -372,8 +415,13 @@ func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegis
	return deviceRegistration, err
}

// doAuthV1 runs the actual token request for V1 authentication
func doAuthV1(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
var errAuthCodeRequired = errors.New("auth code required")

// doLegacyAuth runs the actual token request for V1 authentication
//
// Call this first with blank authCode. If errAuthCodeRequired is
// returned then call it again with an authCode
func doLegacyAuth(ctx context.Context, srv *rest.Client, oauthConfig *oauth2.Config, username, password, authCode string) (token oauth2.Token, err error) {
	// prepare our token request with username and password
	values := url.Values{}
	values.Set("grant_type", "PASSWORD")

@@ -387,22 +435,19 @@ func doAuthV1(ctx context.Context, srv *rest.Client, username, password string)
		ContentType: "application/x-www-form-urlencoded",
		Parameters: values,
	}
	if authCode != "" {
		opts.ExtraHeaders = make(map[string]string)
		opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
	}

	// do the first request
	var jsonToken api.TokenJSON
	resp, err := srv.CallJSON(ctx, &opts, nil, &jsonToken)
	if err != nil {
	if err != nil && authCode == "" {
		// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
		if resp != nil {
			if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
				fmt.Printf("This account uses 2 factor authentication; you will receive a verification code via SMS.\n")
				fmt.Printf("Enter verification code> ")
				authCode := config.ReadLine()

				authCode = strings.Replace(authCode, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
				opts.ExtraHeaders = make(map[string]string)
				opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
				_, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
				return token, errAuthCodeRequired
			}
		}
	}

@@ -414,51 +459,11 @@ func doAuthV1(ctx context.Context, srv *rest.Client, username, password string)
	return token, err
}

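The comment on doLegacyAuth describes a two-pass protocol, which is what the legacy_password / legacy_auth_code / legacy_do_auth states above implement between them. In outline (a sketch using the variables of the surrounding code):

	// First pass: no OTP code. If the account has SMS 2FA, the server
	// rejects the request and doLegacyAuth surfaces errAuthCodeRequired,
	// so the state machine prompts for the code and runs the same state
	// again with it.
	token, err := doLegacyAuth(ctx, srv, oauthConfig, username, password, "")
	if err == errAuthCodeRequired {
		// ...prompt the user (the "legacy_auth_code" state)...
		token, err = doLegacyAuth(ctx, srv, oauthConfig, username, password, authCode)
	}
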
// v2config configures a jottacloud backend using the modern JottaCli token based authentication
func v2config(ctx context.Context, name string, m configmap.Mapper) {
	srv := rest.NewClient(fshttp.NewClient(ctx))

	fmt.Printf("Generate a personal login token here: https://www.jottacloud.com/web/secure\n")
	fmt.Printf("Login Token> ")
	loginToken := config.ReadLine()

	m.Set(configClientID, "jottacli")
	m.Set(configClientSecret, "")

	token, err := doAuthV2(ctx, srv, loginToken, m)
	if err != nil {
		log.Fatalf("Failed to get oauth token: %s", err)
	}
	err = oauthutil.PutToken(name, m, &token, true)
	if err != nil {
		log.Fatalf("Error while saving token: %s", err)
	}

	fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
	if config.Confirm(false) {
		oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
		if err != nil {
			log.Fatalf("Failed to load oAuthClient: %s", err)
		}

		srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
		apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
		device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
		if err != nil {
			log.Fatalf("Failed to setup mountpoint: %s", err)
		}
		m.Set(configDevice, device)
		m.Set(configMountpoint, mountpoint)
	}

	m.Set("configVersion", strconv.Itoa(configVersion))
}

// doAuthV2 runs the actual token request for V2 authentication
func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m configmap.Mapper) (token oauth2.Token, err error) {
// doTokenAuth runs the actual token request for V2 authentication
func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 string) (token oauth2.Token, tokenEndpoint string, err error) {
	loginTokenBytes, err := base64.RawURLEncoding.DecodeString(loginTokenBase64)
	if err != nil {
		return token, err
		return token, "", err
	}

	// decode login token

@@ -466,7 +471,7 @@ func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m
	decoder := json.NewDecoder(bytes.NewReader(loginTokenBytes))
	err = decoder.Decode(&loginToken)
	if err != nil {
		return token, err
		return token, "", err
	}

	// retrieve endpoint urls

@@ -475,19 +480,14 @@ func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m
		RootURL: loginToken.WellKnownLink,
	}
	var wellKnown api.WellKnown
	_, err = srv.CallJSON(ctx, &opts, nil, &wellKnown)
	_, err = apiSrv.CallJSON(ctx, &opts, nil, &wellKnown)
	if err != nil {
		return token, err
		return token, "", err
	}

	// save the tokenurl
	oauthConfig.Endpoint.AuthURL = wellKnown.TokenEndpoint
	oauthConfig.Endpoint.TokenURL = wellKnown.TokenEndpoint
	m.Set(configTokenURL, wellKnown.TokenEndpoint)

	// prepare our token request with username and password
	values := url.Values{}
	values.Set("client_id", "jottacli")
	values.Set("client_id", defaultClientID)
	values.Set("grant_type", "password")
	values.Set("password", loginToken.AuthToken)
	values.Set("scope", "offline_access+openid")

@@ -495,68 +495,33 @@ func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m
	values.Encode()
	opts = rest.Opts{
		Method: "POST",
		RootURL: oauthConfig.Endpoint.AuthURL,
		RootURL: wellKnown.TokenEndpoint,
		ContentType: "application/x-www-form-urlencoded",
		Body: strings.NewReader(values.Encode()),
	}

	// do the first request
	var jsonToken api.TokenJSON
	_, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
	_, err = apiSrv.CallJSON(ctx, &opts, nil, &jsonToken)
	if err != nil {
		return token, err
		return token, "", err
	}

	token.AccessToken = jsonToken.AccessToken
	token.RefreshToken = jsonToken.RefreshToken
	token.TokenType = jsonToken.TokenType
	token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)
	return token, err
}

// setupMountpoint sets up a custom device and mountpoint if desired by the user
func setupMountpoint(ctx context.Context, srv *rest.Client, apiSrv *rest.Client) (device, mountpoint string, err error) {
	cust, err := getCustomerInfo(ctx, apiSrv)
	if err != nil {
		return "", "", err
	}

	acc, err := getDriveInfo(ctx, srv, cust.Username)
	if err != nil {
		return "", "", err
	}
	var deviceNames []string
	for i := range acc.Devices {
		deviceNames = append(deviceNames, acc.Devices[i].Name)
	}
	fmt.Printf("Please select the device to use. Normally this will be Jotta\n")
	device = config.Choose("Devices", deviceNames, nil, false)

	dev, err := getDeviceInfo(ctx, srv, path.Join(cust.Username, device))
	if err != nil {
		return "", "", err
	}
	if len(dev.MountPoints) == 0 {
		return "", "", errors.New("no mountpoints for selected device")
	}
	var mountpointNames []string
	for i := range dev.MountPoints {
		mountpointNames = append(mountpointNames, dev.MountPoints[i].Name)
	}
	fmt.Printf("Please select the mountpoint to use. Normally this will be Archive\n")
	mountpoint = config.Choose("Mountpoints", mountpointNames, nil, false)

	return device, mountpoint, err
	return token, wellKnown.TokenEndpoint, err
}

// getCustomerInfo queries general information about the account
func getCustomerInfo(ctx context.Context, srv *rest.Client) (info *api.CustomerInfo, err error) {
func getCustomerInfo(ctx context.Context, apiSrv *rest.Client) (info *api.CustomerInfo, err error) {
	opts := rest.Opts{
		Method: "GET",
		Path: "account/v1/customer",
	}

	_, err = srv.CallJSON(ctx, &opts, nil, &info)
	_, err = apiSrv.CallJSON(ctx, &opts, nil, &info)
	if err != nil {
		return nil, errors.Wrap(err, "couldn't get customer info")
	}

@@ -615,7 +580,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Jo
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})

	if apiErr, ok := err.(*api.Error); ok {

@@ -673,7 +638,7 @@ func (f *Fs) filePath(file string) string {
// This filter catches all refresh requests, reads the body,
// changes the case and then sends it on
func grantTypeFilter(req *http.Request) {
	if v1tokenURL == req.URL.String() {
	if legacyTokenURL == req.URL.String() {
		// read the entire body
		refreshBody, err := ioutil.ReadAll(req.Body)
		if err != nil {

@@ -689,53 +654,50 @@ func grantTypeFilter(req *http.Request) {
	}
}

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuthClient *http.Client, ts *oauthutil.TokenSource, err error) {
	// Check config version
	var ver int
	version, ok := m.Get("configVersion")
	if ok {
		ver, err = strconv.Atoi(version)
		if err != nil {
			return nil, errors.New("Failed to parse config version")
			return nil, nil, errors.New("Failed to parse config version")
		}
		ok = (ver == configVersion) || (ver == v1configVersion)
		ok = (ver == configVersion) || (ver == legacyConfigVersion)
	}
	if !ok {
		return nil, errors.New("Outdated config - please reconfigure this backend")
		return nil, nil, errors.New("Outdated config - please reconfigure this backend")
	}

	baseClient := fshttp.NewClient(ctx)

	oauthConfig := &oauth2.Config{
		Endpoint: oauth2.Endpoint{
			AuthURL: defaultTokenURL,
			TokenURL: defaultTokenURL,
		},
	}
	if ver == configVersion {
		oauthConfig.ClientID = "jottacli"
		oauthConfig.ClientID = defaultClientID
		// if custom endpoints are set use them else stick with defaults
		if tokenURL, ok := m.Get(configTokenURL); ok {
			oauthConfig.Endpoint.TokenURL = tokenURL
			// jottacloud is weird. we need to use the tokenURL as authURL
			oauthConfig.Endpoint.AuthURL = tokenURL
		}
	} else if ver == v1configVersion {
	} else if ver == legacyConfigVersion {
		clientID, ok := m.Get(configClientID)
		if !ok {
			clientID = v1ClientID
			clientID = legacyClientID
		}
		clientSecret, ok := m.Get(configClientSecret)
		if !ok {
			clientSecret = v1EncryptedClientSecret
			clientSecret = legacyEncryptedClientSecret
		}
		oauthConfig.ClientID = clientID
		oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)

		oauthConfig.Endpoint.TokenURL = v1tokenURL
		oauthConfig.Endpoint.AuthURL = v1tokenURL
		oauthConfig.Endpoint.TokenURL = legacyTokenURL
		oauthConfig.Endpoint.AuthURL = legacyTokenURL

		// add the request filter to fix token refresh
		if do, ok := baseClient.Transport.(interface {

@@ -748,13 +710,29 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	}

	// Create OAuth Client
	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
	oAuthClient, ts, err = oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
	if err != nil {
		return nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
		return nil, nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
	}
	return oAuthClient, ts, nil
}

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	oAuthClient, ts, err := getOAuthClient(ctx, name, m)
	if err != nil {
		return nil, err
	}

	rootIsDir := strings.HasSuffix(root, "/")
	root = parsePath(root)
	root = strings.Trim(root, "/")

	f := &Fs{
		name: name,
@@ -854,7 +832,7 @@ func (f *Fs) CreateDir(ctx context.Context, path string) (jf *api.JottaFolder, e

	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallXML(ctx, &opts, nil, &jf)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		//fmt.Printf("...Error %v\n", err)

@@ -883,7 +861,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
	var result api.JottaFolder
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})

	if err != nil {

@@ -995,7 +973,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
	var result api.JottaFolder // Could be JottaFileDirList, but JottaFolder is close enough
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		if apiErr, ok := err.(*api.Error); ok {

@@ -1101,7 +1079,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.Call(ctx, &opts)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return errors.Wrap(err, "couldn't purge directory")

@@ -1140,7 +1118,7 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err

@@ -1268,7 +1246,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
	var result api.JottaFile
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})

	if apiErr, ok := err.(*api.Error); ok {

@@ -1292,8 +1270,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
	if result.PublicSharePath == "" {
		return "", errors.New("couldn't create public link - no link path received")
	}
	link = path.Join(baseURL, result.PublicSharePath)
	return link, nil
	return joinPath(baseURL, result.PublicSharePath), nil
}

// About gets quota information

@@ -1446,7 +1423,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(ctx, &opts)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err

@@ -1559,7 +1536,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	var response api.AllocateFileResponse
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		resp, err = o.fs.apiSrv.CallJSON(ctx, &opts, &request, &response)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return err

@@ -1624,7 +1601,7 @@ func (o *Object) Remove(ctx context.Context) error {

	return o.fs.pacer.Call(func() (bool, error) {
		resp, err := o.fs.srv.CallXML(ctx, &opts, nil, nil)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
}

@@ -534,7 +534,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
	return nil
}

// About reports space usage (with a MB precision)
// About reports space usage (with a MiB precision)
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	mount, err := f.client.MountsDetails(f.mountID)
	if err != nil {

@@ -27,6 +27,7 @@ import (
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/file"
	"github.com/rclone/rclone/lib/readers"
	"golang.org/x/text/unicode/norm"
)

// Constants

@@ -42,8 +43,9 @@ func init() {
		NewFs: NewFs,
		CommandHelp: commandHelp,
		Options: []fs.Option{{
			Name: "nounc",
			Help: "Disable UNC (long path names) conversion on Windows",
			Name: "nounc",
			Help: "Disable UNC (long path names) conversion on Windows",
			Advanced: runtime.GOOS != "windows",
			Examples: []fs.OptionExample{{
				Value: "true",
				Help: "Disables long file names",

@@ -72,25 +74,34 @@ points, as you explicitly acknowledge that they should be skipped.`,
			Advanced: true,
		}, {
			Name: "zero_size_links",
			Help: `Assume the Stat size of links is zero (and read them instead)
			Help: `Assume the Stat size of links is zero (and read them instead) (Deprecated)

On some virtual filesystems (such as LucidLink), reading a link size via a Stat call always returns 0.
However, on unix it reads as the length of the text in the link. This may cause errors like this when
syncing:
Rclone used to use the Stat size of links as the link size, but this fails in quite a few places

    Failed to copy: corrupted on transfer: sizes differ 0 vs 13
- Windows
- On some virtual filesystems (such as LucidLink)
- Android

Setting this flag causes rclone to read the link and use that as the size of the link
instead of 0 which in most cases fixes the problem.`,
So rclone now always reads the link
`,
			Default: false,
			Advanced: true,
		}, {
			Name: "no_unicode_normalization",
			Help: `Don't apply unicode normalization to paths and filenames (Deprecated)
			Name: "unicode_normalization",
			Help: `Apply unicode NFC normalization to paths and filenames

This flag is deprecated now. Rclone no longer normalizes unicode file
names, but it compares them with unicode normalization in the sync
routine instead.`,
This flag can be used to normalize file names read from the local
filesystem into unicode NFC form.

Rclone does not normally touch the encoding of file names it reads from
the file system.

This can be useful when using macOS as it normally provides decomposed (NFD)
unicode which in some languages (e.g. Korean) doesn't display properly on
some OSes.

Note that rclone compares filenames with unicode normalization in the sync
routine so this flag shouldn't normally be used.`,
			Default: false,
			Advanced: true,
		}, {

@@ -195,8 +206,7 @@ type Options struct {
	FollowSymlinks bool `config:"copy_links"`
	TranslateSymlinks bool `config:"links"`
	SkipSymlinks bool `config:"skip_links"`
	ZeroSizeLinks bool `config:"zero_size_links"`
	NoUTFNorm bool `config:"no_unicode_normalization"`
	UTFNorm bool `config:"unicode_normalization"`
	NoCheckUpdated bool `config:"no_check_updated"`
	NoUNC bool `config:"nounc"`
	OneFileSystem bool `config:"one_file_system"`

@@ -255,10 +265,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		return nil, errLinksAndCopyLinks
	}

	if opt.NoUTFNorm {
		fs.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
	}

	f := &Fs{
		name: name,
		opt: *opt,

@@ -521,6 +527,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}

func (f *Fs) cleanRemote(dir, filename string) (remote string) {
	if f.opt.UTFNorm {
		filename = norm.NFC.String(filename)
	}
	remote = path.Join(dir, f.opt.Enc.ToStandardName(filename))

	if !utf8.ValidString(filename) {
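cleanRemote is where the new unicode_normalization option takes effect: file names read from disk are recomposed to NFC before rclone uses them. The macOS/Korean case from the help text looks like this (a standalone demo; it needs the golang.org/x/text module):

	package main

	import (
		"fmt"
		"golang.org/x/text/unicode/norm"
	)

	func main() {
		// macOS typically stores names decomposed (NFD): the syllable 한
		// comes back as three jamo code points.
		nfd := "\u1112\u1161\u11ab"
		fmt.Println(norm.NFC.String(nfd) == "\ud55c") // true: recomposed to 한
	}
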
@@ -1144,6 +1153,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		err = file.PreAllocate(src.Size(), f)
		if err != nil {
			fs.Debugf(o, "Failed to pre-allocate: %v", err)
			if err == file.ErrDiskFull {
				_ = f.Close()
				return err
			}
		}
	}
	out = f

@@ -1262,9 +1275,13 @@ func (o *Object) setMetadata(info os.FileInfo) {
	o.modTime = info.ModTime()
	o.mode = info.Mode()
	o.fs.objectMetaMu.Unlock()
	// On Windows links read as 0 size so set the correct size here
	// Optionally, users can turn this feature on with the zero_size_links flag
	if (runtime.GOOS == "windows" || o.fs.opt.ZeroSizeLinks) && o.translatedLink {
	// Read the size of the link.
	//
	// The value in info.Size() is not always correct
	// - Windows links read as 0 size
	// - Some virtual filesystems (such as LucidLink) links read as 0 size
	// - Android - on some versions the links are larger than readlink suggests
	if o.translatedLink {
		linkdst, err := os.Readlink(o.path)
		if err != nil {
			fs.Errorf(o, "Failed to read link size: %v", err)

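With the zero_size_links flag gone, setMetadata now sizes every translated symlink by reading its target, because the three platforms listed in the new comment all report misleading Stat sizes for links. Condensed (the success branch is cut off above, so the assignment here is a presumed sketch, not a quote):

	if linkdst, err := os.Readlink(o.path); err == nil {
		// A translated link is stored as the text of its target, so the
		// length of that text is the real size, whatever Stat claimed.
		o.size = int64(len(linkdst))
	}
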
@@ -6,8 +8,8 @@ import (
	"bufio"
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"log"
	"time"

	"github.com/pkg/errors"

@@ -48,7 +48,7 @@ func (w *BinWriter) Reader() io.Reader {
// WritePu16 writes a short as unsigned varint
func (w *BinWriter) WritePu16(val int) {
	if val < 0 || val > 65535 {
		log.Fatalf("Invalid UInt16 %v", val)
		panic(fmt.Sprintf("Invalid UInt16 %v", val))
	}
	w.WritePu64(int64(val))
}

@@ -56,7 +56,7 @@ func (w *BinWriter) WritePu16(val int) {
// WritePu32 writes a signed long as unsigned varint
func (w *BinWriter) WritePu32(val int64) {
	if val < 0 || val > 4294967295 {
		log.Fatalf("Invalid UInt32 %v", val)
		panic(fmt.Sprintf("Invalid UInt32 %v", val))
	}
	w.WritePu64(val)
}

@@ -64,7 +64,7 @@ func (w *BinWriter) WritePu32(val int64) {
// WritePu64 writes an unsigned (actually, signed) long as unsigned varint
func (w *BinWriter) WritePu64(val int64) {
	if val < 0 {
		log.Fatalf("Invalid UInt64 %v", val)
		panic(fmt.Sprintf("Invalid UInt64 %v", val))
	}
	w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
}

@@ -123,7 +123,7 @@ func (r *BinReader) check(err error) bool {
		r.err = err
	}
	if err != io.EOF {
		log.Fatalf("Error parsing response: %v", err)
		panic(fmt.Sprintf("Error parsing response: %v", err))
	}
	return false
}

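Two things are worth noting in this binary writer: out-of-range values now panic instead of calling log.Fatalf (library code should not exit the whole process, and a panic can at least be recovered by the caller), while the encoding itself is unchanged - binary.PutUvarint's base-128 varint, seven payload bits per byte:

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	func main() {
		var buf [binary.MaxVarintLen64]byte
		n := binary.PutUvarint(buf[:], 300)
		fmt.Println(buf[:n]) // [172 2]: 300 = 0b10_0101100, low 7 bits first
	}
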
@@ -80,7 +80,7 @@ var oauthConfig = &oauth2.Config{

// Register with Fs
func init() {
	MrHashType = hash.RegisterHash("MailruHash", 40, mrhash.New)
	MrHashType = hash.RegisterHash("mailru", "MailruHash", 40, mrhash.New)
	fs.Register(&fs.RegInfo{
		Name: "mailru",
		Description: "Mail.ru Cloud",

@@ -234,7 +234,10 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this response and err
// deserve to be retried. It returns the err as a convenience.
// Retries password authorization (once) in a special case of access denied.
func shouldRetry(res *http.Response, err error, f *Fs, opts *rest.Opts) (bool, error) {
func shouldRetry(ctx context.Context, res *http.Response, err error, f *Fs, opts *rest.Opts) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	if res != nil && res.StatusCode == 403 && f.opt.Password != "" && !f.passFailed {
		reAuthErr := f.reAuthorize(opts, err)
		return reAuthErr == nil, err // return an original error

@@ -600,7 +603,7 @@ func (f *Fs) readItemMetaData(ctx context.Context, path string) (entry fs.DirEnt
	var info api.ItemInfoResponse
	err = f.pacer.Call(func() (bool, error) {
		res, err := f.srv.CallJSON(ctx, &opts, nil, &info)
		return shouldRetry(res, err, f, &opts)
		return shouldRetry(ctx, res, err, f, &opts)
	})

	if err != nil {

@@ -736,7 +739,7 @@ func (f *Fs) listM1(ctx context.Context, dirPath string, offset int, limit int)
	)
	err = f.pacer.Call(func() (bool, error) {
		res, err = f.srv.CallJSON(ctx, &opts, nil, &info)
		return shouldRetry(res, err, f, &opts)
		return shouldRetry(ctx, res, err, f, &opts)
	})

	if err != nil {

@@ -800,7 +803,7 @@ func (f *Fs) listBin(ctx context.Context, dirPath string, depth int) (entries fs
	var res *http.Response
	err = f.pacer.Call(func() (bool, error) {
		res, err = f.srv.Call(ctx, &opts)
		return shouldRetry(res, err, f, &opts)
		return shouldRetry(ctx, res, err, f, &opts)
	})
	if err != nil {
		closeBody(res)

@@ -1073,7 +1076,7 @@ func (f *Fs) CreateDir(ctx context.Context, path string) error {
	var res *http.Response
	err = f.pacer.Call(func() (bool, error) {
		res, err = f.srv.Call(ctx, &opts)
		return shouldRetry(res, err, f, &opts)
		return shouldRetry(ctx, res, err, f, &opts)
	})
	if err != nil {
		closeBody(res)

@@ -1216,7 +1219,7 @@ func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) error {
	var response api.GenericResponse
	err = f.pacer.Call(func() (bool, error) {
		res, err := f.srv.CallJSON(ctx, &opts, nil, &response)
		return shouldRetry(res, err, f, &opts)
		return shouldRetry(ctx, res, err, f, &opts)
	})

	switch {

@@ -1288,7 +1291,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
	var response api.GenericBodyResponse
	err = f.pacer.Call(func() (bool, error) {
		res, err := f.srv.CallJSON(ctx, &opts, nil, &response)
		return shouldRetry(res, err, f, &opts)
		return shouldRetry(ctx, res, err, f, &opts)
	})

	if err != nil {

@@ -1392,7 +1395,7 @@ func (f *Fs) moveItemBin(ctx context.Context, srcPath, dstPath, opName string) e
	var res *http.Response
	err = f.pacer.Call(func() (bool, error) {
		res, err = f.srv.Call(ctx, &opts)
		return shouldRetry(res, err, f, &opts)
		return shouldRetry(ctx, res, err, f, &opts)
	})
	if err != nil {
		closeBody(res)

@@ -1483,7 +1486,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
	var response api.GenericBodyResponse
	err = f.pacer.Call(func() (bool, error) {
		res, err := f.srv.CallJSON(ctx, &opts, nil, &response)
		return shouldRetry(res, err, f, &opts)
		return shouldRetry(ctx, res, err, f, &opts)
	})

	if err == nil && response.Body != "" {

@@ -1524,7 +1527,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
	var response api.CleanupResponse
	err = f.pacer.Call(func() (bool, error) {
		res, err := f.srv.CallJSON(ctx, &opts, nil, &response)
		return shouldRetry(res, err, f, &opts)
		return shouldRetry(ctx, res, err, f, &opts)
	})
	if err != nil {
		return err

@@ -1557,7 +1560,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	var info api.UserInfoResponse
	err = f.pacer.Call(func() (bool, error) {
		res, err := f.srv.CallJSON(ctx, &opts, nil, &info)
		return shouldRetry(res, err, f, &opts)
		return shouldRetry(ctx, res, err, f, &opts)
	})
	if err != nil {
		return nil, err

@@ -2076,7 +2079,7 @@ func (o *Object) addFileMetaData(ctx context.Context, overwrite bool) error {
	var res *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		res, err = o.fs.srv.Call(ctx, &opts)
		return shouldRetry(res, err, o.fs, &opts)
		return shouldRetry(ctx, res, err, o.fs, &opts)
	})
	if err != nil {
		closeBody(res)

@@ -2172,7 +2175,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
		}
		opts.RootURL = server
		res, err = o.fs.srv.Call(ctx, &opts)
		return shouldRetry(res, err, o.fs, &opts)
		return shouldRetry(ctx, res, err, o.fs, &opts)
	})
	if err != nil {
		if res != nil && res.Body != nil {

@@ -30,6 +30,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
@@ -158,7 +159,10 @@ func parsePath(path string) (root string) {
|
||||
|
||||
// shouldRetry returns a boolean as to whether this err deserves to be
|
||||
// retried. It returns the err as a convenience
|
||||
func shouldRetry(err error) (bool, error) {
|
||||
func shouldRetry(ctx context.Context, err error) (bool, error) {
|
||||
if fserrors.ContextError(ctx, &err) {
|
||||
return false, err
|
||||
}
|
||||
// Let the mega library handle the low level retries
|
||||
return false, err
|
||||
/*
|
||||
@@ -171,8 +175,8 @@ func shouldRetry(err error) (bool, error) {
|
||||
}
|
||||
|
||||
// readMetaDataForPath reads the metadata from the path
|
||||
func (f *Fs) readMetaDataForPath(remote string) (info *mega.Node, err error) {
|
||||
rootNode, err := f.findRoot(false)
|
||||
func (f *Fs) readMetaDataForPath(ctx context.Context, remote string) (info *mega.Node, err error) {
|
||||
rootNode, err := f.findRoot(ctx, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -237,7 +241,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}).Fill(ctx, f)
|
||||
|
||||
// Find the root node and check if it is a file or not
|
||||
_, err = f.findRoot(false)
|
||||
_, err = f.findRoot(ctx, false)
|
||||
switch err {
|
||||
case nil:
|
||||
// root node found and is a directory
|
||||
@@ -307,8 +311,8 @@ func (f *Fs) findObject(rootNode *mega.Node, file string) (node *mega.Node, err
|
||||
// lookupDir looks up the node for the directory of the name given
|
||||
//
|
||||
// if create is true it tries to create the root directory if not found
|
||||
func (f *Fs) lookupDir(dir string) (*mega.Node, error) {
|
||||
rootNode, err := f.findRoot(false)
|
||||
func (f *Fs) lookupDir(ctx context.Context, dir string) (*mega.Node, error) {
|
||||
rootNode, err := f.findRoot(ctx, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -316,15 +320,15 @@ func (f *Fs) lookupDir(dir string) (*mega.Node, error) {
|
||||
}
|
||||
|
||||
// lookupParentDir finds the parent node for the remote passed in
|
||||
func (f *Fs) lookupParentDir(remote string) (dirNode *mega.Node, leaf string, err error) {
|
||||
func (f *Fs) lookupParentDir(ctx context.Context, remote string) (dirNode *mega.Node, leaf string, err error) {
|
||||
parent, leaf := path.Split(remote)
|
||||
dirNode, err = f.lookupDir(parent)
|
||||
dirNode, err = f.lookupDir(ctx, parent)
|
||||
return dirNode, leaf, err
|
||||
}
|
||||
|
||||
// mkdir makes the directory and any parent directories for the
|
||||
// directory of the name given
|
||||
func (f *Fs) mkdir(rootNode *mega.Node, dir string) (node *mega.Node, err error) {
|
||||
func (f *Fs) mkdir(ctx context.Context, rootNode *mega.Node, dir string) (node *mega.Node, err error) {
|
||||
f.mkdirMu.Lock()
|
||||
defer f.mkdirMu.Unlock()
|
||||
|
||||
@@ -358,7 +362,7 @@ func (f *Fs) mkdir(rootNode *mega.Node, dir string) (node *mega.Node, err error)
|
||||
// create directory called name in node
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
node, err = f.srv.CreateDir(name, node)
|
||||
return shouldRetry(err)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "mkdir create node failed")
|
||||
@@ -368,20 +372,20 @@ func (f *Fs) mkdir(rootNode *mega.Node, dir string) (node *mega.Node, err error)
|
||||
}
|
||||
|
||||
// mkdirParent creates the parent directory of remote
|
||||
func (f *Fs) mkdirParent(remote string) (dirNode *mega.Node, leaf string, err error) {
|
||||
rootNode, err := f.findRoot(true)
|
||||
func (f *Fs) mkdirParent(ctx context.Context, remote string) (dirNode *mega.Node, leaf string, err error) {
|
||||
rootNode, err := f.findRoot(ctx, true)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
parent, leaf := path.Split(remote)
|
||||
dirNode, err = f.mkdir(rootNode, parent)
|
||||
dirNode, err = f.mkdir(ctx, rootNode, parent)
|
||||
return dirNode, leaf, err
|
||||
}
|
||||
|
||||
// findRoot looks up the root directory node and returns it.
|
||||
//
|
||||
// if create is true it tries to create the root directory if not found
|
||||
func (f *Fs) findRoot(create bool) (*mega.Node, error) {
|
||||
func (f *Fs) findRoot(ctx context.Context, create bool) (*mega.Node, error) {
|
||||
f.rootNodeMu.Lock()
|
||||
defer f.rootNodeMu.Unlock()
|
||||
|
||||
@@ -403,7 +407,7 @@ func (f *Fs) findRoot(create bool) (*mega.Node, error) {
|
||||
}
|
||||
|
||||
//..not found so create the root directory
|
||||
f._rootNode, err = f.mkdir(absRoot, f.root)
|
||||
f._rootNode, err = f.mkdir(ctx, absRoot, f.root)
|
||||
return f._rootNode, err
|
||||
}
|
||||
|
||||
@@ -433,7 +437,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
|
||||
fs.Debugf(f, "Deleting trash %q", f.opt.Enc.ToStandardName(item.GetName()))
|
||||
deleteErr := f.pacer.Call(func() (bool, error) {
|
||||
err := f.srv.Delete(item, true)
|
||||
return shouldRetry(err)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if deleteErr != nil {
|
||||
err = deleteErr
|
||||
@@ -447,7 +451,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
|
||||
// Return an Object from a path
|
||||
//
|
||||
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) newObjectWithInfo(remote string, info *mega.Node) (fs.Object, error) {
|
||||
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *mega.Node) (fs.Object, error) {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
@@ -457,7 +461,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *mega.Node) (fs.Object, error
|
||||
// Set info
|
||||
err = o.setMetaData(info)
|
||||
} else {
|
||||
err = o.readMetaData() // reads info and meta, returning an error
|
||||
err = o.readMetaData(ctx) // reads info and meta, returning an error
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -468,7 +472,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *mega.Node) (fs.Object, error
|
||||
// NewObject finds the Object at remote. If it can't be found
|
||||
// it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
return f.newObjectWithInfo(remote, nil)
|
||||
return f.newObjectWithInfo(ctx, remote, nil)
|
||||
}
|
||||
|
||||
// list the objects into the function supplied
|
||||
@@ -506,7 +510,7 @@ func (f *Fs) list(ctx context.Context, dir *mega.Node, fn listFn) (found bool, e
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
dirNode, err := f.lookupDir(dir)
|
||||
dirNode, err := f.lookupDir(ctx, dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -518,7 +522,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 			d := fs.NewDir(remote, info.GetTimeStamp()).SetID(info.GetHash())
 			entries = append(entries, d)
 		case mega.FILE:
-			o, err := f.newObjectWithInfo(remote, info)
+			o, err := f.newObjectWithInfo(ctx, remote, info)
 			if err != nil {
 				iErr = err
 				return true
@@ -542,8 +546,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 // Returns the dirNode, object, leaf and error
 //
 // Used to create new objects
-func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, dirNode *mega.Node, leaf string, err error) {
-	dirNode, leaf, err = f.mkdirParent(remote)
+func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, dirNode *mega.Node, leaf string, err error) {
+	dirNode, leaf, err = f.mkdirParent(ctx, remote)
 	if err != nil {
 		return nil, nil, leaf, err
 	}
@@ -565,7 +569,7 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
 // This will create a duplicate if we upload a new file without
 // checking to see if there is one already - use Put() for that.
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
+	existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil)
 	switch err {
 	case nil:
 		return existingObj, existingObj.Update(ctx, in, src, options...)
@@ -591,7 +595,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
 	size := src.Size()
 	modTime := src.ModTime(ctx)

-	o, _, _, err := f.createObject(remote, modTime, size)
+	o, _, _, err := f.createObject(ctx, remote, modTime, size)
 	if err != nil {
 		return nil, err
 	}
@@ -600,30 +604,30 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,

 // Mkdir creates the directory if it doesn't exist
 func (f *Fs) Mkdir(ctx context.Context, dir string) error {
-	rootNode, err := f.findRoot(true)
+	rootNode, err := f.findRoot(ctx, true)
 	if err != nil {
 		return err
 	}
-	_, err = f.mkdir(rootNode, dir)
+	_, err = f.mkdir(ctx, rootNode, dir)
 	return errors.Wrap(err, "Mkdir failed")
 }

 // deleteNode removes a file or directory, observing useTrash
-func (f *Fs) deleteNode(node *mega.Node) (err error) {
+func (f *Fs) deleteNode(ctx context.Context, node *mega.Node) (err error) {
 	err = f.pacer.Call(func() (bool, error) {
 		err = f.srv.Delete(node, f.opt.HardDelete)
-		return shouldRetry(err)
+		return shouldRetry(ctx, err)
 	})
 	return err
 }

 // purgeCheck removes the directory dir, if check is set then it
 // refuses to do so if it has anything in
-func (f *Fs) purgeCheck(dir string, check bool) error {
+func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 	f.mkdirMu.Lock()
 	defer f.mkdirMu.Unlock()

-	rootNode, err := f.findRoot(false)
+	rootNode, err := f.findRoot(ctx, false)
 	if err != nil {
 		return err
 	}
@@ -644,7 +648,7 @@ func (f *Fs) purgeCheck(dir string, check bool) error {

 	waitEvent := f.srv.WaitEventsStart()

-	err = f.deleteNode(dirNode)
+	err = f.deleteNode(ctx, dirNode)
 	if err != nil {
 		return errors.Wrap(err, "delete directory node failed")
 	}
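
mega's own shouldRetry body falls outside these hunks, but every call site now passes the context through, and the HTTP backends later in this compare define the pattern explicitly: check for context cancellation before anything else so the pacer stops retrying an aborted operation. A plausible sketch for an API with no *http.Response, by analogy with the onedrive/opendrive/pcloud versions shown further down:

    // Sketch of a context-aware shouldRetry for mega. Assumes
    // github.com/rclone/rclone/fs/fserrors is imported as fserrors.
    func shouldRetrySketch(ctx context.Context, err error) (bool, error) {
    	// A cancelled or timed-out context must not be retried.
    	if fserrors.ContextError(ctx, &err) {
    		return false, err
    	}
    	// Otherwise defer to rclone's generic retryable-error classification.
    	return fserrors.ShouldRetry(err), err
    }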
@@ -662,7 +666,7 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
 //
 // Returns an error if it isn't empty
 func (f *Fs) Rmdir(ctx context.Context, dir string) error {
-	return f.purgeCheck(dir, true)
+	return f.purgeCheck(ctx, dir, true)
 }

 // Precision return the precision of this Fs
@@ -676,13 +680,13 @@ func (f *Fs) Precision() time.Duration {
 // deleting all the files quicker than just running Remove() on the
 // result of List()
 func (f *Fs) Purge(ctx context.Context, dir string) error {
-	return f.purgeCheck(dir, false)
+	return f.purgeCheck(ctx, dir, false)
 }

 // move a file or folder (srcFs, srcRemote, info) to (f, dstRemote)
 //
 // info will be updated
-func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node) (err error) {
+func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node) (err error) {
 	var (
 		dstFs                  = f
 		srcDirNode, dstDirNode *mega.Node
@@ -692,12 +696,12 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node

 	if dstRemote != "" {
 		// lookup or create the destination parent directory
-		dstDirNode, dstLeaf, err = dstFs.mkdirParent(dstRemote)
+		dstDirNode, dstLeaf, err = dstFs.mkdirParent(ctx, dstRemote)
 	} else {
 		// find or create the parent of the root directory
 		absRoot := dstFs.srv.FS.GetRoot()
 		dstParent, dstLeaf = path.Split(dstFs.root)
-		dstDirNode, err = dstFs.mkdir(absRoot, dstParent)
+		dstDirNode, err = dstFs.mkdir(ctx, absRoot, dstParent)
 	}
 	if err != nil {
 		return errors.Wrap(err, "server-side move failed to make dst parent dir")
@@ -705,7 +709,7 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node

 	if srcRemote != "" {
 		// lookup the existing parent directory
-		srcDirNode, srcLeaf, err = srcFs.lookupParentDir(srcRemote)
+		srcDirNode, srcLeaf, err = srcFs.lookupParentDir(ctx, srcRemote)
 	} else {
 		// lookup the existing root parent
 		absRoot := srcFs.srv.FS.GetRoot()
@@ -721,7 +725,7 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
 		//log.Printf("move src %p %q dst %p %q", srcDirNode, srcDirNode.GetName(), dstDirNode, dstDirNode.GetName())
 		err = f.pacer.Call(func() (bool, error) {
 			err = f.srv.Move(info, dstDirNode)
-			return shouldRetry(err)
+			return shouldRetry(ctx, err)
 		})
 		if err != nil {
 			return errors.Wrap(err, "server-side move failed")
@@ -735,7 +739,7 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
 		//log.Printf("rename %q to %q", srcLeaf, dstLeaf)
 		err = f.pacer.Call(func() (bool, error) {
 			err = f.srv.Rename(info, f.opt.Enc.FromStandardName(dstLeaf))
-			return shouldRetry(err)
+			return shouldRetry(ctx, err)
 		})
 		if err != nil {
 			return errors.Wrap(err, "server-side rename failed")
@@ -767,7 +771,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}

 	// Do the move
-	err := f.move(remote, srcObj.fs, srcObj.remote, srcObj.info)
+	err := f.move(ctx, remote, srcObj.fs, srcObj.remote, srcObj.info)
 	if err != nil {
 		return nil, err
 	}
@@ -798,13 +802,13 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	}

 	// find the source
-	info, err := srcFs.lookupDir(srcRemote)
+	info, err := srcFs.lookupDir(ctx, srcRemote)
 	if err != nil {
 		return err
 	}

 	// check the destination doesn't exist
-	_, err = dstFs.lookupDir(dstRemote)
+	_, err = dstFs.lookupDir(ctx, dstRemote)
 	if err == nil {
 		return fs.ErrorDirExists
 	} else if err != fs.ErrorDirNotFound {
@@ -812,7 +816,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	}

 	// Do the move
-	err = f.move(dstRemote, srcFs, srcRemote, info)
+	err = f.move(ctx, dstRemote, srcFs, srcRemote, info)
 	if err != nil {
 		return err
 	}
@@ -838,7 +842,7 @@ func (f *Fs) Hashes() hash.Set {

 // PublicLink generates a public link to the remote path (usually readable by anyone)
 func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
-	root, err := f.findRoot(false)
+	root, err := f.findRoot(ctx, false)
 	if err != nil {
 		return "", errors.Wrap(err, "PublicLink failed to find root node")
 	}
@@ -886,7 +890,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
 			fs.Infof(srcDir, "merging %q", f.opt.Enc.ToStandardName(info.GetName()))
 			err = f.pacer.Call(func() (bool, error) {
 				err = f.srv.Move(info, dstDirNode)
-				return shouldRetry(err)
+				return shouldRetry(ctx, err)
 			})
 			if err != nil {
 				return errors.Wrapf(err, "MergeDirs move failed on %q in %v", f.opt.Enc.ToStandardName(info.GetName()), srcDir)
@@ -894,7 +898,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
 		}
 		// rmdir (into trash) the now empty source directory
 		fs.Infof(srcDir, "removing empty directory")
-		err = f.deleteNode(srcDirNode)
+		err = f.deleteNode(ctx, srcDirNode)
 		if err != nil {
 			return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
 		}
@@ -908,7 +912,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		q, err = f.srv.GetQuota()
-		return shouldRetry(err)
+		return shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to get Mega Quota")
@@ -963,11 +967,11 @@ func (o *Object) setMetaData(info *mega.Node) (err error) {
 // readMetaData gets the metadata if it hasn't already been fetched
 //
 // it also sets the info
-func (o *Object) readMetaData() (err error) {
+func (o *Object) readMetaData(ctx context.Context) (err error) {
 	if o.info != nil {
 		return nil
 	}
-	info, err := o.fs.readMetaDataForPath(o.remote)
+	info, err := o.fs.readMetaDataForPath(ctx, o.remote)
 	if err != nil {
 		if err == fs.ErrorDirNotFound {
 			err = fs.ErrorObjectNotFound
@@ -998,6 +1002,7 @@ func (o *Object) Storable() bool {

 // openObject represents a download in progress
 type openObject struct {
+	ctx context.Context
 	mu  sync.Mutex
 	o   *Object
 	d   *mega.Download
@@ -1008,14 +1013,14 @@ type openObject struct {
 }

 // get the next chunk
-func (oo *openObject) getChunk() (err error) {
+func (oo *openObject) getChunk(ctx context.Context) (err error) {
 	if oo.id >= oo.d.Chunks() {
 		return io.EOF
 	}
 	var chunk []byte
 	err = oo.o.fs.pacer.Call(func() (bool, error) {
 		chunk, err = oo.d.DownloadChunk(oo.id)
-		return shouldRetry(err)
+		return shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return err
@@ -1045,7 +1050,7 @@ func (oo *openObject) Read(p []byte) (n int, err error) {
 		oo.skip -= int64(size)
 	}
 	if len(oo.chunk) == 0 {
-		err = oo.getChunk()
+		err = oo.getChunk(oo.ctx)
 		if err != nil {
 			return 0, err
 		}
@@ -1068,7 +1073,7 @@ func (oo *openObject) Close() (err error) {
 	}
 	err = oo.o.fs.pacer.Call(func() (bool, error) {
 		err = oo.d.Finish()
-		return shouldRetry(err)
+		return shouldRetry(oo.ctx, err)
 	})
 	if err != nil {
 		return errors.Wrap(err, "failed to finish download")
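
openObject gains a ctx field because it implements io.Reader and io.Closer, whose signatures cannot carry a context; the context captured in Open is stored on the struct so Read and Close can still feed it to shouldRetry. A stripped-down sketch of that pattern:

    // Sketch: carrying a request context through a context-free interface.
    // chunkedReader and fetch are hypothetical; openObject above is the real type.
    type chunkedReader struct {
    	ctx   context.Context // captured when the download was opened
    	chunk []byte
    	fetch func(ctx context.Context) ([]byte, error)
    }

    // Read satisfies io.Reader, which has no context parameter, so the
    // stored ctx is what makes cancellation-aware retries possible here.
    func (r *chunkedReader) Read(p []byte) (n int, err error) {
    	if len(r.chunk) == 0 {
    		if r.chunk, err = r.fetch(r.ctx); err != nil {
    			return 0, err
    		}
    	}
    	n = copy(p, r.chunk)
    	r.chunk = r.chunk[n:]
    	return n, nil
    }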
@@ -1096,13 +1101,14 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	var d *mega.Download
 	err = o.fs.pacer.Call(func() (bool, error) {
 		d, err = o.fs.srv.NewDownload(o.info)
-		return shouldRetry(err)
+		return shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return nil, errors.Wrap(err, "open download file failed")
 	}

 	oo := &openObject{
+		ctx:  ctx,
 		o:    o,
 		d:    d,
 		skip: offset,
@@ -1125,7 +1131,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	remote := o.Remote()

 	// Create the parent directory
-	dirNode, leaf, err := o.fs.mkdirParent(remote)
+	dirNode, leaf, err := o.fs.mkdirParent(ctx, remote)
 	if err != nil {
 		return errors.Wrap(err, "update make parent dir failed")
 	}
@@ -1133,7 +1139,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	var u *mega.Upload
 	err = o.fs.pacer.Call(func() (bool, error) {
 		u, err = o.fs.srv.NewUpload(dirNode, o.fs.opt.Enc.FromStandardName(leaf), size)
-		return shouldRetry(err)
+		return shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return errors.Wrap(err, "upload file failed to create session")
@@ -1154,7 +1160,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

 		err = o.fs.pacer.Call(func() (bool, error) {
 			err = u.UploadChunk(id, chunk)
-			return shouldRetry(err)
+			return shouldRetry(ctx, err)
 		})
 		if err != nil {
 			return errors.Wrap(err, "upload file failed to upload chunk")
@@ -1165,7 +1171,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	var info *mega.Node
 	err = o.fs.pacer.Call(func() (bool, error) {
 		info, err = u.Finish()
-		return shouldRetry(err)
+		return shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return errors.Wrap(err, "failed to finish upload")
@@ -1173,7 +1179,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

 	// If the upload succeeded and the original object existed, then delete it
 	if o.info != nil {
-		err = o.fs.deleteNode(o.info)
+		err = o.fs.deleteNode(ctx, o.info)
 		if err != nil {
 			return errors.Wrap(err, "upload failed to remove old version")
 		}
@@ -1185,7 +1191,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

 // Remove an object
 func (o *Object) Remove(ctx context.Context) error {
-	err := o.fs.deleteNode(o.info)
+	err := o.fs.deleteNode(ctx, o.info)
 	if err != nil {
 		return errors.Wrap(err, "Remove object failed")
 	}

@@ -9,7 +9,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"log"
 	"net/http"
 	"net/url"
 	"path"
@@ -52,8 +51,8 @@ const (
 	driveTypePersonal   = "personal"
 	driveTypeBusiness   = "business"
 	driveTypeSharepoint = "documentLibrary"
-	defaultChunkSize    = 10 * fs.MebiByte
-	chunkSizeMultiple   = 320 * fs.KibiByte
+	defaultChunkSize    = 10 * fs.Mebi
+	chunkSizeMultiple   = 320 * fs.Kibi

 	regionGlobal = "global"
 	regionUS     = "us"
@@ -94,216 +93,12 @@ var (

 // Register with Fs
 func init() {
-	QuickXorHashType = hash.RegisterHash("QuickXorHash", 40, quickxorhash.New)
+	QuickXorHashType = hash.RegisterHash("quickxor", "QuickXorHash", 40, quickxorhash.New)
 	fs.Register(&fs.RegInfo{
 		Name:        "onedrive",
 		Description: "Microsoft OneDrive",
 		NewFs:       NewFs,
-		Config: func(ctx context.Context, name string, m configmap.Mapper) {
-			region, _ := m.Get("region")
-			graphURL := graphAPIEndpoint[region] + "/v1.0"
-			oauthConfig.Endpoint = oauth2.Endpoint{
-				AuthURL:  authEndpoint[region] + authPath,
-				TokenURL: authEndpoint[region] + tokenPath,
-			}
-			ci := fs.GetConfig(ctx)
-			err := oauthutil.Config(ctx, "onedrive", name, m, oauthConfig, nil)
-			if err != nil {
-				log.Fatalf("Failed to configure token: %v", err)
-				return
-			}
-
-			// Stop if we are running non-interactive config
-			if ci.AutoConfirm {
-				return
-			}
-
-			type driveResource struct {
-				DriveID   string `json:"id"`
-				DriveName string `json:"name"`
-				DriveType string `json:"driveType"`
-			}
-			type drivesResponse struct {
-				Drives []driveResource `json:"value"`
-			}
-
-			type siteResource struct {
-				SiteID   string `json:"id"`
-				SiteName string `json:"displayName"`
-				SiteURL  string `json:"webUrl"`
-			}
-			type siteResponse struct {
-				Sites []siteResource `json:"value"`
-			}
-
-			oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
-			if err != nil {
-				log.Fatalf("Failed to configure OneDrive: %v", err)
-			}
-			srv := rest.NewClient(oAuthClient)
-
-			var opts rest.Opts
-			var finalDriveID string
-			var siteID string
-			var relativePath string
-			switch config.Choose("Your choice",
-				[]string{"onedrive", "sharepoint", "url", "search", "driveid", "siteid", "path"},
-				[]string{
-					"OneDrive Personal or Business",
-					"Root Sharepoint site",
-					"Sharepoint site name or URL (e.g. mysite or https://contoso.sharepoint.com/sites/mysite)",
-					"Search for a Sharepoint site",
-					"Type in driveID (advanced)",
-					"Type in SiteID (advanced)",
-					"Sharepoint server-relative path (advanced, e.g. /teams/hr)",
-				},
-				false) {
-
-			case "onedrive":
-				opts = rest.Opts{
-					Method:  "GET",
-					RootURL: graphURL,
-					Path:    "/me/drives",
-				}
-			case "sharepoint":
-				opts = rest.Opts{
-					Method:  "GET",
-					RootURL: graphURL,
-					Path:    "/sites/root/drives",
-				}
-			case "driveid":
-				fmt.Printf("Paste your Drive ID here> ")
-				finalDriveID = config.ReadLine()
-			case "siteid":
-				fmt.Printf("Paste your Site ID here> ")
-				siteID = config.ReadLine()
-			case "url":
-				fmt.Println("Example: \"https://contoso.sharepoint.com/sites/mysite\" or \"mysite\"")
-				fmt.Printf("Paste your Site URL here> ")
-				siteURL := config.ReadLine()
-				re := regexp.MustCompile(`https://.*\.sharepoint.com/sites/(.*)`)
-				match := re.FindStringSubmatch(siteURL)
-				if len(match) == 2 {
-					relativePath = "/sites/" + match[1]
-				} else {
-					relativePath = "/sites/" + siteURL
-				}
-			case "path":
-				fmt.Printf("Enter server-relative URL here> ")
-				relativePath = config.ReadLine()
-			case "search":
-				fmt.Printf("What to search for> ")
-				searchTerm := config.ReadLine()
-				opts = rest.Opts{
-					Method:  "GET",
-					RootURL: graphURL,
-					Path:    "/sites?search=" + searchTerm,
-				}
-
-				sites := siteResponse{}
-				_, err := srv.CallJSON(ctx, &opts, nil, &sites)
-				if err != nil {
-					log.Fatalf("Failed to query available sites: %v", err)
-				}
-
-				if len(sites.Sites) == 0 {
-					log.Fatalf("Search for '%s' returned no results", searchTerm)
-				} else {
-					fmt.Printf("Found %d sites, please select the one you want to use:\n", len(sites.Sites))
-					for index, site := range sites.Sites {
-						fmt.Printf("%d: %s (%s) id=%s\n", index, site.SiteName, site.SiteURL, site.SiteID)
-					}
-					siteID = sites.Sites[config.ChooseNumber("Chose drive to use:", 0, len(sites.Sites)-1)].SiteID
-				}
-			}
-
-			// if we use server-relative URL for finding the drive
-			if relativePath != "" {
-				opts = rest.Opts{
-					Method:  "GET",
-					RootURL: graphURL,
-					Path:    "/sites/root:" + relativePath,
-				}
-				site := siteResource{}
-				_, err := srv.CallJSON(ctx, &opts, nil, &site)
-				if err != nil {
-					log.Fatalf("Failed to query available site by relative path: %v", err)
-				}
-				siteID = site.SiteID
-			}
-
-			// if we have a siteID we need to ask for the drives
-			if siteID != "" {
-				opts = rest.Opts{
-					Method:  "GET",
-					RootURL: graphURL,
-					Path:    "/sites/" + siteID + "/drives",
-				}
-			}
-
-			// We don't have the final ID yet?
-			// query Microsoft Graph
-			if finalDriveID == "" {
-				drives := drivesResponse{}
-				_, err := srv.CallJSON(ctx, &opts, nil, &drives)
-				if err != nil {
-					log.Fatalf("Failed to query available drives: %v", err)
-				}
-
-				// Also call /me/drive as sometimes /me/drives doesn't return it #4068
-				if opts.Path == "/me/drives" {
-					opts.Path = "/me/drive"
-					meDrive := driveResource{}
-					_, err := srv.CallJSON(ctx, &opts, nil, &meDrive)
-					if err != nil {
-						log.Fatalf("Failed to query available drives: %v", err)
-					}
-					found := false
-					for _, drive := range drives.Drives {
-						if drive.DriveID == meDrive.DriveID {
-							found = true
-							break
-						}
-					}
-					// add the me drive if not found already
-					if !found {
-						fs.Debugf(nil, "Adding %v to drives list from /me/drive", meDrive)
-						drives.Drives = append(drives.Drives, meDrive)
-					}
-				}
-
-				if len(drives.Drives) == 0 {
-					log.Fatalf("No drives found")
-				} else {
-					fmt.Printf("Found %d drives, please select the one you want to use:\n", len(drives.Drives))
-					for index, drive := range drives.Drives {
-						fmt.Printf("%d: %s (%s) id=%s\n", index, drive.DriveName, drive.DriveType, drive.DriveID)
-					}
-					finalDriveID = drives.Drives[config.ChooseNumber("Chose drive to use:", 0, len(drives.Drives)-1)].DriveID
-				}
-			}
-
-			// Test the driveID and get drive type
-			opts = rest.Opts{
-				Method:  "GET",
-				RootURL: graphURL,
-				Path:    "/drives/" + finalDriveID + "/root"}
-			var rootItem api.Item
-			_, err = srv.CallJSON(ctx, &opts, nil, &rootItem)
-			if err != nil {
-				log.Fatalf("Failed to query root for drive %s: %v", finalDriveID, err)
-			}
-
-			fmt.Printf("Found drive '%s' of type '%s', URL: %s\nIs that okay?\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL)
-			// This does not work, YET :)
-			if !config.ConfirmWithConfig(ctx, m, "config_drive_ok", true) {
-				log.Fatalf("Cancelled by user")
-			}
-
-			m.Set(configDriveID, finalDriveID)
-			m.Set(configDriveType, rootItem.ParentReference.DriveType)
-			config.SaveConfig()
-		},
+		Config: Config,
 		Options: append(oauthutil.SharedOptions, []fs.Option{{
 			Name: "region",
 			Help: "Choose national cloud region for OneDrive.",
@@ -362,6 +157,11 @@ This will only work if you are copying between two OneDrive *Personal* drives AN
 the files to copy are already shared between them. In other cases, rclone will
 fall back to normal copy (which will be slightly slower).`,
 			Advanced: true,
+		}, {
+			Name:     "list_chunk",
+			Help:     "Size of listing chunk.",
+			Default:  1000,
+			Advanced: true,
 		}, {
 			Name:    "no_versions",
 			Default: false,
@@ -461,6 +261,266 @@ At the time of writing this only works with OneDrive personal paid accounts.
 	})
 }

+type driveResource struct {
+	DriveID   string `json:"id"`
+	DriveName string `json:"name"`
+	DriveType string `json:"driveType"`
+}
+type drivesResponse struct {
+	Drives []driveResource `json:"value"`
+}
+
+type siteResource struct {
+	SiteID   string `json:"id"`
+	SiteName string `json:"displayName"`
+	SiteURL  string `json:"webUrl"`
+}
+type siteResponse struct {
+	Sites []siteResource `json:"value"`
+}
+
+// Get the region and graphURL from the config
+func getRegionURL(m configmap.Mapper) (region, graphURL string) {
+	region, _ = m.Get("region")
+	graphURL = graphAPIEndpoint[region] + "/v1.0"
+	return region, graphURL
+}
+
+// Config for chooseDrive
+type chooseDriveOpt struct {
+	opts         rest.Opts
+	finalDriveID string
+	siteID       string
+	relativePath string
+}
+
+// chooseDrive returns a query to choose which drive the user is interested in
+func chooseDrive(ctx context.Context, name string, m configmap.Mapper, srv *rest.Client, opt chooseDriveOpt) (*fs.ConfigOut, error) {
+	_, graphURL := getRegionURL(m)
+
+	// if we use server-relative URL for finding the drive
+	if opt.relativePath != "" {
+		opt.opts = rest.Opts{
+			Method:  "GET",
+			RootURL: graphURL,
+			Path:    "/sites/root:" + opt.relativePath,
+		}
+		site := siteResource{}
+		_, err := srv.CallJSON(ctx, &opt.opts, nil, &site)
+		if err != nil {
+			return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available site by relative path: %v", err))
+		}
+		opt.siteID = site.SiteID
+	}
+
+	// if we have a siteID we need to ask for the drives
+	if opt.siteID != "" {
+		opt.opts = rest.Opts{
+			Method:  "GET",
+			RootURL: graphURL,
+			Path:    "/sites/" + opt.siteID + "/drives",
+		}
+	}
+
+	drives := drivesResponse{}
+
+	// We don't have the final ID yet?
+	// query Microsoft Graph
+	if opt.finalDriveID == "" {
+		_, err := srv.CallJSON(ctx, &opt.opts, nil, &drives)
+		if err != nil {
+			return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available drives: %v", err))
+		}
+
+		// Also call /me/drive as sometimes /me/drives doesn't return it #4068
+		if opt.opts.Path == "/me/drives" {
+			opt.opts.Path = "/me/drive"
+			meDrive := driveResource{}
+			_, err := srv.CallJSON(ctx, &opt.opts, nil, &meDrive)
+			if err != nil {
+				return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available drives: %v", err))
+			}
+			found := false
+			for _, drive := range drives.Drives {
+				if drive.DriveID == meDrive.DriveID {
+					found = true
+					break
+				}
+			}
+			// add the me drive if not found already
+			if !found {
+				fs.Debugf(nil, "Adding %v to drives list from /me/drive", meDrive)
+				drives.Drives = append(drives.Drives, meDrive)
+			}
+		}
+	} else {
+		drives.Drives = append(drives.Drives, driveResource{
+			DriveID:   opt.finalDriveID,
+			DriveName: "Chosen Drive ID",
+			DriveType: "drive",
+		})
+	}
+	if len(drives.Drives) == 0 {
+		return fs.ConfigError("choose_type", "No drives found")
+	}
+	return fs.ConfigChoose("driveid_final", "config_driveid", "Select drive you want to use", len(drives.Drives), func(i int) (string, string) {
+		drive := drives.Drives[i]
+		return drive.DriveID, fmt.Sprintf("%s (%s)", drive.DriveName, drive.DriveType)
+	})
+}
+
+// Config the backend
+func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
+	region, graphURL := getRegionURL(m)
+
+	if config.State == "" {
+		oauthConfig.Endpoint = oauth2.Endpoint{
+			AuthURL:  authEndpoint[region] + authPath,
+			TokenURL: authEndpoint[region] + tokenPath,
+		}
+		return oauthutil.ConfigOut("choose_type", &oauthutil.Options{
+			OAuth2Config: oauthConfig,
+		})
+	}
+
+	oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to configure OneDrive")
+	}
+	srv := rest.NewClient(oAuthClient)
+
+	switch config.State {
+	case "choose_type":
+		return fs.ConfigChooseFixed("choose_type_done", "config_type", "Type of connection", []fs.OptionExample{{
+			Value: "onedrive",
+			Help:  "OneDrive Personal or Business",
+		}, {
+			Value: "sharepoint",
+			Help:  "Root Sharepoint site",
+		}, {
+			Value: "url",
+			Help:  "Sharepoint site name or URL (e.g. mysite or https://contoso.sharepoint.com/sites/mysite)",
+		}, {
+			Value: "search",
+			Help:  "Search for a Sharepoint site",
+		}, {
+			Value: "driveid",
+			Help:  "Type in driveID (advanced)",
+		}, {
+			Value: "siteid",
+			Help:  "Type in SiteID (advanced)",
+		}, {
+			Value: "path",
+			Help:  "Sharepoint server-relative path (advanced, e.g. /teams/hr)",
+		}})
+	case "choose_type_done":
+		// Jump to next state according to config chosen
+		return fs.ConfigGoto(config.Result)
+	case "onedrive":
+		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
+			opts: rest.Opts{
+				Method:  "GET",
+				RootURL: graphURL,
+				Path:    "/me/drives",
+			},
+		})
+	case "sharepoint":
+		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
+			opts: rest.Opts{
+				Method:  "GET",
+				RootURL: graphURL,
+				Path:    "/sites/root/drives",
+			},
+		})
+	case "driveid":
+		return fs.ConfigInput("driveid_end", "config_driveid_fixed", "Drive ID")
+	case "driveid_end":
+		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
+			finalDriveID: config.Result,
+		})
+	case "siteid":
+		return fs.ConfigInput("siteid_end", "config_siteid", "Site ID")
+	case "siteid_end":
+		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
+			siteID: config.Result,
+		})
+	case "url":
+		return fs.ConfigInput("url_end", "config_site_url", `Site URL
+
+Example: "https://contoso.sharepoint.com/sites/mysite" or "mysite"
+`)
+	case "url_end":
+		siteURL := config.Result
+		re := regexp.MustCompile(`https://.*\.sharepoint.com/sites/(.*)`)
+		match := re.FindStringSubmatch(siteURL)
+		if len(match) == 2 {
+			return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
+				relativePath: "/sites/" + match[1],
+			})
+		}
+		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
+			relativePath: "/sites/" + siteURL,
+		})
+	case "path":
+		return fs.ConfigInput("path_end", "config_sharepoint_url", `Server-relative URL`)
+	case "path_end":
+		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
+			relativePath: config.Result,
+		})
+	case "search":
+		return fs.ConfigInput("search_end", "config_search_term", `Search term`)
+	case "search_end":
+		searchTerm := config.Result
+		opts := rest.Opts{
+			Method:  "GET",
+			RootURL: graphURL,
+			Path:    "/sites?search=" + searchTerm,
+		}
+
+		sites := siteResponse{}
+		_, err := srv.CallJSON(ctx, &opts, nil, &sites)
+		if err != nil {
+			return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available sites: %v", err))
+		}
+
+		if len(sites.Sites) == 0 {
+			return fs.ConfigError("choose_type", fmt.Sprintf("search for %q returned no results", searchTerm))
+		}
+		return fs.ConfigChoose("search_sites", "config_site", `Select the Site you want to use`, len(sites.Sites), func(i int) (string, string) {
+			site := sites.Sites[i]
+			return site.SiteID, fmt.Sprintf("%s (%s)", site.SiteName, site.SiteURL)
+		})
+	case "search_sites":
+		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
+			siteID: config.Result,
+		})
+	case "driveid_final":
+		finalDriveID := config.Result
+
+		// Test the driveID and get drive type
+		opts := rest.Opts{
+			Method:  "GET",
+			RootURL: graphURL,
+			Path:    "/drives/" + finalDriveID + "/root"}
+		var rootItem api.Item
+		_, err = srv.CallJSON(ctx, &opts, nil, &rootItem)
+		if err != nil {
+			return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query root for drive %q: %v", finalDriveID, err))
+		}

+		m.Set(configDriveID, finalDriveID)
+		m.Set(configDriveType, rootItem.ParentReference.DriveType)
+
+		return fs.ConfigConfirm("driveid_final_end", true, "config_drive_ok", fmt.Sprintf("Drive OK?\n\nFound drive %q of type %q\nURL: %s\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL))
+	case "driveid_final_end":
+		if config.Result == "true" {
+			return nil, nil
+		}
+		return fs.ConfigGoto("choose_type")
+	}
+	return nil, fmt.Errorf("unknown state %q", config.State)
+}

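The rewritten Config above is a small state machine: each fs.Config* helper returns a *fs.ConfigOut naming the next state, rclone asks the question, then calls Config again with config.State set and the user's answer in config.Result. A toy example of the same contract (the states and keys here are hypothetical, not part of the onedrive backend):

    // Toy sketch of the fs.ConfigIn / *fs.ConfigOut state machine.
    func configSketch(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
    	switch config.State {
    	case "":
    		// First call: ask a question and name the state for the answer.
    		return fs.ConfigInput("got_name", "config_name", "Name to greet")
    	case "got_name":
    		// Second call: config.Result carries the user's input.
    		m.Set("greeting", "hello "+config.Result)
    		return nil, nil // nil, nil ends the config flow
    	}
    	return nil, fmt.Errorf("unknown state %q", config.State)
    }
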
 // Options defines the configuration for this backend
 type Options struct {
 	Region                  string `config:"region"`
@@ -469,6 +529,7 @@ type Options struct {
 	DriveType               string `config:"drive_type"`
 	ExposeOneNoteFiles      bool   `config:"expose_onenote_files"`
 	ServerSideAcrossConfigs bool   `config:"server_side_across_configs"`
+	ListChunk               int64  `config:"list_chunk"`
 	NoVersions              bool   `config:"no_versions"`
 	LinkScope               string `config:"link_scope"`
 	LinkType                string `config:"link_type"`
@@ -550,7 +611,10 @@ var errAsyncJobAccessDenied = errors.New("async job failed - access denied")

 // shouldRetry returns a boolean as to whether this resp and err
 // deserve to be retried. It returns the err as a convenience
-func shouldRetry(resp *http.Response, err error) (bool, error) {
+func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
+	if fserrors.ContextError(ctx, &err) {
+		return false, err
+	}
 	retry := false
 	if resp != nil {
 		switch resp.StatusCode {
@@ -558,6 +622,9 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
 		if len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
 			retry = true
 			fs.Debugf(nil, "Should retry: %v", err)
+		} else if err != nil && strings.Contains(err.Error(), "Unable to initialize RPS") {
+			retry = true
+			fs.Debugf(nil, "HTTP 401: Unable to initialize RPS. Trying again.")
 		}
 	case 429: // Too Many Requests.
 		// see https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online
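
The point of the new ctx parameter is visible in the first three added lines: fserrors.ContextError reports whether ctx has been cancelled and, if so, rewrites err to the context's error, so shouldRetry returns false and the pacer gives up at once instead of sleeping through its remaining retries. The call pattern, with a hypothetical fetch standing in for any of the CallJSON sites below:

    // Sketch: pacer + context-aware shouldRetry.
    err := f.pacer.Call(func() (bool, error) {
    	resp, err := fetch(ctx) // hypothetical HTTP operation
    	// Once ctx is done this returns (false, ctx.Err()), stopping retries.
    	return shouldRetry(ctx, resp, err)
    })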
@@ -597,7 +664,7 @@ func (f *Fs) readMetaDataForPathRelativeToID(ctx context.Context, normalizedID s

 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})

 	return info, resp, err
@@ -613,7 +680,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
 		opts.Path = strings.TrimSuffix(opts.Path, ":")
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	return info, resp, err
 }
@@ -685,7 +752,7 @@ func errorHandler(resp *http.Response) error {
 }

 func checkUploadChunkSize(cs fs.SizeSuffix) error {
-	const minChunkSize = fs.Byte
+	const minChunkSize = fs.SizeSuffixBase
 	if cs%chunkSizeMultiple != 0 {
 		return errors.Errorf("%s is not a multiple of %s", cs, chunkSizeMultiple)
 	}
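
checkUploadChunkSize is otherwise unchanged: it still rejects any chunk size that is not a positive multiple of chunkSizeMultiple (320 KiB, the granularity Microsoft's upload-session documentation asks for); only the name of the minimum constant moved from fs.Byte to fs.SizeSuffixBase. The default passes because 10 MiB divides evenly into 320 KiB units:

    // Quick arithmetic check that the default chunk size is accepted.
    const kib = 1024
    defaultChunkSize := 10 * 1024 * kib // 10 MiB in bytes
    chunkSizeMultiple := 320 * kib      // 320 KiB in bytes
    fmt.Println(defaultChunkSize % chunkSizeMultiple) // prints 0: valid
    fmt.Println(defaultChunkSize / chunkSizeMultiple) // prints 32 (an exact multiple)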
@@ -869,7 +936,7 @@ func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, e
 	}
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		//fmt.Printf("...Error %v\n", err)
@@ -894,14 +961,14 @@ type listAllFn func(*api.Item) bool
 func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
 	// Top parameter asks for bigger pages of data
 	// https://dev.onedrive.com/odata/optional-query-parameters.htm
-	opts := f.newOptsCall(dirID, "GET", "/children?$top=1000")
+	opts := f.newOptsCall(dirID, "GET", fmt.Sprintf("/children?$top=%d", f.opt.ListChunk))
 OUTER:
 	for {
 		var result api.ListChildrenResponse
 		var resp *http.Response
 		err = f.pacer.Call(func() (bool, error) {
 			resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-			return shouldRetry(resp, err)
+			return shouldRetry(ctx, resp, err)
 		})
 		if err != nil {
 			return found, errors.Wrap(err, "couldn't list files")
@@ -1038,7 +1105,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) error {

 	return f.pacer.Call(func() (bool, error) {
 		resp, err := f.srv.Call(ctx, &opts)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 }

@@ -1194,7 +1261,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &copyReq, nil)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, err
@@ -1287,7 +1354,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	var info api.Item
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, err
@@ -1354,7 +1421,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	var info api.Item
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return err
@@ -1380,7 +1447,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &drive)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, errors.Wrap(err, "about failed")
@@ -1421,7 +1488,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 		Password: f.opt.LinkPassword,
 	}

-	if expire < fs.Duration(time.Hour*24*365*100) {
+	if expire < fs.DurationOff {
 		expiry := time.Now().Add(time.Duration(expire))
 		share.Expiry = &expiry
 	}
@@ -1430,7 +1497,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 	var result api.CreateShareLinkResponse
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &share, &result)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		fmt.Println(err)
@@ -1475,7 +1542,7 @@ func (o *Object) deleteVersions(ctx context.Context) error {
 	var versions api.VersionsResponse
 	err := o.fs.pacer.Call(func() (bool, error) {
 		resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &versions)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return err
@@ -1502,7 +1569,7 @@ func (o *Object) deleteVersion(ctx context.Context, ID string) error {
 	opts.NoResponse = true
 	return o.fs.pacer.Call(func() (bool, error) {
 		resp, err := o.fs.srv.Call(ctx, &opts)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 }

@@ -1653,7 +1720,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
 	var info *api.Item
 	err := o.fs.pacer.Call(func() (bool, error) {
 		resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	// Remove versions if required
 	if o.fs.opt.NoVersions {
@@ -1695,7 +1762,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.Call(ctx, &opts)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, err
@@ -1723,7 +1790,7 @@ func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (re
 				err = errors.New(err.Error() + " (is it a OneNote file?)")
 			}
 		}
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	return response, err
 }
@@ -1738,7 +1805,7 @@ func (o *Object) getPosition(ctx context.Context, url string) (pos int64, err er
 	var resp *http.Response
 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return 0, err
@@ -1798,11 +1865,11 @@ func (o *Object) uploadFragment(ctx context.Context, url string, start int64, to
 			return true, errors.Wrapf(err, "retry this chunk skipping %d bytes", skip)
 		}
 		if err != nil {
-			return shouldRetry(resp, err)
+			return shouldRetry(ctx, resp, err)
 		}
 		body, err = rest.ReadBody(resp)
 		if err != nil {
-			return shouldRetry(resp, err)
+			return shouldRetry(ctx, resp, err)
 		}
 		if resp.StatusCode == 200 || resp.StatusCode == 201 {
 			// we are done :)
@@ -1825,7 +1892,7 @@ func (o *Object) cancelUploadSession(ctx context.Context, url string) (err error
 	var resp *http.Response
 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.Call(ctx, &opts)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	return
 }
@@ -1849,7 +1916,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
 			fs.Debugf(o, "Cancelling multipart upload: %v", err)
 			cancelErr := o.cancelUploadSession(ctx, uploadURL)
 			if cancelErr != nil {
-				fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
+				fs.Logf(o, "Failed to cancel multipart upload: %v (upload failed due to: %v)", cancelErr, err)
 			}
 		})()

@@ -1874,11 +1941,11 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
 	return info, nil
 }

-// Update the content of a remote file within 4MB size in one single request
+// Update the content of a remote file within 4 MiB size in one single request
 // This function will set modtime after uploading, which will create a new version for the remote file
 func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, modTime time.Time, options ...fs.OpenOption) (info *api.Item, err error) {
 	if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
-		return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4MiB")
+		return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4 MiB")
 	}

 	fs.Debugf(o, "Starting singlepart upload")
@@ -1896,7 +1963,7 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
 			err = errors.New(err.Error() + " (is it a OneNote file?)")
 		}
 	}
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, err

@@ -88,7 +88,7 @@ func init() {

 Note that these chunks are buffered in memory so increasing them will
 increase memory use.`,
-			Default:  10 * fs.MebiByte,
+			Default:  10 * fs.Mebi,
 			Advanced: true,
 		}},
 	})
@@ -119,6 +119,7 @@ type Object struct {
 	fs      *Fs       // what this object is part of
 	remote  string    // The remote path
 	id      string    // ID of the file
+	parent  string    // ID of the parent directory
 	modTime time.Time // The modified time of the object if known
 	md5     string    // MD5 hash if known
 	size    int64     // Size of the object
@@ -206,7 +207,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 			Path: "/session/login.json",
 		}
 		resp, err = f.srv.CallJSON(ctx, &opts, &account, &f.session)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to create session")
@@ -233,7 +234,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		// No root so return old f
 		return f, nil
 	}
-	_, err := tempF.newObjectWithInfo(ctx, remote, nil)
+	_, err := tempF.newObjectWithInfo(ctx, remote, nil, "")
 	if err != nil {
 		if err == fs.ErrorObjectNotFound {
 			// File doesn't exist so return old f
@@ -293,7 +294,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) error {
 			Path: "/folder/remove.json",
 		}
 		resp, err := f.srv.CallJSON(ctx, &opts, &removeDirData, nil)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 }

@@ -388,7 +389,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 			Path: "/file/move_copy.json",
 		}
 		resp, err = f.srv.CallJSON(ctx, &opts, &copyFileData, &response)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, err
@@ -445,7 +446,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 			Path: "/file/move_copy.json",
 		}
 		resp, err = f.srv.CallJSON(ctx, &opts, &copyFileData, &response)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, err
@@ -494,7 +495,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 			Path: "/folder/move_copy.json",
 		}
 		resp, err = f.srv.CallJSON(ctx, &opts, &moveFolderData, &response)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		fs.Debugf(src, "DirMove error %v", err)
@@ -517,7 +518,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 // Return an Object from a path
 //
 // If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, file *File) (fs.Object, error) {
+func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, file *File, parent string) (fs.Object, error) {
 	// fs.Debugf(nil, "newObjectWithInfo(%s, %v)", remote, file)

 	var o *Object
@@ -526,6 +527,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, file *File) (
 			fs:      f,
 			remote:  remote,
 			id:      file.FileID,
+			parent:  parent,
 			modTime: time.Unix(file.DateModified, 0),
 			size:    file.Size,
 			md5:     file.FileHash,
@@ -548,7 +550,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, file *File) (
 // it returns the error fs.ErrorObjectNotFound.
 func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 	// fs.Debugf(nil, "NewObject(\"%s\")", remote)
-	return f.newObjectWithInfo(ctx, remote, nil)
+	return f.newObjectWithInfo(ctx, remote, nil, "")
 }

 // Creates from the parameters passed in a half finished Object which
@@ -581,7 +583,7 @@ func (f *Fs) readMetaDataForFolderID(ctx context.Context, id string) (info *Fold
 	}
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, err
@@ -631,7 +633,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 			Path: "/upload/create_file.json",
 		}
 		resp, err = o.fs.srv.CallJSON(ctx, &opts, &createFileData, &response)
-		return o.fs.shouldRetry(resp, err)
+		return o.fs.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to create file")
@@ -657,7 +659,10 @@ var retryErrorCodes = []int{

 // shouldRetry returns a boolean as to whether this resp and err
 // deserve to be retried. It returns the err as a convenience
-func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
+func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
+	if fserrors.ContextError(ctx, &err) {
+		return false, err
+	}
 	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }

@@ -683,7 +688,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 			Path: "/folder.json",
 		}
 		resp, err = f.srv.CallJSON(ctx, &opts, &createDirData, &response)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return "", err
@@ -711,7 +716,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
 			Path: "/folder/list.json/" + f.session.SessionID + "/" + pathID,
 		}
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &folderList)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return "", false, errors.Wrap(err, "failed to get folder list")
@@ -754,7 +759,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	folderList := FolderList{}
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &folderList)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to get folder list")
@@ -768,6 +773,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		f.dirCache.Put(remote, folder.FolderID)
 		d := fs.NewDir(remote, time.Unix(folder.DateModified, 0)).SetID(folder.FolderID)
 		d.SetItems(int64(folder.ChildFolders))
+		d.SetParentID(directoryID)
 		entries = append(entries, d)
 	}

@@ -775,7 +781,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		file.Name = f.opt.Enc.ToStandardName(file.Name)
 		// fs.Debugf(nil, "File: %s (%s)", file.Name, file.FileID)
 		remote := path.Join(dir, file.Name)
-		o, err := f.newObjectWithInfo(ctx, remote, &file)
+		o, err := f.newObjectWithInfo(ctx, remote, &file, directoryID)
 		if err != nil {
 			return nil, err
 		}
@@ -842,7 +848,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	}
 	err := o.fs.pacer.Call(func() (bool, error) {
 		resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, nil)
-		return o.fs.shouldRetry(resp, err)
+		return o.fs.shouldRetry(ctx, resp, err)
 	})

 	o.modTime = modTime
@@ -862,7 +868,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	var resp *http.Response
 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.Call(ctx, &opts)
-		return o.fs.shouldRetry(resp, err)
+		return o.fs.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to open file)")
@@ -881,7 +887,7 @@ func (o *Object) Remove(ctx context.Context) error {
 			Path: "/file.json/" + o.fs.session.SessionID + "/" + o.id,
 		}
 		resp, err := o.fs.srv.Call(ctx, &opts)
-		return o.fs.shouldRetry(resp, err)
+		return o.fs.shouldRetry(ctx, resp, err)
 	})
 }

@@ -910,7 +916,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			Path: "/upload/open_file_upload.json",
 		}
 		resp, err := o.fs.srv.CallJSON(ctx, &opts, &openUploadData, &openResponse)
-		return o.fs.shouldRetry(resp, err)
+		return o.fs.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return errors.Wrap(err, "failed to create file")
@@ -954,7 +960,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

 		}
 		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &reply)
-		return o.fs.shouldRetry(resp, err)
+		return o.fs.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return errors.Wrap(err, "failed to create file")
@@ -977,7 +983,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			Path: "/upload/close_file_upload.json",
 		}
 		resp, err = o.fs.srv.CallJSON(ctx, &opts, &closeUploadData, &closeResponse)
-		return o.fs.shouldRetry(resp, err)
+		return o.fs.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return errors.Wrap(err, "failed to create file")
@@ -1003,7 +1009,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			Path: "/file/access.json",
 		}
 		resp, err = o.fs.srv.CallJSON(ctx, &opts, &update, nil)
-		return o.fs.shouldRetry(resp, err)
+		return o.fs.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return err
@@ -1029,7 +1035,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 			o.fs.session.SessionID, directoryID, url.QueryEscape(o.fs.opt.Enc.FromStandardName(leaf))),
 		}
 		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &folderList)
-		return o.fs.shouldRetry(resp, err)
+		return o.fs.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return errors.Wrap(err, "failed to get folder list")
@@ -1053,6 +1059,11 @@ func (o *Object) ID() string {
 	return o.id
 }

+// ParentID returns the ID of the Object parent directory if known, or "" if not
+func (o *Object) ParentID() string {
+	return o.parent
+}
+
 // Check the interfaces are satisfied
 var (
 	_ fs.Fs = (*Fs)(nil)
@@ -1063,4 +1074,5 @@ var (
 	_ fs.DirCacheFlusher = (*Fs)(nil)
 	_ fs.Object          = (*Object)(nil)
 	_ fs.IDer            = (*Object)(nil)
+	_ fs.ParentIDer      = (*Object)(nil)
 )
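
The final addition is Go's standard compile-time interface assertion: assigning (*Object)(nil) to a blank fs.ParentIDer variable costs nothing at runtime but makes the build fail if Object ever stops implementing the interface. The idiom in isolation, with hypothetical types:

    // Sketch of the compile-time interface check used above.
    type Speaker interface{ Speak() string }

    type Dog struct{}

    func (Dog) Speak() string { return "woof" }

    // Compiles only while Dog implements Speaker; the blank identifier
    // means nothing is allocated or run.
    var _ Speaker = Dog{}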

@@ -12,7 +12,6 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"log"
 	"net/http"
 	"net/url"
 	"path"
@@ -72,7 +71,7 @@ func init() {
 		Name:        "pcloud",
 		Description: "Pcloud",
 		NewFs:       NewFs,
-		Config: func(ctx context.Context, name string, m configmap.Mapper) {
+		Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
 			optc := new(Options)
 			err := configstruct.Set(m, optc)
 			if err != nil {
@@ -94,14 +93,11 @@ func init() {
 				fs.Debugf(nil, "pcloud: got hostname %q", hostname)
 				return nil
 			}
-			opt := oauthutil.Options{
+			return oauthutil.ConfigOut("", &oauthutil.Options{
 				OAuth2Config: oauthConfig,
 				CheckAuth:    checkAuth,
 				StateBlankOK: true, // pCloud seems to drop the state parameter now - see #4210
-			}
-			err = oauthutil.Config(ctx, "pcloud", name, m, oauthConfig, &opt)
-			if err != nil {
-				log.Fatalf("Failed to configure token: %v", err)
-			}
+			})
 		},
 		Options: append(oauthutil.SharedOptions, []fs.Option{{
 			Name: config.ConfigEncoding,
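
pcloud's Config callback is migrated to the same fs.ConfigIn/*fs.ConfigOut contract introduced for onedrive above: instead of running oauthutil.Config itself and killing the process with log.Fatalf, it returns the OAuth step as data and propagates errors, which is also why the "log" import could be dropped. A before/after sketch of just the error-handling shape, with a hypothetical runOAuth helper:

    // Before (sketch): the callback drove the OAuth flow and aborted on failure.
    func configOld(ctx context.Context, name string, m configmap.Mapper) {
    	if err := runOAuth(ctx, name, m); err != nil { // runOAuth is hypothetical
    		log.Fatalf("Failed to configure token: %v", err)
    	}
    }

    // After (sketch): the callback describes the OAuth step and returns errors,
    // so non-interactive front ends can handle them instead of exiting.
    func configNew(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
    	return oauthutil.ConfigOut("", &oauthutil.Options{
    		OAuth2Config: oauthConfig,
    	})
    }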
@@ -213,7 +209,10 @@ var retryErrorCodes = []int{
|
||||
|
||||
// shouldRetry returns a boolean as to whether this resp and err
|
||||
// deserve to be retried. It returns the err as a convenience
|
||||
func shouldRetry(resp *http.Response, err error) (bool, error) {
|
||||
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
|
||||
if fserrors.ContextError(ctx, &err) {
|
||||
return false, err
|
||||
}
|
||||
doRetry := false
|
||||
|
||||
// Check if it is an api.Error
|
||||
@@ -405,7 +404,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 err = f.pacer.Call(func() (bool, error) {
     resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
     err = result.Error.Update(err)
-    return shouldRetry(resp, err)
+    return shouldRetry(ctx, resp, err)
 })
 if err != nil {
     //fmt.Printf("...Error %v\n", err)
@@ -460,7 +459,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
 err = f.pacer.Call(func() (bool, error) {
     resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
     err = result.Error.Update(err)
-    return shouldRetry(resp, err)
+    return shouldRetry(ctx, resp, err)
 })
 if err != nil {
     return found, errors.Wrap(err, "couldn't list files")
@@ -597,7 +596,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 err = f.pacer.Call(func() (bool, error) {
     resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
     err = result.Error.Update(err)
-    return shouldRetry(resp, err)
+    return shouldRetry(ctx, resp, err)
 })
 if err != nil {
     return errors.Wrap(err, "rmdir failed")
@@ -662,7 +661,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 err = f.pacer.Call(func() (bool, error) {
     resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
     err = result.Error.Update(err)
-    return shouldRetry(resp, err)
+    return shouldRetry(ctx, resp, err)
 })
 if err != nil {
     return nil, err
@@ -700,7 +699,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 return f.pacer.Call(func() (bool, error) {
     resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
     err = result.Update(err)
-    return shouldRetry(resp, err)
+    return shouldRetry(ctx, resp, err)
 })
 }
 
@@ -740,7 +739,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 err = f.pacer.Call(func() (bool, error) {
     resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
     err = result.Error.Update(err)
-    return shouldRetry(resp, err)
+    return shouldRetry(ctx, resp, err)
 })
 if err != nil {
     return nil, err
@@ -787,7 +786,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 err = f.pacer.Call(func() (bool, error) {
     resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
     err = result.Error.Update(err)
-    return shouldRetry(resp, err)
+    return shouldRetry(ctx, resp, err)
 })
 if err != nil {
     return err
@@ -814,7 +813,7 @@ func (f *Fs) linkDir(ctx context.Context, dirID string, expire fs.Duration) (str
 err := f.pacer.Call(func() (bool, error) {
     resp, err := f.srv.CallJSON(ctx, &opts, nil, &result)
     err = result.Error.Update(err)
-    return shouldRetry(resp, err)
+    return shouldRetry(ctx, resp, err)
 })
 if err != nil {
     return "", err
@@ -838,7 +837,7 @@ func (f *Fs) linkFile(ctx context.Context, path string, expire fs.Duration) (str
 err = f.pacer.Call(func() (bool, error) {
     resp, err := f.srv.CallJSON(ctx, &opts, nil, &result)
     err = result.Error.Update(err)
-    return shouldRetry(resp, err)
+    return shouldRetry(ctx, resp, err)
 })
 if err != nil {
     return "", err
@@ -869,7 +868,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 err = f.pacer.Call(func() (bool, error) {
     resp, err = f.srv.CallJSON(ctx, &opts, nil, &q)
     err = q.Error.Update(err)
-    return shouldRetry(resp, err)
+    return shouldRetry(ctx, resp, err)
 })
 if err != nil {
     return nil, errors.Wrap(err, "about failed")
@@ -927,7 +926,7 @@ func (o *Object) getHashes(ctx context.Context) (err error) {
 err = o.fs.pacer.Call(func() (bool, error) {
     resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
     err = result.Error.Update(err)
-    return shouldRetry(resp, err)
+    return shouldRetry(ctx, resp, err)
 })
 if err != nil {
     return err
@@ -1046,7 +1045,7 @@ func (o *Object) downloadURL(ctx context.Context) (URL string, err error) {
 err = o.fs.pacer.Call(func() (bool, error) {
     resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
     err = result.Error.Update(err)
-    return shouldRetry(resp, err)
+    return shouldRetry(ctx, resp, err)
 })
 if err != nil {
     return "", err
@@ -1072,7 +1071,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 }
 err = o.fs.pacer.Call(func() (bool, error) {
     resp, err = o.fs.srv.Call(ctx, &opts)
-    return shouldRetry(resp, err)
+    return shouldRetry(ctx, resp, err)
 })
 if err != nil {
     return nil, err
@@ -1134,7 +1133,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 // opts.Body=0), so upload it as a multipart form POST with
 // Content-Length set.
 if size == 0 {
-    formReader, contentType, overhead, err := rest.MultipartUpload(in, opts.Parameters, "content", leaf)
+    formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, opts.Parameters, "content", leaf)
     if err != nil {
         return errors.Wrap(err, "failed to make multipart upload for 0 length file")
     }
@@ -1151,7 +1150,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 err = o.fs.pacer.CallNoRetry(func() (bool, error) {
     resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
     err = result.Error.Update(err)
-    return shouldRetry(resp, err)
+    return shouldRetry(ctx, resp, err)
 })
 if err != nil {
     // sometimes pcloud leaves a half complete file on
@@ -1181,7 +1180,7 @@ func (o *Object) Remove(ctx context.Context) error {
 return o.fs.pacer.Call(func() (bool, error) {
     resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &result)
     err = result.Error.Update(err)
-    return shouldRetry(resp, err)
+    return shouldRetry(ctx, resp, err)
 })
 }
@@ -20,7 +20,6 @@ import (
     "encoding/json"
     "fmt"
    "io"
-    "log"
     "net"
     "net/http"
     "net/url"
@@ -78,11 +77,10 @@ func init() {
     Name:        "premiumizeme",
     Description: "premiumize.me",
     NewFs:       NewFs,
-    Config: func(ctx context.Context, name string, m configmap.Mapper) {
-        err := oauthutil.Config(ctx, "premiumizeme", name, m, oauthConfig, nil)
-        if err != nil {
-            log.Fatalf("Failed to configure token: %v", err)
-        }
+    Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
+        return oauthutil.ConfigOut("", &oauthutil.Options{
+            OAuth2Config: oauthConfig,
+        })
     },
     Options: []fs.Option{{
         Name: "api_key",
@@ -176,7 +174,10 @@ var retryErrorCodes = []int{
 
 // shouldRetry returns a boolean as to whether this resp and err
 // deserve to be retried. It returns the err as a convenience
-func shouldRetry(resp *http.Response, err error) (bool, error) {
+func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
+    if fserrors.ContextError(ctx, &err) {
+        return false, err
+    }
     return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }
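Editor's note: all of these shouldRetry functions are only ever invoked from inside a pacer callback, and the pacer loop is what turns the boolean into an actual retry. A toy illustration of that contract (my simplified `call` stands in for rclone's `fs.Pacer.Call`, which additionally rate-limits and backs off between attempts):

```go
package main

import (
	"context"
	"fmt"
)

// call retries fn until it returns retry == false or attempts run out,
// mirroring how f.pacer.Call consumes the (retry, err) pair from shouldRetry.
func call(attempts int, fn func() (bool, error)) error {
	var err error
	for i := 0; i < attempts; i++ {
		var retry bool
		retry, err = fn()
		if !retry {
			return err
		}
	}
	return err
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	tries := 0
	err := call(5, func() (bool, error) {
		tries++
		if tries == 2 {
			cancel() // simulate Ctrl-C mid-transfer
		}
		if ctxErr := ctx.Err(); ctxErr != nil {
			return false, ctxErr // the new ctx check stops the loop early
		}
		return true, fmt.Errorf("transient error %d", tries)
	})
	fmt.Println(tries, err) // 2 context canceled — no wasted attempts
}
```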
@@ -370,7 +371,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 }
 err = f.pacer.Call(func() (bool, error) {
     resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
-    return shouldRetry(resp, err)
+    return shouldRetry(ctx, resp, err)
 })
 if err != nil {
     //fmt.Printf("...Error %v\n", err)
@@ -407,7 +408,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
 var resp *http.Response
 err = f.pacer.Call(func() (bool, error) {
     resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-    return shouldRetry(resp, err)
+    return shouldRetry(ctx, resp, err)
 })
 if err != nil {
     return found, errors.Wrap(err, "couldn't list files")
@@ -581,7 +582,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 var result api.Response
 err = f.pacer.Call(func() (bool, error) {
     resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-    return shouldRetry(resp, err)
+    return shouldRetry(ctx, resp, err)
 })
 if err != nil {
     return errors.Wrap(err, "rmdir failed")
@@ -660,7 +661,7 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
 var result api.Response
 err = f.pacer.Call(func() (bool, error) {
     resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-    return shouldRetry(resp, err)
+    return shouldRetry(ctx, resp, err)
 })
 if err != nil {
     return errors.Wrap(err, "Move http")
@@ -769,7 +770,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 }
 err = f.pacer.Call(func() (bool, error) {
     resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
-    return shouldRetry(resp, err)
+    return shouldRetry(ctx, resp, err)
 })
 if err != nil {
     return nil, errors.Wrap(err, "CreateDir http")
@@ -896,7 +897,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 }
 err = o.fs.pacer.Call(func() (bool, error) {
     resp, err = o.fs.srv.Call(ctx, &opts)
-    return shouldRetry(resp, err)
+    return shouldRetry(ctx, resp, err)
 })
 if err != nil {
     return nil, err
@@ -934,7 +935,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 err = o.fs.pacer.Call(func() (bool, error) {
     resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &info)
     if err != nil {
-        return shouldRetry(resp, err)
+        return shouldRetry(ctx, resp, err)
     }
     // Just check the download URL resolves - sometimes
     // the URLs returned by premiumize.me don't resolve so
@@ -993,7 +994,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 var result api.Response
 err = o.fs.pacer.CallNoRetry(func() (bool, error) {
     resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
-    return shouldRetry(resp, err)
+    return shouldRetry(ctx, resp, err)
 })
 if err != nil {
     return errors.Wrap(err, "upload file http")
@@ -1035,7 +1036,7 @@ func (f *Fs) renameLeaf(ctx context.Context, isFile bool, id string, newLeaf str
 var result api.Response
 err = f.pacer.Call(func() (bool, error) {
     resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-    return shouldRetry(resp, err)
+    return shouldRetry(ctx, resp, err)
 })
 if err != nil {
     return errors.Wrap(err, "rename http")
@@ -1060,7 +1061,7 @@ func (f *Fs) remove(ctx context.Context, id string) (err error) {
 var result api.Response
 err = f.pacer.Call(func() (bool, error) {
     resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-    return shouldRetry(resp, err)
+    return shouldRetry(ctx, resp, err)
 })
 if err != nil {
     return errors.Wrap(err, "remove http")
@@ -1,6 +1,7 @@
 package putio
 
 import (
+    "context"
     "fmt"
     "net/http"
 
@@ -29,7 +30,10 @@ func (e *statusCodeError) Temporary() bool {
 
 // shouldRetry returns a boolean as to whether this err deserves to be
 // retried. It returns the err as a convenience
-func shouldRetry(err error) (bool, error) {
+func shouldRetry(ctx context.Context, err error) (bool, error) {
+    if fserrors.ContextError(ctx, &err) {
+        return false, err
+    }
     if err == nil {
         return false, nil
     }
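Editor's note: unlike the HTTP-based backends, putio's shouldRetry has no *http.Response to inspect, so it classifies the error value alone. A sketch of that idea using Go's temporary-error convention (simplified: the real code also unwraps putio.ErrorResponse status codes before falling back to this check):

```go
package main

import (
	"context"
	"fmt"
)

type temporaryError struct{ msg string }

func (e temporaryError) Error() string   { return e.msg }
func (e temporaryError) Temporary() bool { return true }

func shouldRetry(ctx context.Context, err error) (bool, error) {
	if ctxErr := ctx.Err(); ctxErr != nil {
		return false, ctxErr // context cancellation always wins
	}
	if err == nil {
		return false, nil
	}
	// Retry anything that advertises itself as transient.
	if t, ok := err.(interface{ Temporary() bool }); ok && t.Temporary() {
		return true, err
	}
	return false, err
}

func main() {
	ctx := context.Background()
	fmt.Println(shouldRetry(ctx, temporaryError{"503 from server"})) // true ...
	fmt.Println(shouldRetry(ctx, fmt.Errorf("permanent failure")))   // false ...
}
```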
@@ -147,7 +147,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 err = f.pacer.Call(func() (bool, error) {
     // fs.Debugf(f, "creating folder. part: %s, parentID: %d", leaf, parentID)
     entry, err = f.client.Files.CreateFolder(ctx, f.opt.Enc.FromStandardName(leaf), parentID)
-    return shouldRetry(err)
+    return shouldRetry(ctx, err)
 })
 return itoa(entry.ID), err
 }
@@ -164,7 +164,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
 err = f.pacer.Call(func() (bool, error) {
     // fs.Debugf(f, "listing file: %d", fileID)
     children, _, err = f.client.Files.List(ctx, fileID)
-    return shouldRetry(err)
+    return shouldRetry(ctx, err)
 })
 if err != nil {
     if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode == 404 {
@@ -205,7 +205,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 err = f.pacer.Call(func() (bool, error) {
     // fs.Debugf(f, "listing files inside List: %d", parentID)
     children, _, err = f.client.Files.List(ctx, parentID)
-    return shouldRetry(err)
+    return shouldRetry(ctx, err)
 })
 if err != nil {
     return
@@ -271,7 +271,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
 err = f.pacer.Call(func() (bool, error) {
     // fs.Debugf(f, "getting file: %d", fileID)
     entry, err = f.client.Files.Get(ctx, fileID)
-    return shouldRetry(err)
+    return shouldRetry(ctx, err)
 })
 if err != nil {
     return nil, err
@@ -295,7 +295,7 @@ func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID
 req.Header.Set("upload-metadata", fmt.Sprintf("name %s,no-torrent %s,parent_id %s,updated-at %s", b64name, b64true, b64parentID, b64modifiedAt))
 fs.OpenOptionAddHTTPHeaders(req.Header, options)
 resp, err := f.oAuthClient.Do(req)
-retry, err := shouldRetry(err)
+retry, err := shouldRetry(ctx, err)
 if retry {
     return true, err
 }
@@ -320,7 +320,7 @@ func (f *Fs) sendUpload(ctx context.Context, location string, size int64, in io.
 err = f.pacer.Call(func() (bool, error) {
     fs.Debugf(f, "Sending zero length chunk")
     _, fileID, err = f.transferChunk(ctx, location, 0, bytes.NewReader([]byte{}), 0)
-    return shouldRetry(err)
+    return shouldRetry(ctx, err)
 })
 return
 }
@@ -344,13 +344,13 @@ func (f *Fs) sendUpload(ctx context.Context, location string, size int64, in io.
 // Get file offset and seek to the position
 offset, err := f.getServerOffset(ctx, location)
 if err != nil {
-    return shouldRetry(err)
+    return shouldRetry(ctx, err)
 }
 sentBytes := offset - chunkStart
 fs.Debugf(f, "sentBytes: %d", sentBytes)
 _, err = chunk.Seek(sentBytes, io.SeekStart)
 if err != nil {
-    return shouldRetry(err)
+    return shouldRetry(ctx, err)
 }
 transferOffset = offset
 reqSize = chunkSize - sentBytes
@@ -367,7 +367,7 @@ func (f *Fs) sendUpload(ctx context.Context, location string, size int64, in io.
     offsetMismatch = true
     return true, errors.New("connection broken")
 }
-return shouldRetry(err)
+return shouldRetry(ctx, err)
 })
 if err != nil {
     return
@@ -479,7 +479,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
 err = f.pacer.Call(func() (bool, error) {
     // fs.Debugf(f, "listing files: %d", dirID)
     children, _, err = f.client.Files.List(ctx, dirID)
-    return shouldRetry(err)
+    return shouldRetry(ctx, err)
 })
 if err != nil {
     return errors.Wrap(err, "Rmdir")
@@ -493,7 +493,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
 err = f.pacer.Call(func() (bool, error) {
     // fs.Debugf(f, "deleting file: %d", dirID)
     err = f.client.Files.Delete(ctx, dirID)
-    return shouldRetry(err)
+    return shouldRetry(ctx, err)
 })
 f.dirCache.FlushDir(dir)
 return err
@@ -552,7 +552,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Objec
 req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
 // fs.Debugf(f, "copying file (%d) to parent_id: %s", srcObj.file.ID, directoryID)
 _, err = f.client.Do(req, nil)
-return shouldRetry(err)
+return shouldRetry(ctx, err)
 })
 if err != nil {
     return nil, err
@@ -591,7 +591,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (o fs.Objec
 req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
 // fs.Debugf(f, "moving file (%d) to parent_id: %s", srcObj.file.ID, directoryID)
 _, err = f.client.Do(req, nil)
-return shouldRetry(err)
+return shouldRetry(ctx, err)
 })
 if err != nil {
     return nil, err
@@ -631,7 +631,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
 // fs.Debugf(f, "moving file (%s) to parent_id: %s", srcID, dstDirectoryID)
 _, err = f.client.Do(req, nil)
-return shouldRetry(err)
+return shouldRetry(ctx, err)
 })
 srcFs.dirCache.FlushDir(srcRemote)
 return err
@@ -644,7 +644,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 err = f.pacer.Call(func() (bool, error) {
     // fs.Debugf(f, "getting account info")
     ai, err = f.client.Account.Info(ctx)
-    return shouldRetry(err)
+    return shouldRetry(ctx, err)
 })
 if err != nil {
     return nil, errors.Wrap(err, "about failed")
@@ -678,6 +678,6 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
 }
 // fs.Debugf(f, "emptying trash")
 _, err = f.client.Do(req, nil)
-return shouldRetry(err)
+return shouldRetry(ctx, err)
 })
 }
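Editor's note: the sendUpload hunks above show how putio resumes an interrupted chunk — ask the server for its current offset, then seek the local reader to match before retrying. A minimal sketch of that resync step (the `resumeChunk` helper and its parameters are my own illustration; the real backend gets the offset via the tus resumable-upload protocol's getServerOffset):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// resumeChunk seeks chunk so the next read starts exactly where the server
// stopped, and returns how many bytes of this chunk remain to be sent.
func resumeChunk(chunk io.ReadSeeker, chunkStart, chunkSize, serverOffset int64) (int64, error) {
	sentBytes := serverOffset - chunkStart
	if _, err := chunk.Seek(sentBytes, io.SeekStart); err != nil {
		return 0, err
	}
	return chunkSize - sentBytes, nil
}

func main() {
	chunk := bytes.NewReader([]byte("0123456789"))
	// The server reports it already holds bytes up to absolute offset 104;
	// this chunk began at offset 100, so 4 bytes landed before the failure.
	remaining, err := resumeChunk(chunk, 100, 10, 104)
	if err != nil {
		panic(err)
	}
	rest, _ := io.ReadAll(chunk)
	fmt.Println(remaining, string(rest)) // 6 456789
}
```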
@@ -145,7 +145,7 @@ func (o *Object) readEntry(ctx context.Context) (f *putio.File, err error) {
 if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode == 404 {
     return false, fs.ErrorObjectNotFound
 }
-return shouldRetry(err)
+return shouldRetry(ctx, err)
 })
 if err != nil {
     return nil, err
@@ -220,7 +220,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 var storageURL string
 err = o.fs.pacer.Call(func() (bool, error) {
     storageURL, err = o.fs.client.Files.URL(ctx, o.file.ID, true)
-    return shouldRetry(err)
+    return shouldRetry(ctx, err)
 })
 if err != nil {
     return
@@ -231,7 +231,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 err = o.fs.pacer.Call(func() (bool, error) {
     req, err := http.NewRequestWithContext(ctx, http.MethodGet, storageURL, nil)
     if err != nil {
-        return shouldRetry(err)
+        return shouldRetry(ctx, err)
     }
     req.Header.Set("User-Agent", o.fs.client.UserAgent)
 
@@ -241,7 +241,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 }
 // fs.Debugf(o, "opening file: id=%d", o.file.ID)
 resp, err = o.fs.httpClient.Do(req)
-return shouldRetry(err)
+return shouldRetry(ctx, err)
 })
 if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode >= 400 && perr.Response.StatusCode <= 499 {
     _ = resp.Body.Close()
@@ -283,6 +283,6 @@ func (o *Object) Remove(ctx context.Context) (err error) {
 return o.fs.pacer.Call(func() (bool, error) {
     // fs.Debugf(o, "removing file: id=%d", o.file.ID)
     err = o.fs.client.Files.Delete(ctx, o.file.ID)
-    return shouldRetry(err)
+    return shouldRetry(ctx, err)
 })
 }
@@ -2,7 +2,6 @@ package putio
 
 import (
     "context"
-    "log"
     "regexp"
     "time"
 
@@ -35,7 +34,7 @@ const (
     minSleep         = 10 * time.Millisecond
     maxSleep         = 2 * time.Second
     decayConstant    = 2 // bigger for slower decay, exponential
-    defaultChunkSize = 48 * fs.MebiByte
+    defaultChunkSize = 48 * fs.Mebi
 )
 
 var (
@@ -60,14 +59,11 @@ func init() {
     Name:        "putio",
     Description: "Put.io",
     NewFs:       NewFs,
-    Config: func(ctx context.Context, name string, m configmap.Mapper) {
-        opt := oauthutil.Options{
-            NoOffline: true,
-        }
-        err := oauthutil.Config(ctx, "putio", name, m, putioConfig, &opt)
-        if err != nil {
-            log.Fatalf("Failed to configure token: %v", err)
-        }
+    Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
+        return oauthutil.ConfigOut("", &oauthutil.Options{
+            OAuth2Config: putioConfig,
+            NoOffline:    true,
+        })
     },
     Options: []fs.Option{{
         Name: config.ConfigEncoding,
 
@@ -80,7 +80,7 @@ func init() {
     Help: `Cutoff for switching to chunked upload
 
 Any files larger than this will be uploaded in chunks of chunk_size.
-The minimum is 0 and the maximum is 5GB.`,
+The minimum is 0 and the maximum is 5 GiB.`,
     Default:  defaultUploadCutoff,
     Advanced: true,
 }, {
148  backend/s3/s3.go
@@ -26,7 +26,6 @@ import (
     "github.com/aws/aws-sdk-go/aws/corehandlers"
     "github.com/aws/aws-sdk-go/aws/credentials"
     "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
-    "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
     "github.com/aws/aws-sdk-go/aws/defaults"
     "github.com/aws/aws-sdk-go/aws/ec2metadata"
     "github.com/aws/aws-sdk-go/aws/endpoints"
@@ -59,7 +58,7 @@ import (
 func init() {
     fs.Register(&fs.RegInfo{
         Name: "s3",
-        Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, and Tencent COS",
+        Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, SeaweedFS, and Tencent COS",
         NewFs:       NewFs,
         CommandHelp: commandHelp,
         Options: []fs.Option{{
@@ -92,6 +91,9 @@ func init() {
         }, {
             Value: "Scaleway",
             Help:  "Scaleway Object Storage",
+        }, {
+            Value: "SeaweedFS",
+            Help:  "SeaweedFS S3",
         }, {
             Value: "StackPath",
             Help:  "StackPath Object Storage",
@@ -593,6 +595,10 @@ func init() {
             Value:    "sgp1.digitaloceanspaces.com",
             Help:     "Digital Ocean Spaces Singapore 1",
             Provider: "DigitalOcean",
+        }, {
+            Value:    "localhost:8333",
+            Help:     "SeaweedFS S3 localhost",
+            Provider: "SeaweedFS",
         }, {
             Value: "s3.wasabisys.com",
             Help:  "Wasabi US East endpoint",
@@ -1017,7 +1023,7 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
     Help: `Cutoff for switching to chunked upload
 
 Any files larger than this will be uploaded in chunks of chunk_size.
-The minimum is 0 and the maximum is 5GB.`,
+The minimum is 0 and the maximum is 5 GiB.`,
     Default:  defaultUploadCutoff,
     Advanced: true,
 }, {
@@ -1039,9 +1045,9 @@ Rclone will automatically increase the chunk size when uploading a
 large file of known size to stay below the 10,000 chunks limit.
 
 Files of unknown size are uploaded with the configured
-chunk_size. Since the default chunk size is 5MB and there can be at
+chunk_size. Since the default chunk size is 5 MiB and there can be at
 most 10,000 chunks, this means that by default the maximum size of
-a file you can stream upload is 48GB. If you wish to stream upload
+a file you can stream upload is 48 GiB. If you wish to stream upload
 larger files then you will need to increase chunk_size.`,
     Default:  minChunkSize,
     Advanced: true,
@@ -1067,7 +1073,7 @@ large file of a known size to stay below this number of chunks limit.
 Any files larger than this that need to be server-side copied will be
 copied in chunks of this size.
 
-The minimum is 0 and the maximum is 5GB.`,
+The minimum is 0 and the maximum is 5 GiB.`,
     Default:  fs.SizeSuffix(maxSizeForCopy),
     Advanced: true,
 }, {
@@ -1221,6 +1227,11 @@ very small even with this flag.
 `,
     Default:  false,
     Advanced: true,
+}, {
+    Name:     "no_head_object",
+    Help:     `If set, don't HEAD objects`,
+    Default:  false,
+    Advanced: true,
 }, {
     Name: config.ConfigEncoding,
     Help: config.ConfigEncodingHelp,
@@ -1271,7 +1282,7 @@ See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rcl
 const (
     metaMtime   = "Mtime"     // the meta key to store mtime in - e.g. X-Amz-Meta-Mtime
     metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
-    // The maximum size of object we can COPY - this should be 5GiB but is < 5GB for b2 compatibility
+    // The maximum size of object we can COPY - this should be 5 GiB but is < 5 GB for b2 compatibility
     // See https://forum.rclone.org/t/copying-files-within-a-b2-bucket/16680/76
     maxSizeForCopy = 4768 * 1024 * 1024
     maxUploadParts = 10000 // maximum allowed number of parts in a multi-part upload
@@ -1319,6 +1330,7 @@ type Options struct {
     ListChunk           int64                `config:"list_chunk"`
     NoCheckBucket       bool                 `config:"no_check_bucket"`
     NoHead              bool                 `config:"no_head"`
+    NoHeadObject        bool                 `config:"no_head_object"`
     Enc                 encoder.MultiEncoder `config:"encoding"`
     MemoryPoolFlushTime fs.Duration          `config:"memory_pool_flush_time"`
     MemoryPoolUseMmap   bool                 `config:"memory_pool_use_mmap"`
@@ -1399,7 +1411,10 @@ var retryErrorCodes = []int{
 //S3 is pretty resilient, and the built in retry handling is probably sufficient
 // as it should notice closed connections and timeouts which are the most likely
 // sort of failure modes
-func (f *Fs) shouldRetry(err error) (bool, error) {
+func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
+    if fserrors.ContextError(ctx, &err) {
+        return false, err
+    }
     // If this is an awserr object, try and extract more useful information to determine if we should retry
     if awsError, ok := err.(awserr.Error); ok {
         // Simple case, check the original embedded error in case it's generically retryable
@@ -1411,7 +1426,7 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
     // 301 if wrong region for bucket - can only update if running from a bucket
     if f.rootBucket != "" {
         if reqErr.StatusCode() == http.StatusMovedPermanently {
-            urfbErr := f.updateRegionForBucket(f.rootBucket)
+            urfbErr := f.updateRegionForBucket(ctx, f.rootBucket)
             if urfbErr != nil {
                 fs.Errorf(f, "Failed to update region for bucket: %v", urfbErr)
                 return false, err
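Editor's note: the S3 shouldRetry does more than classify errors — on a 301 it re-reads the bucket's region and retries against the corrected endpoint, which is why updateRegionForBucket now needs the ctx too. A rough sketch of that control flow under stated assumptions (`lookupRegion` is a hypothetical stand-in for the GetBucketLocation call; the real code also rebuilds the AWS session):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
)

type client struct{ region string }

// lookupRegion stands in for reading the bucket's real location from the API.
func lookupRegion(ctx context.Context, bucket string) (string, error) {
	return "eu-west-1", nil
}

// shouldRetry retries a 301 once after switching the client to the region
// the bucket actually lives in; everything else is returned as-is.
func (c *client) shouldRetry(ctx context.Context, status int, bucket string, err error) (bool, error) {
	if ctxErr := ctx.Err(); ctxErr != nil {
		return false, ctxErr
	}
	if status == http.StatusMovedPermanently {
		region, rerr := lookupRegion(ctx, bucket)
		if rerr != nil {
			return false, err
		}
		c.region = region
		return true, err // retry against the corrected region
	}
	return false, err
}

func main() {
	c := &client{region: "us-east-1"}
	retry, _ := c.shouldRetry(context.Background(), http.StatusMovedPermanently,
		"my-bucket", fmt.Errorf("301 wrong region"))
	fmt.Println(retry, c.region) // true eu-west-1
}
```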
@@ -1508,11 +1523,6 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
     }),
     ExpiryWindow: 3 * time.Minute,
 },
-
-// Pick up IAM role if we are in EKS
-&stscreds.WebIdentityRoleProvider{
-    ExpiryWindow: 3 * time.Minute,
-},
 }
 cred := credentials.NewChainCredentials(providers)
 
@@ -1559,6 +1569,8 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
 if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" {
     // Enable loading config options from ~/.aws/config (selected by AWS_PROFILE env)
     awsSessionOpts.SharedConfigState = session.SharedConfigEnable
+    // Set the name of the profile if supplied
+    awsSessionOpts.Profile = opt.Profile
 }
 ses, err := session.NewSessionWithOptions(awsSessionOpts)
 if err != nil {
@@ -1688,7 +1700,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
     GetTier:     true,
     SlowModTime: true,
 }).Fill(ctx, f)
-if f.rootBucket != "" && f.rootDirectory != "" {
+if f.rootBucket != "" && f.rootDirectory != "" && !opt.NoHeadObject && !strings.HasSuffix(root, "/") {
     // Check to see if the (bucket,directory) is actually an existing file
     oldRoot := f.root
     newRoot, leaf := path.Split(oldRoot)
@@ -1725,7 +1737,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *s3.Obje
     o.setMD5FromEtag(aws.StringValue(info.ETag))
     o.bytes = aws.Int64Value(info.Size)
     o.storageClass = aws.StringValue(info.StorageClass)
-} else {
+} else if !o.fs.opt.NoHeadObject {
     err := o.readMetaData(ctx) // reads info and meta, returning an error
     if err != nil {
         return nil, err
@@ -1741,7 +1753,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 }
 
 // Gets the bucket location
-func (f *Fs) getBucketLocation(bucket string) (string, error) {
+func (f *Fs) getBucketLocation(ctx context.Context, bucket string) (string, error) {
     req := s3.GetBucketLocationInput{
         Bucket: &bucket,
     }
@@ -1749,7 +1761,7 @@ func (f *Fs) getBucketLocation(bucket string) (string, error) {
 var err error
 err = f.pacer.Call(func() (bool, error) {
     resp, err = f.c.GetBucketLocation(&req)
-    return f.shouldRetry(err)
+    return f.shouldRetry(ctx, err)
 })
 if err != nil {
     return "", err
@@ -1759,8 +1771,8 @@ func (f *Fs) getBucketLocation(bucket string) (string, error) {
 
 // Updates the region for the bucket by reading the region from the
 // bucket then updating the session.
-func (f *Fs) updateRegionForBucket(bucket string) error {
-    region, err := f.getBucketLocation(bucket)
+func (f *Fs) updateRegionForBucket(ctx context.Context, bucket string) error {
+    region, err := f.getBucketLocation(ctx, bucket)
     if err != nil {
         return errors.Wrap(err, "reading bucket location failed")
     }
@@ -1854,7 +1866,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
             }
         }
     }
-    return f.shouldRetry(err)
+    return f.shouldRetry(ctx, err)
 })
 if err != nil {
     if awsErr, ok := err.(awserr.RequestFailure); ok {
@@ -2001,7 +2013,7 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
 var resp *s3.ListBucketsOutput
 err = f.pacer.Call(func() (bool, error) {
     resp, err = f.c.ListBucketsWithContext(ctx, &req)
-    return f.shouldRetry(err)
+    return f.shouldRetry(ctx, err)
 })
 if err != nil {
     return nil, err
@@ -2116,7 +2128,7 @@ func (f *Fs) bucketExists(ctx context.Context, bucket string) (bool, error) {
 }
 err := f.pacer.Call(func() (bool, error) {
     _, err := f.c.HeadBucketWithContext(ctx, &req)
-    return f.shouldRetry(err)
+    return f.shouldRetry(ctx, err)
 })
 if err == nil {
     return true, nil
@@ -2152,7 +2164,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
 }
 err := f.pacer.Call(func() (bool, error) {
     _, err := f.c.CreateBucketWithContext(ctx, &req)
-    return f.shouldRetry(err)
+    return f.shouldRetry(ctx, err)
 })
 if err == nil {
     fs.Infof(f, "Bucket %q created with ACL %q", bucket, f.opt.BucketACL)
@@ -2182,7 +2194,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 }
 err := f.pacer.Call(func() (bool, error) {
     _, err := f.c.DeleteBucketWithContext(ctx, &req)
-    return f.shouldRetry(err)
+    return f.shouldRetry(ctx, err)
 })
 if err == nil {
     fs.Infof(f, "Bucket %q deleted", bucket)
@@ -2242,7 +2254,7 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
 }
 return f.pacer.Call(func() (bool, error) {
     _, err := f.c.CopyObjectWithContext(ctx, req)
-    return f.shouldRetry(err)
+    return f.shouldRetry(ctx, err)
 })
 }
 
@@ -2286,7 +2298,7 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
 if err := f.pacer.Call(func() (bool, error) {
     var err error
     cout, err = f.c.CreateMultipartUploadWithContext(ctx, req)
-    return f.shouldRetry(err)
+    return f.shouldRetry(ctx, err)
 }); err != nil {
     return err
 }
@@ -2302,7 +2314,7 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
     UploadId:     uid,
     RequestPayer: req.RequestPayer,
 })
-return f.shouldRetry(err)
+return f.shouldRetry(ctx, err)
 })
 })()
 
@@ -2325,7 +2337,7 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
 uploadPartReq.CopySourceRange = aws.String(calculateRange(partSize, partNum-1, numParts, srcSize))
 uout, err := f.c.UploadPartCopyWithContext(ctx, uploadPartReq)
 if err != nil {
-    return f.shouldRetry(err)
+    return f.shouldRetry(ctx, err)
 }
 parts = append(parts, &s3.CompletedPart{
     PartNumber: &partNum,
@@ -2347,7 +2359,7 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
     RequestPayer: req.RequestPayer,
     UploadId:     uid,
 })
-return f.shouldRetry(err)
+return f.shouldRetry(ctx, err)
 })
 }
 
@@ -2578,7 +2590,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
 reqCopy.Key = &bucketPath
 err = f.pacer.Call(func() (bool, error) {
     _, err = f.c.RestoreObject(&reqCopy)
-    return f.shouldRetry(err)
+    return f.shouldRetry(ctx, err)
 })
 if err != nil {
     st.Status = err.Error()
@@ -2626,7 +2638,7 @@ func (f *Fs) listMultipartUploads(ctx context.Context, bucket, key string) (uplo
 var resp *s3.ListMultipartUploadsOutput
 err = f.pacer.Call(func() (bool, error) {
     resp, err = f.c.ListMultipartUploads(&req)
-    return f.shouldRetry(err)
+    return f.shouldRetry(ctx, err)
 })
 if err != nil {
     return nil, errors.Wrapf(err, "list multipart uploads bucket %q key %q", bucket, key)
@@ -2801,7 +2813,7 @@ func (o *Object) headObject(ctx context.Context) (resp *s3.HeadObjectOutput, err
 err = o.fs.pacer.Call(func() (bool, error) {
     var err error
     resp, err = o.fs.c.HeadObjectWithContext(ctx, &req)
-    return o.fs.shouldRetry(err)
+    return o.fs.shouldRetry(ctx, err)
 })
 if err != nil {
     if awsErr, ok := err.(awserr.RequestFailure); ok {
@@ -2826,15 +2838,23 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 if err != nil {
     return err
 }
+if resp.LastModified == nil {
+    fs.Logf(o, "Failed to read last modified from HEAD: %v", err)
+}
+o.setMetaData(resp.ETag, resp.ContentLength, resp.LastModified, resp.Metadata, resp.ContentType, resp.StorageClass)
+return nil
+}
+
+func (o *Object) setMetaData(etag *string, contentLength *int64, lastModified *time.Time, meta map[string]*string, mimeType *string, storageClass *string) {
 var size int64
 // Ignore missing Content-Length assuming it is 0
 // Some versions of ceph do this due their apache proxies
-if resp.ContentLength != nil {
-    size = *resp.ContentLength
+if contentLength != nil {
+    size = *contentLength
 }
-o.setMD5FromEtag(aws.StringValue(resp.ETag))
+o.setMD5FromEtag(aws.StringValue(etag))
 o.bytes = size
-o.meta = resp.Metadata
+o.meta = meta
 if o.meta == nil {
     o.meta = map[string]*string{}
 }
@@ -2849,15 +2869,13 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
     o.md5 = hex.EncodeToString(md5sumBytes)
 }
 }
-o.storageClass = aws.StringValue(resp.StorageClass)
-if resp.LastModified == nil {
-    fs.Logf(o, "Failed to read last modified from HEAD: %v", err)
+o.storageClass = aws.StringValue(storageClass)
+if lastModified == nil {
     o.lastModified = time.Now()
 } else {
-    o.lastModified = *resp.LastModified
+    o.lastModified = *lastModified
 }
-o.mimeType = aws.StringValue(resp.ContentType)
-return nil
+o.mimeType = aws.StringValue(mimeType)
 }
 
 // ModTime returns the modification time of the object
@@ -2957,7 +2975,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 var err error
 httpReq.HTTPRequest = httpReq.HTTPRequest.WithContext(ctx)
 err = httpReq.Send()
-return o.fs.shouldRetry(err)
+return o.fs.shouldRetry(ctx, err)
 })
 if err, ok := err.(awserr.RequestFailure); ok {
     if err.Code() == "InvalidObjectState" {
@@ -2967,6 +2985,26 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 if err != nil {
     return nil, err
 }
+if resp.LastModified == nil {
+    fs.Logf(o, "Failed to read last modified: %v", err)
+}
+// read size from ContentLength or ContentRange
+size := resp.ContentLength
+if resp.ContentRange != nil {
+    var contentRange = *resp.ContentRange
+    slash := strings.IndexRune(contentRange, '/')
+    if slash >= 0 {
+        i, err := strconv.ParseInt(contentRange[slash+1:], 10, 64)
+        if err == nil {
+            size = &i
+        } else {
+            fs.Debugf(o, "Failed to find parse integer from in %q: %v", contentRange, err)
+        }
+    } else {
+        fs.Debugf(o, "Failed to find length in %q", contentRange)
+    }
+}
+o.setMetaData(resp.ETag, size, resp.LastModified, resp.Metadata, resp.ContentType, resp.StorageClass)
 return resp.Body, nil
 }
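Editor's note: the new Open code above derives the object's total size from the Content-Range header when a ranged GET returns 206, since ContentLength then only covers the requested range. A standalone version of exactly that parse:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// sizeFromContentRange extracts the total length from a header such as
// "bytes 0-1023/4096"; ok is false when the total is absent ("/*") or
// malformed, in which case the caller should fall back to ContentLength.
func sizeFromContentRange(contentRange string) (size int64, ok bool) {
	slash := strings.IndexRune(contentRange, '/')
	if slash < 0 {
		return 0, false
	}
	i, err := strconv.ParseInt(contentRange[slash+1:], 10, 64)
	if err != nil {
		return 0, false
	}
	return i, true
}

func main() {
	fmt.Println(sizeFromContentRange("bytes 0-1023/4096")) // 4096 true
	fmt.Println(sizeFromContentRange("bytes 0-1023/*"))    // 0 false
}
```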
@@ -2992,9 +3030,9 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
 // calculate size of parts
 partSize := int(f.opt.ChunkSize)
 
-// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
-// buffers here (default 5MB). With a maximum number of parts (10,000) this will be a file of
-// 48GB which seems like a not too unreasonable limit.
+// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
+// buffers here (default 5 MiB). With a maximum number of parts (10,000) this will be a file of
+// 48 GiB which seems like a not too unreasonable limit.
 if size == -1 {
     warnStreamUpload.Do(func() {
         fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
@@ -3003,7 +3041,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
 } else {
     // Adjust partSize until the number of parts is small enough.
     if size/int64(partSize) >= uploadParts {
-        // Calculate partition size rounded up to the nearest MB
+        // Calculate partition size rounded up to the nearest MiB
         partSize = int((((size / uploadParts) >> 20) + 1) << 20)
     }
 }
@@ -3016,7 +3054,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
 err = f.pacer.Call(func() (bool, error) {
     var err error
     cout, err = f.c.CreateMultipartUploadWithContext(ctx, &mReq)
-    return f.shouldRetry(err)
+    return f.shouldRetry(ctx, err)
 })
 if err != nil {
     return errors.Wrap(err, "multipart upload failed to initialise")
@@ -3035,7 +3073,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
     UploadId:     uid,
     RequestPayer: req.RequestPayer,
 })
-return f.shouldRetry(err)
+return f.shouldRetry(ctx, err)
 })
 if errCancel != nil {
     fs.Debugf(o, "Failed to cancel multipart upload: %v", errCancel)
@@ -3111,7 +3149,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
 uout, err := f.c.UploadPartWithContext(gCtx, uploadPartReq)
 if err != nil {
     if partNum <= int64(concurrency) {
-        return f.shouldRetry(err)
+        return f.shouldRetry(ctx, err)
     }
     // retry all chunks once have done the first batch
     return true, err
@@ -3151,7 +3189,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
     RequestPayer: req.RequestPayer,
     UploadId:     uid,
 })
-return f.shouldRetry(err)
+return f.shouldRetry(ctx, err)
 })
 if err != nil {
     return errors.Wrap(err, "multipart upload failed to finalise")
@@ -3306,11 +3344,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 var err error
 resp, err = o.fs.srv.Do(httpReq)
 if err != nil {
-    return o.fs.shouldRetry(err)
+    return o.fs.shouldRetry(ctx, err)
 }
 body, err := rest.ReadBody(resp)
 if err != nil {
-    return o.fs.shouldRetry(err)
+    return o.fs.shouldRetry(ctx, err)
 }
 if resp.StatusCode >= 200 && resp.StatusCode < 299 {
     return false, nil
@@ -3361,7 +3399,7 @@ func (o *Object) Remove(ctx context.Context) error {
 }
 err := o.fs.pacer.Call(func() (bool, error) {
     _, err := o.fs.c.DeleteObjectWithContext(ctx, &req)
-    return o.fs.shouldRetry(err)
+    return o.fs.shouldRetry(ctx, err)
 })
 return err
 }
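Editor's note: the part-size adjustment in uploadMultipart keeps a known-size upload under S3's 10,000-part limit by rounding the part size up to the next MiB. A worked example using the same expression as the diff:

```go
package main

import "fmt"

const uploadParts = 10000

// partSizeFor reproduces the diff's sizing logic: if the default part size
// would need 10,000 or more parts, grow the part size to
// ceil(size/uploadParts) rounded up to a whole MiB.
func partSizeFor(size int64, defaultPartSize int) int {
	partSize := defaultPartSize
	if size/int64(partSize) >= uploadParts {
		// Calculate partition size rounded up to the nearest MiB.
		partSize = int((((size / uploadParts) >> 20) + 1) << 20)
	}
	return partSize
}

func main() {
	const MiB = 1 << 20
	// 5 MiB parts cap out at 10,000 * 5 MiB ≈ 48 GiB, so a 100 GiB upload
	// gets bumped to 11 MiB parts.
	fmt.Println(partSizeFor(100<<30, 5*MiB) / MiB) // 11
}
```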
@@ -296,87 +296,86 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 }
 
 // Config callback for 2FA
-func Config(ctx context.Context, name string, m configmap.Mapper) {
-    ci := fs.GetConfig(ctx)
+func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
     serverURL, ok := m.Get(configURL)
     if !ok || serverURL == "" {
         // If there's no server URL, it means we're trying an operation at the backend level, like a "rclone authorize seafile"
-        fmt.Print("\nOperation not supported on this remote.\nIf you need a 2FA code on your account, use the command:\n\nrclone config reconnect <remote name>:\n\n")
-        return
-    }
-
-    // Stop if we are running non-interactive config
-    if ci.AutoConfirm {
-        return
+        return nil, errors.New("operation not supported on this remote. If you need a 2FA code on your account, use the command: rclone config reconnect <remote name>: ")
     }
 
     u, err := url.Parse(serverURL)
     if err != nil {
-        fs.Errorf(nil, "Invalid server URL %s", serverURL)
-        return
+        return nil, errors.Errorf("invalid server URL %s", serverURL)
     }
 
     is2faEnabled, _ := m.Get(config2FA)
     if is2faEnabled != "true" {
-        fmt.Println("Two-factor authentication is not enabled on this account.")
-        return
+        return nil, errors.New("two-factor authentication is not enabled on this account")
     }
 
     username, _ := m.Get(configUser)
     if username == "" {
-        fs.Errorf(nil, "A username is required")
-        return
+        return nil, errors.New("a username is required")
     }
 
     password, _ := m.Get(configPassword)
     if password != "" {
         password, _ = obscure.Reveal(password)
     }
-    // Just make sure we do have a password
-    for password == "" {
-        fmt.Print("Two-factor authentication: please enter your password (it won't be saved in the configuration)\npassword> ")
-        password = config.ReadPassword()
-    }
-
-    // Create rest client for getAuthorizationToken
-    url := u.String()
-    if !strings.HasPrefix(url, "/") {
-        url += "/"
-    }
-    srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(url)
-
-    // We loop asking for a 2FA code
-    for {
-        code := ""
-        for code == "" {
-            fmt.Print("Two-factor authentication: please enter your 2FA code\n2fa code> ")
-            code = config.ReadLine()
+    switch config.State {
+    case "":
+        // Just make sure we do have a password
+        if password == "" {
+            return fs.ConfigPassword("", "config_password", "Two-factor authentication: please enter your password (it won't be saved in the configuration)")
         }
+        return fs.ConfigGoto("password")
+    case "password":
+        password = config.Result
+        if password == "" {
+            return fs.ConfigError("password", "Password can't be blank")
+        }
+        m.Set(configPassword, obscure.MustObscure(config.Result))
+        return fs.ConfigGoto("2fa")
+    case "2fa":
+        return fs.ConfigInput("2fa_do", "config_2fa", "Two-factor authentication: please enter your 2FA code")
+    case "2fa_do":
+        code := config.Result
+        if code == "" {
+            return fs.ConfigError("2fa", "2FA codes can't be blank")
+        }
 
+        // Create rest client for getAuthorizationToken
+        url := u.String()
+        if !strings.HasPrefix(url, "/") {
+            url += "/"
+        }
+        srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(url)
 
+        // We loop asking for a 2FA code
+        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+        defer cancel()
 
         fmt.Println("Authenticating...")
         token, err := getAuthorizationToken(ctx, srv, username, password, code)
         if err != nil {
-            fmt.Printf("Authentication failed: %v\n", err)
-            tryAgain := strings.ToLower(config.ReadNonEmptyLine("Do you want to try again (y/n)?"))
-            if tryAgain != "y" && tryAgain != "yes" {
-                // The user is giving up, we're done here
-                break
-            }
+            return fs.ConfigConfirm("2fa_error", true, "config_retry", fmt.Sprintf("Authentication failed: %v\n\nTry Again?", err))
         }
-        if token != "" {
-            fmt.Println("Success!")
-            // Let's save the token into the configuration
-            m.Set(configAuthToken, token)
-            // And delete any previous entry for password
-            m.Set(configPassword, "")
-            config.SaveConfig()
-            // And we're done here
-            break
+        if token == "" {
+            return fs.ConfigConfirm("2fa_error", true, "config_retry", "Authentication failed - no token returned.\n\nTry Again?")
         }
+        // Let's save the token into the configuration
+        m.Set(configAuthToken, token)
+        // And delete any previous entry for password
+        m.Set(configPassword, "")
+        // And we're done here
+        return nil, nil
+    case "2fa_error":
+        if config.Result == "true" {
+            return fs.ConfigGoto("2fa")
+        }
+        return nil, errors.New("2fa authentication failed")
     }
+    return nil, fmt.Errorf("unknown state %q", config.State)
 }
 
 // sets the AuthorizationToken up
@@ -408,7 +407,10 @@ var retryErrorCodes = []int{
 
 // shouldRetry returns a boolean as to whether this resp and err
 // deserve to be retried. It returns the err as a convenience
-func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
+func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
+    if fserrors.ContextError(ctx, &err) {
+        return false, err
+    }
     // For 429 errors look at the Retry-After: header and
     // set the retry appropriately, starting with a minimum of 1
     // second if it isn't set.
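Editor's note: the seafile Config rewrite above replaces an interactive prompt loop with a resumable state machine — each call handles one state and returns either the next question or a terminal result. A skeletal version of that dispatch with simplified types (rclone's fs.ConfigIn/fs.ConfigOut carry more fields, and the real states also authenticate against the server):

```go
package main

import "fmt"

type configIn struct {
	State  string // which step we are resuming at
	Result string // the user's answer to the previous question
}

type configOut struct {
	NextState string
	Prompt    string
	Done      bool
}

// config advances the state machine one step per invocation, which is what
// lets rclone drive the flow from a CLI, the rc API, or a GUI alike.
func config(in configIn) (configOut, error) {
	switch in.State {
	case "":
		return configOut{NextState: "password", Prompt: "enter password"}, nil
	case "password":
		if in.Result == "" {
			return configOut{NextState: "password", Prompt: "password can't be blank"}, nil
		}
		return configOut{NextState: "2fa", Prompt: "enter 2FA code"}, nil
	case "2fa":
		if in.Result == "" {
			return configOut{NextState: "2fa", Prompt: "2FA code can't be blank"}, nil
		}
		return configOut{Done: true}, nil
	}
	return configOut{}, fmt.Errorf("unknown state %q", in.State)
}

func main() {
	out, _ := config(configIn{State: ""})
	for !out.Done {
		fmt.Println(out.Prompt)
		out, _ = config(configIn{State: out.NextState, Result: "dummy"})
	}
	fmt.Println("token saved")
}
```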
@@ -86,7 +86,7 @@ func (f *Fs) getServerInfo(ctx context.Context) (account *api.ServerInfo, err er
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||
return f.shouldRetry(resp, err)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
if resp != nil {
|
||||
@@ -112,7 +112,7 @@ func (f *Fs) getUserAccountInfo(ctx context.Context) (account *api.AccountInfo,
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||
return f.shouldRetry(resp, err)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
if resp != nil {
|
||||
@@ -139,7 +139,7 @@ func (f *Fs) getLibraries(ctx context.Context) ([]api.Library, error) {
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||
return f.shouldRetry(resp, err)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
if resp != nil {
|
||||
@@ -170,7 +170,7 @@ func (f *Fs) createLibrary(ctx context.Context, libraryName, password string) (l
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, &request, &result)
|
||||
return f.shouldRetry(resp, err)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
if resp != nil {
|
||||
@@ -197,7 +197,7 @@ func (f *Fs) deleteLibrary(ctx context.Context, libraryID string) error {
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||
return f.shouldRetry(resp, err)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
if resp != nil {
|
||||
@@ -228,7 +228,7 @@ func (f *Fs) decryptLibrary(ctx context.Context, libraryID, password string) err
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.Call(ctx, &opts)
|
||||
return f.shouldRetry(resp, err)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
if resp != nil {
|
||||
@@ -271,7 +271,7 @@ func (f *Fs) getDirectoryEntriesAPIv21(ctx context.Context, libraryID, dirPath s
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||
return f.shouldRetry(resp, err)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
if resp != nil {
|
||||
@@ -316,7 +316,7 @@ func (f *Fs) getDirectoryDetails(ctx context.Context, libraryID, dirPath string)
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||
return f.shouldRetry(resp, err)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
if resp != nil {
|
||||
@@ -358,7 +358,7 @@ func (f *Fs) createDir(ctx context.Context, libraryID, dirPath string) error {
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.Call(ctx, &opts)
|
||||
return f.shouldRetry(resp, err)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
if resp != nil {
|
||||
@@ -398,7 +398,7 @@ func (f *Fs) renameDir(ctx context.Context, libraryID, dirPath, newName string)
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.Call(ctx, &opts)
|
||||
return f.shouldRetry(resp, err)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
if resp != nil {
|
||||
@@ -438,7 +438,7 @@ func (f *Fs) moveDir(ctx context.Context, srcLibraryID, srcDir, srcName, dstLibr
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, &request, nil)
|
||||
return f.shouldRetry(resp, err)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
if resp != nil {
|
||||
@@ -474,7 +474,7 @@ func (f *Fs) deleteDir(ctx context.Context, libraryID, filePath string) error {
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, nil)
|
||||
return f.shouldRetry(resp, err)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
if resp != nil {
|
||||
@@ -505,7 +505,7 @@ func (f *Fs) getFileDetails(ctx context.Context, libraryID, filePath string) (*a
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||
return f.shouldRetry(resp, err)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
if resp != nil {
|
||||
@@ -539,7 +539,7 @@ func (f *Fs) deleteFile(ctx context.Context, libraryID, filePath string) error {
|
||||
}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.srv.CallJSON(ctx, &opts, nil, nil)
|
||||
return f.shouldRetry(resp, err)
+			return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return errors.Wrap(err, "failed to delete file")
@@ -565,7 +565,7 @@ func (f *Fs) getDownloadLink(ctx context.Context, libraryID, filePath string) (s
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -614,7 +614,7 @@ func (f *Fs) download(ctx context.Context, url string, size int64, options ...fs
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.Call(ctx, &opts)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -659,7 +659,7 @@ func (f *Fs) getUploadLink(ctx context.Context, libraryID string) (string, error
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -682,7 +682,7 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, uploadLink, filePath stri
 		"need_idx_progress": {"true"},
 		"replace":           {"1"},
 	}
-	formReader, contentType, _, err := rest.MultipartUpload(in, parameters, "file", f.opt.Enc.FromStandardName(filename))
+	formReader, contentType, _, err := rest.MultipartUpload(ctx, in, parameters, "file", f.opt.Enc.FromStandardName(filename))
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to make multipart upload")
 	}
@@ -739,7 +739,7 @@ func (f *Fs) listShareLinks(ctx context.Context, libraryID, remote string) ([]ap
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -777,7 +777,7 @@ func (f *Fs) createShareLink(ctx context.Context, libraryID, remote string) (*ap
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &request, &result)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -818,7 +818,7 @@ func (f *Fs) copyFile(ctx context.Context, srcLibraryID, srcPath, dstLibraryID,
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &request, &result)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -860,7 +860,7 @@ func (f *Fs) moveFile(ctx context.Context, srcLibraryID, srcPath, dstLibraryID,
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &request, &result)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -900,7 +900,7 @@ func (f *Fs) renameFile(ctx context.Context, libraryID, filePath, newname string
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &request, &result)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -938,7 +938,7 @@ func (f *Fs) emptyLibraryTrash(ctx context.Context, libraryID string) error {
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, nil)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -976,7 +976,7 @@ func (f *Fs) getDirectoryEntriesAPIv2(ctx context.Context, libraryID, dirPath st
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -1030,7 +1030,7 @@ func (f *Fs) copyFileAPIv2(ctx context.Context, srcLibraryID, srcPath, dstLibrar
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.Call(ctx, &opts)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
@@ -1075,7 +1075,7 @@ func (f *Fs) renameFileAPIv2(ctx context.Context, libraryID, filePath, newname s
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.Call(ctx, &opts)
-		return f.shouldRetry(resp, err)
+		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil {
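
The change repeated through the hunks above threads the request context into shouldRetry so that a cancelled or timed-out context is reported as a permanent error rather than being retried like an ordinary failure. A minimal, self-contained sketch of that pattern (the names here are illustrative, not rclone's actual pacer internals):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// retryCall retries fn until it succeeds, reports a permanent error,
// or ctx is cancelled. This mirrors the shape of the pacer/shouldRetry
// loop in the diff above.
func retryCall(ctx context.Context, fn func() (retry bool, err error)) error {
	for {
		retry, err := fn()
		if err == nil || !retry {
			return err
		}
		// Stop retrying as soon as the context is done. Without this
		// check a cancelled request would be retried like any other error.
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(100 * time.Millisecond): // fixed sleep; real pacers back off
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond)
	defer cancel()
	err := retryCall(ctx, func() (bool, error) {
		return true, errors.New("transient error") // always asks for a retry
	})
	fmt.Println(err) // context deadline exceeded
}
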

@@ -16,6 +16,7 @@ import (
 	"strconv"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"github.com/pkg/errors"
@@ -204,6 +205,36 @@ Fstat instead of Stat which is called on an already open file handle.
 It has been found that this helps with IBM Sterling SFTP servers which have
 "extractability" level set to 1 which means only 1 file can be opened at
 any given time.
 `,
 			Advanced: true,
 		}, {
+			Name:    "disable_concurrent_reads",
+			Default: false,
+			Help: `If set don't use concurrent reads
+
+Normally concurrent reads are safe to use and not using them will
+degrade performance, so this option is disabled by default.
+
+Some servers limit the number of times a file can be
+downloaded. Using concurrent reads can trigger this limit, so if you
+have a server which returns
+
+    Failed to copy: file does not exist
+
+then you may need to enable this flag.
+
+If concurrent reads are disabled, the use_fstat option is ignored.
+`,
+			Advanced: true,
+		}, {
+			Name:    "disable_concurrent_writes",
+			Default: false,
+			Help: `If set don't use concurrent writes
+
+Normally rclone uses concurrent writes to upload files. This improves
+the performance greatly, especially for distant servers.
+
+This option disables concurrent writes should that be necessary.
+`,
+			Advanced: true,
+		}, {
@@ -224,28 +255,30 @@ Set to 0 to keep connections indefinitely.
 
 // Options defines the configuration for this backend
 type Options struct {
-	Host              string      `config:"host"`
-	User              string      `config:"user"`
-	Port              string      `config:"port"`
-	Pass              string      `config:"pass"`
-	KeyPem            string      `config:"key_pem"`
-	KeyFile           string      `config:"key_file"`
-	KeyFilePass       string      `config:"key_file_pass"`
-	PubKeyFile        string      `config:"pubkey_file"`
-	KnownHostsFile    string      `config:"known_hosts_file"`
-	KeyUseAgent       bool        `config:"key_use_agent"`
-	UseInsecureCipher bool        `config:"use_insecure_cipher"`
-	DisableHashCheck  bool        `config:"disable_hashcheck"`
-	AskPassword       bool        `config:"ask_password"`
-	PathOverride      string      `config:"path_override"`
-	SetModTime        bool        `config:"set_modtime"`
-	Md5sumCommand     string      `config:"md5sum_command"`
-	Sha1sumCommand    string      `config:"sha1sum_command"`
-	SkipLinks         bool        `config:"skip_links"`
-	Subsystem         string      `config:"subsystem"`
-	ServerCommand     string      `config:"server_command"`
-	UseFstat          bool        `config:"use_fstat"`
-	IdleTimeout       fs.Duration `config:"idle_timeout"`
+	Host                    string      `config:"host"`
+	User                    string      `config:"user"`
+	Port                    string      `config:"port"`
+	Pass                    string      `config:"pass"`
+	KeyPem                  string      `config:"key_pem"`
+	KeyFile                 string      `config:"key_file"`
+	KeyFilePass             string      `config:"key_file_pass"`
+	PubKeyFile              string      `config:"pubkey_file"`
+	KnownHostsFile          string      `config:"known_hosts_file"`
+	KeyUseAgent             bool        `config:"key_use_agent"`
+	UseInsecureCipher       bool        `config:"use_insecure_cipher"`
+	DisableHashCheck        bool        `config:"disable_hashcheck"`
+	AskPassword             bool        `config:"ask_password"`
+	PathOverride            string      `config:"path_override"`
+	SetModTime              bool        `config:"set_modtime"`
+	Md5sumCommand           string      `config:"md5sum_command"`
+	Sha1sumCommand          string      `config:"sha1sum_command"`
+	SkipLinks               bool        `config:"skip_links"`
+	Subsystem               string      `config:"subsystem"`
+	ServerCommand           string      `config:"server_command"`
+	UseFstat                bool        `config:"use_fstat"`
+	DisableConcurrentReads  bool        `config:"disable_concurrent_reads"`
+	DisableConcurrentWrites bool        `config:"disable_concurrent_writes"`
+	IdleTimeout             fs.Duration `config:"idle_timeout"`
 }
 
 // Fs stores the interface to the remote SFTP files
@@ -266,6 +299,7 @@ type Fs struct {
 	drain        *time.Timer // used to drain the pool when we stop using the connections
 	pacer        *fs.Pacer   // pacer for operations
 	savedpswd    string
+	transfers    int32 // count in use references
 }
 
 // Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
@@ -328,6 +362,23 @@ func (c *conn) closed() error {
 	return nil
 }
 
+// Show that we are doing an upload or download
+//
+// Call removeTransfer() when done
+func (f *Fs) addTransfer() {
+	atomic.AddInt32(&f.transfers, 1)
+}
+
+// Show the upload or download done
+func (f *Fs) removeTransfer() {
+	atomic.AddInt32(&f.transfers, -1)
+}
+
+// getTransfers shows whether there are any transfers in progress
+func (f *Fs) getTransfers() int32 {
+	return atomic.LoadInt32(&f.transfers)
+}
+
 // Open a new connection to the SFTP server.
 func (f *Fs) sftpConnection(ctx context.Context) (c *conn, err error) {
 	// Rate limit rate of new connections
@@ -373,7 +424,14 @@ func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.C
 		}
 	}
 	opts = opts[:len(opts):len(opts)] // make sure we don't overwrite the callers opts
-	opts = append(opts, sftp.UseFstat(f.opt.UseFstat))
+	opts = append(opts,
+		sftp.UseFstat(f.opt.UseFstat),
+		sftp.UseConcurrentReads(!f.opt.DisableConcurrentReads),
+		sftp.UseConcurrentWrites(!f.opt.DisableConcurrentWrites),
+	)
+	if f.opt.DisableConcurrentReads { // FIXME
+		fs.Errorf(f, "Ignoring disable_concurrent_reads after library reversion - see #5197")
+	}
 
 	return sftp.NewClientPipe(pr, pw, opts...)
 }
@@ -451,6 +509,13 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
 func (f *Fs) drainPool(ctx context.Context) (err error) {
 	f.poolMu.Lock()
 	defer f.poolMu.Unlock()
+	if transfers := f.getTransfers(); transfers != 0 {
+		fs.Debugf(f, "Not closing %d unused connections as %d transfers in progress", len(f.pool), transfers)
+		if f.opt.IdleTimeout > 0 {
+			f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer
+		}
+		return nil
+	}
 	if f.opt.IdleTimeout > 0 {
 		f.drain.Stop()
 	}
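
The point of the new atomic counter is visible in drainPool above: the idle-timeout timer must be able to tell idle connections apart from ones actively carrying an upload or download, and it only tears the pool down when the count is zero. A compressed, runnable sketch of that interplay (illustrative types, not the real pool):

package main

import (
	"fmt"
	"sync/atomic"
)

// pool keeps reusable connections plus a count of in-flight transfers.
type pool struct {
	transfers int32
	conns     []string // stand-in for real connections
}

func (p *pool) addTransfer()    { atomic.AddInt32(&p.transfers, 1) }
func (p *pool) removeTransfer() { atomic.AddInt32(&p.transfers, -1) }

// drain closes idle connections but refuses while transfers are running,
// mirroring the drainPool change in the diff above.
func (p *pool) drain() {
	if n := atomic.LoadInt32(&p.transfers); n != 0 {
		fmt.Printf("not closing %d conns: %d transfers in progress\n", len(p.conns), n)
		return
	}
	p.conns = nil
	fmt.Println("pool drained")
}

func main() {
	p := &pool{conns: []string{"c1", "c2"}}
	p.addTransfer()
	p.drain() // refused: a transfer is in flight
	p.removeTransfer()
	p.drain() // drained
}
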
@@ -501,7 +566,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}
 
 	if opt.KnownHostsFile != "" {
-		hostcallback, err := knownhosts.New(opt.KnownHostsFile)
+		hostcallback, err := knownhosts.New(env.ShellExpand(opt.KnownHostsFile))
 		if err != nil {
 			return nil, errors.Wrap(err, "couldn't parse known_hosts_file")
 		}
@@ -1331,18 +1396,19 @@ func (o *Object) stat(ctx context.Context) error {
 //
 // it also updates the info field
 func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
-	if o.fs.opt.SetModTime {
-		c, err := o.fs.getSftpConnection(ctx)
-		if err != nil {
-			return errors.Wrap(err, "SetModTime")
-		}
-		err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
-		o.fs.putSftpConnection(&c, err)
-		if err != nil {
-			return errors.Wrap(err, "SetModTime failed")
-		}
+	if !o.fs.opt.SetModTime {
+		return nil
 	}
-	err := o.stat(ctx)
+	c, err := o.fs.getSftpConnection(ctx)
+	if err != nil {
+		return errors.Wrap(err, "SetModTime")
+	}
+	err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
+	o.fs.putSftpConnection(&c, err)
+	if err != nil {
+		return errors.Wrap(err, "SetModTime failed")
+	}
+	err = o.stat(ctx)
 	if err != nil {
 		return errors.Wrap(err, "SetModTime stat failed")
 	}
@@ -1356,18 +1422,22 @@ func (o *Object) Storable() bool {
 
 // objectReader represents a file open for reading on the SFTP server
 type objectReader struct {
+	f          *Fs
 	sftpFile   *sftp.File
 	pipeReader *io.PipeReader
 	done       chan struct{}
 }
 
-func newObjectReader(sftpFile *sftp.File) *objectReader {
+func (f *Fs) newObjectReader(sftpFile *sftp.File) *objectReader {
 	pipeReader, pipeWriter := io.Pipe()
 	file := &objectReader{
+		f:          f,
 		sftpFile:   sftpFile,
 		pipeReader: pipeReader,
 		done:       make(chan struct{}),
 	}
+	// Show connection in use
+	f.addTransfer()
 
 	go func() {
 		// Use sftpFile.WriteTo to pump data so that it gets a
@@ -1397,6 +1467,8 @@ func (file *objectReader) Close() (err error) {
 	_ = file.pipeReader.Close()
 	// Wait for the background process to finish
 	<-file.done
+	// Show connection no longer in use
+	file.f.removeTransfer()
 	return err
 }
 
@@ -1430,12 +1502,27 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 			return nil, errors.Wrap(err, "Open Seek failed")
 		}
 	}
-	in = readers.NewLimitedReadCloser(newObjectReader(sftpFile), limit)
+	in = readers.NewLimitedReadCloser(o.fs.newObjectReader(sftpFile), limit)
 	return in, nil
 }
 
+type sizeReader struct {
+	io.Reader
+	size int64
+}
+
+// Size returns the expected size of the stream
+//
+// It is used in sftpFile.ReadFrom as a hint to work out the
+// concurrency needed
+func (sr *sizeReader) Size() int64 {
+	return sr.size
+}
+
 // Update a remote sftp file using the data <in> and ModTime from <src>
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+	o.fs.addTransfer() // Show transfer in progress
+	defer o.fs.removeTransfer()
 	// Clear the hash cache since we are about to update the object
 	o.md5sum = nil
 	o.sha1sum = nil
@@ -1463,7 +1550,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			fs.Debugf(src, "Removed after failed upload: %v", err)
 		}
 	}
-	_, err = file.ReadFrom(in)
+	_, err = file.ReadFrom(&sizeReader{Reader: in, size: src.Size()})
 	if err != nil {
 		remove()
 		return errors.Wrap(err, "Update ReadFrom failed")
@@ -1473,10 +1560,28 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		remove()
 		return errors.Wrap(err, "Update Close failed")
 	}
 
+	// Set the mod time - this stats the object if o.fs.opt.SetModTime == true
 	err = o.SetModTime(ctx, src.ModTime(ctx))
 	if err != nil {
 		return errors.Wrap(err, "Update SetModTime failed")
 	}
+
+	// Stat the file after the upload to read its stats back if o.fs.opt.SetModTime == false
+	if !o.fs.opt.SetModTime {
+		err = o.stat(ctx)
+		if err == fs.ErrorObjectNotFound {
+			// In the specific case of o.fs.opt.SetModTime == false
+			// if the object wasn't found then don't return an error
+			fs.Debugf(o, "Not found after upload with set_modtime=false so returning best guess")
+			o.modTime = src.ModTime(ctx)
+			o.size = src.Size()
+			o.mode = os.FileMode(0666) // regular file
+		} else if err != nil {
+			return errors.Wrap(err, "Update stat failed")
+		}
+	}
+
	return nil
 }
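
Why wrap the input in sizeReader at all: a bare io.Reader carries no length information, and the comment above says the Size method is used by sftpFile.ReadFrom as a hint to work out the concurrency needed. Assuming a consumer that probes for a Size method in that way (the probe below is a toy stand-in, not the sftp library's actual code), the wrapper looks like this:

package main

import (
	"fmt"
	"io"
	"strings"
)

// sizeReader decorates an io.Reader with a Size hint, as in the diff above.
type sizeReader struct {
	io.Reader
	size int64
}

func (sr *sizeReader) Size() int64 { return sr.size }

// chooseConcurrency stands in for the consumer side: a writer that probes
// for a Size method to decide how many concurrent requests are worthwhile.
func chooseConcurrency(r io.Reader, chunk int64) int64 {
	if s, ok := r.(interface{ Size() int64 }); ok {
		n := (s.Size() + chunk - 1) / chunk
		if n < 1 {
			n = 1
		}
		return n
	}
	return 1 // unknown size: no hint, stay sequential
}

func main() {
	body := strings.NewReader("0123456789")
	fmt.Println(chooseConcurrency(body, 4))                     // strings.Reader already has Size(): 3
	fmt.Println(chooseConcurrency(&sizeReader{body, 10}, 4))    // wrapped arbitrary reader: 3
	fmt.Println(chooseConcurrency(io.LimitReader(body, 10), 4)) // no hint: 1
}
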
@@ -77,7 +77,6 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
-	"log"
 	"net/http"
 	"net/url"
 	"path"
@@ -110,10 +109,10 @@ const (
 	decayConstant = 2              // bigger for slower decay, exponential
 	apiPath       = "/sf/v3"       // add to endpoint to get API path
 	tokenPath     = "/oauth/token" // add to endpoint to get Token path
-	minChunkSize        = 256 * fs.KibiByte
-	maxChunkSize        = 2 * fs.GibiByte
-	defaultChunkSize    = 64 * fs.MebiByte
-	defaultUploadCutoff = 128 * fs.MebiByte
+	minChunkSize        = 256 * fs.Kibi
+	maxChunkSize        = 2 * fs.Gibi
+	defaultChunkSize    = 64 * fs.Mebi
+	defaultUploadCutoff = 128 * fs.Mebi
 )
 
 // Generate a new oauth2 config which we will update when we know the TokenURL
@@ -136,7 +135,7 @@ func init() {
 		Name:        "sharefile",
 		Description: "Citrix Sharefile",
 		NewFs:       NewFs,
-		Config: func(ctx context.Context, name string, m configmap.Mapper) {
+		Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
 			oauthConfig := newOauthConfig("")
 			checkAuth := func(oauthConfig *oauth2.Config, auth *oauthutil.AuthResult) error {
 				if auth == nil || auth.Form == nil {
@@ -152,13 +151,10 @@ func init() {
 				oauthConfig.Endpoint.TokenURL = endpoint + tokenPath
 				return nil
 			}
-			opt := oauthutil.Options{
-				CheckAuth: checkAuth,
-			}
-			err := oauthutil.Config(ctx, "sharefile", name, m, oauthConfig, &opt)
-			if err != nil {
-				log.Fatalf("Failed to configure token: %v", err)
-			}
+			return oauthutil.ConfigOut("", &oauthutil.Options{
+				OAuth2Config: oauthConfig,
+				CheckAuth:    checkAuth,
+			})
 		},
 		Options: []fs.Option{{
 			Name: "upload_cutoff",
@@ -299,7 +295,10 @@ var retryErrorCodes = []int{
 
 // shouldRetry returns a boolean as to whether this resp and err
 // deserve to be retried. It returns the err as a convenience
-func shouldRetry(resp *http.Response, err error) (bool, error) {
+func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
+	if fserrors.ContextError(ctx, &err) {
+		return false, err
+	}
 	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }
 
@@ -324,7 +323,7 @@ func (f *Fs) readMetaDataForIDPath(ctx context.Context, id, path string, directo
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &item)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil && resp.StatusCode == http.StatusNotFound {
@@ -631,7 +630,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 	}
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &req, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return "", errors.Wrap(err, "CreateDir")
@@ -663,7 +662,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return found, errors.Wrap(err, "couldn't list files")
@@ -912,7 +911,7 @@ func (f *Fs) updateItem(ctx context.Context, id, leaf, directoryID string, modTi
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, &update, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, err
@@ -1133,7 +1132,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
 	var info *api.Item
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, err
@@ -1294,7 +1293,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	var dl api.DownloadSpecification
 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &dl)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, errors.Wrap(err, "open: fetch download specification")
@@ -1309,7 +1308,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	}
 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.Call(ctx, &opts)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, errors.Wrap(err, "open")
@@ -1365,7 +1364,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	}
 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.CallJSON(ctx, &opts, &req, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return errors.Wrap(err, "upload get specification")
@@ -1390,7 +1389,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	var finish api.UploadFinishResponse
 	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
 		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &finish)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return errors.Wrap(err, "upload file")
@@ -1426,7 +1425,7 @@ func (f *Fs) remove(ctx context.Context, id string) (err error) {
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.Call(ctx, &opts)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return errors.Wrap(err, "remove")

@@ -155,7 +155,7 @@ func (up *largeUpload) finish(ctx context.Context) error {
 	err := up.f.pacer.Call(func() (bool, error) {
 		resp, err := up.f.srv.Call(ctx, &opts)
 		if err != nil {
-			return shouldRetry(resp, err)
+			return shouldRetry(ctx, resp, err)
 		}
 		respBody, err = rest.ReadBody(resp)
 		// retry all errors now that the multipart upload has started
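
The comment on the last line above is the interesting design choice: once a multipart upload session has accepted parts, abandoning it is more expensive than retrying its finalize call, so after that point every error is treated as retryable. A compressed, runnable sketch of that asymmetry (hypothetical names, not the backend's real upload code):

package main

import (
	"errors"
	"fmt"
)

// finishUpload retries the finalize step unconditionally once parts have
// been uploaded; a failure before any part is sent would instead go back
// through the normal, more selective retry logic (not shown).
func finishUpload(finalize func() error, attempts int) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = finalize(); err == nil {
			return nil
		}
		// retry all errors now that the multipart upload has started
	}
	return fmt.Errorf("giving up after %d attempts: %w", attempts, err)
}

func main() {
	n := 0
	err := finishUpload(func() error {
		n++
		if n < 3 {
			return errors.New("transient")
		}
		return nil
	}, 5)
	fmt.Println(n, err) // 3 <nil>
}
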

@@ -16,7 +16,6 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"log"
 	"net/http"
 	"net/url"
 	"path"
@@ -76,50 +75,63 @@ func init() {
 		Name:        "sugarsync",
 		Description: "Sugarsync",
 		NewFs:       NewFs,
-		Config: func(ctx context.Context, name string, m configmap.Mapper) {
+		Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
 			opt := new(Options)
 			err := configstruct.Set(m, opt)
 			if err != nil {
-				log.Fatalf("Failed to read options: %v", err)
+				return nil, errors.Wrap(err, "failed to read options")
 			}
 
-			if opt.RefreshToken != "" {
-				fmt.Printf("Already have a token - refresh?\n")
-				if !config.ConfirmWithConfig(ctx, m, "config_refresh_token", true) {
-					return
+			switch config.State {
+			case "":
+				if opt.RefreshToken == "" {
+					return fs.ConfigGoto("username")
 				}
-			}
-			fmt.Printf("Username (email address)> ")
-			username := config.ReadLine()
-			password := config.GetPassword("Your Sugarsync password is only required during setup and will not be stored.")
+				return fs.ConfigConfirm("refresh", true, "config_refresh", "Already have a token - refresh?")
+			case "refresh":
+				if config.Result == "false" {
+					return nil, nil
+				}
+				return fs.ConfigGoto("username")
+			case "username":
+				return fs.ConfigInput("password", "config_username", "username (email address)")
+			case "password":
+				m.Set("username", config.Result)
+				return fs.ConfigPassword("auth", "config_password", "Your Sugarsync password.\n\nOnly required during setup and will not be stored.")
+			case "auth":
+				username, _ := m.Get("username")
+				m.Set("username", "")
+				password := config.Result
 
-			authRequest := api.AppAuthorization{
-				Username:         username,
-				Password:         password,
-				Application:      withDefault(opt.AppID, appID),
-				AccessKeyID:      withDefault(opt.AccessKeyID, accessKeyID),
-				PrivateAccessKey: withDefault(opt.PrivateAccessKey, obscure.MustReveal(encryptedPrivateAccessKey)),
-			}
+				authRequest := api.AppAuthorization{
+					Username:         username,
+					Password:         password,
+					Application:      withDefault(opt.AppID, appID),
+					AccessKeyID:      withDefault(opt.AccessKeyID, accessKeyID),
+					PrivateAccessKey: withDefault(opt.PrivateAccessKey, obscure.MustReveal(encryptedPrivateAccessKey)),
+				}
 
-			var resp *http.Response
-			opts := rest.Opts{
-				Method: "POST",
-				Path:   "/app-authorization",
-			}
-			srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(rootURL) // FIXME
+				var resp *http.Response
+				opts := rest.Opts{
+					Method: "POST",
+					Path:   "/app-authorization",
+				}
+				srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(rootURL) // FIXME
 
-			// FIXME
-			//err = f.pacer.Call(func() (bool, error) {
-			resp, err = srv.CallXML(context.Background(), &opts, &authRequest, nil)
-			//	return shouldRetry(resp, err)
-			//})
-			if err != nil {
-				log.Fatalf("Failed to get token: %v", err)
-			}
-			opt.RefreshToken = resp.Header.Get("Location")
-			m.Set("refresh_token", opt.RefreshToken)
+				// FIXME
+				//err = f.pacer.Call(func() (bool, error) {
+				resp, err = srv.CallXML(context.Background(), &opts, &authRequest, nil)
+				//	return shouldRetry(ctx, resp, err)
+				//})
+				if err != nil {
+					return nil, errors.Wrap(err, "failed to get token")
+				}
+				opt.RefreshToken = resp.Header.Get("Location")
+				m.Set("refresh_token", opt.RefreshToken)
+				return nil, nil
+			}
+			return nil, fmt.Errorf("unknown state %q", config.State)
 		},
 		Options: []fs.Option{{
 			Name: "app_id",
 			Help: "Sugarsync App ID.\n\nLeave blank to use rclone's.",
 		}, {
@@ -248,7 +260,10 @@ var retryErrorCodes = []int{
 
 // shouldRetry returns a boolean as to whether this resp and err
 // deserve to be retried. It returns the err as a convenience
-func shouldRetry(resp *http.Response, err error) (bool, error) {
+func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
+	if fserrors.ContextError(ctx, &err) {
+		return false, err
+	}
 	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }
 
@@ -288,7 +303,7 @@ func (f *Fs) readMetaDataForID(ctx context.Context, ID string) (info *api.File,
 	}
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		if resp != nil && resp.StatusCode == http.StatusNotFound {
@@ -325,7 +340,7 @@ func (f *Fs) getAuthToken(ctx context.Context) error {
 	}
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallXML(ctx, &opts, &authRequest, &authResponse)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return errors.Wrap(err, "failed to get authorization")
@@ -373,7 +388,7 @@ func (f *Fs) getUser(ctx context.Context) (user *api.User, err error) {
 	}
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallXML(ctx, &opts, nil, &user)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to get user")
@@ -567,7 +582,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 	}
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallXML(ctx, &opts, mkdir, nil)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return "", err
@@ -618,7 +633,7 @@ OUTER:
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return found, errors.Wrap(err, "couldn't list files")
@@ -774,7 +789,7 @@ func (f *Fs) delete(ctx context.Context, isFile bool, id string, remote string,
 		}
 		return f.pacer.Call(func() (bool, error) {
 			resp, err := f.srv.Call(ctx, &opts)
-			return shouldRetry(resp, err)
+			return shouldRetry(ctx, resp, err)
 		})
 	}
 	// Move file/dir to deleted files if not hard delete
@@ -880,7 +895,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallXML(ctx, &opts, &copyFile, nil)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, err
@@ -934,7 +949,7 @@ func (f *Fs) moveFile(ctx context.Context, id, leaf, directoryID string) (info *
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallXML(ctx, &opts, &move, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, err
@@ -964,7 +979,7 @@ func (f *Fs) moveDir(ctx context.Context, id, leaf, directoryID string) (err err
 	var resp *http.Response
 	return f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallXML(ctx, &opts, &move, nil)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 }
 
@@ -1053,7 +1068,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 	var info *api.File
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallXML(ctx, &opts, &linkFile, &info)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return "", err
@@ -1182,7 +1197,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	}
 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.Call(ctx, &opts)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return nil, err
@@ -1204,7 +1219,7 @@ func (f *Fs) createFile(ctx context.Context, pathID, leaf, mimeType string) (new
 	}
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallXML(ctx, &opts, &mkdir, nil)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return "", err
@@ -1262,7 +1277,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	}
 	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
 		resp, err = o.fs.srv.Call(ctx, &opts)
-		return shouldRetry(resp, err)
+		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return errors.Wrap(err, "failed to upload file")
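
The sugarsync rewrite above shows the shape of the new-style Config callback: each invocation receives the previous state and the user's answer in fs.ConfigIn and returns an fs.ConfigOut naming the next state, so the flow can be driven non-interactively (for example over an API) instead of blocking on terminal prompts. A self-contained analogue of that state machine, with illustrative types rather than rclone's actual fs.ConfigIn/ConfigOut:

package main

import "fmt"

// A tiny analogue of the Config state machine: each step gets the
// previous state plus the user's answer, and says what to ask next.
type configIn struct {
	State  string
	Result string
}

type configOut struct {
	State    string // next state to enter
	Question string // prompt shown to the user, if any
}

func step(in configIn, settings map[string]string) (*configOut, error) {
	switch in.State {
	case "":
		return &configOut{State: "username", Question: "username (email address)"}, nil
	case "username":
		settings["username"] = in.Result
		return &configOut{State: "password", Question: "password"}, nil
	case "password":
		settings["password"] = in.Result
		return nil, nil // nil out means: flow complete
	}
	return nil, fmt.Errorf("unknown state %q", in.State)
}

func main() {
	settings := map[string]string{}
	answers := []string{"", "alice@example.com", "hunter2"}
	state := ""
	for _, a := range answers {
		out, err := step(configIn{State: state, Result: a}, settings)
		if err != nil || out == nil {
			break
		}
		fmt.Printf("ask %q (next state %s)\n", out.Question, out.State)
		state = out.State
	}
	fmt.Println(settings)
}
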

@@ -13,6 +13,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/google/uuid"
 	"github.com/ncw/swift/v2"
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
@@ -35,7 +36,7 @@ import (
 const (
 	directoryMarkerContentType = "application/directory" // content type of directory marker objects
 	listChunks                 = 1000                    // chunk size to read directory listings
-	defaultChunkSize           = 5 * fs.GibiByte
+	defaultChunkSize           = 5 * fs.Gibi
 	minSleep                   = 10 * time.Millisecond // In case of error, start at 10ms sleep.
 )
 
@@ -45,7 +46,7 @@ var SharedOptions = []fs.Option{{
 	Help: `Above this size files will be chunked into a _segments container.
 
 Above this size files will be chunked into a _segments container. The
-default for this is 5GB which is its maximum value.`,
+default for this is 5 GiB which is its maximum value.`,
 	Default:  defaultChunkSize,
 	Advanced: true,
 }, {
@@ -55,7 +56,7 @@ default for this is 5GB which is its maximum value.`,
 When doing streaming uploads (e.g. using rcat or mount) setting this
 flag will cause the swift backend to not upload chunked files.
 
-This will limit the maximum upload size to 5GB. However non chunked
+This will limit the maximum upload size to 5 GiB. However non chunked
 files are easier to deal with and have an MD5SUM.
 
 Rclone will still chunk files bigger than chunk_size when doing normal
@@ -291,7 +292,10 @@ var retryErrorCodes = []int{
 
 // shouldRetry returns a boolean as to whether this err deserves to be
 // retried. It returns the err as a convenience
-func shouldRetry(err error) (bool, error) {
+func shouldRetry(ctx context.Context, err error) (bool, error) {
+	if fserrors.ContextError(ctx, &err) {
+		return false, err
+	}
 	// If this is a swift.Error object extract the HTTP error code
 	if swiftError, ok := err.(*swift.Error); ok {
 		for _, e := range retryErrorCodes {
@@ -307,7 +311,7 @@ func shouldRetry(err error) (bool, error) {
 // shouldRetryHeaders returns a boolean as to whether this err
 // deserves to be retried. It reads the headers passed in looking for
 // `Retry-After`. It returns the err as a convenience
-func shouldRetryHeaders(headers swift.Headers, err error) (bool, error) {
+func shouldRetryHeaders(ctx context.Context, headers swift.Headers, err error) (bool, error) {
 	if swiftError, ok := err.(*swift.Error); ok && swiftError.StatusCode == 429 {
 		if value := headers["Retry-After"]; value != "" {
 			retryAfter, parseErr := strconv.Atoi(value)
@@ -326,7 +330,7 @@ func shouldRetryHeaders(headers swift.Headers, err error) (bool, error) {
 		}
 	}
-	return shouldRetry(err)
+	return shouldRetry(ctx, err)
 }
 
 // parsePath parses a remote 'url'
@@ -415,7 +419,7 @@ func swiftConnection(ctx context.Context, opt *Options, name string) (*swift.Con
 }
 
 func checkUploadChunkSize(cs fs.SizeSuffix) error {
-	const minChunkSize = fs.Byte
+	const minChunkSize = fs.SizeSuffixBase
 	if cs < minChunkSize {
 		return errors.Errorf("%s is less than %s", cs, minChunkSize)
 	}
@@ -468,7 +472,7 @@ func NewFsWithConnection(ctx context.Context, opt *Options, name, root string, c
 	err = f.pacer.Call(func() (bool, error) {
 		var rxHeaders swift.Headers
 		info, rxHeaders, err = f.c.Object(ctx, f.rootContainer, encodedDirectory)
-		return shouldRetryHeaders(rxHeaders, err)
+		return shouldRetryHeaders(ctx, rxHeaders, err)
 	})
 	if err == nil && info.ContentType != directoryMarkerContentType {
 		newRoot := path.Dir(f.root)
@@ -576,7 +580,7 @@ func (f *Fs) listContainerRoot(ctx context.Context, container, directory, prefix
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		objects, err = f.c.Objects(ctx, container, opts)
-		return shouldRetry(err)
+		return shouldRetry(ctx, err)
 	})
 	if err == nil {
 		for i := range objects {
@@ -661,7 +665,7 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
 	var containers []swift.Container
 	err = f.pacer.Call(func() (bool, error) {
 		containers, err = f.c.ContainersAll(ctx, nil)
-		return shouldRetry(err)
+		return shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return nil, errors.Wrap(err, "container listing failed")
@@ -753,7 +757,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
 		containers, err = f.c.ContainersAll(ctx, nil)
-		return shouldRetry(err)
+		return shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return nil, errors.Wrap(err, "container listing failed")
@@ -805,7 +809,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
 		err = f.pacer.Call(func() (bool, error) {
 			var rxHeaders swift.Headers
 			_, rxHeaders, err = f.c.Container(ctx, container)
-			return shouldRetryHeaders(rxHeaders, err)
+			return shouldRetryHeaders(ctx, rxHeaders, err)
 		})
 	}
 	if err == swift.ContainerNotFound {
@@ -815,7 +819,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
 		}
 		err = f.pacer.Call(func() (bool, error) {
 			err = f.c.ContainerCreate(ctx, container, headers)
-			return shouldRetry(err)
+			return shouldRetry(ctx, err)
 		})
 		if err == nil {
 			fs.Infof(f, "Container %q created", container)
@@ -836,7 +840,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	err := f.cache.Remove(container, func() error {
 		err := f.pacer.Call(func() (bool, error) {
 			err := f.c.ContainerDelete(ctx, container)
-			return shouldRetry(err)
+			return shouldRetry(ctx, err)
 		})
 		if err == nil {
 			fs.Infof(f, "Container %q removed", container)
@@ -902,18 +906,125 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		fs.Debugf(src, "Can't copy - not same remote type")
 		return nil, fs.ErrorCantCopy
 	}
-	srcContainer, srcPath := srcObj.split()
-	err = f.pacer.Call(func() (bool, error) {
-		var rxHeaders swift.Headers
-		rxHeaders, err = f.c.ObjectCopy(ctx, srcContainer, srcPath, dstContainer, dstPath, nil)
-		return shouldRetryHeaders(rxHeaders, err)
-	})
+	isLargeObject, err := srcObj.isLargeObject(ctx)
+	if err != nil {
+		return nil, err
+	}
+	if isLargeObject {
+		/*handle large object*/
+		err = copyLargeObject(ctx, f, srcObj, dstContainer, dstPath)
+	} else {
+		srcContainer, srcPath := srcObj.split()
+		err = f.pacer.Call(func() (bool, error) {
+			var rxHeaders swift.Headers
+			rxHeaders, err = f.c.ObjectCopy(ctx, srcContainer, srcPath, dstContainer, dstPath, nil)
+			return shouldRetryHeaders(ctx, rxHeaders, err)
+		})
+	}
 	if err != nil {
 		return nil, err
 	}
 	return f.NewObject(ctx, remote)
 }
 
+func copyLargeObject(ctx context.Context, f *Fs, src *Object, dstContainer string, dstPath string) error {
+	segmentsContainer := dstContainer + "_segments"
+	err := f.makeContainer(ctx, segmentsContainer)
+	if err != nil {
+		return err
+	}
+	segments, err := src.getSegmentsLargeObject(ctx)
+	if err != nil {
+		return err
+	}
+	if len(segments) == 0 {
+		return errors.New("could not copy object, segment list is empty")
+	}
+	nanoSeconds := time.Now().Nanosecond()
+	prefixSegment := fmt.Sprintf("%v/%v/%s", nanoSeconds, src.size, strings.ReplaceAll(uuid.New().String(), "-", ""))
+	copiedSegmentsLen := 10
+	for _, value := range segments {
+		if len(value) <= 0 {
+			continue
+		}
+		fragment := value[0]
+		if len(fragment) <= 0 {
+			continue
+		}
+		copiedSegmentsLen = len(value)
+		firstIndex := strings.Index(fragment, "/")
+		if firstIndex < 0 {
+			firstIndex = 0
+		} else {
+			firstIndex = firstIndex + 1
+		}
+		lastIndex := strings.LastIndex(fragment, "/")
+		if lastIndex < 0 {
+			lastIndex = len(fragment)
+		} else {
+			lastIndex = lastIndex - 1
+		}
+		prefixSegment = fragment[firstIndex:lastIndex]
+		break
+	}
+	copiedSegments := make([]string, copiedSegmentsLen)
+	defer handleCopyFail(ctx, f, segmentsContainer, copiedSegments, err)
+	for c, ss := range segments {
+		if len(ss) <= 0 {
+			continue
+		}
+		for _, s := range ss {
+			lastIndex := strings.LastIndex(s, "/")
+			if lastIndex <= 0 {
+				lastIndex = 0
+			} else {
+				lastIndex = lastIndex + 1
+			}
+			segmentName := dstPath + "/" + prefixSegment + "/" + s[lastIndex:]
+			err = f.pacer.Call(func() (bool, error) {
+				var rxHeaders swift.Headers
				rxHeaders, err = f.c.ObjectCopy(ctx, c, s, segmentsContainer, segmentName, nil)
+				copiedSegments = append(copiedSegments, segmentName)
+				return shouldRetryHeaders(ctx, rxHeaders, err)
+			})
+			if err != nil {
+				return err
+			}
+		}
+	}
	m := swift.Metadata{}
+	headers := m.ObjectHeaders()
+	headers["X-Object-Manifest"] = urlEncode(fmt.Sprintf("%s/%s/%s", segmentsContainer, dstPath, prefixSegment))
+	headers["Content-Length"] = "0"
+	emptyReader := bytes.NewReader(nil)
+	err = f.pacer.Call(func() (bool, error) {
+		var rxHeaders swift.Headers
+		rxHeaders, err = f.c.ObjectPut(ctx, dstContainer, dstPath, emptyReader, true, "", src.contentType, headers)
+		return shouldRetryHeaders(ctx, rxHeaders, err)
+	})
+	return err
+}
+
+// handleCopyFail removes the copied segments if the copy process failed
+func handleCopyFail(ctx context.Context, f *Fs, segmentsContainer string, segments []string, err error) {
+	fs.Debugf(f, "handle copy segment fail")
+	if err == nil {
+		return
+	}
+	if len(segmentsContainer) == 0 {
+		fs.Debugf(f, "invalid segments container")
+		return
+	}
+	if len(segments) == 0 {
+		fs.Debugf(f, "segments is empty")
+		return
+	}
+	fs.Debugf(f, "deleting segments that were copied")
+	for _, v := range segments {
+		_ = f.c.ObjectDelete(ctx, segmentsContainer, v)
	}
+}
+
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
 	return hash.Set(hash.MD5)
@@ -1041,7 +1152,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 	container, containerPath := o.split()
 	err = o.fs.pacer.Call(func() (bool, error) {
 		info, h, err = o.fs.c.Object(ctx, container, containerPath)
-		return shouldRetryHeaders(h, err)
+		return shouldRetryHeaders(ctx, h, err)
 	})
 	if err != nil {
 		if err == swift.ObjectNotFound {
@@ -1100,7 +1211,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	container, containerPath := o.split()
 	return o.fs.pacer.Call(func() (bool, error) {
 		err = o.fs.c.ObjectUpdate(ctx, container, containerPath, newHeaders)
-		return shouldRetry(err)
+		return shouldRetry(ctx, err)
 	})
 }
 
@@ -1121,7 +1232,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	err = o.fs.pacer.Call(func() (bool, error) {
 		var rxHeaders swift.Headers
 		in, rxHeaders, err = o.fs.c.ObjectOpen(ctx, container, containerPath, !isRanging, headers)
-		return shouldRetryHeaders(rxHeaders, err)
+		return shouldRetryHeaders(ctx, rxHeaders, err)
 	})
 	return
 }
@@ -1211,7 +1322,7 @@ func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.
 	err = o.fs.pacer.Call(func() (bool, error) {
 		var rxHeaders swift.Headers
 		_, rxHeaders, err = o.fs.c.Container(ctx, segmentsContainer)
-		return shouldRetryHeaders(rxHeaders, err)
+		return shouldRetryHeaders(ctx, rxHeaders, err)
	})
 	if err == swift.ContainerNotFound {
 		headers := swift.Headers{}
@@ -1220,7 +1331,7 @@ func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.
 		}
 		err = o.fs.pacer.Call(func() (bool, error) {
 			err = o.fs.c.ContainerCreate(ctx, segmentsContainer, headers)
-			return shouldRetry(err)
+			return shouldRetry(ctx, err)
 		})
 	}
 	if err != nil {
@@ -1241,7 +1352,8 @@ func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.
 		if segmentInfos == nil || len(segmentInfos) == 0 {
 			return
 		}
-		deleteChunks(ctx, o, segmentsContainer, segmentInfos)
+		_ctx := context.Background()
+		deleteChunks(_ctx, o, segmentsContainer, segmentInfos)
 	})()
 	for {
 		// can we read at least one byte?
@@ -1267,7 +1379,7 @@ func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.
 			if err == nil {
 				segmentInfos = append(segmentInfos, segmentPath)
 			}
-			return shouldRetryHeaders(rxHeaders, err)
+			return shouldRetryHeaders(ctx, rxHeaders, err)
 		})
 		if err != nil {
 			return "", err
@@ -1281,7 +1393,7 @@ func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.
 	err = o.fs.pacer.Call(func() (bool, error) {
 		var rxHeaders swift.Headers
 		rxHeaders, err = o.fs.c.ObjectPut(ctx, container, containerPath, emptyReader, true, "", contentType, headers)
-		return shouldRetryHeaders(rxHeaders, err)
+		return shouldRetryHeaders(ctx, rxHeaders, err)
 	})
 
 	if err == nil {
@@ -1356,7 +1468,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	var rxHeaders swift.Headers
 	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
 		rxHeaders, err = o.fs.c.ObjectPut(ctx, container, containerPath, in, true, "", contentType, headers)
-		return shouldRetryHeaders(rxHeaders, err)
+		return shouldRetryHeaders(ctx, rxHeaders, err)
 	})
 	if err != nil {
 		return err
@@ -1414,7 +1526,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
 	// Remove file/manifest first
 	err = o.fs.pacer.Call(func() (bool, error) {
 		err = o.fs.c.ObjectDelete(ctx, container, containerPath)
-		return shouldRetry(err)
+		return shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return err
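
Why copyLargeObject exists at all: as the code above suggests, a swift large object is a zero-byte manifest whose X-Object-Manifest header names a "<container>/<prefix>", and a GET concatenates every object whose name starts with that prefix. Copying only the manifest would leave the copy pointing at the source's segments, so the new code copies each segment under a fresh prefix and then writes a fresh manifest. A tiny sketch of the manifest value (same fmt.Sprintf shape as the code above; the real work is the segment copies):

package main

import "fmt"

// manifestValue formats the X-Object-Manifest value: the container and
// name prefix under which a GET will find and concatenate the segments.
func manifestValue(segmentsContainer, dstPath, prefix string) string {
	return fmt.Sprintf("%s/%s/%s", segmentsContainer, dstPath, prefix)
}

func main() {
	fmt.Println(manifestValue("photos_segments", "big.bin", "1234/2048/abcd"))
	// photos_segments/big.bin/1234/2048/abcd
}
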

@@ -1,6 +1,7 @@
 package swift
 
 import (
+	"context"
 	"testing"
 	"time"
 
@@ -32,6 +33,7 @@ func TestInternalUrlEncode(t *testing.T) {
 }
 
 func TestInternalShouldRetryHeaders(t *testing.T) {
+	ctx := context.Background()
 	headers := swift.Headers{
 		"Content-Length": "64",
 		"Content-Type":   "text/html; charset=UTF-8",
@@ -45,7 +47,7 @@ func TestInternalShouldRetryHeaders(t *testing.T) {
 
 	// Short sleep should just do the sleep
 	start := time.Now()
-	retry, gotErr := shouldRetryHeaders(headers, err)
+	retry, gotErr := shouldRetryHeaders(ctx, headers, err)
 	dt := time.Since(start)
 	assert.True(t, retry)
 	assert.Equal(t, err, gotErr)
@@ -54,7 +56,7 @@ func TestInternalShouldRetryHeaders(t *testing.T) {
 	// Long sleep should return RetryError
 	headers["Retry-After"] = "3600"
 	start = time.Now()
-	retry, gotErr = shouldRetryHeaders(headers, err)
+	retry, gotErr = shouldRetryHeaders(ctx, headers, err)
 	dt = time.Since(start)
 	assert.True(t, dt < time.Second)
 	assert.False(t, retry)

@@ -80,13 +80,14 @@ func (f *Fs) InternalTest(t *testing.T) {
 	t.Run("NoChunk", f.testNoChunk)
 	t.Run("WithChunk", f.testWithChunk)
 	t.Run("WithChunkFail", f.testWithChunkFail)
+	t.Run("CopyLargeObject", f.testCopyLargeObject)
 }
 
 func (f *Fs) testWithChunk(t *testing.T) {
 	preConfChunkSize := f.opt.ChunkSize
 	preConfChunk := f.opt.NoChunk
 	f.opt.NoChunk = false
-	f.opt.ChunkSize = 1024 * fs.Byte
+	f.opt.ChunkSize = 1024 * fs.SizeSuffixBase
 	defer func() {
 		//restore old config after test
 		f.opt.ChunkSize = preConfChunkSize
@@ -116,7 +117,7 @@ func (f *Fs) testWithChunkFail(t *testing.T) {
 	preConfChunkSize := f.opt.ChunkSize
 	preConfChunk := f.opt.NoChunk
 	f.opt.NoChunk = false
-	f.opt.ChunkSize = 1024 * fs.Byte
+	f.opt.ChunkSize = 1024 * fs.SizeSuffixBase
 	segmentContainer := f.root + "_segments"
 	defer func() {
 		//restore config
@@ -154,4 +155,39 @@ func (f *Fs) testWithChunkFail(t *testing.T) {
 	require.Empty(t, objs)
 }
 
+func (f *Fs) testCopyLargeObject(t *testing.T) {
+	preConfChunkSize := f.opt.ChunkSize
+	preConfChunk := f.opt.NoChunk
+	f.opt.NoChunk = false
+	f.opt.ChunkSize = 1024 * fs.SizeSuffixBase
+	defer func() {
+		//restore old config after test
+		f.opt.ChunkSize = preConfChunkSize
+		f.opt.NoChunk = preConfChunk
+	}()
+
+	file := fstest.Item{
+		ModTime: fstest.Time("2020-12-31T04:05:06.499999999Z"),
+		Path:    "large.txt",
+		Size:    -1, // use unknown size during upload
+	}
+	const contentSize = 2048
+	contents := random.String(contentSize)
+	buf := bytes.NewBufferString(contents)
+	uploadHash := hash.NewMultiHasher()
+	in := io.TeeReader(buf, uploadHash)
+
+	file.Size = -1
+	obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
+	ctx := context.TODO()
+	obj, err := f.Features().PutStream(ctx, in, obji)
+	require.NoError(t, err)
+	require.NotEmpty(t, obj)
+	remoteTarget := "large.txt (copy)"
+	objTarget, err := f.Features().Copy(ctx, obj, remoteTarget)
+	require.NoError(t, err)
+	require.NotEmpty(t, objTarget)
+	require.Equal(t, obj.Size(), objTarget.Size())
+}
+
 var _ fstests.InternalTester = (*Fs)(nil)

@@ -7,7 +7,6 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"log"
 	"path"
 	"strings"
 	"time"
@@ -42,19 +41,19 @@ func init() {
 		Name:        "tardigrade",
 		Description: "Tardigrade Decentralized Cloud Storage",
 		NewFs:       NewFs,
-		Config: func(ctx context.Context, name string, configMapper configmap.Mapper) {
-			provider, _ := configMapper.Get(fs.ConfigProvider)
+		Config: func(ctx context.Context, name string, m configmap.Mapper, configIn fs.ConfigIn) (*fs.ConfigOut, error) {
+			provider, _ := m.Get(fs.ConfigProvider)
 
 			config.FileDeleteKey(name, fs.ConfigProvider)
 
 			if provider == newProvider {
-				satelliteString, _ := configMapper.Get("satellite_address")
-				apiKey, _ := configMapper.Get("api_key")
-				passphrase, _ := configMapper.Get("passphrase")
+				satelliteString, _ := m.Get("satellite_address")
+				apiKey, _ := m.Get("api_key")
+				passphrase, _ := m.Get("passphrase")
 
 				// satelliteString always contains the default; passphrase can be empty
 				if apiKey == "" {
-					return
+					return nil, nil
 				}
 
 				satellite, found := satMap[satelliteString]
@@ -64,22 +63,23 @@ func init() {
 
 				access, err := uplink.RequestAccessWithPassphrase(context.TODO(), satellite, apiKey, passphrase)
 				if err != nil {
-					log.Fatalf("Couldn't create access grant: %v", err)
+					return nil, errors.Wrap(err, "couldn't create access grant")
 				}
 
 				serializedAccess, err := access.Serialize()
 				if err != nil {
-					log.Fatalf("Couldn't serialize access grant: %v", err)
+					return nil, errors.Wrap(err, "couldn't serialize access grant")
 				}
-				configMapper.Set("satellite_address", satellite)
-				configMapper.Set("access_grant", serializedAccess)
+				m.Set("satellite_address", satellite)
+				m.Set("access_grant", serializedAccess)
 			} else if provider == existingProvider {
 				config.FileDeleteKey(name, "satellite_address")
 				config.FileDeleteKey(name, "api_key")
 				config.FileDeleteKey(name, "passphrase")
 			} else {
-				log.Fatalf("Invalid provider type: %s", provider)
+				return nil, errors.Errorf("invalid provider type: %s", provider)
 			}
+			return nil, nil
 		},
 		Options: []fs.Option{
 			{

@@ -148,13 +148,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (_ io.ReadC
 		case s && !e:
 			offset = opt.Start
 		case !s && e:
-			object, err := o.fs.project.StatObject(ctx, bucketName, bucketPath)
-			if err != nil {
-				return nil, err
-			}
-
-			offset = object.System.ContentLength - opt.End
-			length = opt.End
+			offset = -opt.End
 		}
 	case *fs.SeekOption:
 		offset = opt.Offset
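
The deleted StatObject round trip existed only to learn the object length so a tail range could be turned into an absolute offset; the replacement passes a negative offset and leaves the resolution to the downloader. Assuming the downstream convention that a negative offset means "this many bytes from the end" (which is what the new code relies on), the arithmetic looks like this, shown self-contained over a known size:

package main

import "fmt"

// resolveRange resolves (offset, length) against a known size, with a
// negative offset meaning "this many bytes from the end". This is the
// convention that lets the caller skip a stat call just to learn the
// length before requesting a tail range.
func resolveRange(size, offset, length int64) (start, end int64) {
	if offset < 0 {
		offset = size + offset // e.g. size 100, offset -5 -> 95
		if offset < 0 {
			offset = 0
		}
	}
	if length < 0 || offset+length > size {
		length = size - offset
	}
	return offset, offset + length
}

func main() {
	fmt.Println(resolveRange(100, -5, -1)) // 95 100 : the last 5 bytes
	fmt.Println(resolveRange(100, 10, 20)) // 10 30
}
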

@@ -59,7 +59,17 @@ func (d *Directory) candidates() []upstream.Entry {
 // return an error or update the object properly (rather than e.g. calling panic).
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
 	entries, err := o.fs.actionEntries(o.candidates()...)
-	if err != nil {
+	if err == fs.ErrorPermissionDenied {
+		// There are no candidates in this object which can be written to
+		// So attempt to create a new object instead
+		newO, err := o.fs.put(ctx, in, src, false, options...)
+		if err != nil {
+			return err
+		}
+		// Update current object
+		*o = *newO.(*Object)
+		return nil
+	} else if err != nil {
 		return err
 	}
 	if len(entries) == 1 {
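
The new branch turns "no writable candidates" into a copy-up: instead of failing with permission denied, the object is re-created via put on a writable upstream and the in-memory object is swapped for the new one, so the read-only copy underneath stays intact. A toy version of those write-layer semantics (illustrative types, not the union backend's real ones):

package main

import (
	"errors"
	"fmt"
)

var errPermissionDenied = errors.New("permission denied")

type layer struct {
	name     string
	writable bool
	files    map[string]string
}

// update writes to the first writable layer, which is the fallback the
// union Object.Update change above takes when every candidate holding
// the object is read only. The read-only copy is left untouched.
func update(layers []*layer, name, data string) (*layer, error) {
	for _, l := range layers {
		if l.writable {
			l.files[name] = data
			return l, nil
		}
	}
	return nil, errPermissionDenied
}

func main() {
	ro := &layer{name: "ro", files: map[string]string{"f": "old"}}
	rw := &layer{name: "rw", writable: true, files: map[string]string{}}
	l, err := update([]*layer{ro, rw}, "f", "new")
	fmt.Println(l.name, err, ro.files["f"], rw.files["f"]) // rw <nil> old new
}

This is exactly the behaviour the new internal test below checks: after an Update through the union, the new data is read back, and removing it exposes the untouched read-only copy again.
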

@@ -17,7 +17,9 @@ func init() {
 type EpFF struct{}
 
 func (p *EpFF) epff(ctx context.Context, upstreams []*upstream.Fs, filePath string) (*upstream.Fs, error) {
-	ch := make(chan *upstream.Fs)
+	ch := make(chan *upstream.Fs, len(upstreams))
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
 	for _, u := range upstreams {
 		u := u // Closure
 		go func() {
@@ -30,16 +32,10 @@ func (p *EpFF) epff(ctx context.Context, upstreams []*upstream.Fs, filePath stri
 		}()
 	}
 	var u *upstream.Fs
-	for i := 0; i < len(upstreams); i++ {
+	for range upstreams {
 		u = <-ch
 		if u != nil {
-			// close remaining goroutines
-			go func(num int) {
-				defer close(ch)
-				for i := 0; i < num; i++ {
-					<-ch
-				}
-			}(len(upstreams) - 1 - i)
 			break
 		}
 	}
 	if u == nil {
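
The old code needed a draining goroutine because sends into an unbuffered channel block: once a winner was found, the remaining workers would sit in their sends forever unless someone kept receiving. Buffering the channel to len(upstreams) lets every worker complete its send and exit on its own, and the added context cancellation stops their searches early. A distilled, runnable version of the fix:

package main

import (
	"fmt"
	"runtime"
	"time"
)

// firstNonZero returns the first non-zero result from the workers. With
// a buffered channel every worker can send and exit even after we stop
// receiving - an unbuffered channel here would leak the losers, which is
// the bug the epff change above fixes.
func firstNonZero(inputs []int) int {
	ch := make(chan int, len(inputs))
	for _, v := range inputs {
		v := v // closure
		go func() { ch <- v }()
	}
	for range inputs {
		if v := <-ch; v != 0 {
			return v // remaining sends complete into the buffer
		}
	}
	return 0
}

func main() {
	fmt.Println(firstNonZero([]int{0, 0, 7, 3}))
	time.Sleep(50 * time.Millisecond)                  // let the losing goroutines finish
	fmt.Println("goroutines:", runtime.NumGoroutine()) // back to roughly 1
}
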

backend/union/union_internal_test.go (new file, 67 lines)
@@ -0,0 +1,67 @@
+package union
+
+import (
+	"bytes"
+	"context"
+	"testing"
+	"time"
+
+	"github.com/rclone/rclone/fs/object"
+	"github.com/rclone/rclone/fstest"
+	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/rclone/rclone/lib/random"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func (f *Fs) TestInternalReadOnly(t *testing.T) {
+	if f.name != "TestUnionRO" {
+		t.Skip("Only on RO union")
+	}
+	dir := "TestInternalReadOnly"
+	ctx := context.Background()
+	rofs := f.upstreams[len(f.upstreams)-1]
+	assert.False(t, rofs.IsWritable())
+
+	// Put a file onto the read only fs
+	contents := random.String(50)
+	file1 := fstest.NewItem(dir+"/file.txt", contents, time.Now())
+	_, obj1 := fstests.PutTestContents(ctx, t, rofs, &file1, contents, true)
+
+	// Check read from readonly fs via union
+	o, err := f.NewObject(ctx, file1.Path)
+	require.NoError(t, err)
+	assert.Equal(t, int64(50), o.Size())
+
+	// Now call Update on the union Object with new data
+	contents2 := random.String(100)
+	file2 := fstest.NewItem(dir+"/file.txt", contents2, time.Now())
+	in := bytes.NewBufferString(contents2)
+	src := object.NewStaticObjectInfo(file2.Path, file2.ModTime, file2.Size, true, nil, nil)
+	err = o.Update(ctx, in, src)
+	require.NoError(t, err)
+	assert.Equal(t, int64(100), o.Size())
+
+	// Check we read the new object via the union
+	o, err = f.NewObject(ctx, file1.Path)
+	require.NoError(t, err)
+	assert.Equal(t, int64(100), o.Size())
+
+	// Remove the object
+	assert.NoError(t, o.Remove(ctx))
+
+	// Check we read the old object in the read only layer now
+	o, err = f.NewObject(ctx, file1.Path)
+	require.NoError(t, err)
+	assert.Equal(t, int64(50), o.Size())
+
+	// Remove file and dir from read only fs
+	assert.NoError(t, obj1.Remove(ctx))
+	assert.NoError(t, rofs.Rmdir(ctx, dir))
+}
+
+func (f *Fs) InternalTest(t *testing.T) {
+	t.Run("ReadOnly", f.TestInternalReadOnly)
+}
+
+var _ fstests.InternalTester = (*Fs)(nil)
|
||||
@@ -2,13 +2,15 @@
package union_test

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"

	_ "github.com/rclone/rclone/backend/local"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

@@ -24,17 +26,28 @@ func TestIntegration(t *testing.T) {
	})
}

func makeTestDirs(t *testing.T, n int) (dirs []string, clean func()) {
	for i := 1; i <= n; i++ {
		dir, err := ioutil.TempDir("", fmt.Sprintf("rclone-union-test-%d", n))
		require.NoError(t, err)
		dirs = append(dirs, dir)
	}
	clean = func() {
		for _, dir := range dirs {
			err := os.RemoveAll(dir)
			assert.NoError(t, err)
		}
	}
	return dirs, clean
}

func TestStandard(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-standard1")
	tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-standard2")
	tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-standard3")
	require.NoError(t, os.MkdirAll(tempdir1, 0744))
	require.NoError(t, os.MkdirAll(tempdir2, 0744))
	require.NoError(t, os.MkdirAll(tempdir3, 0744))
	upstreams := tempdir1 + " " + tempdir2 + " " + tempdir3
	dirs, clean := makeTestDirs(t, 3)
	defer clean()
	upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
	name := "TestUnion"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":",
@@ -54,13 +67,9 @@ func TestRO(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-ro1")
	tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-ro2")
	tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-ro3")
	require.NoError(t, os.MkdirAll(tempdir1, 0744))
	require.NoError(t, os.MkdirAll(tempdir2, 0744))
	require.NoError(t, os.MkdirAll(tempdir3, 0744))
	upstreams := tempdir1 + " " + tempdir2 + ":ro " + tempdir3 + ":ro"
	dirs, clean := makeTestDirs(t, 3)
	defer clean()
	upstreams := dirs[0] + " " + dirs[1] + ":ro " + dirs[2] + ":ro"
	name := "TestUnionRO"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":",
@@ -80,13 +89,9 @@ func TestNC(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-nc1")
	tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-nc2")
	tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-nc3")
	require.NoError(t, os.MkdirAll(tempdir1, 0744))
	require.NoError(t, os.MkdirAll(tempdir2, 0744))
	require.NoError(t, os.MkdirAll(tempdir3, 0744))
	upstreams := tempdir1 + " " + tempdir2 + ":nc " + tempdir3 + ":nc"
	dirs, clean := makeTestDirs(t, 3)
	defer clean()
	upstreams := dirs[0] + " " + dirs[1] + ":nc " + dirs[2] + ":nc"
	name := "TestUnionNC"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":",
@@ -106,13 +111,9 @@ func TestPolicy1(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-policy11")
	tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-policy12")
	tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-policy13")
	require.NoError(t, os.MkdirAll(tempdir1, 0744))
	require.NoError(t, os.MkdirAll(tempdir2, 0744))
	require.NoError(t, os.MkdirAll(tempdir3, 0744))
	upstreams := tempdir1 + " " + tempdir2 + " " + tempdir3
	dirs, clean := makeTestDirs(t, 3)
	defer clean()
	upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
	name := "TestUnionPolicy1"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":",
@@ -132,13 +133,9 @@ func TestPolicy2(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-policy21")
	tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-policy22")
	tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-policy23")
	require.NoError(t, os.MkdirAll(tempdir1, 0744))
	require.NoError(t, os.MkdirAll(tempdir2, 0744))
	require.NoError(t, os.MkdirAll(tempdir3, 0744))
	upstreams := tempdir1 + " " + tempdir2 + " " + tempdir3
	dirs, clean := makeTestDirs(t, 3)
	defer clean()
	upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
	name := "TestUnionPolicy2"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":",
@@ -158,13 +155,9 @@ func TestPolicy3(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-policy31")
	tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-policy32")
	tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-policy33")
	require.NoError(t, os.MkdirAll(tempdir1, 0744))
	require.NoError(t, os.MkdirAll(tempdir2, 0744))
	require.NoError(t, os.MkdirAll(tempdir3, 0744))
	upstreams := tempdir1 + " " + tempdir2 + " " + tempdir3
	dirs, clean := makeTestDirs(t, 3)
	defer clean()
	upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
	name := "TestUnionPolicy3"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":",
@@ -14,6 +14,7 @@ import (
	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/cache"
	"github.com/rclone/rclone/fs/fspath"
)

var (
@@ -62,7 +63,7 @@ type Entry interface {
// New creates a new Fs based on the
// string formatted `type:root_path(:ro/:nc)`
func New(ctx context.Context, remote, root string, cacheTime time.Duration) (*Fs, error) {
	_, configName, fsPath, err := fs.ParseRemote(remote)
	configName, fsPath, err := fspath.SplitFs(remote)
	if err != nil {
		return nil, err
	}
@@ -83,15 +84,13 @@ func New(ctx context.Context, remote, root string, cacheTime time.Duration) (*Fs
		f.creatable = false
		fsPath = fsPath[0 : len(fsPath)-3]
	}
	if configName != "local" {
		fsPath = configName + ":" + fsPath
	}
	rFs, err := cache.Get(ctx, fsPath)
	remote = configName + fsPath
	rFs, err := cache.Get(ctx, remote)
	if err != nil && err != fs.ErrorIsFile {
		return nil, err
	}
	f.RootFs = rFs
	rootString := path.Join(fsPath, filepath.ToSlash(root))
	rootString := path.Join(remote, filepath.ToSlash(root))
	myFs, err := cache.Get(ctx, rootString)
	if err != nil && err != fs.ErrorIsFile {
		return nil, err
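The hunk above swaps fs.ParseRemote for fspath.SplitFs and drops the old "local" special case. A minimal sketch of the implied behaviour, assuming (as the plain concatenation in the hunk suggests, not verified here) that SplitFs keeps the trailing colon on the config name:

    package main

    import (
        "fmt"

        "github.com/rclone/rclone/fs/fspath"
    )

    func main() {
        // Assumed: "myremote:some/path" splits into "myremote:" and "some/path",
        // while a bare local path yields an empty config name.
        configName, fsPath, err := fspath.SplitFs("myremote:some/path")
        if err != nil {
            panic(err)
        }
        fmt.Println(configName, fsPath) // expected: "myremote:" "some/path"
        // Rejoining needs no special case for local paths.
        remote := configName + fsPath
        fmt.Println(remote) // "myremote:some/path"
    }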
170  backend/uptobox/api/types.go  Normal file
@@ -0,0 +1,170 @@
package api

import "fmt"

// Error contains the error code and message returned by the API
type Error struct {
	Success    bool   `json:"success,omitempty"`
	StatusCode int    `json:"statusCode,omitempty"`
	Message    string `json:"message,omitempty"`
	Data       string `json:"data,omitempty"`
}

// Error returns a string for the error and satisfies the error interface
func (e Error) Error() string {
	out := fmt.Sprintf("api error %d", e.StatusCode)
	if e.Message != "" {
		out += ": " + e.Message
	}
	if e.Data != "" {
		out += ": " + e.Data
	}
	return out
}

// FolderEntry represents a Uptobox subfolder when listing folder contents
type FolderEntry struct {
	FolderID    uint64 `json:"fld_id"`
	Description string `json:"fld_descr"`
	Password    string `json:"fld_password"`
	FullPath    string `json:"fullPath"`
	Path        string `json:"fld_name"`
	Name        string `json:"name"`
	Hash        string `json:"hash"`
}

// FolderInfo represents the current folder when listing folder contents
type FolderInfo struct {
	FolderID      uint64 `json:"fld_id"`
	Hash          string `json:"hash"`
	FileCount     uint64 `json:"fileCount"`
	TotalFileSize int64  `json:"totalFileSize"`
}

// FileInfo represents a file when listing folder contents
type FileInfo struct {
	Name         string `json:"file_name"`
	Description  string `json:"file_descr"`
	Created      string `json:"file_created"`
	Size         int64  `json:"file_size"`
	Downloads    uint64 `json:"file_downloads"`
	Code         string `json:"file_code"`
	Password     string `json:"file_password"`
	Public       int    `json:"file_public"`
	LastDownload string `json:"file_last_download"`
	ID           uint64 `json:"id"`
}

// ReadMetadataResponse is the response when listing folder contents
type ReadMetadataResponse struct {
	StatusCode int    `json:"statusCode"`
	Message    string `json:"message"`
	Data       struct {
		CurrentFolder  FolderInfo    `json:"currentFolder"`
		Folders        []FolderEntry `json:"folders"`
		Files          []FileInfo    `json:"files"`
		PageCount      int           `json:"pageCount"`
		TotalFileCount int           `json:"totalFileCount"`
		TotalFileSize  int64         `json:"totalFileSize"`
	} `json:"data"`
}

// UploadInfo is the response when initiating an upload
type UploadInfo struct {
	StatusCode int    `json:"statusCode"`
	Message    string `json:"message"`
	Data       struct {
		UploadLink string `json:"uploadLink"`
		MaxUpload  string `json:"maxUpload"`
	} `json:"data"`
}

// UploadResponse is the response to a successful upload
type UploadResponse struct {
	Files []struct {
		Name      string `json:"name"`
		Size      int64  `json:"size"`
		URL       string `json:"url"`
		DeleteURL string `json:"deleteUrl"`
	} `json:"files"`
}

// UpdateResponse is a generic response to various action on files (rename/copy/move)
type UpdateResponse struct {
	Message    string `json:"message"`
	StatusCode int    `json:"statusCode"`
}

// Download is the response when requesting a download link
type Download struct {
	StatusCode int    `json:"statusCode"`
	Message    string `json:"message"`
	Data       struct {
		DownloadLink string `json:"dlLink"`
	} `json:"data"`
}

// MetadataRequestOptions represents all the options when listing folder contents
type MetadataRequestOptions struct {
	Limit       uint64
	Offset      uint64
	SearchField string
	Search      string
}

// CreateFolderRequest is used for creating a folder
type CreateFolderRequest struct {
	Token string `json:"token"`
	Path  string `json:"path"`
	Name  string `json:"name"`
}

// DeleteFolderRequest is used for deleting a folder
type DeleteFolderRequest struct {
	Token    string `json:"token"`
	FolderID uint64 `json:"fld_id"`
}

// CopyMoveFileRequest is used for moving/copying a file
type CopyMoveFileRequest struct {
	Token               string `json:"token"`
	FileCodes           string `json:"file_codes"`
	DestinationFolderID uint64 `json:"destination_fld_id"`
	Action              string `json:"action"`
}

// MoveFolderRequest is used for moving a folder
type MoveFolderRequest struct {
	Token               string `json:"token"`
	FolderID            uint64 `json:"fld_id"`
	DestinationFolderID uint64 `json:"destination_fld_id"`
	Action              string `json:"action"`
}

// RenameFolderRequest is used for renaming a folder
type RenameFolderRequest struct {
	Token    string `json:"token"`
	FolderID uint64 `json:"fld_id"`
	NewName  string `json:"new_name"`
}

// UpdateFileInformation is used for renaming a file
type UpdateFileInformation struct {
	Token       string `json:"token"`
	FileCode    string `json:"file_code"`
	NewName     string `json:"new_name,omitempty"`
	Description string `json:"description,omitempty"`
	Password    string `json:"password,omitempty"`
	Public      string `json:"public,omitempty"`
}

// RemoveFileRequest is used for deleting a file
type RemoveFileRequest struct {
	Token     string `json:"token"`
	FileCodes string `json:"file_codes"`
}

// Token represents the authentication token
type Token struct {
	Token string `json:"token"`
}
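To make the shape of these types concrete, here is a small self-contained sketch (with hypothetical values, not real API output) that decodes a folder listing into trimmed copies of the structs above:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Trimmed copies of the types above, enough to decode a listing.
    type FileInfo struct {
        Name string `json:"file_name"`
        Size int64  `json:"file_size"`
        Code string `json:"file_code"`
    }

    type ReadMetadataResponse struct {
        StatusCode int `json:"statusCode"`
        Data       struct {
            Files []FileInfo `json:"files"`
        } `json:"data"`
    }

    func main() {
        // Hypothetical response body for illustration only.
        body := `{"statusCode":0,"data":{"files":[{"file_name":"a.txt","file_size":50,"file_code":"abc123"}]}}`
        var r ReadMetadataResponse
        if err := json.Unmarshal([]byte(body), &r); err != nil {
            panic(err)
        }
        fmt.Println(r.Data.Files[0].Name, r.Data.Files[0].Size) // a.txt 50
    }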
1053  backend/uptobox/uptobox.go  Normal file
File diff suppressed because it is too large
21  backend/uptobox/uptobox_test.go  Normal file
@@ -0,0 +1,21 @@
// Test Uptobox filesystem interface
package uptobox_test

import (
	"testing"

	"github.com/rclone/rclone/backend/uptobox"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	if *fstest.RemoteName == "" {
		*fstest.RemoteName = "TestUptobox:"
	}
	fstests.Run(t, &fstests.Opt{
		RemoteName: *fstest.RemoteName,
		NilObject:  (*uptobox.Object)(nil),
	})
}
@@ -125,7 +125,7 @@ func (ca *CookieAuth) getSPCookie(conf *SharepointSuccessResponse) (*CookieRespo
		return nil, errors.Wrap(err, "Error while constructing endpoint URL")
	}

	u, err := url.Parse("https://" + spRoot.Host + "/_forms/default.aspx?wa=wsignin1.0")
	u, err := url.Parse(spRoot.Scheme + "://" + spRoot.Host + "/_forms/default.aspx?wa=wsignin1.0")
	if err != nil {
		return nil, errors.Wrap(err, "Error while constructing login URL")
	}

@@ -113,6 +113,21 @@ func init() {
			Name:     config.ConfigEncoding,
			Help:     configEncodingHelp,
			Advanced: true,
		}, {
			Name: "headers",
			Help: `Set HTTP headers for all transactions

Use this to set additional HTTP headers for all transactions

The input format is comma separated list of key,value pairs. Standard
[CSV encoding](https://godoc.org/encoding/csv) may be used.

For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.

You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
`,
			Default:  fs.CommaSepList{},
			Advanced: true,
		}},
	})
}
@@ -126,6 +141,7 @@ type Options struct {
	BearerToken        string               `config:"bearer_token"`
	BearerTokenCommand string               `config:"bearer_token_command"`
	Enc                encoder.MultiEncoder `config:"encoding"`
	Headers            fs.CommaSepList      `config:"headers"`
}

// Fs represents a remote webdav
@@ -196,7 +212,10 @@ var retryErrorCodes = []int{

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	// If we have a bearer token command and it has expired then refresh it
	if f.opt.BearerTokenCommand != "" && resp != nil && resp.StatusCode == 401 {
		fs.Debugf(f, "Bearer token expired: %v", err)
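This same shouldRetry refactor (threading ctx through and consulting fserrors.ContextError) recurs in every backend below. Assembled from these hunks rather than from any one complete file, the essence of the pattern is:

    // A cancelled or timed-out context must stop the pacer's retry loop.
    func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
        if fserrors.ContextError(ctx, &err) {
            return false, err // context cancelled or deadline exceeded: never retry
        }
        return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
    }

    // Call sites then pass ctx down so cancellation is honoured:
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
        return f.shouldRetry(ctx, resp, err)
    })

Hence the long series of one-line `return f.shouldRetry(resp, err)` to `return f.shouldRetry(ctx, resp, err)` changes that follows.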
@@ -270,7 +289,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, depth string)
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
		return f.shouldRetry(resp, err)
		return f.shouldRetry(ctx, resp, err)
	})
	if apiErr, ok := err.(*api.Error); ok {
		// does not exist
@@ -356,6 +375,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	if err != nil {
		return nil, err
	}

	if len(opt.Headers)%2 != 0 {
		return nil, errors.New("odd number of headers supplied")
	}
	fs.Debugf(nil, "found headers: %v", opt.Headers)

	rootIsDir := strings.HasSuffix(root, "/")
	root = strings.Trim(root, "/")

@@ -425,6 +450,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
			return nil, err
		}
	}
	if opt.Headers != nil {
		f.addHeaders(opt.Headers)
	}
	f.srv.SetErrorHandler(errorHandler)
	err = f.setQuirks(ctx, opt.Vendor)
	if err != nil {
@@ -484,6 +512,15 @@ func (f *Fs) fetchBearerToken(cmd string) (string, error) {
	return stdoutString, nil
}

// Adds the configured headers to the request if any
func (f *Fs) addHeaders(headers fs.CommaSepList) {
	for i := 0; i < len(headers); i += 2 {
		key := f.opt.Headers[i]
		value := f.opt.Headers[i+1]
		f.srv.SetHeader(key, value)
	}
}

// fetch the bearer token and set it if successful
func (f *Fs) fetchAndSetBearerToken() error {
	if f.opt.BearerTokenCommand == "" {
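The headers option is parsed as a flat list of key,value pairs, hence the "odd number of headers" check above. A hypothetical standalone illustration of the same pairwise walk that addHeaders performs (header names borrowed from the internal test further down; a CommaSepList is just the already-split tokens):

    package main

    import (
        "fmt"
        "log"
        "net/http"
    )

    func main() {
        // An even-length token list is consumed two at a time: key, value.
        headers := []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
        if len(headers)%2 != 0 {
            log.Fatal("odd number of headers supplied")
        }
        req, err := http.NewRequest("GET", "http://example.com/", nil)
        if err != nil {
            log.Fatal(err)
        }
        for i := 0; i < len(headers); i += 2 {
            req.Header.Set(headers[i], headers[i+1])
        }
        fmt.Println(req.Header) // map[X-Potato:[sausage] X-Rhubarb:[cucumber]]
    }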
@@ -628,7 +665,7 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
		return f.shouldRetry(resp, err)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		if apiErr, ok := err.(*api.Error); ok {
@@ -800,7 +837,7 @@ func (f *Fs) _dirExists(ctx context.Context, dirPath string) (exists bool) {
	var err error
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
		return f.shouldRetry(resp, err)
		return f.shouldRetry(ctx, resp, err)
	})
	return err == nil
}
@@ -822,7 +859,7 @@ func (f *Fs) _mkdir(ctx context.Context, dirPath string) error {
	}
	err := f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.Call(ctx, &opts)
		return f.shouldRetry(resp, err)
		return f.shouldRetry(ctx, resp, err)
	})
	if apiErr, ok := err.(*api.Error); ok {
		// Check if it already exists. The response code for this isn't
@@ -911,7 +948,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
	var err error
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallXML(ctx, &opts, nil, nil)
		return f.shouldRetry(resp, err)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return errors.Wrap(err, "rmdir failed")
@@ -974,7 +1011,7 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
	}
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.Call(ctx, &opts)
		return f.shouldRetry(resp, err)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "Copy call failed")
@@ -1070,7 +1107,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
	}
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.Call(ctx, &opts)
		return f.shouldRetry(resp, err)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return errors.Wrap(err, "DirMove MOVE call failed")
@@ -1112,7 +1149,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	var err error
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallXML(ctx, &opts, nil, &q)
		return f.shouldRetry(resp, err)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "about call failed")
@@ -1240,7 +1277,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
	}
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(ctx, &opts)
		return o.fs.shouldRetry(resp, err)
		return o.fs.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
@@ -1291,7 +1328,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	}
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		resp, err = o.fs.srv.Call(ctx, &opts)
		return o.fs.shouldRetry(resp, err)
		return o.fs.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		// Give the WebDAV server a chance to get its internal state in order after the
@@ -1318,7 +1355,7 @@ func (o *Object) Remove(ctx context.Context) error {
	}
	return o.fs.pacer.Call(func() (bool, error) {
		resp, err := o.fs.srv.Call(ctx, &opts)
		return o.fs.shouldRetry(resp, err)
		return o.fs.shouldRetry(ctx, resp, err)
	})
}

74  backend/webdav/webdav_internal_test.go  Normal file
@@ -0,0 +1,74 @@
package webdav_test

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"

	"github.com/rclone/rclone/backend/webdav"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configfile"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

var (
	remoteName = "TestWebDAV"
	headers    = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
)

// prepareServer prepares the test server and returns a function to tidy it up afterwards;
// with each request the headers option tests are executed
func prepareServer(t *testing.T) (configmap.Simple, func()) {
	// file server
	fileServer := http.FileServer(http.Dir(""))

	// test the headers are there then pass on to fileServer
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
		assert.Equal(t, headers[1], r.Header.Get(headers[0]), what+headers[0])
		assert.Equal(t, headers[3], r.Header.Get(headers[2]), what+headers[2])
		fileServer.ServeHTTP(w, r)
	})

	// Make the test server
	ts := httptest.NewServer(handler)

	// Configure the remote
	configfile.Install()

	m := configmap.Simple{
		"type": "webdav",
		"url":  ts.URL,
		// add headers to test the headers option
		"headers": strings.Join(headers, ","),
	}

	// return a function to tidy up
	return m, ts.Close
}

// prepare the test server and return a function to tidy it up afterwards
func prepare(t *testing.T) (fs.Fs, func()) {
	m, tidy := prepareServer(t)

	// Instantiate the WebDAV server
	f, err := webdav.NewFs(context.Background(), remoteName, "", m)
	require.NoError(t, err)

	return f, tidy
}

// TestHeaders checks that the headers option is applied; any request will do
func TestHeaders(t *testing.T) {
	f, tidy := prepare(t)
	defer tidy()

	// any request will do
	_, err := f.Features().About(context.Background())
	require.NoError(t, err)
}
@@ -60,12 +60,10 @@ func init() {
		Name:        "yandex",
		Description: "Yandex Disk",
		NewFs:       NewFs,
		Config: func(ctx context.Context, name string, m configmap.Mapper) {
			err := oauthutil.Config(ctx, "yandex", name, m, oauthConfig, nil)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
				return
			}
		Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
			return oauthutil.ConfigOut("", &oauthutil.Options{
				OAuth2Config: oauthConfig,
			})
		},
		Options: append(oauthutil.SharedOptions, []fs.Option{{
			Name: config.ConfigEncoding,
@@ -153,7 +151,10 @@ var retryErrorCodes = []int{

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

@@ -226,7 +227,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, options *api.
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})

	if err != nil {
@@ -248,22 +249,22 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

	token, err := oauthutil.GetToken(name, m)
	if err != nil {
		log.Fatalf("Couldn't read OAuth token (this should never happen).")
		return nil, errors.Wrap(err, "couldn't read OAuth token")
	}
	if token.RefreshToken == "" {
		log.Fatalf("Unable to get RefreshToken. If you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend.")
		return nil, errors.New("unable to get RefreshToken. If you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
	}
	if token.TokenType != "OAuth" {
		token.TokenType = "OAuth"
		err = oauthutil.PutToken(name, m, token, false)
		if err != nil {
			log.Fatalf("Couldn't save OAuth token (this should never happen).")
			return nil, errors.Wrap(err, "couldn't save OAuth token")
		}
		log.Printf("Automatically upgraded OAuth config.")
	}
	oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
	if err != nil {
		log.Fatalf("Failed to configure Yandex: %v", err)
		return nil, errors.Wrap(err, "failed to configure Yandex")
	}

	ci := fs.GetConfig(ctx)
@@ -468,7 +469,7 @@ func (f *Fs) CreateDir(ctx context.Context, path string) (err error) {

	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.Call(ctx, &opts)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		// fmt.Printf("CreateDir %q Error: %s\n", path, err.Error())
@@ -543,6 +544,9 @@ func (f *Fs) waitForJob(ctx context.Context, location string) (err error) {
	var body []byte
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.Call(ctx, &opts)
		if fserrors.ContextError(ctx, &err) {
			return false, err
		}
		if err != nil {
			return fserrors.ShouldRetry(err), err
		}
@@ -585,6 +589,9 @@ func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) (err erro
	var body []byte
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.Call(ctx, &opts)
		if fserrors.ContextError(ctx, &err) {
			return false, err
		}
		if err != nil {
			return fserrors.ShouldRetry(err), err
		}
@@ -658,6 +665,9 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dst string, overwrite
	var body []byte
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.Call(ctx, &opts)
		if fserrors.ContextError(ctx, &err) {
			return false, err
		}
		if err != nil {
			return fserrors.ShouldRetry(err), err
		}
@@ -810,7 +820,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.Call(ctx, &opts)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})

	if apiErr, ok := err.(*api.ErrorResponse); ok {
@@ -848,7 +858,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {

	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.Call(ctx, &opts)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	return err
}
@@ -865,7 +875,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	var err error
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})

	if err != nil {
@@ -999,7 +1009,7 @@ func (o *Object) setCustomProperty(ctx context.Context, property string, value s

	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(ctx, &opts, &cpr, nil)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	return err
}
@@ -1032,7 +1042,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &dl)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})

	if err != nil {
@@ -1047,7 +1057,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
	}
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(ctx, &opts)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
@@ -1071,7 +1081,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, mimeT

	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &ur)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})

	if err != nil {
@@ -1089,7 +1099,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, mimeT

	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(ctx, &opts)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})

	return err
@@ -7,7 +7,6 @@ import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"net/url"
	"path"
@@ -36,8 +35,8 @@ import (
)

const (
	rcloneClientID              = "1000.OZNFWW075EKDSIE1R42HI9I2SUPC9A"
	rcloneEncryptedClientSecret = "rn7myzbsYK3WlqO2EU6jU8wmj0ylsx7_1B5wvSaVncYbu1Wt0QxPW9FFbidjqAZtyxnBenYIWq1pcA"
	rcloneClientID              = "1000.46MXF275FM2XV7QCHX5A7K3LGME66B"
	rcloneEncryptedClientSecret = "U-2gxclZQBcOG9NPhjiXAhj-f0uQ137D0zar8YyNHXHkQZlTeSpIOQfmCb4oSpvosJp_SJLXmLLeUA"
	minSleep      = 10 * time.Millisecond
	maxSleep      = 2 * time.Second
	decayConstant = 2 // bigger for slower decay, exponential
@@ -73,36 +72,97 @@ func init() {
		Name:        "zoho",
		Description: "Zoho",
		NewFs:       NewFs,
		Config: func(ctx context.Context, name string, m configmap.Mapper) {
		Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
			// Need to setup region before configuring oauth
			setupRegion(m)
			opt := oauthutil.Options{
				// No refresh token unless ApprovalForce is set
				OAuth2Opts: []oauth2.AuthCodeOption{oauth2.ApprovalForce},
			}
			if err := oauthutil.Config(ctx, "zoho", name, m, oauthConfig, &opt); err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			}
			// We need to rewrite the token type to "Zoho-oauthtoken" because Zoho wants
			// its own custom type
			token, err := oauthutil.GetToken(name, m)
			err := setupRegion(m)
			if err != nil {
				log.Fatalf("Failed to read token: %v", err)
				return nil, err
			}
			if token.TokenType != "Zoho-oauthtoken" {
				token.TokenType = "Zoho-oauthtoken"
				err = oauthutil.PutToken(name, m, token, false)
			getSrvs := func() (authSrv, apiSrv *rest.Client, err error) {
				oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
				if err != nil {
					log.Fatalf("Failed to configure token: %v", err)
					return nil, nil, errors.Wrap(err, "failed to load oAuthClient")
				}
				authSrv = rest.NewClient(oAuthClient).SetRoot(accountsURL)
				apiSrv = rest.NewClient(oAuthClient).SetRoot(rootURL)
				return authSrv, apiSrv, nil
			}
			if err = setupRoot(ctx, name, m); err != nil {
				log.Fatalf("Failed to configure root directory: %v", err)

			switch config.State {
			case "":
				return oauthutil.ConfigOut("teams", &oauthutil.Options{
					OAuth2Config: oauthConfig,
					// No refresh token unless ApprovalForce is set
					OAuth2Opts: []oauth2.AuthCodeOption{oauth2.ApprovalForce},
				})
			case "teams":
				// We need to rewrite the token type to "Zoho-oauthtoken" because Zoho wants
				// its own custom type
				token, err := oauthutil.GetToken(name, m)
				if err != nil {
					return nil, errors.Wrap(err, "failed to read token")
				}
				if token.TokenType != "Zoho-oauthtoken" {
					token.TokenType = "Zoho-oauthtoken"
					err = oauthutil.PutToken(name, m, token, false)
					if err != nil {
						return nil, errors.Wrap(err, "failed to configure token")
					}
				}

				authSrv, apiSrv, err := getSrvs()
				if err != nil {
					return nil, err
				}

				// Get the user Info
				opts := rest.Opts{
					Method: "GET",
					Path:   "/oauth/user/info",
				}
				var user api.User
				_, err = authSrv.CallJSON(ctx, &opts, nil, &user)
				if err != nil {
					return nil, err
				}

				// Get the teams
				teams, err := listTeams(ctx, user.ZUID, apiSrv)
				if err != nil {
					return nil, err
				}
				return fs.ConfigChoose("workspace", "config_team_drive_id", "Team Drive ID", len(teams), func(i int) (string, string) {
					team := teams[i]
					return team.ID, team.Attributes.Name
				})
			case "workspace":
				_, apiSrv, err := getSrvs()
				if err != nil {
					return nil, err
				}
				teamID := config.Result
				workspaces, err := listWorkspaces(ctx, teamID, apiSrv)
				if err != nil {
					return nil, err
				}
				return fs.ConfigChoose("workspace_end", "config_workspace", "Workspace ID", len(workspaces), func(i int) (string, string) {
					workspace := workspaces[i]
					return workspace.ID, workspace.Attributes.Name
				})
			case "workspace_end":
				workspaceID := config.Result
				m.Set(configRootID, workspaceID)
				return nil, nil
			}
			return nil, fmt.Errorf("unknown state %q", config.State)
		},
		Options: append(oauthutil.SharedOptions, []fs.Option{{
			Name: "region",
			Help: "Zoho region to connect to. You'll have to use the region you organization is registered in.",
			Help: `Zoho region to connect to.

You'll have to use the region your organization is registered in. If
not sure use the same top level domain as you connect to in your
browser.`,
			Examples: []fs.OptionExample{{
				Value: "com",
				Help:  "United states / Global",
@@ -159,15 +219,16 @@ type Object struct {

// ------------------------------------------------------------

func setupRegion(m configmap.Mapper) {
func setupRegion(m configmap.Mapper) error {
	region, ok := m.Get("region")
	if !ok {
		log.Fatalf("No region set\n")
	if !ok || region == "" {
		return errors.New("no region set")
	}
	rootURL = fmt.Sprintf("https://workdrive.zoho.%s/api/v1", region)
	accountsURL = fmt.Sprintf("https://accounts.zoho.%s", region)
	oauthConfig.Endpoint.AuthURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/auth", region)
	oauthConfig.Endpoint.TokenURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/token", region)
	return nil
}

// ------------------------------------------------------------
@@ -200,49 +261,6 @@ func listWorkspaces(ctx context.Context, teamID string, srv *rest.Client) ([]api
	return workspaceList.TeamWorkspace, nil
}

func setupRoot(ctx context.Context, name string, m configmap.Mapper) error {
	oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
	if err != nil {
		log.Fatalf("Failed to load oAuthClient: %s", err)
	}
	authSrv := rest.NewClient(oAuthClient).SetRoot(accountsURL)
	opts := rest.Opts{
		Method: "GET",
		Path:   "/oauth/user/info",
	}

	var user api.User
	_, err = authSrv.CallJSON(ctx, &opts, nil, &user)
	if err != nil {
		return err
	}

	apiSrv := rest.NewClient(oAuthClient).SetRoot(rootURL)
	teams, err := listTeams(ctx, user.ZUID, apiSrv)
	if err != nil {
		return err
	}
	var teamIDs, teamNames []string
	for _, team := range teams {
		teamIDs = append(teamIDs, team.ID)
		teamNames = append(teamNames, team.Attributes.Name)
	}
	teamID := config.Choose("Enter a Team Drive ID", teamIDs, teamNames, true)

	workspaces, err := listWorkspaces(ctx, teamID, apiSrv)
	if err != nil {
		return err
	}
	var workspaceIDs, workspaceNames []string
	for _, workspace := range workspaces {
		workspaceIDs = append(workspaceIDs, workspace.ID)
		workspaceNames = append(workspaceNames, workspace.Attributes.Name)
	}
	worksspaceID := config.Choose("Enter a Workspace ID", workspaceIDs, workspaceNames, true)
	m.Set(configRootID, worksspaceID)
	return nil
}

// --------------------------------------------------------------

// retryErrorCodes is a slice of error codes that we will retry
@@ -257,7 +275,10 @@ var retryErrorCodes = []int{

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	authRetry := false

	if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
@@ -354,7 +375,7 @@ func (f *Fs) readMetaDataForID(ctx context.Context, id string) (*api.Item, error
	var err error
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
@@ -369,6 +390,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	if err := configstruct.Set(m, opt); err != nil {
		return nil, err
	}
	err := setupRegion(m)
	if err != nil {
		return nil, err
	}

	root = parsePath(root)
	oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
@@ -450,7 +475,7 @@ OUTER:
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return found, errors.Wrap(err, "couldn't list files")
@@ -555,7 +580,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
	}
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		//fmt.Printf("...Error %v\n", err)
@@ -643,7 +668,7 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64,
	params.Set("filename", name)
	params.Set("parent_id", parent)
	params.Set("override-name-exist", strconv.FormatBool(true))
	formReader, contentType, overhead, err := rest.MultipartUpload(in, nil, "content", name)
	formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name)
	if err != nil {
		return nil, errors.Wrap(err, "failed to make multipart upload")
	}
@@ -664,7 +689,7 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64,
	var uploadResponse *api.UploadResponse
	err = f.pacer.CallNoRetry(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &uploadResponse)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "upload error")
@@ -746,7 +771,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) (err error) {
	}
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, &delete, nil)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return errors.Wrap(err, "delete object failed")
@@ -816,7 +841,7 @@ func (f *Fs) rename(ctx context.Context, id, name string) (item *api.Item, err e
	var result *api.ItemInfo
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, &rename, &result)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "rename failed")
@@ -869,7 +894,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
	var result *api.ItemList
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, &copyFile, &result)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "couldn't copy file")
@@ -914,7 +939,7 @@ func (f *Fs) move(ctx context.Context, srcID, parentID string) (item *api.Item,
	var result *api.ItemList
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, &moveFile, &result)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "move failed")
@@ -1181,7 +1206,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
	}
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(ctx, &opts)
		return shouldRetry(resp, err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
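The rewritten zoho Config function above is a state machine driven by fs.ConfigIn: each call returns a *fs.ConfigOut naming the next state, and config.Result carries the user's previous answer. A toy, self-contained version of that flow (local stand-in types, not the real fs API) makes the control structure easier to see:

    package main

    import "fmt"

    // Local stand-ins for fs.ConfigIn / fs.ConfigOut, for illustration only.
    type ConfigIn struct{ State, Result string }
    type ConfigOut struct{ State, Prompt string }

    func configStep(in ConfigIn) (*ConfigOut, error) {
        switch in.State {
        case "": // first call: would run the OAuth flow, then continue
            return &ConfigOut{State: "teams", Prompt: "pick a Team Drive ID"}, nil
        case "teams": // Result would be used to list workspaces for the team
            return &ConfigOut{State: "workspace", Prompt: "pick a Workspace ID"}, nil
        case "workspace": // Result is the chosen ID: save and finish
            fmt.Println("saving workspace", in.Result) // cf. m.Set(configRootID, ...)
            return nil, nil                            // nil ConfigOut ends the machine
        }
        return nil, fmt.Errorf("unknown state %q", in.State)
    }

    func main() {
        out, _ := configStep(ConfigIn{})
        for out != nil {
            fmt.Println("state:", out.State, "-", out.Prompt)
            out, _ = configStep(ConfigIn{State: out.State, Result: "id-123"})
        }
    }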
203  bin/config.py  Executable file
@@ -0,0 +1,203 @@
#!/usr/bin/env python3
"""
Test program to demonstrate the remote config interfaces in
rclone.

This program can simulate

    rclone config create
    rclone config update
    rclone config password - NOT implemented yet
    rclone authorize - NOT implemented yet

Pass the desired action as the first argument then any parameters.

This assumes passwords will be passed in the clear.
"""

import argparse
import subprocess
import json
from pprint import pprint

sep = "-"*60

def rpc(args, command, params):
    """
    Run the command. This could be either over the CLI or the API.

    Here we run over the API either using `rclone rc --loopback` which
    is useful for making sure state is saved properly or to an
    existing rclone rcd if `--rc` is used on the command line.
    """
    if args.rc:
        import requests
        kwargs = {
            "json": params,
        }
        if args.user:
            kwargs["auth"] = (args.user, args.password)
        r = requests.post('http://localhost:5572/'+command, **kwargs)
        if r.status_code != 200:
            raise ValueError(f"RC command failed: Error {r.status_code}: {r.text}")
        return r.json()
    cmd = ["rclone", "-vv", "rc", "--loopback", command, "--json", json.dumps(params)]
    result = subprocess.run(cmd, stdout=subprocess.PIPE, check=True)
    return json.loads(result.stdout)

def parse_parameters(parameters):
    """
    Parse the incoming key=value parameters into a dict
    """
    d = {}
    for param in parameters:
        parts = param.split("=", 1)
        if len(parts) != 2:
            raise ValueError("bad format for parameter need name=value")
        d[parts[0]] = parts[1]
    return d

def ask(opt):
    """
    Ask the user to enter the option

    This is the user interface for asking a user a question.

    If there are examples they should be presented.
    """
    while True:
        if opt["IsPassword"]:
            print("*** Inputting a password")
        print(opt['Help'])
        examples = opt.get("Examples", ())
        or_number = ""
        if len(examples) > 0:
            or_number = " or choice number"
            for i, example in enumerate(examples):
                print(f"{i:3} value: {example['Value']}")
                print(f"    help: {example['Help']}")
        print(f"Enter a {opt['Type']} value{or_number}. Press Enter for the default ('{opt['DefaultStr']}')")
        print(f"{opt['Name']}> ", end='')
        s = input()
        if s == "":
            return opt["DefaultStr"]
        try:
            i = int(s)
            if i >= 0 and i < len(examples):
                return examples[i]["Value"]
        except ValueError:
            pass
        if opt["Exclusive"]:
            for example in examples:
                if s == example["Value"]:
                    return s
            # Exclusive is set but the value isn't one of the accepted
            # ones so continue
            print("Value isn't one of the acceptable values")
        else:
            return s
    return s

def create_or_update(what, args):
    """
    Run the equivalent of rclone config create
    or rclone config update

    what should either be "create" or "update"
    """
    print(what, args)
    params = parse_parameters(args.parameters)
    inp = {
        "name": args.name,
        "parameters": params,
        "opt": {
            "nonInteractive": True,
            "all": args.all,
            "noObscure": args.obscured_passwords,
            "obscure": not args.obscured_passwords,
        },
    }
    if what == "create":
        inp["type"] = args.type
    while True:
        print(sep)
        print("Input to API")
        pprint(inp)
        print(sep)
        out = rpc(args, "config/"+what, inp)
        print(sep)
        print("Output from API")
        pprint(out)
        print(sep)
        if out["State"] == "":
            return
        if out["Error"]:
            print("Error", out["Error"])
        result = ask(out["Option"])
        inp["opt"]["state"] = out["State"]
        inp["opt"]["result"] = result
        inp["opt"]["continue"] = True

def create(args):
    """Run the equivalent of rclone config create"""
    create_or_update("create", args)

def update(args):
    """Run the equivalent of rclone config update"""
    create_or_update("update", args)

def password(args):
    """Run the equivalent of rclone config password"""
    print("password", args)
    raise NotImplementedError()

def authorize(args):
    """Run the equivalent of rclone authorize"""
    print("authorize", args)
    raise NotImplementedError()

def main():
    """
    Make the command line parser and dispatch
    """
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument("-a", "--all", action='store_true',
                        help="Ask all the config questions if set")
    parser.add_argument("-o", "--obscured-passwords", action='store_true',
                        help="If set assume the passwords are obscured")
    parser.add_argument("--rc", action='store_true',
                        help="If set use the rc (you'll need to start an rclone rcd)")
    parser.add_argument("--user", type=str, default="",
                        help="Username for use with --rc")
    parser.add_argument("--pass", type=str, default="", dest='password',
                        help="Password for use with --rc")

    subparsers = parser.add_subparsers(dest='command', required=True)

    subparser = subparsers.add_parser('create')
    subparser.add_argument("name", type=str, help="Name of remote to create")
    subparser.add_argument("type", type=str, help="Type of remote to create")
    subparser.add_argument("parameters", type=str, nargs='*', help="Config parameters name=value name=value")
    subparser.set_defaults(func=create)

    subparser = subparsers.add_parser('update')
    subparser.add_argument("name", type=str, help="Name of remote to update")
    subparser.add_argument("parameters", type=str, nargs='*', help="Config parameters name=value name=value")
    subparser.set_defaults(func=update)

    subparser = subparsers.add_parser('password')
    subparser.add_argument("name", type=str, help="Name of remote to update")
    subparser.add_argument("parameters", type=str, nargs='*', help="Config parameters name=value name=value")
    subparser.set_defaults(func=password)

    subparser = subparsers.add_parser('authorize')
    subparser.set_defaults(func=authorize)

    args = parser.parse_args()
    args.func(args)

if __name__ == "__main__":
    main()
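For comparison, a minimal Go sketch of the same config/create call the script above drives (assuming, as the script does, an `rclone rcd` listening on the default localhost:5572; the remote name and type here are hypothetical):

    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
        "net/http"
    )

    func main() {
        // First step of the state machine: same JSON shape as the script's inp.
        in := map[string]interface{}{
            "name":       "mydrive",
            "type":       "drive",
            "parameters": map[string]string{},
            "opt":        map[string]interface{}{"nonInteractive": true},
        }
        body, err := json.Marshal(in)
        if err != nil {
            panic(err)
        }
        resp, err := http.Post("http://localhost:5572/config/create", "application/json", bytes.NewReader(body))
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        var out map[string]interface{}
        if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
            panic(err)
        }
        // As in the script: loop, feeding State and the user's Result back
        // via opt.state / opt.result / opt.continue until State is "".
        fmt.Println(out["State"], out["Option"])
    }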
@@ -62,6 +62,7 @@ docs = [
	"sftp.md",
	"sugarsync.md",
	"tardigrade.md",
	"uptobox.md",
	"union.md",
	"webdav.md",
	"yandex.md",

@@ -44,10 +44,10 @@ var commandDefinition = &cobra.Command{
	Use:   "about remote:",
	Short: `Get quota information from the remote.`,
	Long: `
` + "`rclone about`" + `prints quota information about a remote to standard
` + "`rclone about`" + ` prints quota information about a remote to standard
output. The output is typically used, free, quota and trash contents.

E.g. Typical output from` + "`rclone about remote:`" + `is:
E.g. Typical output from ` + "`rclone about remote:`" + ` is:

    Total:   17G
    Used:    7.444G
@@ -75,7 +75,7 @@ Applying a ` + "`--full`" + ` flag to the command prints the bytes in full, e.g.
    Trashed: 104857602
    Other:   8849156022

A ` + "`--json`" + `flag generates conveniently computer readable output, e.g.
A ` + "`--json`" + ` flag generates conveniently computer readable output, e.g.

    {
        "total": 18253611008,

@@ -18,7 +18,6 @@ import (
	_ "github.com/rclone/rclone/cmd/copyurl"
	_ "github.com/rclone/rclone/cmd/cryptcheck"
	_ "github.com/rclone/rclone/cmd/cryptdecode"
	_ "github.com/rclone/rclone/cmd/dbhashsum"
	_ "github.com/rclone/rclone/cmd/dedupe"
	_ "github.com/rclone/rclone/cmd/delete"
	_ "github.com/rclone/rclone/cmd/deletefile"
@@ -47,12 +46,14 @@ import (
	_ "github.com/rclone/rclone/cmd/reveal"
	_ "github.com/rclone/rclone/cmd/rmdir"
	_ "github.com/rclone/rclone/cmd/rmdirs"
	_ "github.com/rclone/rclone/cmd/selfupdate"
	_ "github.com/rclone/rclone/cmd/serve"
	_ "github.com/rclone/rclone/cmd/settier"
	_ "github.com/rclone/rclone/cmd/sha1sum"
	_ "github.com/rclone/rclone/cmd/size"
	_ "github.com/rclone/rclone/cmd/sync"
	_ "github.com/rclone/rclone/cmd/test"
	_ "github.com/rclone/rclone/cmd/test/changenotify"
	_ "github.com/rclone/rclone/cmd/test/histogram"
	_ "github.com/rclone/rclone/cmd/test/info"
	_ "github.com/rclone/rclone/cmd/test/makefiles"

@@ -29,8 +29,8 @@ rclone config.

Use the --auth-no-open-browser to prevent rclone to open auth
link in default browser automatically.`,
	Run: func(command *cobra.Command, args []string) {
	RunE: func(command *cobra.Command, args []string) error {
		cmd.CheckArgs(1, 3, command, args)
		config.Authorize(context.Background(), args, noAutoBrowser)
		return config.Authorize(context.Background(), args, noAutoBrowser)
	},
}

@@ -9,6 +9,7 @@ import (
	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
@@ -156,6 +157,12 @@ to check all the data.
			if download {
				return operations.CheckDownload(context.Background(), opt)
			}
			hashType := fsrc.Hashes().Overlap(fdst.Hashes()).GetOne()
			if hashType == hash.None {
				fs.Errorf(nil, "No common hash found - not using a hash for checks")
			} else {
				fs.Infof(nil, "Using %v for hash comparisons", hashType)
			}
			return operations.Check(context.Background(), opt)
		})
	},
34  cmd/cmd.go
@@ -25,6 +25,7 @@ import (
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
"github.com/rclone/rclone/fs/config/configfile"
|
||||
"github.com/rclone/rclone/fs/config/configflags"
|
||||
"github.com/rclone/rclone/fs/config/flags"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
@@ -35,6 +36,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/rc/rcflags"
|
||||
"github.com/rclone/rclone/fs/rc/rcserver"
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
"github.com/rclone/rclone/lib/buildinfo"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/rclone/rclone/lib/terminal"
|
||||
"github.com/spf13/cobra"
|
||||
@@ -47,7 +49,7 @@ var (
|
||||
cpuProfile = flags.StringP("cpuprofile", "", "", "Write cpu profile to file")
|
||||
memProfile = flags.StringP("memprofile", "", "", "Write memory profile to file")
|
||||
statsInterval = flags.DurationP("stats", "", time.Minute*1, "Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable)")
|
||||
dataRateUnit = flags.StringP("stats-unit", "", "bytes", "Show data rate in stats as either 'bits' or 'bytes'/s")
|
||||
dataRateUnit = flags.StringP("stats-unit", "", "bytes", "Show data rate in stats as either 'bits' or 'bytes' per second")
|
||||
version bool
|
||||
retries = flags.IntP("retries", "", 3, "Retry operations this many times if they fail")
|
||||
retriesInterval = flags.DurationP("retries-sleep", "", 0, "Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)")
|
||||
@@ -73,9 +75,24 @@ const (
|
||||
|
||||
// ShowVersion prints the version to stdout
|
||||
func ShowVersion() {
|
||||
osVersion, osKernel := buildinfo.GetOSVersion()
|
||||
if osVersion == "" {
|
||||
osVersion = "unknown"
|
||||
}
|
||||
if osKernel == "" {
|
||||
osKernel = "unknown"
|
||||
}
|
||||
|
||||
linking, tagString := buildinfo.GetLinkingAndTags()
|
||||
|
||||
fmt.Printf("rclone %s\n", fs.Version)
|
||||
fmt.Printf("- os/arch: %s/%s\n", runtime.GOOS, runtime.GOARCH)
|
||||
fmt.Printf("- go version: %s\n", runtime.Version())
|
||||
fmt.Printf("- os/version: %s\n", osVersion)
|
||||
fmt.Printf("- os/kernel: %s\n", osKernel)
|
||||
fmt.Printf("- os/type: %s\n", runtime.GOOS)
|
||||
fmt.Printf("- os/arch: %s\n", runtime.GOARCH)
|
||||
fmt.Printf("- go/version: %s\n", runtime.Version())
|
||||
fmt.Printf("- go/linking: %s\n", linking)
|
||||
fmt.Printf("- go/tags: %s\n", tagString)
|
||||
}
|
||||
|
||||
// NewFsFile creates an Fs from a name but may point to a file.
|
||||
@@ -83,7 +100,7 @@ func ShowVersion() {
|
||||
// It returns a string with the file name if points to a file
|
||||
// otherwise "".
|
||||
func NewFsFile(remote string) (fs.Fs, string) {
|
||||
_, _, fsPath, err := fs.ParseRemote(remote)
|
||||
_, fsPath, err := fspath.SplitFs(remote)
|
||||
if err != nil {
|
||||
err = fs.CountError(err)
|
||||
log.Fatalf("Failed to create file system for %q: %v", remote, err)
|
||||
@@ -382,6 +399,12 @@ func initConfig() {
|
||||
// Finish parsing any command line flags
|
||||
configflags.SetFlags(ci)
|
||||
|
||||
// Load the config
|
||||
configfile.Install()
|
||||
|
||||
// Start accounting
|
||||
accounting.Start(ctx)
|
||||
|
||||
// Hide console window
|
||||
if ci.NoConsole {
|
||||
terminal.HideConsole()
|
||||
@@ -541,6 +564,9 @@ func Main() {
 	setupRootCommand(Root)
 	AddBackendFlags()
 	if err := Root.Execute(); err != nil {
+		if strings.HasPrefix(err.Error(), "unknown command") && selfupdateEnabled {
+			Root.PrintErrf("You could use '%s selfupdate' to get latest features.\n\n", Root.CommandPath())
+		}
 		log.Fatalf("Fatal error: %v", err)
 	}
 }
7	cmd/cmount/arch.go	Normal file
@@ -0,0 +1,7 @@
+package cmount
+
+// ProvidedBy returns true if the rclone build for the given OS
+// provides support for lib/cgo-fuse
+func ProvidedBy(osName string) bool {
+	return osName == "windows" || osName == "darwin"
+}
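A short sketch of how other code can consult the new helper, for example when deciding whether a build for a target OS ships `mount` via cgo-fuse (the calling loop is hypothetical):

    // Returns true only for the OSes listed in arch.go.
    for _, osName := range []string{"windows", "darwin", "linux"} {
    	fmt.Printf("%s provided by cmount: %v\n", osName, cmount.ProvidedBy(osName))
    }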
cmd/cmount/mount.go
@@ -21,12 +21,13 @@ import (
 	"github.com/rclone/rclone/cmd/mountlib"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/lib/atexit"
+	"github.com/rclone/rclone/lib/buildinfo"
 	"github.com/rclone/rclone/vfs"
 )
 
 func init() {
 	name := "cmount"
-	cmountOnly := runtime.GOOS == "windows" || runtime.GOOS == "darwin"
+	cmountOnly := ProvidedBy(runtime.GOOS)
 	if cmountOnly {
 		name = "mount"
 	}
@@ -35,6 +36,7 @@ func init() {
 		cmd.Aliases = append(cmd.Aliases, "cmount")
 	}
 	mountlib.AddRc("cmount", mount)
+	buildinfo.Tags = append(buildinfo.Tags, "cmount")
 }
 
 // Find the option string in the current options
cmd/config/config.go
@@ -22,6 +22,7 @@ func init() {
 	cmd.Root.AddCommand(configCommand)
 	configCommand.AddCommand(configEditCommand)
 	configCommand.AddCommand(configFileCommand)
+	configCommand.AddCommand(configTouchCommand)
 	configCommand.AddCommand(configShowCommand)
 	configCommand.AddCommand(configDumpCommand)
 	configCommand.AddCommand(configProvidersCommand)
@@ -41,9 +42,9 @@ var configCommand = &cobra.Command{
 remotes and manage existing ones. You may also set or remove a
 password to protect your configuration.
 `,
-	Run: func(command *cobra.Command, args []string) {
+	RunE: func(command *cobra.Command, args []string) error {
 		cmd.CheckArgs(0, 0, command, args)
-		config.EditConfig(context.Background())
+		return config.EditConfig(context.Background())
 	},
 }
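Switching from Run to RunE means cobra now receives the error from EditConfig, prints it, and exits non-zero, instead of the error being dropped. A generic sketch of the pattern (the command and failing function are hypothetical):

    var exampleCommand = &cobra.Command{
    	Use: "example",
    	// RunE propagates the returned error to cobra, which reports it
    	// and makes the process exit non-zero; plain Run cannot.
    	RunE: func(command *cobra.Command, args []string) error {
    		return doSomethingThatMayFail()
    	},
    }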
@@ -63,6 +64,15 @@ var configFileCommand = &cobra.Command{
 	},
 }
 
+var configTouchCommand = &cobra.Command{
+	Use:   "touch",
+	Short: `Ensure configuration file exists.`,
+	Run: func(command *cobra.Command, args []string) {
+		cmd.CheckArgs(0, 0, command, args)
+		config.SaveConfig()
+	},
+}
+
 var configShowCommand = &cobra.Command{
 	Use:   "show [<remote>]",
 	Short: `Print (decrypted) config file, or the config for a single remote.`,
@@ -95,12 +105,14 @@ var configProvidersCommand = &cobra.Command{
 	},
 }
 
-var (
-	configObscure   bool
-	configNoObscure bool
-)
+var updateRemoteOpt config.UpdateRemoteOpt
 
-const configPasswordHelp = `
+var configPasswordHelp = strings.ReplaceAll(`
+Note that if the config process would normally ask a question the
+default is taken (unless |--non-interactive| is used). Each time
+that happens rclone will print or DEBUG a message saying how to
+affect the value taken.
+
 If any of the parameters passed is a password field, then rclone will
 automatically obscure them if they aren't already obscured before
 putting them in the config file.
@@ -109,84 +121,170 @@ putting them in the config file.
 consists only of base64 characters then rclone can get confused about
 whether the password is already obscured or not and put unobscured
 passwords into the config file. If you want to be 100% certain that
-the passwords get obscured then use the "--obscure" flag, or if you
+the passwords get obscured then use the |--obscure| flag, or if you
 are 100% certain you are already passing obscured passwords then use
-"--no-obscure". You can also set obscured passwords using the
-"rclone config password" command.
-`
+|--no-obscure|. You can also set obscured passwords using the
+|rclone config password| command.
+
+The flag |--non-interactive| is for use by applications that wish to
+configure rclone themeselves, rather than using rclone's text based
+configuration questions. If this flag is set, and rclone needs to ask
+the user a question, a JSON blob will be returned with the question in
+it.
+
+This will look something like (some irrelevant detail removed):
+
+|||
+{
+    "State": "*oauth-islocal,teamdrive,,",
+    "Option": {
+        "Name": "config_is_local",
+        "Help": "Use auto config?\n * Say Y if not sure\n * Say N if you are working on a remote or headless machine\n",
+        "Default": true,
+        "Examples": [
+            {
+                "Value": "true",
+                "Help": "Yes"
+            },
+            {
+                "Value": "false",
+                "Help": "No"
+            }
+        ],
+        "Required": false,
+        "IsPassword": false,
+        "Type": "bool",
+        "Exclusive": true,
+    },
+    "Error": "",
+}
+|||
+
+The format of |Option| is the same as returned by |rclone config
+providers|. The question should be asked to the user and returned to
+rclone as the |--result| option along with the |--state| parameter.
+
+The keys of |Option| are used as follows:
+
+- |Name| - name of variable - show to user
+- |Help| - help text. Hard wrapped at 80 chars. Any URLs should be clicky.
+- |Default| - default value - return this if the user just wants the default.
+- |Examples| - the user should be able to choose one of these
+- |Required| - the value should be non-empty
+- |IsPassword| - the value is a password and should be edited as such
+- |Type| - type of value, eg |bool|, |string|, |int| and others
+- |Exclusive| - if set no free-form entry allowed only the |Examples|
+- Irrelevant keys |Provider|, |ShortOpt|, |Hide|, |NoPrefix|, |Advanced|
+
+If |Error| is set then it should be shown to the user at the same
+time as the question.
+
+    rclone config update name --continue --state "*oauth-islocal,teamdrive,," --result "true"
+
+Note that when using |--continue| all passwords should be passed in
+the clear (not obscured). Any default config values should be passed
+in with each invocation of |--continue|.
+
+At the end of the non interactive process, rclone will return a result
+with |State| as empty string.
+
+If |--all| is passed then rclone will ask all the config questions,
+not just the post config questions. Any parameters are used as
+defaults for questions as usual.
+
+Note that |bin/config.py| in the rclone source implements this protocol
+as a readable demonstration.
+`, "|", "`")
+
 var configCreateCommand = &cobra.Command{
 	Use:   "create `name` `type` [`key` `value`]*",
 	Short: `Create a new remote with name, type and options.`,
-	Long: `
-Create a new remote of ` + "`name`" + ` with ` + "`type`" + ` and options. The options
-should be passed in pairs of ` + "`key` `value`" + `.
+	Long: strings.ReplaceAll(`
+Create a new remote of |name| with |type| and options. The options
+should be passed in pairs of |key| |value| or as |key=value|.
 
 For example to make a swift remote of name myremote using auto config
 you would do:
 
     rclone config create myremote swift env_auth true
+    rclone config create myremote swift env_auth=true
 
-Note that if the config process would normally ask a question the
-default is taken. Each time that happens rclone will print a message
-saying how to affect the value taken.
-` + configPasswordHelp + `
 So for example if you wanted to configure a Google Drive remote but
 using remote authorization you would do this:
 
-    rclone config create mydrive drive config_is_local false
-`,
+    rclone config create mydrive drive config_is_local=false
+`, "|", "`") + configPasswordHelp,
 	RunE: func(command *cobra.Command, args []string) error {
 		cmd.CheckArgs(2, 256, command, args)
 		in, err := argsToMap(args[2:])
 		if err != nil {
 			return err
 		}
-		err = config.CreateRemote(context.Background(), args[0], args[1], in, configObscure, configNoObscure)
-		if err != nil {
-			return err
-		}
-		config.ShowRemote(args[0])
-		return nil
+		return doConfig(args[0], in, func(opts config.UpdateRemoteOpt) (*fs.ConfigOut, error) {
+			return config.CreateRemote(context.Background(), args[0], args[1], in, opts)
+		})
 	},
 }
 
+func doConfig(name string, in rc.Params, do func(config.UpdateRemoteOpt) (*fs.ConfigOut, error)) error {
+	out, err := do(updateRemoteOpt)
+	if err != nil {
+		return err
+	}
+	if !(updateRemoteOpt.NonInteractive || updateRemoteOpt.Continue) {
+		config.ShowRemote(name)
+	} else {
+		if out == nil {
+			out = &fs.ConfigOut{}
+		}
+		outBytes, err := json.MarshalIndent(out, "", "\t")
+		if err != nil {
+			return err
+		}
+		_, _ = os.Stdout.Write(outBytes)
+		_, _ = os.Stdout.WriteString("\n")
+	}
+	return nil
+}
+
 func init() {
 	for _, cmdFlags := range []*pflag.FlagSet{configCreateCommand.Flags(), configUpdateCommand.Flags()} {
-		flags.BoolVarP(cmdFlags, &configObscure, "obscure", "", false, "Force any passwords to be obscured.")
-		flags.BoolVarP(cmdFlags, &configNoObscure, "no-obscure", "", false, "Force any passwords not to be obscured.")
+		flags.BoolVarP(cmdFlags, &updateRemoteOpt.Obscure, "obscure", "", false, "Force any passwords to be obscured.")
+		flags.BoolVarP(cmdFlags, &updateRemoteOpt.NoObscure, "no-obscure", "", false, "Force any passwords not to be obscured.")
+		flags.BoolVarP(cmdFlags, &updateRemoteOpt.NonInteractive, "non-interactive", "", false, "Don't interact with user and return questions.")
+		flags.BoolVarP(cmdFlags, &updateRemoteOpt.Continue, "continue", "", false, "Continue the configuration process with an answer.")
+		flags.BoolVarP(cmdFlags, &updateRemoteOpt.All, "all", "", false, "Ask the full set of config questions.")
+		flags.StringVarP(cmdFlags, &updateRemoteOpt.State, "state", "", "", "State - use with --continue.")
+		flags.StringVarP(cmdFlags, &updateRemoteOpt.Result, "result", "", "", "Result - use with --continue.")
 	}
 }
 
 var configUpdateCommand = &cobra.Command{
 	Use:   "update `name` [`key` `value`]+",
 	Short: `Update options in an existing remote.`,
-	Long: `
+	Long: strings.ReplaceAll(`
 Update an existing remote's options. The options should be passed in
-in pairs of ` + "`key` `value`" + `.
+pairs of |key| |value| or as |key=value|.
 
 For example to update the env_auth field of a remote of name myremote
 you would do:
 
-    rclone config update myremote swift env_auth true
-` + configPasswordHelp + `
+    rclone config update myremote env_auth true
+    rclone config update myremote env_auth=true
 
 If the remote uses OAuth the token will be updated, if you don't
 require this add an extra parameter thus:
 
-    rclone config update myremote swift env_auth true config_refresh_token false
-`,
+    rclone config update myremote env_auth=true config_refresh_token=false
+`, "|", "`") + configPasswordHelp,
 	RunE: func(command *cobra.Command, args []string) error {
-		cmd.CheckArgs(3, 256, command, args)
+		cmd.CheckArgs(1, 256, command, args)
 		in, err := argsToMap(args[1:])
 		if err != nil {
 			return err
 		}
-		err = config.UpdateRemote(context.Background(), args[0], in, configObscure, configNoObscure)
-		if err != nil {
-			return err
-		}
-		config.ShowRemote(args[0])
-		return nil
+		return doConfig(args[0], in, func(opts config.UpdateRemoteOpt) (*fs.ConfigOut, error) {
+			return config.UpdateRemote(context.Background(), args[0], in, opts)
+		})
 	},
 }
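To make the question-and-answer loop concrete, here is a minimal sketch of a program driving the non-interactive protocol by shelling out to rclone. The JSON field names follow the blob documented above; the remote name and parameters are hypothetical, errors are only minimally handled, and every question is answered with its default:

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"os/exec"
    )

    // Just the fields this sketch needs from rclone's JSON reply.
    type configOut struct {
    	State  string
    	Error  string
    	Option *struct {
    		Name    string
    		Help    string
    		Default interface{}
    	}
    }

    func main() {
    	out, err := exec.Command("rclone", "config", "create",
    		"myremote", "alias", "remote=/tmp", "--non-interactive").Output()
    	for {
    		if err != nil {
    			fmt.Println("rclone failed:", err)
    			return
    		}
    		var reply configOut
    		if err := json.Unmarshal(out, &reply); err != nil {
    			fmt.Println("bad JSON:", err)
    			return
    		}
    		if reply.Error != "" {
    			fmt.Println("error from rclone:", reply.Error)
    		}
    		if reply.State == "" { // empty State marks the end of the process
    			fmt.Println("config complete")
    			return
    		}
    		fmt.Println("question:", reply.Option.Help)
    		// A real UI would ask the user; this sketch takes the default.
    		answer := fmt.Sprint(reply.Option.Default)
    		out, err = exec.Command("rclone", "config", "update", "myremote",
    			"--continue", "--state", reply.State, "--result", answer).Output()
    	}
    }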
@@ -202,19 +300,21 @@ var configDeleteCommand = &cobra.Command{
 var configPasswordCommand = &cobra.Command{
 	Use:   "password `name` [`key` `value`]+",
 	Short: `Update password in an existing remote.`,
-	Long: `
+	Long: strings.ReplaceAll(`
 Update an existing remote's password. The password
-should be passed in pairs of ` + "`key` `value`" + `.
+should be passed in pairs of |key| |password| or as |key=password|.
+The |password| should be passed in in clear (unobscured).
 
 For example to set password of a remote of name myremote you would do:
 
     rclone config password myremote fieldname mypassword
+    rclone config password myremote fieldname=mypassword
 
 This command is obsolete now that "config update" and "config create"
 both support obscuring passwords directly.
-`,
+`, "|", "`"),
 	RunE: func(command *cobra.Command, args []string) error {
-		cmd.CheckArgs(3, 256, command, args)
+		cmd.CheckArgs(1, 256, command, args)
 		in, err := argsToMap(args[1:])
 		if err != nil {
 			return err
@@ -228,16 +328,24 @@ both support obscuring passwords directly.
 	},
 }
 
-// This takes a list of arguments in key value key value form and
-// converts it into a map
+// This takes a list of arguments in key value key value form, or
+// key=value key=value form and converts it into a map
 func argsToMap(args []string) (out rc.Params, err error) {
-	if len(args)%2 != 0 {
-		return nil, errors.New("found key without value")
-	}
 	out = rc.Params{}
 	// Set the config
-	for i := 0; i < len(args); i += 2 {
-		out[args[i]] = args[i+1]
+	for i := 0; i < len(args); i++ {
+		key := args[i]
+		equals := strings.IndexRune(key, '=')
+		var value string
+		if equals >= 0 {
+			key, value = key[:equals], key[equals+1:]
+		} else {
+			i++
+			if i >= len(args) {
+				return nil, errors.New("found key without value")
+			}
+			value = args[i]
+		}
+		out[key] = value
 	}
 	return out, nil
 }
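The rewritten parser accepts both argument styles, even mixed in one call; for instance, both of the following should yield the same map (a sketch of expected behaviour, not a test from the source):

    // Both produce rc.Params{"env_auth": "true", "user": "alice"}.
    argsToMap([]string{"env_auth", "true", "user=alice"})
    argsToMap([]string{"env_auth=true", "user", "alice"})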
@@ -255,15 +363,11 @@ This normally means going through the interactive oauth flow again.
 	RunE: func(command *cobra.Command, args []string) error {
 		ctx := context.Background()
 		cmd.CheckArgs(1, 1, command, args)
-		fsInfo, configName, _, config, err := fs.ConfigFs(args[0])
+		fsInfo, configName, _, m, err := fs.ConfigFs(args[0])
 		if err != nil {
 			return err
 		}
-		if fsInfo.Config == nil {
-			return errors.Errorf("%s: doesn't support Reconnect", configName)
-		}
-		fsInfo.Config(ctx, configName, config)
-		return nil
+		return config.PostConfig(ctx, configName, m, fsInfo)
 	},
 }
Some files were not shown because too many files have changed in this diff.