Mirror of https://github.com/rclone/rclone.git (synced 2025-12-06 00:03:32 +00:00)

Compare commits: v1.55.1 ... fix-sftp-d (255 commits)
Commits in this comparison, by SHA1:

```
97ade36d8c 6545755758 c86a55c798 1d280081d4 f48cb5985f
55e766f4e8 63a24255f8 bc74f0621e f39a08c9d7 675548070d
37ff05a5fa c67c1ab4ee 76f8095bc5 f646cd0a2a d38f6bb0ab
11d86c74b2 feb6046a8a 807102ada2 770b3496a1 da36ce08e4
8652cfe575 94b1439299 97c9e55ddb c0b2832509 7436768d62
55153403aa daf449b5f2 221dfc3882 aab29353d1 c24504b793
6338d0026e ba836d45ff 367cf984af 6b7d7d0441 cf19073ac9
ba5c559fec abb8fe8ba1 765af387e6 d05cf6aba8 76a3fef24d
b40d9bd4c4 4680c0776d fb305b5976 5e91b93e59 58c99427b3
fee0abf513 40024990b7 04aa6969a4 d2050523de 1cc6dd349e
721bae11c3 b439199578 0bfd6f793b 76ea716abf e635f4c0be
0cb973f127 96ace599a8 80bccacd83 3349b055f5 bef0c23e00
84201ed891 04608428bf 6aaa06d7be e53bad5353 f5397246eb
b8b73f2656 96b67ce0ec e2beeffd76 30b949642d 92b3518c78
062919e08c 654f5309b0 318fa4472b 5104e24153 9d87a5192d
29f967dba3 1f846c18d4 41f561bf26 df60e6323c 58006a925a
ee2fac1855 2188fe38e5 b5f8f0973b 85b8ba9469 04a1f673f0
0574ebf44a 22e86ce335 c9fce20249 5b6f637461 07f2f3a62e
6dc190ec93 71f75a1d95 1b44035e45 054b467f32 23da913d03
c0cda087a8 1773717a47 04308dcaa1 06f27384dd 82f1f7d2c4
6555d3eb33 03229cf394 f572bf7829 f593558dc2 08040a57b0
2fa7a3c0fb 798d1293df 75c417ad93 5ee646f264 4a4aca4da7
2e4b65f888 77cda6773c dbc5167281 635d1e10ae 296ceadda6
7ae2891252 99caf79ffe 095cf9e4be e57553930f f122808d86
94dbfa4ea6 6f2e525821 119bddc10b 28e9fd45cc 326f3b35ff
ce83228cb2 732bc08ced 6ef7178ee4 9ff6f48d74 532af77fd1
ab7dfe0c87 e489a101f6 35a86193b7 2833941da8 9e6c23d9af
8bef972262 0a968818f6 c2ac353183 773da395fb 9e8cd6bff9
5d2e327b6f 77221d7528 1971c1ef87 7e7dbe16c2 002d323c94
4ad62ec016 95ee14bb2c 88aabd1f71 34627c5c7e e33303df94
665eceaec3 ba09ee18bb 62bf63d36f f38c262471 5db88fed2b
316e65589b 4401d180aa 9ccd870267 16d1da2c1e 00a0ee1899
b78c9a65fa ef3c350686 742af80972 08a2df51be 2925e1384c
2ec0c8d45f 98579608ec a1a41aa0c1 f8d56bebaf 5d799431a7
8f23cae1c0 964088affa f4068d406b 7511b6f4f1 e618ea83dd
34dc257c55 4cacf5d30c 0537791d14 4b1d28550a d27c35ee4a
ffec0d4f03 89daa9efd1 ee502a757f 386acaa110 efdee3a5fe
5d85e6bc9c 4a9469a3dc f8884a7200 2a40f00077 9799fdbae2
492504a601 0c03a7fead 7afb4487ef b9d0ed4f5c baa4c039a0
31a8211afa 3544e09e95 b456be4303 3e96752079 4a5cbf2a19
dcd4edc9f5 7f5e347d94 040677ab5b 6366d3dfc5 60d376c323
7b1ca716bf d8711cf7f9 cd69f9e6e8 a737ff21af ad9aa693a3
964c3e0732 a46a3c0811 60dcafe04d 813bf029d4 f2d3264054
23a0d4a1e6 b96ebfc40b 3fe2aaf96c c163e6b250 c1492cfa28
38a8071a58 8c68a76a4a e7b736f8ca cb30a8c80e 629a3eeca2
f52ae75a51 9d5c5bf7ab 53573b4a09 3622e064f5 6d28ea7ab5
b9fd02039b 1a41c930f3 ddb7eb6e0a c114695a66 fcba51557f
9393225a1d 3d3ff61f74 d98f192425 54771e4402 dc286529bc
7dc7c021db fe1aa13069 5fa8e7d957 9db7c51eaa 3859fe2f52
0caf417779 9eab258ffb 7df57cd625 1fd9b483c8 93353c431b
886dfd23e2 116a8021bb 9e2fbe0f1a 6d65d116df edaeb51ea9
```
.github/ISSUE_TEMPLATE/Bug.md (vendored, 40 lines changed)

@@ -5,19 +5,31 @@ about: Report a problem with rclone

<!--

Welcome :-) We understand you are having a problem with rclone; we want to help you with that!
We understand you are having a problem with rclone; we want to help you with that!

If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:
**STOP and READ**
**YOUR POST WILL BE REMOVED IF IT IS LOW QUALITY**:
Please show the effort you've put in to solving the problem and please be specific.
People are volunteering their time to help! Low effort posts are not likely to get good answers!

If you think you might have found a bug, try to replicate it with the latest beta (or stable).
The update instructions are available at https://rclone.org/commands/rclone_selfupdate/

If you can still replicate it or just got a question then please use the rclone forum:

https://forum.rclone.org/

instead of filing an issue for a quick response.
for a quick response instead of filing an issue on this repo.

If you think you might have found a bug, please can you try to replicate it with the latest beta?
If nothing else helps, then please fill in the info below which helps us help you.

https://beta.rclone.org/

If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)
**DO NOT REDACT** any information except passwords/keys/personal info.

You should use 3 backticks to begin and end your paste to make it readable.

Make sure to include a log obtained with '-vv'.

You can also use '-vv --log-file bug.log' and a service such as https://pastebin.com or https://gist.github.com/

Thank you

@@ -25,6 +37,10 @@ The Rclone Developers

-->

#### The associated forum post URL from `https://forum.rclone.org`

#### What is the problem you are having with rclone?

@@ -37,7 +53,7 @@ The Rclone Developers

#### Which cloud storage system are you using? (e.g. Google Drive)
#### Which cloud storage system are you using? (e.g. Google Drive)

@@ -48,3 +64,11 @@ The Rclone Developers

#### A log from the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)

<!--- Please keep the note below for others who read your bug report. -->

#### How to use GitHub

* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
* Subscribe to receive notifications on status change and new comments.
.github/ISSUE_TEMPLATE/Feature.md (vendored, 23 lines changed)

@@ -7,12 +7,16 @@ about: Suggest a new feature or enhancement for rclone

Welcome :-)

So you've got an idea to improve rclone? We love that! You'll be glad to hear we've incorporated hundreds of ideas from contributors already.
So you've got an idea to improve rclone? We love that!
You'll be glad to hear we've incorporated hundreds of ideas from contributors already.

Here is a checklist of things to do:
Probably the latest beta (or stable) release has your feature, so try to update your rclone.
The update instructions are available at https://rclone.org/commands/rclone_selfupdate/

1. Please search the old issues first for your idea and +1 or comment on an existing issue if possible.
2. Discuss on the forum first: https://forum.rclone.org/
If it still isn't there, here is a checklist of things to do:

1. Search the old issues for your idea and +1 or comment on an existing issue if possible.
2. Discuss on the forum: https://forum.rclone.org/
3. Make a feature request issue (this is the right place!).
4. Be prepared to get involved making the feature :-)

@@ -22,6 +26,9 @@ The Rclone Developers

-->

#### The associated forum post URL from `https://forum.rclone.org`

#### What is your current rclone version (output from `rclone version`)?

@@ -34,3 +41,11 @@ The Rclone Developers

#### How do you think rclone should be changed to solve that?

<!--- Please keep the note below for others who read your feature request. -->

#### How to use GitHub

* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
* Subscribe to receive notifications on status change and new comments.
.github/workflows/build.yml (vendored, 27 lines changed)

```
@@ -12,9 +12,15 @@ on:
    tags:
      - '*'
  pull_request:
  workflow_dispatch:
    inputs:
      manual:
        required: true
        default: true

jobs:
  build:
    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
    timeout-minutes: 60
    strategy:
      fail-fast: false

@@ -30,6 +36,7 @@ jobs:
            check: true
            quicktest: true
            racequicktest: true
            librclonetest: true
            deploy: true

          - job_name: mac_amd64

@@ -187,6 +194,14 @@ jobs:
          make racequicktest
        if: matrix.racequicktest

      - name: Run librclone tests
        shell: bash
        run: |
          make -C librclone/ctest test
          make -C librclone/ctest clean
          librclone/python/test_rclone.py
        if: matrix.librclonetest

      - name: Code quality test
        shell: bash
        run: |

@@ -214,6 +229,7 @@ jobs:
        if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'

  android:
    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
    timeout-minutes: 30
    name: "android-all"
    runs-on: ubuntu-latest

@@ -221,6 +237,8 @@ jobs:
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      # Upgrade together with NDK version
      - name: Set up Go 1.14

@@ -249,6 +267,15 @@ jobs:
        run: |
          make

      - name: install gomobile
        run: |
          go get golang.org/x/mobile/cmd/gobind
          go get golang.org/x/mobile/cmd/gomobile
          env PATH=$PATH:~/go/bin gomobile init

      - name: arm-v7a gomobile build
        run: env PATH=$PATH:~/go/bin gomobile bind -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile

      - name: arm-v7a Set environment variables
        shell: bash
        run: |
```
```
@@ -7,6 +7,7 @@ on:

jobs:
  build:
    if: github.repository == 'rclone/rclone'
    runs-on: ubuntu-latest
    name: Build image job
    steps:

@@ -6,6 +6,7 @@ on:

jobs:
  build:
    if: github.repository == 'rclone/rclone'
    runs-on: ubuntu-latest
    name: Build image job
    steps:

@@ -31,3 +32,40 @@ jobs:
          publish: true
          dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
          dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}

  build_docker_volume_plugin:
    if: github.repository == 'rclone/rclone'
    needs: build
    runs-on: ubuntu-latest
    name: Build and publish docker volume plugin
    steps:
      - name: Checkout master
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: Set plugin parameters
        shell: bash
        run: |
          GITHUB_REF=${{ github.ref }}

          PLUGIN_IMAGE_USER=rclone
          PLUGIN_IMAGE_NAME=docker-volume-rclone
          PLUGIN_IMAGE_TAG=${GITHUB_REF#refs/tags/}
          PLUGIN_IMAGE=${PLUGIN_IMAGE_USER}/${PLUGIN_IMAGE_NAME}:${PLUGIN_IMAGE_TAG}
          PLUGIN_IMAGE_LATEST=${PLUGIN_IMAGE_USER}/${PLUGIN_IMAGE_NAME}:latest

          echo "PLUGIN_IMAGE_USER=${PLUGIN_IMAGE_USER}" >> $GITHUB_ENV
          echo "PLUGIN_IMAGE_NAME=${PLUGIN_IMAGE_NAME}" >> $GITHUB_ENV
          echo "PLUGIN_IMAGE_TAG=${PLUGIN_IMAGE_TAG}" >> $GITHUB_ENV
          echo "PLUGIN_IMAGE=${PLUGIN_IMAGE}" >> $GITHUB_ENV
          echo "PLUGIN_IMAGE_LATEST=${PLUGIN_IMAGE_LATEST}" >> $GITHUB_ENV
      - name: Build image
        shell: bash
        run: |
          make docker-plugin
      - name: Push image
        shell: bash
        run: |
          docker login -u ${{ secrets.DOCKER_HUB_USER }} -p ${{ secrets.DOCKER_HUB_PASSWORD }}
          make docker-plugin-push PLUGIN_IMAGE=${PLUGIN_IMAGE}
          make docker-plugin-push PLUGIN_IMAGE=${PLUGIN_IMAGE_LATEST}
```
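The `${GITHUB_REF#refs/tags/}` expansion above derives the plugin tag by stripping the `refs/tags/` prefix from the pushed ref. For readers less used to shell parameter expansion, here is an equivalent in Go (a hypothetical helper for illustration, not part of the workflow):

```go
package main

import (
	"fmt"
	"strings"
)

// pluginTag mirrors the shell expansion ${GITHUB_REF#refs/tags/}:
// it strips a leading "refs/tags/" and leaves other refs untouched.
func pluginTag(githubRef string) string {
	return strings.TrimPrefix(githubRef, "refs/tags/")
}

func main() {
	fmt.Println(pluginTag("refs/tags/v1.56.0")) // v1.56.0
	fmt.Println(pluginTag("refs/heads/master")) // refs/heads/master (unchanged)
}
```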
.gitignore (vendored, 3 lines changed)

```
@@ -11,3 +11,6 @@ rclone.iml
*.log
*.iml
fuzz-build.zip
*.orig
*.rej
Thumbs.db
```
CONTRIBUTING.md (158 lines changed)

@@ -12,94 +12,162 @@ When filing an issue, please include the following information if
possible as well as a description of the problem. Make sure you test
with the [latest beta of rclone](https://beta.rclone.org/):

* Rclone version (e.g. output from `rclone -V`)
* Which OS you are using and how many bits (e.g. Windows 7, 64 bit)
* Rclone version (e.g. output from `rclone version`)
* Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
* The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
* A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
  * if the log contains secrets then edit the file with a text editor first to obscure them

## Submitting a pull request ##
## Submitting a new feature or bug fix ##

If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via GitHub.

If it is a big feature then make an issue first so it can be discussed.
If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues) first so it can be discussed.

You'll need a Go environment set up with GOPATH set. See [the Go
getting started docs](https://golang.org/doc/install) for more info.

First in your web browser press the fork button on [rclone's GitHub
To prepare your pull request first press the fork button on [rclone's GitHub
page](https://github.com/rclone/rclone).

Now in your terminal
Then [install Git](https://git-scm.com/downloads) and set your public contribution [name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git) and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).

    go get -u github.com/rclone/rclone
    cd $GOPATH/src/github.com/rclone/rclone

Next open your terminal, change directory to your preferred folder and initialise your local rclone project:

    git clone https://github.com/rclone/rclone.git
    cd rclone
    git remote rename origin upstream
    # if you have SSH keys setup in your GitHub account:
    git remote add origin git@github.com:YOURUSER/rclone.git
    # otherwise:
    git remote add origin https://github.com/YOURUSER/rclone.git

Make a branch to add your new feature
Note that most of the terminal commands in the rest of this guide must be executed from the rclone folder created above.

Now [install Go](https://golang.org/doc/install) and verify your installation:

    go version

Great, you can now compile and execute your own version of rclone:

    go build
    ./rclone version

Finally make a branch to add your new feature

    git checkout -b my-new-feature

And get hacking.

When ready - run the unit tests for the code you changed
You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins) and a quick view on the rclone [code organisation](#code-organisation).

When ready - test the affected functionality and run the unit tests for the code you changed

    cd folder/with/changed/files
    go test -v

Note that you may need to make a test remote, e.g. `TestSwift` for some
of the unit tests.

Note the top level Makefile targets

* make check
* make test

Both of these will be run by Travis when you make a pull request but
you can do this yourself locally too. These require some extra go
packages which you can install with

* make build_dep

This is typically enough if you made a simple bug fix, otherwise please read the rclone [testing](#testing) section too.

Make sure you

* Add [unit tests](#testing) for a new feature.
* Add [documentation](#writing-documentation) for a new feature.
* Follow the [commit message guidelines](#commit-messages).
* Add [unit tests](#testing) for a new feature
* squash commits down to one per feature
* rebase to master with `git rebase master`
* [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).

When you are done with that
When you are done with that push your changes to Github:

    git push -u origin my-new-feature

Go to the GitHub website and click [Create pull
and open the GitHub website to [create your pull
request](https://help.github.com/articles/creating-a-pull-request/).

You patch will get reviewed and you might get asked to fix some stuff.
Your changes will then get reviewed and you might get asked to fix some stuff. If so, then make the changes in the same branch, commit and push your updates to GitHub.

If so, then make the changes in the same branch, squash the commits (make multiple commits one commit) by running:
```
git log # See how many commits you want to squash
git reset --soft HEAD~2 # This squashes the 2 latest commits together.
git status # Check what will happen, if you made a mistake resetting, you can run git reset 'HEAD@{1}' to undo.
git commit # Add a new commit message.
git push --force # Push the squashed commit to your GitHub repo.
# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also recommends wizardzines.com
```
You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).

## CI for your fork ##
## Using Git and Github ##

### Committing your changes ###

Follow the guideline for [commit messages](#commit-messages) and then:

    git checkout my-new-feature # To switch to your branch
    git status # To see the new and changed files
    git add FILENAME # To select FILENAME for the commit
    git status # To verify the changes to be committed
    git commit # To do the commit
    git log # To verify the commit. Use q to quit the log

You can modify the message or changes in the latest commit using:

    git commit --amend

If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).

### Replacing your previously pushed commits ###

Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.

Your previously pushed commits are replaced by:

    git push --force origin my-new-feature

### Basing your changes on the latest master ###

To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):

    git checkout master
    git fetch upstream
    git merge --ff-only
    git push origin --follow-tags # optional update of your fork in GitHub
    git checkout my-new-feature
    git rebase master

If you rebase commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).

### Squashing your commits ###

To combine your commits into one commit:

    git log # To count the commits to squash, e.g. the last 2
    git reset --soft HEAD~2 # To undo the 2 latest commits
    git status # To check everything is as expected

If everything is fine, then make the new combined commit:

    git commit # To commit the undone commits as one

otherwise, you may roll back using:

    git reflog # To check that HEAD{1} is your previous state
    git reset --soft 'HEAD@{1}' # To roll back to your previous state

If you squash commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).

Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.

### GitHub Continuous Integration ###

rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.

## Testing ##

### Quick testing ###

rclone's tests are run from the go testing framework, so at the top
level you can run this to run all the tests.

    go test -v ./...

You can also use `make`, if supported by your platform

    make quicktest

The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.

### Backend testing ###

rclone contains a mixture of unit tests and integration tests.
Because it is difficult (and in some respects pointless) to test cloud
storage systems by mocking all their interfaces, rclone unit tests can

@@ -133,12 +201,19 @@ project root:

    go install github.com/rclone/rclone/fstest/test_all
    test_all -backend drive

### Full integration testing ###

If you want to run all the integration tests against all the remotes,
then change into the project root and run

    make check
    make test

This command is run daily on the integration test server. You can
The commands may require some extra go packages which you can install with

    make build_dep

The full integration tests are run daily on the integration test server. You can
find the results at https://pub.rclone.org/integration-tests/

## Code Organisation ##

@@ -153,6 +228,7 @@ with modules beneath.

* cmd - the rclone commands
  * all - import this to load all the commands
  * ...commands
* cmdtest - end-to-end tests of commands, flags, environment variables,...
* docs - the documentation and website
  * content - adjust these docs only - everything else is autogenerated
  * command - these are auto generated - edit the corresponding .go file
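The guide asks for unit tests with each new feature; rclone's own tests (like the b2 ones further down this diff) are typically table-driven. A minimal sketch of that shape, with illustrative names only:

```go
package example

import "testing"

// Add is the function under test (an illustrative stand-in).
func Add(a, b int) int { return a + b }

// TestAdd is a table-driven test in the style used across the rclone codebase.
func TestAdd(t *testing.T) {
	for _, test := range []struct {
		a, b, want int
	}{
		{1, 2, 3},
		{0, 0, 0},
		{-1, 1, 0},
	} {
		if got := Add(test.a, test.b); got != test.want {
			t.Errorf("Add(%d, %d) = %d, want %d", test.a, test.b, got, test.want)
		}
	}
}
```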
MANUAL.html (generated, 2451 lines changed): file diff suppressed because it is too large.
MANUAL.txt (generated, 3139 lines changed): file diff suppressed because it is too large.
Makefile (33 lines changed)

```
@@ -256,3 +256,36 @@ startstable:

winzip:
	zip -9 rclone-$(TAG).zip rclone.exe

# docker volume plugin
PLUGIN_IMAGE_USER ?= rclone
PLUGIN_IMAGE_TAG ?= latest
PLUGIN_IMAGE_NAME ?= docker-volume-rclone
PLUGIN_IMAGE ?= $(PLUGIN_IMAGE_USER)/$(PLUGIN_IMAGE_NAME):$(PLUGIN_IMAGE_TAG)

PLUGIN_BASE_IMAGE := rclone/rclone:latest
PLUGIN_BUILD_DIR := ./build/docker-plugin
PLUGIN_CONTRIB_DIR := ./cmd/serve/docker/contrib/plugin
PLUGIN_CONFIG := $(PLUGIN_CONTRIB_DIR)/config.json
PLUGIN_DOCKERFILE := $(PLUGIN_CONTRIB_DIR)/Dockerfile
PLUGIN_CONTAINER := docker-volume-rclone-dev-$(shell date +'%Y%m%d-%H%M%S')

docker-plugin: docker-plugin-rootfs docker-plugin-create

docker-plugin-image: rclone
	docker build --no-cache --pull --build-arg BASE_IMAGE=${PLUGIN_BASE_IMAGE} -t ${PLUGIN_IMAGE} -f ${PLUGIN_DOCKERFILE} .

docker-plugin-rootfs: docker-plugin-image
	mkdir -p ${PLUGIN_BUILD_DIR}/rootfs
	docker create --name ${PLUGIN_CONTAINER} ${PLUGIN_IMAGE}
	docker export ${PLUGIN_CONTAINER} | tar -x -C ${PLUGIN_BUILD_DIR}/rootfs
	docker rm -vf ${PLUGIN_CONTAINER}
	cp ${PLUGIN_CONFIG} ${PLUGIN_BUILD_DIR}/config.json

docker-plugin-create:
	docker plugin rm -f ${PLUGIN_IMAGE} 2>/dev/null || true
	docker plugin create ${PLUGIN_IMAGE} ${PLUGIN_BUILD_DIR}

docker-plugin-push: docker-plugin-create
	docker plugin push ${PLUGIN_IMAGE}
	docker plugin rm ${PLUGIN_IMAGE}
```
@@ -62,6 +62,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and

* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)

@@ -87,7 +88,6 @@ Please see [the full list of all storage providers and their features](https://r

* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
* Optional transparent compression ([Compress](https://rclone.org/compress/))
* Optional encryption ([Crypt](https://rclone.org/crypt/))
* Optional cache ([Cache](https://rclone.org/cache/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
* Multi-threaded downloads to local disk
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna
```
@@ -20,7 +20,7 @@ var (
)

func prepare(t *testing.T, root string) {
	configfile.LoadConfig(context.Background())
	configfile.Install()

	// Configure the remote
	config.FileSet(remoteName, "type", "alias")
```
```
@@ -41,6 +41,7 @@ import (
	_ "github.com/rclone/rclone/backend/swift"
	_ "github.com/rclone/rclone/backend/tardigrade"
	_ "github.com/rclone/rclone/backend/union"
	_ "github.com/rclone/rclone/backend/uptobox"
	_ "github.com/rclone/rclone/backend/webdav"
	_ "github.com/rclone/rclone/backend/yandex"
	_ "github.com/rclone/rclone/backend/zoho"
```
```
@@ -16,7 +16,6 @@ import (
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
	"path"
	"strings"

@@ -70,11 +69,10 @@ func init() {
	Prefix:      "acd",
	Description: "Amazon Drive",
	NewFs:       NewFs,
	Config: func(ctx context.Context, name string, m configmap.Mapper) {
		err := oauthutil.Config(ctx, "amazon cloud drive", name, m, acdConfig, nil)
		if err != nil {
			log.Fatalf("Failed to configure token: %v", err)
		}
	Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
		return oauthutil.ConfigOut("", &oauthutil.Options{
			OAuth2Config: acdConfig,
		})
	},
	Options: append(oauthutil.SharedOptions, []fs.Option{{
		Name: "checkpoint",

@@ -83,16 +81,16 @@ func init() {
		Advanced: true,
	}, {
		Name: "upload_wait_per_gb",
		Help: `Additional time per GB to wait after a failed complete upload to see if it appears.
		Help: `Additional time per GiB to wait after a failed complete upload to see if it appears.

Sometimes Amazon Drive gives an error when a file has been fully
uploaded but the file appears anyway after a little while. This
happens sometimes for files over 1GB in size and nearly every time for
files bigger than 10GB. This parameter controls the time rclone waits
happens sometimes for files over 1 GiB in size and nearly every time for
files bigger than 10 GiB. This parameter controls the time rclone waits
for the file to appear.

The default value for this parameter is 3 minutes per GB, so by
default it will wait 3 minutes for every GB uploaded to see if the
The default value for this parameter is 3 minutes per GiB, so by
default it will wait 3 minutes for every GiB uploaded to see if the
file appears.

You can disable this feature by setting it to 0. This may cause

@@ -112,7 +110,7 @@ in this situation.`,

Files this size or more will be downloaded via their "tempLink". This
is to work around a problem with Amazon Drive which blocks downloads
of files bigger than about 10GB. The default for this is 9GB which
of files bigger than about 10 GiB. The default for this is 9 GiB which
shouldn't need to be changed.

To download files above this threshold, rclone requests a "tempLink"
```
```
@@ -47,8 +47,8 @@ const (
	timeFormatIn          = time.RFC3339
	timeFormatOut         = "2006-01-02T15:04:05.000000000Z07:00"
	storageDefaultBaseURL = "blob.core.windows.net"
	defaultChunkSize      = 4 * fs.MebiByte
	maxChunkSize          = 100 * fs.MebiByte
	defaultChunkSize      = 4 * fs.Mebi
	maxChunkSize          = 100 * fs.Mebi
	uploadConcurrency     = 4
	defaultAccessTier     = azblob.AccessTierNone
	maxTryTimeout         = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)

@@ -80,13 +80,12 @@ func init() {

Leave blank normally. Needed only if you want to use a service principal instead of interactive login.

    $ az sp create-for-rbac --name "<name>" \
    $ az ad sp create-for-rbac --name "<name>" \
      --role "Storage Blob Data Owner" \
      --scopes "/subscriptions/<subscription>/resourceGroups/<resource-group>/providers/Microsoft.Storage/storageAccounts/<storage-account>/blobServices/default/containers/<container>" \
      > azure-principal.json

See [Use Azure CLI to assign an Azure role for access to blob and queue data](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli)
for more details.
See ["Create an Azure service principal"](https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli) and ["Assign an Azure role for access to blob data"](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli) pages for more details.
`,
}, {
	Name: "key",

@@ -129,11 +128,11 @@ msi_client_id, or msi_mi_res_id parameters.`,
	Advanced: true,
}, {
	Name: "upload_cutoff",
	Help: "Cutoff for switching to chunked upload (<= 256MB). (Deprecated)",
	Help: "Cutoff for switching to chunked upload (<= 256 MiB). (Deprecated)",
	Advanced: true,
}, {
	Name: "chunk_size",
	Help: `Upload chunk size (<= 100MB).
	Help: `Upload chunk size (<= 100 MiB).

Note that this is stored in memory and there may be up to
"--transfers" chunks stored at once in memory.`,

@@ -404,7 +403,7 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
}

func checkUploadChunkSize(cs fs.SizeSuffix) error {
	const minChunkSize = fs.Byte
	const minChunkSize = fs.SizeSuffixBase
	if cs < minChunkSize {
		return errors.Errorf("%s is less than %s", cs, minChunkSize)
	}
```
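The MebiByte/GibiByte to Mebi/Gibi renames and the MB to MiB help-text edits are about binary (IEC) units: a MiB is 2^20 bytes where an MB is 10^6. A self-contained sketch of the values involved (it does not import rclone's fs package; the constants mirror what fs.Kibi, fs.Mebi and fs.Gibi presumably denote):

```go
package main

import "fmt"

// Binary (IEC) sizes, assumed to match rclone's fs.Kibi/Mebi/Gibi.
const (
	Kibi = 1 << 10 // 1 KiB = 1024 bytes
	Mebi = 1 << 20 // 1 MiB = 1048576 bytes
	Gibi = 1 << 30 // 1 GiB = 1073741824 bytes
)

func main() {
	fmt.Println(4 * Mebi)   // 4194304: the azureblob default chunk size in bytes
	fmt.Println(100 * Mebi) // 104857600: the azureblob maximum chunk size in bytes
	fmt.Println(1 * Gibi)   // 1073741824
}
```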
```
@@ -2,12 +2,11 @@ package api

import (
	"fmt"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/lib/version"
)

// Error describes a B2 error response

@@ -63,16 +62,17 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
	return nil
}

const versionFormat = "-v2006-01-02-150405.000"
// HasVersion returns true if it looks like the passed filename has a timestamp on it.
//
// Note that the passed filename's timestamp may still be invalid even if this
// function returns true.
func HasVersion(remote string) bool {
	return version.Match(remote)
}

// AddVersion adds the timestamp as a version string into the filename passed in.
func (t Timestamp) AddVersion(remote string) string {
	ext := path.Ext(remote)
	base := remote[:len(remote)-len(ext)]
	s := time.Time(t).Format(versionFormat)
	// Replace the '.' with a '-'
	s = strings.Replace(s, ".", "-", -1)
	return base + s + ext
	return version.Add(remote, time.Time(t))
}

// RemoveVersion removes the timestamp from a filename as a version string.

@@ -80,24 +80,9 @@ func (t Timestamp) AddVersion(remote string) string {
// It returns the new file name and a timestamp, or the old filename
// and a zero timestamp.
func RemoveVersion(remote string) (t Timestamp, newRemote string) {
	newRemote = remote
	ext := path.Ext(remote)
	base := remote[:len(remote)-len(ext)]
	if len(base) < len(versionFormat) {
		return
	}
	versionStart := len(base) - len(versionFormat)
	// Check it ends in -xxx
	if base[len(base)-4] != '-' {
		return
	}
	// Replace with .xxx for parsing
	base = base[:len(base)-4] + "." + base[len(base)-3:]
	newT, err := time.Parse(versionFormat, base[versionStart:])
	if err != nil {
		return
	}
	return Timestamp(newT), base[:versionStart] + ext
	time, newRemote := version.Remove(remote)
	t = Timestamp(time)
	return
}

// IsZero returns true if the timestamp is uninitialized
```
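These hunks fold b2's filename-version logic into the shared lib/version package, used here as version.Match, version.Add and version.Remove. A hedged sketch of the Add behaviour implied by the removed code and by the test expectations in the next hunk (a reimplementation for illustration, not the library itself):

```go
package main

import (
	"fmt"
	"path"
	"strings"
	"time"
)

// versionFormat is the timestamp layout the old b2 code used; lib/version
// presumably keeps the same on-disk format, since the tests below still
// expect names like "potato-v1970-01-01-010101-123.txt".
const versionFormat = "-v2006-01-02-150405.000"

// addVersion sketches version.Add: the timestamp goes before the extension,
// with the '.' of the milliseconds turned into a '-'.
func addVersion(remote string, t time.Time) string {
	ext := path.Ext(remote)
	base := strings.TrimSuffix(remote, ext)
	s := strings.Replace(t.UTC().Format(versionFormat), ".", "-", -1)
	return base + s + ext
}

func main() {
	t0 := time.Date(1970, 1, 1, 1, 1, 1, 123456789, time.UTC)
	fmt.Println(addVersion("potato.txt", t0)) // potato-v1970-01-01-010101-123.txt
}
```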
```
@@ -13,7 +13,6 @@ import (

var (
	emptyT api.Timestamp
	t0     = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123456789Z"))
	t0r    = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123000000Z"))
	t1     = api.Timestamp(fstest.Time("2001-02-03T04:05:06.123000000Z"))
)

@@ -36,40 +35,6 @@ func TestTimestampUnmarshalJSON(t *testing.T) {
	assert.Equal(t, (time.Time)(t1), (time.Time)(tActual))
}

func TestTimestampAddVersion(t *testing.T) {
	for _, test := range []struct {
		t        api.Timestamp
		in       string
		expected string
	}{
		{t0, "potato.txt", "potato-v1970-01-01-010101-123.txt"},
		{t1, "potato", "potato-v2001-02-03-040506-123"},
		{t1, "", "-v2001-02-03-040506-123"},
	} {
		actual := test.t.AddVersion(test.in)
		assert.Equal(t, test.expected, actual, test.in)
	}
}

func TestTimestampRemoveVersion(t *testing.T) {
	for _, test := range []struct {
		in             string
		expectedT      api.Timestamp
		expectedRemote string
	}{
		{"potato.txt", emptyT, "potato.txt"},
		{"potato-v1970-01-01-010101-123.txt", t0r, "potato.txt"},
		{"potato-v2001-02-03-040506-123", t1, "potato"},
		{"-v2001-02-03-040506-123", t1, ""},
		{"potato-v2A01-02-03-040506-123", emptyT, "potato-v2A01-02-03-040506-123"},
		{"potato-v2001-02-03-040506=123", emptyT, "potato-v2001-02-03-040506=123"},
	} {
		actualT, actualRemote := api.RemoveVersion(test.in)
		assert.Equal(t, test.expectedT, actualT, test.in)
		assert.Equal(t, test.expectedRemote, actualRemote, test.in)
	}
}

func TestTimestampIsZero(t *testing.T) {
	assert.True(t, emptyT.IsZero())
	assert.False(t, t0.IsZero())
```
```
@@ -54,10 +54,10 @@ const (
	decayConstant       = 1 // bigger for slower decay, exponential
	maxParts            = 10000
	maxVersions         = 100 // maximum number of versions we search in --b2-versions mode
	minChunkSize        = 5 * fs.MebiByte
	defaultChunkSize    = 96 * fs.MebiByte
	defaultUploadCutoff = 200 * fs.MebiByte
	largeFileCopyCutoff = 4 * fs.GibiByte // 5E9 is the max
	minChunkSize        = 5 * fs.Mebi
	defaultChunkSize    = 96 * fs.Mebi
	defaultUploadCutoff = 200 * fs.Mebi
	largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
	memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
	memoryPoolUseMmap   = false
)

@@ -116,7 +116,7 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration

Files above this size will be uploaded in chunks of "--b2-chunk-size".

This value should be set no larger than 4.657GiB (== 5GB).`,
This value should be set no larger than 4.657 GiB (== 5 GB).`,
	Default:  defaultUploadCutoff,
	Advanced: true,
}, {

@@ -126,7 +126,7 @@ This value should be set no larger than 4.657GiB (== 5GB).`,

Any files larger than this that need to be server-side copied will be
copied in chunks of this size.

The minimum is 0 and the maximum is 4.6GB.`,
The minimum is 0 and the maximum is 4.6 GiB.`,
	Default:  largeFileCopyCutoff,
	Advanced: true,
}, {

@@ -1353,7 +1353,7 @@ func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string
	}
	var request = api.GetDownloadAuthorizationRequest{
		BucketID:               bucketID,
		FileNamePrefix:         f.opt.Enc.FromStandardPath(path.Join(f.root, remote)),
		FileNamePrefix:         f.opt.Enc.FromStandardPath(path.Join(f.rootDirectory, remote)),
		ValidDurationInSeconds: validDurationInSeconds,
	}
	var response api.GetDownloadAuthorizationResponse

@@ -230,14 +230,14 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
//
// The number of bytes in the file being uploaded. Note that
// this header is required; you cannot leave it out and just
// use chunked encoding. The minimum size of every part but
// the last one is 100MB.
// use chunked encoding. The minimum size of every part but
// the last one is 100 MB (100,000,000 bytes)
//
// X-Bz-Content-Sha1
//
// The SHA1 checksum of the this part of the file. B2 will
// check this when the part is uploaded, to make sure that the
// data arrived correctly. The same SHA1 checksum must be
// data arrived correctly. The same SHA1 checksum must be
// passed to b2_finish_large_file.
opts := rest.Opts{
	Method: "POST",
```
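The reworded b2 help text distinguishes 4.657 GiB from 5 GB: B2's limit is 5x10^9 decimal bytes, which is about 4.657 binary gibibytes. A quick check of the arithmetic:

```go
package main

import "fmt"

func main() {
	const limitBytes = 5e9 // B2's documented 5 GB (decimal) limit
	const gib = 1 << 30    // one GiB in bytes
	fmt.Printf("%.3f GiB\n", limitBytes/float64(gib)) // prints 4.657 GiB
}
```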
```
@@ -36,13 +36,13 @@ func (t *Time) UnmarshalJSON(data []byte) error {

// Error is returned from box when things go wrong
type Error struct {
	Type        string `json:"type"`
	Status      int    `json:"status"`
	Code        string `json:"code"`
	ContextInfo json.RawMessage
	HelpURL     string `json:"help_url"`
	Message     string `json:"message"`
	RequestID   string `json:"request_id"`
	Type        string          `json:"type"`
	Status      int             `json:"status"`
	Code        string          `json:"code"`
	ContextInfo json.RawMessage `json:"context_info"`
	HelpURL     string          `json:"help_url"`
	Message     string          `json:"message"`
	RequestID   string          `json:"request_id"`
}

// Error returns a string for the error and satisfies the error interface

@@ -132,6 +132,38 @@ type UploadFile struct {
	ContentModifiedAt Time `json:"content_modified_at"`
}

// PreUploadCheck is the request for upload preflight check
type PreUploadCheck struct {
	Name   string `json:"name"`
	Parent Parent `json:"parent"`
	Size   *int64 `json:"size,omitempty"`
}

// PreUploadCheckResponse is the response from upload preflight check
// if successful
type PreUploadCheckResponse struct {
	UploadToken string `json:"upload_token"`
	UploadURL   string `json:"upload_url"`
}

// PreUploadCheckConflict is returned in the ContextInfo error field
// from PreUploadCheck when the error code is "item_name_in_use"
type PreUploadCheckConflict struct {
	Conflicts struct {
		Type        string `json:"type"`
		ID          string `json:"id"`
		FileVersion struct {
			Type string `json:"type"`
			ID   string `json:"id"`
			Sha1 string `json:"sha1"`
		} `json:"file_version"`
		SequenceID string `json:"sequence_id"`
		Etag       string `json:"etag"`
		Sha1       string `json:"sha1"`
		Name       string `json:"name"`
	} `json:"conflicts"`
}

// UpdateFileModTime is used in Update File Info
type UpdateFileModTime struct {
	ContentModifiedAt Time `json:"content_modified_at"`
```
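The only functional change to the Error struct is the added `json:"context_info"` tag. encoding/json matches keys to exported field names case-insensitively, but the underscore means "context_info" never matches the field name ContextInfo, so without the tag the conflict payload consumed by preUploadCheck below would stay empty. A minimal demonstration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type withTag struct {
	ContextInfo json.RawMessage `json:"context_info"`
}

type withoutTag struct {
	ContextInfo json.RawMessage
}

func main() {
	data := []byte(`{"context_info":{"conflicts":{}}}`)

	var a withTag
	var b withoutTag
	_ = json.Unmarshal(data, &a)
	_ = json.Unmarshal(data, &b)

	fmt.Printf("with tag:    %s\n", a.ContextInfo) // {"conflicts":{}}
	fmt.Printf("without tag: %s\n", b.ContextInfo) // empty: "context_info" never matches "ContextInfo"
}
```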
```
@@ -17,7 +17,6 @@ import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"net/url"
	"path"

@@ -84,7 +83,7 @@ func init() {
	Name:        "box",
	Description: "Box",
	NewFs:       NewFs,
	Config: func(ctx context.Context, name string, m configmap.Mapper) {
	Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
		jsonFile, ok := m.Get("box_config_file")
		boxSubType, boxSubTypeOk := m.Get("box_sub_type")
		boxAccessToken, boxAccessTokenOk := m.Get("access_token")

@@ -93,15 +92,15 @@ func init() {
		if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
			err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
			if err != nil {
				log.Fatalf("Failed to configure token with jwt authentication: %v", err)
				return nil, errors.Wrap(err, "failed to configure token with jwt authentication")
			}
			// Else, if not using an access token, use oauth2
		} else if boxAccessToken == "" || !boxAccessTokenOk {
			err = oauthutil.Config(ctx, "box", name, m, oauthConfig, nil)
			if err != nil {
				log.Fatalf("Failed to configure token with oauth authentication: %v", err)
			}
			return oauthutil.ConfigOut("", &oauthutil.Options{
				OAuth2Config: oauthConfig,
			})
		}
		return nil, nil
	},
	Options: append(oauthutil.SharedOptions, []fs.Option{{
		Name: "root_folder_id",

@@ -126,7 +125,7 @@ func init() {
		}},
	}, {
		Name: "upload_cutoff",
		Help: "Cutoff for switching to multipart upload (>= 50MB).",
		Help: "Cutoff for switching to multipart upload (>= 50 MiB).",
		Default:  fs.SizeSuffix(defaultUploadCutoff),
		Advanced: true,
	}, {

@@ -157,15 +156,15 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
	jsonFile = env.ShellExpand(jsonFile)
	boxConfig, err := getBoxConfig(jsonFile)
	if err != nil {
		log.Fatalf("Failed to configure token: %v", err)
		return errors.Wrap(err, "get box config")
	}
	privateKey, err := getDecryptedPrivateKey(boxConfig)
	if err != nil {
		log.Fatalf("Failed to configure token: %v", err)
		return errors.Wrap(err, "get decrypted private key")
	}
	claims, err := getClaims(boxConfig, boxSubType)
	if err != nil {
		log.Fatalf("Failed to configure token: %v", err)
		return errors.Wrap(err, "get claims")
	}
	signingHeaders := getSigningHeaders(boxConfig)
	queryParams := getQueryParams(boxConfig)

@@ -686,22 +685,80 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
	return o, leaf, directoryID, nil
}

// preUploadCheck checks to see if a file can be uploaded
//
// It returns "", nil if the file is good to go
// It returns "ID", nil if the file must be updated
func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (ID string, err error) {
	check := api.PreUploadCheck{
		Name: f.opt.Enc.FromStandardName(leaf),
		Parent: api.Parent{
			ID: directoryID,
		},
	}
	if size >= 0 {
		check.Size = &size
	}
	opts := rest.Opts{
		Method: "OPTIONS",
		Path:   "/files/content/",
	}
	var result api.PreUploadCheckResponse
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, &check, &result)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		if apiErr, ok := err.(*api.Error); ok && apiErr.Code == "item_name_in_use" {
			var conflict api.PreUploadCheckConflict
			err = json.Unmarshal(apiErr.ContextInfo, &conflict)
			if err != nil {
				return "", errors.Wrap(err, "pre-upload check: JSON decode failed")
			}
			if conflict.Conflicts.Type != api.ItemTypeFile {
				return "", errors.Wrap(err, "pre-upload check: can't overwrite non file with file")
			}
			return conflict.Conflicts.ID, nil
		}
		return "", errors.Wrap(err, "pre-upload check")
	}
	return "", nil
}

// Put the object
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil)
	switch err {
	case nil:
		return existingObj, existingObj.Update(ctx, in, src, options...)
	case fs.ErrorObjectNotFound:
		// Not found so create it
		return f.PutUnchecked(ctx, in, src)
	default:
		// If directory doesn't exist, file doesn't exist so can upload
		remote := src.Remote()
		leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
		if err != nil {
			if err == fs.ErrorDirNotFound {
				return f.PutUnchecked(ctx, in, src, options...)
			}
			return nil, err
		}

		// Preflight check the upload, which returns the ID if the
		// object already exists
		ID, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
		if err != nil {
			return nil, err
		}
		if ID == "" {
			return f.PutUnchecked(ctx, in, src, options...)
		}

		// If object exists then create a skeleton one with just id
		o := &Object{
			fs:     f,
			remote: remote,
			id:     ID,
		}
		return o, o.Update(ctx, in, src, options...)
	}
}

// PutStream uploads to the remote path with the modTime given of indeterminate size

@@ -1228,7 +1285,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

// upload does a single non-multipart upload
//
// This is recommended for less than 50 MB of content
// This is recommended for less than 50 MiB of content
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time, options ...fs.OpenOption) (err error) {
	upload := api.UploadFile{
		Name: o.fs.opt.Enc.FromStandardName(leaf),
```
backend/cache/cache.go (vendored, 20 lines changed)

```
@@ -98,14 +98,14 @@ changed, any downloaded chunks will be invalid and cache-chunk-path
will need to be cleared or unexpected EOF errors will occur.`,
	Default: DefCacheChunkSize,
	Examples: []fs.OptionExample{{
		Value: "1m",
		Help:  "1MB",
		Value: "1M",
		Help:  "1 MiB",
	}, {
		Value: "5M",
		Help:  "5 MB",
		Help:  "5 MiB",
	}, {
		Value: "10M",
		Help:  "10 MB",
		Help:  "10 MiB",
	}},
}, {
	Name: "info_age",

@@ -132,13 +132,13 @@ oldest chunks until it goes under this value.`,
	Default: DefCacheTotalChunkSize,
	Examples: []fs.OptionExample{{
		Value: "500M",
		Help:  "500 MB",
		Help:  "500 MiB",
	}, {
		Value: "1G",
		Help:  "1 GB",
		Help:  "1 GiB",
	}, {
		Value: "10G",
		Help:  "10 GB",
		Help:  "10 GiB",
	}},
}, {
	Name: "db_path",

@@ -339,8 +339,14 @@ func parseRootPath(path string) (string, error) {
	return strings.Trim(path, "/"), nil
}

var warnDeprecated sync.Once

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
	warnDeprecated.Do(func() {
		fs.Logf(nil, "WARNING: Cache backend is deprecated and may be removed in future. Please use VFS instead.")
	})

	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
```
backend/cache/cache_internal_test.go (vendored, 2 lines changed)

```
@@ -836,7 +836,7 @@ func newRun() *run {
	if uploadDir == "" {
		r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
		if err != nil {
			log.Fatalf("Failed to create temp dir: %v", err)
			panic(fmt.Sprintf("Failed to create temp dir: %v", err))
		}
	} else {
		r.tmpUploadDir = uploadDir
```
```
@@ -155,7 +155,7 @@ Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
}, {
	Name:     "chunk_size",
	Advanced: false,
	Default:  fs.SizeSuffix(2147483648), // 2GB
	Default:  fs.SizeSuffix(2147483648), // 2 GiB
	Help:     `Files larger than chunk size will be split in chunks.`,
}, {
	Name: "name_format",

@@ -1448,7 +1448,7 @@ func (c *chunkingReader) dummyRead(in io.Reader, size int64) error {
	c.accountBytes(size)
	return nil
}
const bufLen = 1048576 // 1MB
const bufLen = 1048576 // 1 MiB
buf := make([]byte, bufLen)
for size > 0 {
	n := size

@@ -33,7 +33,7 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
	fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{
		ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
		Path:    fmt.Sprintf("chunker-upload-%dk", kilobytes),
		Size:    int64(kilobytes) * int64(fs.KibiByte),
		Size:    int64(kilobytes) * int64(fs.Kibi),
	})
})
}
```
```
@@ -36,7 +36,7 @@ import (

// Globals
const (
	initialChunkSize = 262144  // Initial and max sizes of chunks when reading parts of the file. Currently
	maxChunkSize     = 8388608 // at 256KB and 8 MB.
	maxChunkSize     = 8388608 // at 256 KiB and 8 MiB.

	bufferSize     = 8388608
	heuristicBytes = 1048576

@@ -53,7 +53,7 @@ const (
	Gzip = 2
)

var nameRegexp = regexp.MustCompile("^(.+?)\\.([A-Za-z0-9+_]{11})$")
var nameRegexp = regexp.MustCompile("^(.+?)\\.([A-Za-z0-9-_]{11})$")

// Register with Fs
func init() {
```
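The compress regex swaps '+' for '-' inside the 11-character suffix class, lining the pattern up with the base64url alphabet (RFC 4648 replaces '+' and '/' with '-' and '_'); 11 unpadded base64 characters are exactly what 8 bytes encode to. A sketch of that relationship (assuming the suffix is a RawURLEncoding of an 8-byte value, which this hunk alone does not show):

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// 8 bytes encode to 11 characters without padding: ceil(8*8/6) = 11.
	raw := []byte{0xfb, 0xef, 0xbe, 0xfb, 0xef, 0xbe, 0xfb, 0xef}

	std := base64.RawStdEncoding.EncodeToString(raw) // may contain '+' and '/'
	url := base64.RawURLEncoding.EncodeToString(raw) // uses '-' and '_' instead

	fmt.Println(len(url), std, url)
}
```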
@@ -12,12 +12,14 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/backend/crypt/pkcs7"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/lib/version"
|
||||
"github.com/rfjakob/eme"
|
||||
"golang.org/x/crypto/nacl/secretbox"
|
||||
"golang.org/x/crypto/scrypt"
|
||||
@@ -442,11 +444,32 @@ func (c *Cipher) encryptFileName(in string) string {
|
||||
if !c.dirNameEncrypt && i != (len(segments)-1) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Strip version string so that only the non-versioned part
|
||||
// of the file name gets encrypted/obfuscated
|
||||
hasVersion := false
|
||||
var t time.Time
|
||||
if i == (len(segments)-1) && version.Match(segments[i]) {
|
||||
var s string
|
||||
t, s = version.Remove(segments[i])
|
||||
// version.Remove can fail, in which case it returns segments[i]
|
||||
if s != segments[i] {
|
||||
segments[i] = s
|
||||
hasVersion = true
|
||||
}
|
||||
}
|
||||
|
||||
if c.mode == NameEncryptionStandard {
|
||||
segments[i] = c.encryptSegment(segments[i])
|
||||
} else {
|
||||
segments[i] = c.obfuscateSegment(segments[i])
|
||||
}
|
||||
|
||||
// Add back a version to the encrypted/obfuscated
|
||||
// file name, if we stripped it off earlier
|
||||
if hasVersion {
|
||||
segments[i] = version.Add(segments[i], t)
|
||||
}
|
||||
}
|
||||
return strings.Join(segments, "/")
|
||||
}

@@ -477,6 +500,21 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
        if !c.dirNameEncrypt && i != (len(segments)-1) {
            continue
        }

        // Strip version string so that only the non-versioned part
        // of the file name gets decrypted/deobfuscated
        hasVersion := false
        var t time.Time
        if i == (len(segments)-1) && version.Match(segments[i]) {
            var s string
            t, s = version.Remove(segments[i])
            // version.Remove can fail, in which case it returns segments[i]
            if s != segments[i] {
                segments[i] = s
                hasVersion = true
            }
        }

        if c.mode == NameEncryptionStandard {
            segments[i], err = c.decryptSegment(segments[i])
        } else {
@@ -486,6 +524,12 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
        if err != nil {
            return "", err
        }

        // Add back a version to the decrypted/deobfuscated
        // file name, if we stripped it off earlier
        if hasVersion {
            segments[i] = version.Add(segments[i], t)
        }
    }
    return strings.Join(segments, "/"), nil
}
@@ -494,10 +538,18 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
func (c *Cipher) DecryptFileName(in string) (string, error) {
    if c.mode == NameEncryptionOff {
        remainingLength := len(in) - len(encryptedSuffix)
        if remainingLength > 0 && strings.HasSuffix(in, encryptedSuffix) {
            return in[:remainingLength], nil
        if remainingLength == 0 || !strings.HasSuffix(in, encryptedSuffix) {
            return "", ErrorNotAnEncryptedFile
        }
        return "", ErrorNotAnEncryptedFile
        decrypted := in[:remainingLength]
        if version.Match(decrypted) {
            _, unversioned := version.Remove(decrypted)
            if unversioned == "" {
                return "", ErrorNotAnEncryptedFile
            }
        }
        // Leave the version string on, if it was there
        return decrypted, nil
    }
    return c.decryptFileName(in)
}
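With name encryption off, the new code still strips the .bin suffix but deliberately leaves any version string in place, and it now rejects names that are nothing but a version suffix plus .bin. A usage sketch matching the test table further down:

    // c is a *Cipher built with NameEncryptionOff, as in the tests below.
    name, err := c.DecryptFileName("1/12/123-v2001-02-03-040506-123.bin")
    // name == "1/12/123-v2001-02-03-040506-123", err == nil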

@@ -160,22 +160,29 @@ func TestEncryptFileName(t *testing.T) {
    assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
    assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
    assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
    assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
    assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
    // Standard mode with directory name encryption off
    c, _ = newCipher(NameEncryptionStandard, "", "", false)
    assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
    assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
    assert.Equal(t, "1/12/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
    assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
    assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
    // Now off mode
    c, _ = newCipher(NameEncryptionOff, "", "", true)
    assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
    // Obfuscation mode
    c, _ = newCipher(NameEncryptionObfuscated, "", "", true)
    assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
    assert.Equal(t, "49.6/99.23/150.890/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
    assert.Equal(t, "49.6/99.23/150.890/162.uryyB-v2001-02-03-040506-123.GKG", c.EncryptFileName("1/12/123/hello-v2001-02-03-040506-123.txt"))
    assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
    assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
    // Obfuscation mode with directory name encryption off
    c, _ = newCipher(NameEncryptionObfuscated, "", "", false)
    assert.Equal(t, "1/12/123/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
    assert.Equal(t, "1/12/123/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
    assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
    assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
}
@@ -194,14 +201,19 @@ func TestDecryptFileName(t *testing.T) {
        {NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
        {NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
        {NameEncryptionStandard, false, "1/12/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
        {NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", "1-v2001-02-03-040506-123", nil},
        {NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
        {NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
        {NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
        {NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
        {NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
        {NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
        {NameEncryptionObfuscated, true, "!.hello", "hello", nil},
        {NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
        {NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
        {NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
        {NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
        {NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
    } {
        c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt)
        actual, actualErr := c.DecryptFileName(test.in)

@@ -14,7 +14,6 @@ import (
    "fmt"
    "io"
    "io/ioutil"
    "log"
    "mime"
    "net/http"
    "path"
@@ -68,8 +67,8 @@ const (
    defaultScope = "drive"
    // chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
    // 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
    minChunkSize     = 256 * fs.KibiByte
    defaultChunkSize = 8 * fs.MebiByte
    minChunkSize     = 256 * fs.Kibi
    defaultChunkSize = 8 * fs.Mebi
    partialFields    = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks"
    listRGrouping    = 50   // number of IDs to search at once when using ListR
    listRInputBuffer = 1000 // size of input buffer when using ListR
@@ -183,32 +182,71 @@ func init() {
        Description: "Google Drive",
        NewFs:       NewFs,
        CommandHelp: commandHelp,
        Config: func(ctx context.Context, name string, m configmap.Mapper) {
        Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
            // Parse config into Options struct
            opt := new(Options)
            err := configstruct.Set(m, opt)
            if err != nil {
                fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
                return
                return nil, errors.Wrap(err, "couldn't parse config into struct")
            }

            // Fill in the scopes
            driveConfig.Scopes = driveScopes(opt.Scope)
            // Set the root_folder_id if using drive.appfolder
            if driveScopesContainsAppFolder(driveConfig.Scopes) {
                m.Set("root_folder_id", "appDataFolder")
            }
            switch config.State {
            case "":
                // Fill in the scopes
                driveConfig.Scopes = driveScopes(opt.Scope)

            if opt.ServiceAccountFile == "" {
                err = oauthutil.Config(ctx, "drive", name, m, driveConfig, nil)
                if err != nil {
                    log.Fatalf("Failed to configure token: %v", err)
                // Set the root_folder_id if using drive.appfolder
                if driveScopesContainsAppFolder(driveConfig.Scopes) {
                    m.Set("root_folder_id", "appDataFolder")
                }

                if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" {
                    return oauthutil.ConfigOut("teamdrive", &oauthutil.Options{
                        OAuth2Config: driveConfig,
                    })
                }
                return fs.ConfigGoto("teamdrive")
            case "teamdrive":
                if opt.TeamDriveID == "" {
                    return fs.ConfigConfirm("teamdrive_ok", false, "config_change_team_drive", "Configure this as a Shared Drive (Team Drive)?\n")
                }
                return fs.ConfigConfirm("teamdrive_change", false, "config_change_team_drive", fmt.Sprintf("Change current Shared Drive (Team Drive) ID %q?\n", opt.TeamDriveID))
            case "teamdrive_ok":
                if config.Result == "false" {
                    m.Set("team_drive", "")
                    return nil, nil
                }
                return fs.ConfigGoto("teamdrive_config")
            case "teamdrive_change":
                if config.Result == "false" {
                    return nil, nil
                }
                return fs.ConfigGoto("teamdrive_config")
            case "teamdrive_config":
                f, err := newFs(ctx, name, "", m)
                if err != nil {
                    return nil, errors.Wrap(err, "failed to make Fs to list Shared Drives")
                }
                teamDrives, err := f.listTeamDrives(ctx)
                if err != nil {
                    return nil, err
                }
                if len(teamDrives) == 0 {
                    return fs.ConfigError("", "No Shared Drives found in your account")
                }
                return fs.ConfigChoose("teamdrive_final", "config_team_drive", "Shared Drive", len(teamDrives), func(i int) (string, string) {
                    teamDrive := teamDrives[i]
                    return teamDrive.Id, teamDrive.Name
                })
            case "teamdrive_final":
                driveID := config.Result
                m.Set("team_drive", driveID)
                m.Set("root_folder_id", "")
                opt.TeamDriveID = driveID
                opt.RootFolderID = ""
                return nil, nil
            }
            err = configTeamDrive(ctx, opt, m, name)
            if err != nil {
                log.Fatalf("Failed to configure Shared Drive: %v", err)
            }
            return nil, fmt.Errorf("unknown state %q", config.State)
        },
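The rewritten Config callback above is the new state-machine style of backend configuration: each invocation receives an fs.ConfigIn carrying State and the previous answer in Result, and returns an *fs.ConfigOut describing the next step, with (nil, nil) ending the flow. A minimal sketch of the shape using only helpers that appear in this hunk; the states and question text are illustrative, not from this diff:

    Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
        switch config.State {
        case "":
            return fs.ConfigGoto("ask") // first call: jump to the question state
        case "ask":
            return fs.ConfigConfirm("ask_done", false, "config_enable", "Enable the feature?\n")
        case "ask_done":
            if config.Result == "true" {
                m.Set("feature", "on")
            }
            return nil, nil // (nil, nil) ends the flow
        }
        return nil, fmt.Errorf("unknown state %q", config.State)
    },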
        Options: append(driveOAuthOptions(), []fs.Option{{
            Name: "scope",
@@ -467,7 +505,7 @@ See: https://github.com/rclone/rclone/issues/3631
            Default: false,
            Help: `Make upload limit errors be fatal

At the time of writing it is only possible to upload 750GB of data to
At the time of writing it is only possible to upload 750 GiB of data to
Google Drive a day (this is an undocumented limit). When this limit is
reached Google Drive produces a slightly different error message. When
this flag is set it causes these errors to be fatal. These will stop
@@ -484,7 +522,7 @@ See: https://github.com/rclone/rclone/issues/3857
            Default: false,
            Help: `Make download limit errors be fatal

At the time of writing it is only possible to download 10TB of data from
At the time of writing it is only possible to download 10 TiB of data from
Google Drive a day (this is an undocumented limit). When this limit is
reached Google Drive produces a slightly different error message. When
this flag is set it causes these errors to be fatal. These will stop
@@ -522,7 +560,7 @@ If this flag is set then rclone will ignore shortcut files completely.
    } {
        for mimeType, extension := range m {
            if err := mime.AddExtensionType(extension, mimeType); err != nil {
                log.Fatalf("Failed to register MIME type %q: %v", mimeType, err)
                fs.Errorf("Failed to register MIME type %q: %v", mimeType, err)
            }
        }
    }
@@ -949,48 +987,6 @@ func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, er
    return
}

// Figure out if the user wants to use a team drive
func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name string) error {
    ci := fs.GetConfig(ctx)

    // Stop if we are running non-interactive config
    if ci.AutoConfirm {
        return nil
    }
    if opt.TeamDriveID == "" {
        fmt.Printf("Configure this as a Shared Drive (Team Drive)?\n")
    } else {
        fmt.Printf("Change current Shared Drive (Team Drive) ID %q?\n", opt.TeamDriveID)
    }
    if !config.Confirm(false) {
        return nil
    }
    f, err := newFs(ctx, name, "", m)
    if err != nil {
        return errors.Wrap(err, "failed to make Fs to list Shared Drives")
    }
    fmt.Printf("Fetching Shared Drive list...\n")
    teamDrives, err := f.listTeamDrives(ctx)
    if err != nil {
        return err
    }
    if len(teamDrives) == 0 {
        fmt.Printf("No Shared Drives found in your account")
        return nil
    }
    var driveIDs, driveNames []string
    for _, teamDrive := range teamDrives {
        driveIDs = append(driveIDs, teamDrive.Id)
        driveNames = append(driveNames, teamDrive.Name)
    }
    driveID := config.Choose("Enter a Shared Drive ID", driveIDs, driveNames, true)
    m.Set("team_drive", driveID)
    m.Set("root_folder_id", "")
    opt.TeamDriveID = driveID
    opt.RootFolderID = ""
    return nil
}

// getClient makes an http client according to the options
func getClient(ctx context.Context, opt *Options) *http.Client {
    t := fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
@@ -1169,7 +1165,7 @@ func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, e
        }
    }
        f.rootFolderID = rootID
        fs.Debugf(f, "root_folder_id = %q - save this in the config to speed up startup", rootID)
        fs.Debugf(f, "'root_folder_id = %s' - save this in the config to speed up startup", rootID)
    }

    f.dirCache = dircache.New(f.root, f.rootFolderID, f)
@@ -1332,8 +1328,8 @@ func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMim
//
// When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.File) (fs.Object, error) {
    // If item has MD5 sum or a length it is a file stored on drive
    if info.Md5Checksum != "" || info.Size > 0 {
    // If item has MD5 sum it is a file stored on drive
    if info.Md5Checksum != "" {
        return f.newRegularObject(remote, info), nil
    }

@@ -1366,8 +1362,8 @@ func (f *Fs) newObjectWithExportInfo(
        // Pretend a dangling shortcut is a regular object
        // It will error if used, but appear in listings so it can be deleted
        return f.newRegularObject(remote, info), nil
    case info.Md5Checksum != "" || info.Size > 0:
        // If item has MD5 sum or a length it is a file stored on drive
    case info.Md5Checksum != "":
        // If item has MD5 sum it is a file stored on drive
        return f.newRegularObject(remote, info), nil
    case f.opt.SkipGdocs:
        fs.Debugf(remote, "Skipping google document type %q", info.MimeType)
@@ -2959,12 +2955,12 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
}

// List all team drives
func (f *Fs) listTeamDrives(ctx context.Context) (drives []*drive.TeamDrive, err error) {
    drives = []*drive.TeamDrive{}
    listTeamDrives := f.svc.Teamdrives.List().PageSize(100)
func (f *Fs) listTeamDrives(ctx context.Context) (drives []*drive.Drive, err error) {
    drives = []*drive.Drive{}
    listTeamDrives := f.svc.Drives.List().PageSize(100)
    var defaultFs Fs // default Fs with default Options
    for {
        var teamDrives *drive.TeamDriveList
        var teamDrives *drive.DriveList
        err = f.pacer.Call(func() (bool, error) {
            teamDrives, err = listTeamDrives.Context(ctx).Do()
            return defaultFs.shouldRetry(ctx, err)
@@ -2972,7 +2968,7 @@ func (f *Fs) listTeamDrives(ctx context.Context) (drives []*drive.TeamDrive, err
        if err != nil {
            return drives, errors.Wrap(err, "listing Team Drives failed")
        }
        drives = append(drives, teamDrives.TeamDrives...)
        drives = append(drives, teamDrives.Drives...)
        if teamDrives.NextPageToken == "" {
            break
        }
@@ -3069,7 +3065,7 @@ func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
        return err
    }
    if destLeaf == "" {
        destLeaf = info.Name
        destLeaf = path.Base(o.Remote())
    }
    if destDir == "" {
        destDir = "."

backend/dropbox/batcher.go (new file)
@@ -0,0 +1,350 @@
// This file contains the implementation of the sync batcher for uploads
//
// Dropbox rules say you can start as many batches as you want, but
// you may only have one batch being committed and must wait for the
// batch to be finished before committing another.

package dropbox

import (
    "context"
    "fmt"
    "sync"
    "time"

    "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/async"
    "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
    "github.com/pkg/errors"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/lib/atexit"
)

const (
    maxBatchSize          = 1000                   // max size the batch can be
    defaultTimeoutSync    = 500 * time.Millisecond // kick off the batch if nothing added for this long (sync)
    defaultTimeoutAsync   = 10 * time.Second       // kick off the batch if nothing added for this long (async)
    defaultBatchSizeAsync = 100                    // default batch size if async
)

// batcher holds info about the current items waiting for upload
type batcher struct {
    f        *Fs                 // Fs this batch is part of
    mode     string              // configured batch mode
    size     int                 // maximum size for batch
    timeout  time.Duration       // idle timeout for batch
    async    bool                // whether we are using async batching
    in       chan batcherRequest // incoming items to batch
    closed   chan struct{}       // close to indicate batcher shut down
    atexit   atexit.FnHandle     // atexit handle
    shutOnce sync.Once           // make sure we shutdown once only
    wg       sync.WaitGroup      // wait for shutdown
}

// batcherRequest holds an incoming request with a place for a reply
type batcherRequest struct {
    commitInfo *files.UploadSessionFinishArg
    result     chan<- batcherResponse
}

// Return true if batcherRequest is the quit request
func (br *batcherRequest) isQuit() bool {
    return br.commitInfo == nil
}

// Send this to get the engine to quit
var quitRequest = batcherRequest{}

// batcherResponse holds a response to be delivered to clients waiting
// for a batch to complete.
type batcherResponse struct {
    err   error
    entry *files.FileMetadata
}

// newBatcher creates a new batcher structure
func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.Duration) (*batcher, error) {
    // fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout)
    if size > maxBatchSize || size < 0 {
        return nil, errors.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
    }

    async := false

    switch mode {
    case "sync":
        if size <= 0 {
            ci := fs.GetConfig(ctx)
            size = ci.Transfers
        }
        if timeout <= 0 {
            timeout = defaultTimeoutSync
        }
    case "async":
        if size <= 0 {
            size = defaultBatchSizeAsync
        }
        if timeout <= 0 {
            timeout = defaultTimeoutAsync
        }
        async = true
    case "off":
        size = 0
    default:
        return nil, errors.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
    }

    b := &batcher{
        f:       f,
        mode:    mode,
        size:    size,
        timeout: timeout,
        async:   async,
        in:      make(chan batcherRequest, size),
        closed:  make(chan struct{}),
    }
    if b.Batching() {
        b.atexit = atexit.Register(b.Shutdown)
        b.wg.Add(1)
        go b.commitLoop(context.Background())
    }
    return b, nil

}

// Batching returns true if batching is active
func (b *batcher) Batching() bool {
    return b.size > 0
}

// finishBatch commits the batch, returning a batch status to poll or maybe complete
func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (batchStatus *files.UploadSessionFinishBatchLaunch, err error) {
    var arg = &files.UploadSessionFinishBatchArg{
        Entries: items,
    }
    err = b.f.pacer.Call(func() (bool, error) {
        batchStatus, err = b.f.srv.UploadSessionFinishBatch(arg)
        // If error is insufficient space then don't retry
        if e, ok := err.(files.UploadSessionFinishAPIError); ok {
            if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
                err = fserrors.NoRetryError(err)
                return false, err
            }
        }
        // after the first chunk is uploaded, we retry everything
        return err != nil, err
    })
    if err != nil {
        return nil, errors.Wrap(err, "batch commit failed")
    }
    return batchStatus, nil
}

// finishBatchJobStatus waits for the batch to complete returning completed entries
func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *files.UploadSessionFinishBatchLaunch) (complete *files.UploadSessionFinishBatchResult, err error) {
    if launchBatchStatus.AsyncJobId == "" {
        return nil, errors.New("wait for batch completion: empty job ID")
    }
    var batchStatus *files.UploadSessionFinishBatchJobStatus
    sleepTime := 100 * time.Millisecond
    const maxTries = 120
    for try := 1; try <= maxTries; try++ {
        err = b.f.pacer.Call(func() (bool, error) {
            batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
                AsyncJobId: launchBatchStatus.AsyncJobId,
            })
            return shouldRetry(ctx, err)
        })
        if err != nil {
            fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d/%d", sleepTime, err, try, maxTries)
        } else {
            if batchStatus.Tag == "complete" {
                return batchStatus.Complete, nil
            }
            fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d/%d", sleepTime, batchStatus.Tag, try, maxTries)
        }
        time.Sleep(sleepTime)
        sleepTime *= 2
        if sleepTime > time.Second {
            sleepTime = time.Second
        }
    }
    if err == nil {
        err = errors.New("batch didn't complete")
    }
    return nil, errors.Wrapf(err, "wait for batch failed after %d tries", maxTries)
}
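The polling loop above doubles its sleep from 100ms and caps it at one second, so the worst case before giving up is roughly 0.1+0.2+0.4+0.8 seconds plus 116 one-second sleeps, i.e. about two minutes. The schedule in isolation, as a tiny sketch:

    // Capped exponential backoff as used above: 100ms, 200ms, 400ms, 800ms, 1s, 1s, ...
    sleepTime := 100 * time.Millisecond
    for try := 1; try <= 120; try++ {
        time.Sleep(sleepTime)
        sleepTime *= 2
        if sleepTime > time.Second {
            sleepTime = time.Second
        }
    }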

// commit a batch
func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
    // If commit fails then signal clients if sync
    var signalled = b.async
    defer func() {
        if err != nil && signalled {
            // Signal to clients that there was an error
            for _, result := range results {
                result <- batcherResponse{err: err}
            }
        }
    }()
    desc := fmt.Sprintf("%s batch length %d starting with: %s", b.mode, len(items), items[0].Commit.Path)
    fs.Debugf(b.f, "Committing %s", desc)

    // finalise the batch getting either a result or a job id to poll
    batchStatus, err := b.finishBatch(ctx, items)
    if err != nil {
        return err
    }

    // check whether batch is complete
    var complete *files.UploadSessionFinishBatchResult
    switch batchStatus.Tag {
    case "async_job_id":
        // wait for batch to complete
        complete, err = b.finishBatchJobStatus(ctx, batchStatus)
        if err != nil {
            return err
        }
    case "complete":
        complete = batchStatus.Complete
    default:
        return errors.Errorf("batch returned unknown status %q", batchStatus.Tag)
    }

    // Check we got the right number of entries
    entries := complete.Entries
    if len(entries) != len(results) {
        return errors.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
    }

    // Report results to clients
    var (
        errorTag   = ""
        errorCount = 0
    )
    for i := range results {
        item := entries[i]
        resp := batcherResponse{}
        if item.Tag == "success" {
            resp.entry = item.Success
        } else {
            errorCount++
            errorTag = item.Tag
            if item.Failure != nil {
                errorTag = item.Failure.Tag
                if item.Failure.LookupFailed != nil {
                    errorTag += "/" + item.Failure.LookupFailed.Tag
                }
                if item.Failure.Path != nil {
                    errorTag += "/" + item.Failure.Path.Tag
                }
                if item.Failure.PropertiesError != nil {
                    errorTag += "/" + item.Failure.PropertiesError.Tag
                }
            }
            resp.err = errors.Errorf("batch upload failed: %s", errorTag)
        }
        if !b.async {
            results[i] <- resp
        }
    }
    // Show signalled so no need to report error to clients from now on
    signalled = true

    // Report an error if any failed in the batch
    if errorTag != "" {
        return errors.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
    }

    fs.Debugf(b.f, "Committed %s", desc)
    return nil
}

// commitLoop runs the commit engine in the background
func (b *batcher) commitLoop(ctx context.Context) {
    var (
        items     []*files.UploadSessionFinishArg // current batch of uncommitted files
        results   []chan<- batcherResponse        // current batch of clients awaiting results
        idleTimer = time.NewTimer(b.timeout)
        commit    = func() {
            err := b.commitBatch(ctx, items, results)
            if err != nil {
                fs.Errorf(b.f, "%s batch commit: failed to commit batch length %d: %v", b.mode, len(items), err)
            }
            items, results = nil, nil
        }
    )
    defer b.wg.Done()
    defer idleTimer.Stop()
    idleTimer.Stop()

outer:
    for {
        select {
        case req := <-b.in:
            if req.isQuit() {
                break outer
            }
            items = append(items, req.commitInfo)
            results = append(results, req.result)
            idleTimer.Stop()
            if len(items) >= b.size {
                commit()
            } else {
                idleTimer.Reset(b.timeout)
            }
        case <-idleTimer.C:
            if len(items) > 0 {
                fs.Debugf(b.f, "Batch idle for %v so committing", b.timeout)
                commit()
            }
        }

    }
    // commit any remaining items
    if len(items) > 0 {
        commit()
    }
}

// Shutdown finishes any pending batches then shuts everything down
//
// Can be called from atexit handler
func (b *batcher) Shutdown() {
    b.shutOnce.Do(func() {
        atexit.Unregister(b.atexit)
        fs.Infof(b.f, "Committing uploads - please wait...")
        // show that batcher is shutting down
        close(b.closed)
        // quit the commitLoop by sending a quitRequest message
        //
        // Note that we don't close b.in because that will
        // cause write to closed channel in Commit when we are
        // exiting due to a signal.
        b.in <- quitRequest
        b.wg.Wait()
    })
}

// Commit commits the file using a batch call, first adding it to the
// batch and then waiting for the batch to complete in a synchronous
// way if async is not set.
func (b *batcher) Commit(ctx context.Context, commitInfo *files.UploadSessionFinishArg) (entry *files.FileMetadata, err error) {
    select {
    case <-b.closed:
        return nil, fserrors.FatalError(errors.New("batcher is shutting down"))
    default:
    }
    fs.Debugf(b.f, "Adding %q to batch", commitInfo.Commit.Path)
    resp := make(chan batcherResponse, 1)
    b.in <- batcherRequest{
        commitInfo: commitInfo,
        result:     resp,
    }
    // If running async then don't wait for the result
    if b.async {
        return nil, nil
    }
    result := <-resp
    return result.entry, result.err
}
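From the caller's point of view the contract above is: in sync mode Commit blocks until the whole batch has landed and returns the real files.FileMetadata; in async mode it returns (nil, nil) immediately and the caller has to tolerate missing metadata. A hedged sketch of the calling pattern, mirroring how uploadChunked uses it later in this diff:

    // args is a *files.UploadSessionFinishArg built from the finished upload session.
    entry, err := f.batcher.Commit(ctx, args)
    if err != nil {
        return err
    }
    if entry == nil {
        // async batch: no metadata came back, fall back to locally known size/modtime
    }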

@@ -25,7 +25,6 @@ import (
    "context"
    "fmt"
    "io"
    "log"
    "path"
    "regexp"
    "strings"
@@ -65,9 +64,9 @@ const (
    // Upload chunk size - setting too small makes uploads slow.
    // Chunks are buffered into memory for retries.
    //
    // Speed vs chunk size uploading a 1 GB file on 2017-11-22
    // Speed vs chunk size uploading a 1 GiB file on 2017-11-22
    //
    // Chunk Size MB, Speed Mbyte/s, % of max
    // Chunk Size MiB, Speed MiByte/s, % of max
    // 1    1.364   11%
    // 2    2.443   19%
    // 4    4.288   33%
@@ -82,11 +81,11 @@ const (
    // 96   12.302  95%
    // 128  12.945  100%
    //
    // Choose 48MB which is 91% of Maximum speed. rclone by
    // default does 4 transfers so this should use 4*48MB = 192MB
    // Choose 48 MiB which is 91% of Maximum speed. rclone by
    // default does 4 transfers so this should use 4*48 MiB = 192 MiB
    // by default.
    defaultChunkSize = 48 * fs.MebiByte
    maxChunkSize     = 150 * fs.MebiByte
    defaultChunkSize = 48 * fs.Mebi
    maxChunkSize     = 150 * fs.Mebi
    // Max length of filename parts: https://help.dropbox.com/installs-integrations/sync-uploads/files-not-syncing
    maxFileNameLength = 255
)
@@ -99,8 +98,10 @@ var (
        "files.content.write",
        "files.content.read",
        "sharing.write",
        "account_info.read", // needed for About
        // "file_requests.write",
        // "members.read", // needed for impersonate - but causes app to need to be approved by Dropbox Team Admin during the flow
        // "team_data.member"
    },
    // Endpoint: oauth2.Endpoint{
    //  AuthURL:  "https://www.dropbox.com/1/oauth2/authorize",
@@ -130,29 +131,26 @@ func getOauthConfig(m configmap.Mapper) *oauth2.Config {
    }
    // Make a copy of the config
    config := *dropboxConfig
    // Make a copy of the scopes with "members.read" appended
    config.Scopes = append(config.Scopes, "members.read")
    // Make a copy of the scopes with the extra scopes required appended
    config.Scopes = append(config.Scopes, "members.read", "team_data.member")
    return &config
}

// Register with Fs
func init() {
    DbHashType = hash.RegisterHash("DropboxHash", 64, dbhash.New)
    DbHashType = hash.RegisterHash("dropbox", "DropboxHash", 64, dbhash.New)
    fs.Register(&fs.RegInfo{
        Name:        "dropbox",
        Description: "Dropbox",
        NewFs:       NewFs,
        Config: func(ctx context.Context, name string, m configmap.Mapper) {
            opt := oauthutil.Options{
                NoOffline: true,
        Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
            return oauthutil.ConfigOut("", &oauthutil.Options{
                OAuth2Config: getOauthConfig(m),
                NoOffline:    true,
                OAuth2Opts: []oauth2.AuthCodeOption{
                    oauth2.SetAuthURLParam("token_access_type", "offline"),
                },
            }
            err := oauthutil.Config(ctx, "dropbox", name, m, getOauthConfig(m), &opt)
            if err != nil {
                log.Fatalf("Failed to configure token: %v", err)
            }
            })
        },
        Options: append(oauthutil.SharedOptions, []fs.Option{{
            Name: "chunk_size",
@@ -162,7 +160,7 @@ Any files larger than this will be uploaded in chunks of this size.

Note that chunks are buffered in memory (one at a time) so rclone can
deal with retries. Setting this larger will increase the speed
slightly (at most 10%% for 128MB in tests) at the cost of using more
slightly (at most 10%% for 128 MiB in tests) at the cost of using more
memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
            Default:  defaultChunkSize,
            Advanced: true,
@@ -211,6 +209,63 @@ Note that we don't unmount the shared folder afterwards so the
shared folder.`,
            Default:  false,
            Advanced: true,
        }, {
            Name: "batch_mode",
            Help: `Upload file batching sync|async|off.

This sets the batch mode used by rclone.

For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)

This has 3 possible values

- off - no batching
- sync - batch uploads and check completion (default)
- async - batch upload and don't check completion

Rclone will close any outstanding batches when it exits which may
cause a delay on quit.
`,
            Default:  "sync",
            Advanced: true,
        }, {
            Name: "batch_size",
            Help: `Max number of files in upload batch.

This sets the batch size of files to upload. It has to be less than 1000.

By default this is 0 which means rclone will calculate the batch size
depending on the setting of batch_mode.

- batch_mode: async - default batch_size is 100
- batch_mode: sync - default batch_size is the same as --transfers
- batch_mode: off - not in use

Rclone will close any outstanding batches when it exits which may
cause a delay on quit.

Setting this is a great idea if you are uploading lots of small files
as it will make them a lot quicker. You can use --transfers 32 to
maximise throughput.
`,
            Default:  0,
            Advanced: true,
        }, {
            Name: "batch_timeout",
            Help: `Max time to allow an idle upload batch before uploading

If an upload batch is idle for more than this long then it will be
uploaded.

The default for this is 0 which means rclone will choose a sensible
default based on the batch_mode in use.

- batch_mode: async - default batch_timeout is 10s
- batch_mode: sync - default batch_timeout is 500ms
- batch_mode: off - not in use
`,
            Default:  fs.Duration(0),
            Advanced: true,
        }, {
            Name: config.ConfigEncoding,
            Help: config.ConfigEncodingHelp,
@@ -234,6 +289,10 @@ type Options struct {
    Impersonate   string               `config:"impersonate"`
    SharedFiles   bool                 `config:"shared_files"`
    SharedFolders bool                 `config:"shared_folders"`
    BatchMode     string               `config:"batch_mode"`
    BatchSize     int                  `config:"batch_size"`
    BatchTimeout  fs.Duration          `config:"batch_timeout"`
    AsyncBatch    bool                 `config:"async_batch"`
    Enc           encoder.MultiEncoder `config:"encoding"`
}
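The three new Options fields map one-to-one onto config keys, so batching can be driven from rclone.conf (or the flags rclone derives from them; the --dropbox-batch-* spellings follow rclone's usual backend-prefix convention and are an assumption, not spelled out in this diff). A sample remote:

    [dropbox]
    type = dropbox
    batch_mode = async
    batch_size = 100
    batch_timeout = 10s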

@@ -253,6 +312,7 @@ type Fs struct {
    slashRootSlash string   // root with "/" prefix and postfix, lowercase
    pacer          *fs.Pacer // To pace the API calls
    ns             string    // The namespace we are using or "" for none
    batcher        *batcher  // batch builder
}

// Object describes a dropbox object
@@ -268,8 +328,6 @@ type Object struct {
    hash string // content_hash of the object
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
    return f.name
@@ -323,7 +381,7 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
}

func checkUploadChunkSize(cs fs.SizeSuffix) error {
    const minChunkSize = fs.Byte
    const minChunkSize = fs.SizeSuffixBase
    if cs < minChunkSize {
        return errors.Errorf("%s is less than %s", cs, minChunkSize)
    }
@@ -380,6 +438,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
        ci:    ci,
        pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
    }
    f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout))
    if err != nil {
        return nil, err
    }
    cfg := dropbox.Config{
        LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
        Client:   oAuthClient,    // maybe???
@@ -1084,13 +1146,30 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
    fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
    createArg := sharing.CreateSharedLinkWithSettingsArg{
        Path: absPath,
        // FIXME this gives settings_error/not_authorized/.. errors
        // and the expires setting isn't in the documentation so remove
        // for now.
        // Settings: &sharing.SharedLinkSettings{
        //  Expires: time.Now().Add(time.Duration(expire)).UTC().Round(time.Second),
        // },
        Settings: &sharing.SharedLinkSettings{
            RequestedVisibility: &sharing.RequestedVisibility{
                Tagged: dropbox.Tagged{Tag: sharing.RequestedVisibilityPublic},
            },
            Audience: &sharing.LinkAudience{
                Tagged: dropbox.Tagged{Tag: sharing.LinkAudiencePublic},
            },
            Access: &sharing.RequestedLinkAccessLevel{
                Tagged: dropbox.Tagged{Tag: sharing.RequestedLinkAccessLevelViewer},
            },
        },
    }
    if expire < fs.DurationOff {
        expiryTime := time.Now().Add(time.Duration(expire)).UTC().Round(time.Second)
        createArg.Settings.Expires = expiryTime
    }
    // FIXME note we can't set Settings for non enterprise dropbox
    // because of https://github.com/dropbox/dropbox-sdk-go-unofficial/issues/75
    // however this only goes wrong when we set Expires, so as a
    // work-around remove Settings unless expire is set.
    if expire == fs.DurationOff {
        createArg.Settings = nil
    }

    var linkRes sharing.IsSharedLinkMetadata
    err = f.pacer.Call(func() (bool, error) {
        linkRes, err = f.sharing.CreateSharedLinkWithSettings(&createArg)
@@ -1334,13 +1413,13 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
        switch info := entry.(type) {
        case *files.FolderMetadata:
            entryType = fs.EntryDirectory
            entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
            entryPath = strings.TrimPrefix(info.PathDisplay, f.slashRootSlash)
        case *files.FileMetadata:
            entryType = fs.EntryObject
            entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
            entryPath = strings.TrimPrefix(info.PathDisplay, f.slashRootSlash)
        case *files.DeletedMetadata:
            entryType = fs.EntryObject
            entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
            entryPath = strings.TrimPrefix(info.PathDisplay, f.slashRootSlash)
        default:
            fs.Errorf(entry, "dropbox ChangeNotify: ignoring unknown EntryType %T", entry)
            continue
@@ -1362,6 +1441,13 @@ func (f *Fs) Hashes() hash.Set {
    return hash.Set(DbHashType)
}

// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
    f.batcher.Shutdown()
    return nil
}

// ------------------------------------------------------------

// Fs returns the parent Fs
@@ -1521,97 +1607,83 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

// uploadChunked uploads the object in parts
//
// Will work optimally if size is >= uploadChunkSize. If the size is either
// unknown (i.e. -1) or smaller than uploadChunkSize, the method incurs an
// avoidable request to the Dropbox API that does not carry payload.
// Will introduce two additional network requests to start and finish the session.
// If the size is unknown (i.e. -1) the method incurs one additional
// request to the Dropbox API that does not carry a payload to close the append session.
func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) {
    chunkSize := int64(o.fs.opt.ChunkSize)
    chunks := 0
    if size != -1 {
        chunks = int(size/chunkSize) + 1
    }
    in := readers.NewCountingReader(in0)
    buf := make([]byte, int(chunkSize))

    fmtChunk := func(cur int, last bool) {
        if chunks == 0 && last {
            fs.Debugf(o, "Streaming chunk %d/%d", cur, cur)
        } else if chunks == 0 {
            fs.Debugf(o, "Streaming chunk %d/unknown", cur)
        } else {
            fs.Debugf(o, "Uploading chunk %d/%d", cur, chunks)
        }
    }

    // write the first chunk
    fmtChunk(1, false)
    // start upload
    var res *files.UploadSessionStartResult
    chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
    err = o.fs.pacer.Call(func() (bool, error) {
        // seek to the start in case this is a retry
        if _, err = chunk.Seek(0, io.SeekStart); err != nil {
            return false, nil
        }
        res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, chunk)
        res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, nil)
        return shouldRetry(ctx, err)
    })
    if err != nil {
        return nil, err
    }

    chunkSize := int64(o.fs.opt.ChunkSize)
    chunks, remainder := size/chunkSize, size%chunkSize
    if remainder > 0 {
        chunks++
    }

    // write chunks
    in := readers.NewCountingReader(in0)
    buf := make([]byte, int(chunkSize))
    cursor := files.UploadSessionCursor{
        SessionId: res.SessionId,
        Offset:    0,
    }
    appendArg := files.UploadSessionAppendArg{
        Cursor: &cursor,
        Close:  false,
    }

    // write more whole chunks (if any)
    currentChunk := 2
    for {
        if chunks > 0 && currentChunk >= chunks {
            // if the size is known, only upload full chunks. Remaining bytes are uploaded with
            // the UploadSessionFinish request.
            break
        } else if chunks == 0 && in.BytesRead()-cursor.Offset < uint64(chunkSize) {
            // if the size is unknown, upload as long as we can read full chunks from the reader.
            // The UploadSessionFinish request will not contain any payload.
            break
        }
    appendArg := files.UploadSessionAppendArg{Cursor: &cursor}
    for currentChunk := 1; ; currentChunk++ {
        cursor.Offset = in.BytesRead()
        fmtChunk(currentChunk, false)
        chunk = readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)

        if chunks < 0 {
            fs.Debugf(o, "Streaming chunk %d/unknown", currentChunk)
        } else {
            fs.Debugf(o, "Uploading chunk %d/%d", currentChunk, chunks)
        }

        chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
        err = o.fs.pacer.Call(func() (bool, error) {
            // seek to the start in case this is a retry
            if _, err = chunk.Seek(0, io.SeekStart); err != nil {
                return false, nil
            }
            err = o.fs.srv.UploadSessionAppendV2(&appendArg, chunk)
            // after the first chunk is uploaded, we retry everything
            // after session is started, we retry everything
            return err != nil, err
        })
        if err != nil {
            return nil, err
        }
        currentChunk++
        if appendArg.Close {
            break
        }

        if size > 0 {
            // if size is known, check if next chunk is final
            appendArg.Close = uint64(size)-in.BytesRead() <= uint64(chunkSize)
        } else {
            // if size is unknown, upload as long as we can read full chunks from the reader
            appendArg.Close = in.BytesRead()-cursor.Offset < uint64(chunkSize)
        }
    }

    // write the remains
    // finish upload
    cursor.Offset = in.BytesRead()
    args := &files.UploadSessionFinishArg{
        Cursor: &cursor,
        Commit: commitInfo,
    }
    fmtChunk(currentChunk, true)
    chunk = readers.NewRepeatableReaderBuffer(in, buf)
    // If we are batching then we should have written all the data now
    // store the commit info now for a batch commit
    if o.fs.batcher.Batching() {
        return o.fs.batcher.Commit(ctx, args)
    }

    err = o.fs.pacer.Call(func() (bool, error) {
        // seek to the start in case this is a retry
        if _, err = chunk.Seek(0, io.SeekStart); err != nil {
            return false, nil
        }
        entry, err = o.fs.srv.UploadSessionFinish(args, chunk)
        entry, err = o.fs.srv.UploadSessionFinish(args, nil)
        // If error is insufficient space then don't retry
        if e, ok := err.(files.UploadSessionFinishAPIError); ok {
            if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
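The rewrite above changes the session lifecycle: UploadSessionStart and UploadSessionFinish now carry no payload, and every byte flows through UploadSessionAppendV2 with the cursor's Close flag marking the final chunk. A compressed sketch of that lifecycle; it assumes the dropbox-sdk-go-unofficial files.Client used throughout this file and is not the backend's exact code:

    import (
        "bytes"

        "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
    )

    func uploadViaSession(srv files.Client, chunks [][]byte, commit *files.CommitInfo) (*files.FileMetadata, error) {
        res, err := srv.UploadSessionStart(&files.UploadSessionStartArg{}, nil) // no payload
        if err != nil {
            return nil, err
        }
        cursor := files.UploadSessionCursor{SessionId: res.SessionId}
        appendArg := files.UploadSessionAppendArg{Cursor: &cursor}
        var sent uint64
        for i, chunk := range chunks {
            cursor.Offset = sent
            appendArg.Close = i == len(chunks)-1 // mark the final append
            if err := srv.UploadSessionAppendV2(&appendArg, bytes.NewReader(chunk)); err != nil {
                return nil, err
            }
            sent += uint64(len(chunk))
        }
        cursor.Offset = sent
        // Finish carries only the commit metadata, no payload.
        return srv.UploadSessionFinish(&files.UploadSessionFinishArg{Cursor: &cursor, Commit: commit}, nil)
    }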
@@ -1678,7 +1750,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
size := src.Size()
|
||||
var err error
|
||||
var entry *files.FileMetadata
|
||||
if size > int64(o.fs.opt.ChunkSize) || size == -1 {
|
||||
if size > int64(o.fs.opt.ChunkSize) || size < 0 || o.fs.batcher.Batching() {
|
||||
entry, err = o.uploadChunked(ctx, in, commitInfo, size)
|
||||
} else {
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
@@ -1689,6 +1761,15 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "upload failed")
|
||||
}
|
||||
// If we haven't received data back from batch upload then fake it
|
||||
//
|
||||
// This will only happen if we are uploading async batches
|
||||
if entry == nil {
|
||||
o.bytes = size
|
||||
o.modTime = commitInfo.ClientModified
|
||||
o.hash = "" // we don't have this
|
||||
return nil
|
||||
}
|
||||
return o.setMetadataFromEntry(entry)
|
||||
}
|
||||
|
||||
@@ -1716,6 +1797,7 @@ var (
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.Shutdowner = &Fs{}
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.IDer = (*Object)(nil)
|
||||
)
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -86,10 +87,16 @@ func (f *Fs) readFileInfo(ctx context.Context, url string) (*File, error) {
|
||||
return &file, err
|
||||
}
|
||||
|
||||
// maybe do some actual validation later if necessary
|
||||
func validToken(token *GetTokenResponse) bool {
|
||||
return token.Status == "OK"
|
||||
}
|
||||
|
||||
func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
|
||||
request := DownloadRequest{
|
||||
URL: url,
|
||||
Single: 1,
|
||||
Pass: f.opt.FilePassword,
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
@@ -99,7 +106,8 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
|
||||
var token GetTokenResponse
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
doretry, err := shouldRetry(ctx, resp, err)
|
||||
return doretry || !validToken(&token), err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't list files")
|
||||
@@ -118,10 +126,16 @@ func fileFromSharedFile(file *SharedFile) File {
|
||||
|
||||
func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntries, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: "https://1fichier.com/dir/",
|
||||
Path: id,
|
||||
Parameters: map[string][]string{"json": {"1"}},
|
||||
Method: "GET",
|
||||
RootURL: "https://1fichier.com/dir/",
|
||||
Path: id,
|
||||
Parameters: map[string][]string{"json": {"1"}},
|
||||
ContentType: "application/x-www-form-urlencoded",
|
||||
}
|
||||
if f.opt.FolderPassword != "" {
|
||||
opts.Method = "POST"
|
||||
opts.Parameters = nil
|
||||
opts.Body = strings.NewReader("json=1&pass=" + url.QueryEscape(f.opt.FolderPassword))
|
||||
}
|
||||
|
||||
var sharedFiles SharedFolderResponse
|
||||
@@ -311,7 +325,7 @@ func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (respo
|
||||
return nil, errors.Wrap(err, "couldn't remove folder")
|
||||
}
|
||||
if response.Status != "OK" {
|
||||
return nil, errors.New("Can't remove non-empty dir")
|
||||
return nil, errors.Errorf("can't remove folder: %s", response.Message)
|
||||
}
|
||||
|
||||
// fs.Debugf(f, "Removed Folder with id `%s`", directoryID)
|
||||
@@ -396,6 +410,34 @@ func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename stri
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (f *Fs) renameFile(ctx context.Context, url string, newName string) (response *RenameFileResponse, err error) {
|
||||
request := &RenameFileRequest{
|
||||
URLs: []RenameFileURL{
|
||||
{
|
||||
URL: url,
|
||||
Filename: newName,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/file/rename.cgi",
|
||||
}
|
||||
|
||||
response = &RenameFileResponse{}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't rename file")
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse, err error) {
|
||||
// fs.Debugf(f, "Requesting Upload node")
|
||||
|
||||
|
||||
@@ -35,9 +35,7 @@ func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "fichier",
|
||||
Description: "1Fichier",
|
||||
Config: func(ctx context.Context, name string, config configmap.Mapper) {
|
||||
},
|
||||
NewFs: NewFs,
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
|
||||
Name: "api_key",
|
||||
@@ -46,6 +44,18 @@ func init() {
|
||||
Name: "shared_folder",
|
||||
Required: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Help: "If you want to download a shared file that is password protected, add this parameter",
|
||||
Name: "file_password",
|
||||
Required: false,
|
||||
Advanced: true,
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Help: "If you want to list the files in a shared folder that is password protected, add this parameter",
|
||||
Name: "folder_password",
|
||||
Required: false,
|
||||
Advanced: true,
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -77,9 +87,11 @@ func init() {
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
APIKey string `config:"api_key"`
|
||||
SharedFolder string `config:"shared_folder"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
APIKey string `config:"api_key"`
|
||||
SharedFolder string `config:"shared_folder"`
|
||||
FilePassword string `config:"file_password"`
|
||||
FolderPassword string `config:"folder_password"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
// Fs is the interface a cloud storage system must provide
|
||||
@@ -348,8 +360,10 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(fileUploadResponse.Links) != 1 {
|
||||
return nil, errors.New("unexpected amount of files")
|
||||
if len(fileUploadResponse.Links) == 0 {
|
||||
return nil, errors.New("upload response not found")
|
||||
} else if len(fileUploadResponse.Links) > 1 {
|
||||
fs.Debugf(remote, "Multiple upload responses found, using the first")
|
||||
}
|
||||
|
||||
link := fileUploadResponse.Links[0]
|
||||
@@ -423,25 +437,45 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
|
||||
// Find current directory ID
|
||||
_, currentDirectoryID, err := f.dirCache.FindPath(ctx, remote, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create temporary object
|
||||
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
folderID, err := strconv.Atoi(directoryID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't move file")
|
||||
}
|
||||
if resp.Status != "OK" {
|
||||
return nil, errors.New("couldn't move file")
|
||||
// If it is in the correct directory, just rename it
|
||||
var url string
|
||||
if currentDirectoryID == directoryID {
|
||||
resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't rename file")
|
||||
}
|
||||
if resp.Status != "OK" {
|
||||
return nil, errors.Errorf("couldn't rename file: %s", resp.Message)
|
||||
}
|
||||
url = resp.URLs[0].URL
|
||||
} else {
|
||||
folderID, err := strconv.Atoi(directoryID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't move file")
|
||||
}
|
||||
if resp.Status != "OK" {
|
||||
return nil, errors.Errorf("couldn't move file: %s", resp.Message)
|
||||
}
|
||||
url = resp.URLs[0]
|
||||
}
|
||||
|
||||
file, err := f.readFileInfo(ctx, resp.URLs[0])
|
||||
file, err := f.readFileInfo(ctx, url)
|
||||
if err != nil {
|
||||
return nil, errors.New("couldn't read file data")
|
||||
}
|
||||
@@ -472,7 +506,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
return nil, errors.Wrap(err, "couldn't move file")
|
||||
}
|
||||
if resp.Status != "OK" {
|
||||
return nil, errors.New("couldn't move file")
|
||||
return nil, errors.Errorf("couldn't move file: %s", resp.Message)
|
||||
}
|
||||
|
||||
file, err := f.readFileInfo(ctx, resp.URLs[0].ToURL)
|
||||
|
||||
@@ -19,6 +19,7 @@ type ListFilesRequest struct {
type DownloadRequest struct {
	URL    string `json:"url"`
	Single int    `json:"single"`
+	Pass   string `json:"pass,omitempty"`
}

// RemoveFolderRequest is the request structure of the corresponding request
@@ -63,8 +64,9 @@ type MoveFileRequest struct {

// MoveFileResponse is the response structure of the corresponding request
type MoveFileResponse struct {
-	Status string   `json:"status"`
-	URLs   []string `json:"urls"`
+	Status  string   `json:"status"`
+	Message string   `json:"message"`
+	URLs    []string `json:"urls"`
}

// CopyFileRequest is the request structure of the corresponding request
@@ -76,9 +78,10 @@ type CopyFileRequest struct {

// CopyFileResponse is the response structure of the corresponding request
type CopyFileResponse struct {
-	Status string     `json:"status"`
-	Copied int        `json:"copied"`
-	URLs   []FileCopy `json:"urls"`
+	Status  string     `json:"status"`
+	Message string     `json:"message"`
+	Copied  int        `json:"copied"`
+	URLs    []FileCopy `json:"urls"`
}

// FileCopy is used in the CopyFileResponse
@@ -87,6 +90,30 @@ type FileCopy struct {
	ToURL string `json:"to_url"`
}

// RenameFileURL is the data structure to rename a single file
type RenameFileURL struct {
	URL      string `json:"url"`
	Filename string `json:"filename"`
}

// RenameFileRequest is the request structure of the corresponding request
type RenameFileRequest struct {
	URLs   []RenameFileURL `json:"urls"`
	Pretty int             `json:"pretty"`
}

// RenameFileResponse is the response structure of the corresponding request
type RenameFileResponse struct {
	Status  string `json:"status"`
	Message string `json:"message"`
	Renamed int    `json:"renamed"`
	URLs    []struct {
		URL         string `json:"url"`
		OldFilename string `json:"old_filename"`
		NewFilename string `json:"new_filename"`
	} `json:"urls"`
}

// GetUploadNodeResponse is the response structure of the corresponding request
type GetUploadNodeResponse struct {
	ID string `json:"id"`

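To make the new rename plumbing concrete, here is a hedged, self-contained sketch of the JSON payload that RenameFileRequest serializes to. The file URL is a made-up placeholder; only the struct tags from the diff above are relied on.

package main

import (
	"encoding/json"
	"fmt"
)

type RenameFileURL struct {
	URL      string `json:"url"`
	Filename string `json:"filename"`
}

type RenameFileRequest struct {
	URLs   []RenameFileURL `json:"urls"`
	Pretty int             `json:"pretty"`
}

func main() {
	req := RenameFileRequest{
		URLs: []RenameFileURL{{
			URL:      "https://1fichier.com/?abcdefgh", // hypothetical file URL
			Filename: "newname.txt",
		}},
	}
	out, _ := json.Marshal(req)
	fmt.Println(string(out))
	// {"urls":[{"url":"https://1fichier.com/?abcdefgh","filename":"newname.txt"}],"pretty":0}
}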
@@ -5,6 +5,7 @@ package api

import (
	"bytes"
+	"encoding/json"
	"fmt"
	"reflect"
	"strings"
@@ -51,6 +52,23 @@ func (t Time) String() string {
	return time.Time(t).UTC().Format(timeFormatParameters)
}

// Int represents an integer which can be represented in JSON as a
// quoted integer or an integer.
type Int int

// MarshalJSON turns an Int into JSON
func (i *Int) MarshalJSON() (out []byte, err error) {
	return json.Marshal((*int)(i))
}

// UnmarshalJSON turns JSON into an Int
func (i *Int) UnmarshalJSON(data []byte) error {
	if len(data) >= 2 && data[0] == '"' && data[len(data)-1] == '"' {
		data = data[1 : len(data)-1]
	}
	return json.Unmarshal(data, (*int)(i))
}

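Why the Int type exists is easiest to see with both JSON shapes side by side: the filefabric API sometimes returns "from" as a quoted integer and sometimes as a bare one. A standalone sketch, using only encoding/json and reusing the UnmarshalJSON logic verbatim:

package main

import (
	"encoding/json"
	"fmt"
)

// Int accepts both a bare JSON integer and a quoted one.
type Int int

func (i *Int) UnmarshalJSON(data []byte) error {
	// strip surrounding quotes, if any, then parse as a plain int
	if len(data) >= 2 && data[0] == '"' && data[len(data)-1] == '"' {
		data = data[1 : len(data)-1]
	}
	return json.Unmarshal(data, (*int)(i))
}

func main() {
	var quoted, bare struct {
		From Int `json:"from"`
	}
	_ = json.Unmarshal([]byte(`{"from": "5"}`), &quoted) // quoted integer
	_ = json.Unmarshal([]byte(`{"from": 7}`), &bare)     // bare integer
	fmt.Println(quoted.From, bare.From) // 5 7
}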
// Status is returned in all status responses
type Status struct {
	Code string `json:"status"`
@@ -115,7 +133,7 @@ type GetFolderContentsResponse struct {
	Total  int    `json:"total,string"`
	Items  []Item `json:"filelist"`
	Folder Item   `json:"folder"`
-	From   int    `json:"from,string"`
+	From   Int    `json:"from"`
	//Count int `json:"count"`
	Pid           string `json:"pid"`
	RefreshResult Status `json:"refreshresult"`

@@ -241,23 +241,6 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
	return len(p), nil
}

-type dialCtx struct {
-	f   *Fs
-	ctx context.Context
-}
-
-// dial a new connection with fshttp dialer
-func (d *dialCtx) dial(network, address string) (net.Conn, error) {
-	conn, err := fshttp.NewDialer(d.ctx).Dial(network, address)
-	if err != nil {
-		return nil, err
-	}
-	if d.f.tlsConf != nil {
-		conn = tls.Client(conn, d.f.tlsConf)
-	}
-	return conn, err
-}
-
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
@@ -277,9 +260,22 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
// Open a new connection to the FTP server.
func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
	fs.Debugf(f, "Connecting to FTP server")
-	dCtx := dialCtx{f, ctx}
-	ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dCtx.dial)}
-	if f.opt.ExplicitTLS {

+	// Make ftp library dial with fshttp dialer optionally using TLS
+	dial := func(network, address string) (conn net.Conn, err error) {
+		conn, err = fshttp.NewDialer(ctx).Dial(network, address)
+		if f.tlsConf != nil && err == nil {
+			conn = tls.Client(conn, f.tlsConf)
+		}
+		return
+	}
+	ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dial)}

+	if f.opt.TLS {
+		// Our dialer takes care of TLS but ftp library also needs tlsConf
+		// as a trigger for sending PBSZ and PROT options to server.
+		ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
+	} else if f.opt.ExplicitTLS {
+		ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
		// Initial connection needs to be cleartext for explicit TLS
		conn, err := fshttp.NewDialer(ctx).Dial("tcp", f.dialAddr)
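For context, this is roughly how the inlined dial function plugs into the jlaffaye/ftp dial options, sketched outside rclone with a plain net.Dial instead of fshttp. The server name and address are placeholders, and this assumes the DialWithDialFunc/DialWithTLS options as used in the diff above.

package main

import (
	"crypto/tls"
	"fmt"
	"net"

	"github.com/jlaffaye/ftp"
)

func main() {
	// ftp.example.com is a placeholder; rclone builds tlsConf from its options
	tlsConf := &tls.Config{ServerName: "ftp.example.com"}

	// Custom dial function: we own the TCP connection and wrap it in TLS
	// ourselves, like the dial closure in the diff.
	dial := func(network, address string) (net.Conn, error) {
		conn, err := net.Dial(network, address)
		if err == nil && tlsConf != nil {
			conn = tls.Client(conn, tlsConf)
		}
		return conn, err
	}

	c, err := ftp.Dial("ftp.example.com:990",
		ftp.DialWithDialFunc(dial),
		ftp.DialWithTLS(tlsConf), // also makes the library send PBSZ/PROT
	)
	if err != nil {
		fmt.Println("connect failed:", err)
		return
	}
	defer func() { _ = c.Quit() }()
}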
@@ -1054,6 +1050,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		return errors.Wrap(err, "Update")
	}
	err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
+	// Ignore error 250 here - sent by some servers
+	if err != nil {
+		switch errX := err.(type) {
+		case *textproto.Error:
+			switch errX.Code {
+			case ftp.StatusRequestedFileActionOK:
+				err = nil
+			}
+		}
+	}
	if err != nil {
		_ = c.Quit() // toss this connection to avoid sync errors
		remove()

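The error-250 tolerance above can be exercised in isolation: net/textproto errors carry the FTP reply code, and 250 (ftp.StatusRequestedFileActionOK) is remapped to success. A minimal sketch using only the standard library:

package main

import (
	"fmt"
	"net/textproto"
)

// ignore250 mirrors the Update handling above: FTP code 250
// ("Requested file action okay") after STOR is treated as success.
func ignore250(err error) error {
	if errX, ok := err.(*textproto.Error); ok && errX.Code == 250 {
		return nil
	}
	return err
}

func main() {
	err := &textproto.Error{Code: 250, Msg: "Requested file action okay, completed"}
	fmt.Println(ignore250(err)) // <nil>
	fmt.Println(ignore250(&textproto.Error{Code: 550, Msg: "denied"}))
}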
@@ -19,9 +19,9 @@ import (
	"fmt"
	"io"
	"io/ioutil"
-	"log"
	"net/http"
	"path"
+	"strconv"
	"strings"
	"time"

@@ -51,10 +51,10 @@ import (
const (
	rcloneClientID              = "202264815644.apps.googleusercontent.com"
	rcloneEncryptedClientSecret = "Uj7C9jGfb9gmeaV70Lh058cNkWvepr-Es9sBm0zdgil7JaOWF1VySw"
-	timeFormatIn                = time.RFC3339
-	timeFormatOut               = "2006-01-02T15:04:05.000000000Z07:00"
-	metaMtime                   = "mtime" // key to store mtime under in metadata
-	listChunks                  = 1000    // chunk size to read directory listings
+	timeFormat                  = time.RFC3339Nano
+	metaMtime                   = "mtime"                    // key to store mtime in metadata
+	metaMtimeGsutil             = "goog-reserved-file-mtime" // key used by GSUtil to store mtime in metadata
+	listChunks                  = 1000                       // chunk size to read directory listings
	minSleep                    = 10 * time.Millisecond
)

@@ -76,17 +76,16 @@ func init() {
	Prefix:      "gcs",
	Description: "Google Cloud Storage (this is not Google Drive)",
	NewFs:       NewFs,
-	Config: func(ctx context.Context, name string, m configmap.Mapper) {
+	Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
		saFile, _ := m.Get("service_account_file")
		saCreds, _ := m.Get("service_account_credentials")
		anonymous, _ := m.Get("anonymous")
		if saFile != "" || saCreds != "" || anonymous == "true" {
-			return
+			return nil, nil
		}
-		err := oauthutil.Config(ctx, "google cloud storage", name, m, storageConfig, nil)
-		if err != nil {
-			log.Fatalf("Failed to configure token: %v", err)
-		}
+		return oauthutil.ConfigOut("", &oauthutil.Options{
+			OAuth2Config: storageConfig,
+		})
	},
	Options: append(oauthutil.SharedOptions, []fs.Option{{
		Name: "project_number",
@@ -922,7 +921,7 @@ func (o *Object) setMetaData(info *storage.Object) {
	// read mtime out of metadata if available
	mtimeString, ok := info.Metadata[metaMtime]
	if ok {
-		modTime, err := time.Parse(timeFormatIn, mtimeString)
+		modTime, err := time.Parse(timeFormat, mtimeString)
		if err == nil {
			o.modTime = modTime
			return
@@ -930,8 +929,19 @@ func (o *Object) setMetaData(info *storage.Object) {
		fs.Debugf(o, "Failed to read mtime from metadata: %s", err)
	}

+	// Fallback to GSUtil mtime
+	mtimeGsutilString, ok := info.Metadata[metaMtimeGsutil]
+	if ok {
+		unixTimeSec, err := strconv.ParseInt(mtimeGsutilString, 10, 64)
+		if err == nil {
+			o.modTime = time.Unix(unixTimeSec, 0)
+			return
+		}
+		fs.Debugf(o, "Failed to read GSUtil mtime from metadata: %s", err)
+	}

	// Fallback to the Updated time
-	modTime, err := time.Parse(timeFormatIn, info.Updated)
+	modTime, err := time.Parse(timeFormat, info.Updated)
	if err != nil {
		fs.Logf(o, "Bad time decode: %v", err)
	} else {
@@ -988,7 +998,8 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// Returns metadata for an object
func metadataFromModTime(modTime time.Time) map[string]string {
	metadata := make(map[string]string, 1)
-	metadata[metaMtime] = modTime.Format(timeFormatOut)
+	metadata[metaMtime] = modTime.Format(timeFormat)
+	metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
	return metadata
}

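Putting the two metadata keys together, this sketch shows what metadataFromModTime now produces for a given time. The key names come straight from the constants in the diff; everything else is standard library.

package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	modTime := time.Date(2021, 5, 1, 12, 0, 0, 123456789, time.UTC)
	metadata := map[string]string{
		"mtime":                    modTime.Format(time.RFC3339Nano),      // rclone's own key
		"goog-reserved-file-mtime": strconv.FormatInt(modTime.Unix(), 10), // gsutil-compatible key
	}
	fmt.Println(metadata["mtime"])                    // 2021-05-01T12:00:00.123456789Z
	fmt.Println(metadata["goog-reserved-file-mtime"]) // 1619870400
}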
@@ -1000,11 +1011,11 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
		return err
	}
	// Add the mtime to the existing metadata
-	mtime := modTime.Format(timeFormatOut)
	if object.Metadata == nil {
		object.Metadata = make(map[string]string, 1)
	}
-	object.Metadata[metaMtime] = mtime
+	object.Metadata[metaMtime] = modTime.Format(timeFormat)
+	object.Metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
	// Copy the object to itself to update the metadata
	// Using PATCH requires too many permissions
	bucket, bucketPath := o.split()

@@ -8,7 +8,6 @@ import (
	"encoding/json"
	"fmt"
	"io"
-	golog "log"
	"net/http"
	"net/url"
	"path"
@@ -54,6 +53,7 @@ const (
	minSleep       = 10 * time.Millisecond
	scopeReadOnly  = "https://www.googleapis.com/auth/photoslibrary.readonly"
	scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary"
+	scopeAccess    = 2 // position of access scope in list
)

var (
@@ -62,7 +62,7 @@ var (
	Scopes: []string{
		"openid",
		"profile",
-		scopeReadWrite,
+		scopeReadWrite, // this must be at position scopeAccess
	},
	Endpoint: google.Endpoint,
	ClientID: rcloneClientID,
@@ -78,36 +78,36 @@ func init() {
	Prefix:      "gphotos",
	Description: "Google Photos",
	NewFs:       NewFs,
-	Config: func(ctx context.Context, name string, m configmap.Mapper) {
+	Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
		// Parse config into Options struct
		opt := new(Options)
		err := configstruct.Set(m, opt)
		if err != nil {
-			fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
-			return
+			return nil, errors.Wrap(err, "couldn't parse config into struct")
		}

-		// Fill in the scopes
-		if opt.ReadOnly {
-			oauthConfig.Scopes[0] = scopeReadOnly
-		} else {
-			oauthConfig.Scopes[0] = scopeReadWrite
+		switch config.State {
+		case "":
+			// Fill in the scopes
+			if opt.ReadOnly {
+				oauthConfig.Scopes[scopeAccess] = scopeReadOnly
+			} else {
+				oauthConfig.Scopes[scopeAccess] = scopeReadWrite
+			}
+			return oauthutil.ConfigOut("warning", &oauthutil.Options{
+				OAuth2Config: oauthConfig,
+			})
+		case "warning":
+			// Warn the user as required by google photos integration
+			return fs.ConfigConfirm("warning_done", true, "config_warning", `Warning

IMPORTANT: All media items uploaded to Google Photos with rclone
are stored in full resolution at original quality. These uploads
will count towards storage in your Google Account.`)
+		case "warning_done":
+			return nil, nil
		}

-		// Do the oauth
-		err = oauthutil.Config(ctx, "google photos", name, m, oauthConfig, nil)
-		if err != nil {
-			golog.Fatalf("Failed to configure token: %v", err)
-		}
-
-		// Warn the user
-		fmt.Print(`
-*** IMPORTANT: All media items uploaded to Google Photos with rclone
-*** are stored in full resolution at original quality. These uploads
-*** will count towards storage in your Google Account.
-
-`)
+		return nil, fmt.Errorf("unknown state %q", config.State)
	},
	Options: append(oauthutil.SharedOptions, []fs.Option{{
		Name: "read_only",

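The Config changes in this and the surrounding backends all follow the same protocol: the function is called repeatedly with the current State and the user's previous Result, and returns either another question or nil when done. A minimal toy driver, with illustrative stand-in types rather than rclone's actual fs.ConfigIn/fs.ConfigOut:

package main

import "fmt"

type configIn struct{ State, Result string }
type configOut struct {
	State    string // next state to call back with
	Question string // what to ask the user
}

func photosConfig(in configIn) (*configOut, error) {
	switch in.State {
	case "":
		return &configOut{State: "warning", Question: "authorize rclone (oauth happens here)"}, nil
	case "warning":
		return &configOut{State: "warning_done", Question: "uploads count towards storage - continue?"}, nil
	case "warning_done":
		return nil, nil // all done
	}
	return nil, fmt.Errorf("unknown state %q", in.State)
}

func main() {
	// Drive the state machine as a config frontend would
	out, _ := photosConfig(configIn{State: ""})
	for out != nil {
		fmt.Println("ask:", out.Question)
		out, _ = photosConfig(configIn{State: out.State, Result: "y"})
	}
}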
@@ -37,7 +37,7 @@ func init() {
	Help: `Kerberos service principal name for the namenode

Enables KERBEROS authentication. Specifies the Service Principal Name
-(<SERVICE>/<FQDN>) for the namenode.`,
+(SERVICE/FQDN) for the namenode.`,
	Required: false,
	Examples: []fs.OptionExample{{
		Value: "hdfs/namenode.hadoop.docker",

@@ -47,7 +47,7 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
	ts := httptest.NewServer(handler)

	// Configure the remote
-	configfile.LoadConfig(context.Background())
+	configfile.Install()
	// fs.Config.LogLevel = fs.LogLevelDebug
	// fs.Config.DumpHeaders = true
	// fs.Config.DumpBodies = true

@@ -11,7 +11,6 @@ import (
	"encoding/json"
	"fmt"
	"io/ioutil"
-	"log"
	"net/http"
	"strings"
	"time"
@@ -56,11 +55,10 @@ func init() {
	Name:        "hubic",
	Description: "Hubic",
	NewFs:       NewFs,
-	Config: func(ctx context.Context, name string, m configmap.Mapper) {
-		err := oauthutil.Config(ctx, "hubic", name, m, oauthConfig, nil)
-		if err != nil {
-			log.Fatalf("Failed to configure token: %v", err)
-		}
+	Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
+		return oauthutil.ConfigOut("", &oauthutil.Options{
+			OAuth2Config: oauthConfig,
+		})
	},
	Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
})

@@ -10,7 +10,6 @@ import (
	"fmt"
	"io"
	"io/ioutil"
-	"log"
	"math/rand"
	"net/http"
	"net/url"
@@ -49,37 +48,29 @@ const (
	rootURL            = "https://jfs.jottacloud.com/jfs/"
	apiURL             = "https://api.jottacloud.com/"
	baseURL            = "https://www.jottacloud.com/"
-	defaultTokenURL    = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
	cachePrefix        = "rclone-jcmd5-"
	configDevice       = "device"
	configMountpoint   = "mountpoint"
	configTokenURL     = "tokenURL"
	configClientID     = "client_id"
	configClientSecret = "client_secret"
	configUsername     = "username"
	configVersion      = 1

-	v1tokenURL              = "https://api.jottacloud.com/auth/v1/token"
-	v1registerURL           = "https://api.jottacloud.com/auth/v1/register"
-	v1ClientID              = "nibfk8biu12ju7hpqomr8b1e40"
-	v1EncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
-	v1configVersion         = 0
+	defaultTokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
+	defaultClientID = "jottacli"

+	legacyTokenURL              = "https://api.jottacloud.com/auth/v1/token"
+	legacyRegisterURL           = "https://api.jottacloud.com/auth/v1/register"
+	legacyClientID              = "nibfk8biu12ju7hpqomr8b1e40"
+	legacyEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
+	legacyConfigVersion         = 0

	teliaCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
	teliaCloudAuthURL  = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
	teliaCloudClientID = "desktop"
)

var (
	// Description of how to auth for this app for a personal account
	oauthConfig = &oauth2.Config{
		Endpoint: oauth2.Endpoint{
			AuthURL:  defaultTokenURL,
			TokenURL: defaultTokenURL,
		},
		RedirectURL: oauthutil.RedirectLocalhostURL,
	}
)

// Register with Fs
func init() {
	// needs to be done early so we can use oauth during config
@@ -87,42 +78,7 @@ func init() {
	Name:        "jottacloud",
	Description: "Jottacloud",
	NewFs:       NewFs,
-	Config: func(ctx context.Context, name string, m configmap.Mapper) {
-		refresh := false
-		if version, ok := m.Get("configVersion"); ok {
-			ver, err := strconv.Atoi(version)
-			if err != nil {
-				log.Fatalf("Failed to parse config version - corrupted config")
-			}
-			refresh = (ver != configVersion) && (ver != v1configVersion)
-		}
-
-		if refresh {
-			fmt.Printf("Config outdated - refreshing\n")
-		} else {
-			tokenString, ok := m.Get("token")
-			if ok && tokenString != "" {
-				fmt.Printf("Already have a token - refresh?\n")
-				if !config.Confirm(false) {
-					return
-				}
-			}
-		}
-
-		fmt.Printf("Choose authentication type:\n" +
-			"1: Standard authentication - use this if you're a normal Jottacloud user.\n" +
-			"2: Legacy authentication - this is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n" +
-			"3: Telia Cloud authentication - use this if you are using Telia Cloud.\n")
-
-		switch config.ChooseNumber("Your choice", 1, 3) {
-		case 1:
-			v2config(ctx, name, m)
-		case 2:
-			v1config(ctx, name, m)
-		case 3:
-			teliaCloudConfig(ctx, name, m)
-		}
-	},
+	Config: Config,
	Options: []fs.Option{{
		Name: "md5_memory_limit",
		Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
@@ -143,6 +99,11 @@ func init() {
	Help:     "Files bigger than this can be resumed if the upload fails.",
	Default:  fs.SizeSuffix(10 * 1024 * 1024),
	Advanced: true,
}, {
+	Name:     "no_versions",
+	Help:     "Avoid server side versioning by deleting files and recreating files instead of overwriting them.",
+	Default:  false,
+	Advanced: true,
+}, {
	Name: config.ConfigEncoding,
	Help: config.ConfigEncodingHelp,
@@ -157,6 +118,183 @@ func init() {
	})
}

// Config runs the backend configuration protocol
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
	switch config.State {
	case "":
		return fs.ConfigChooseFixed("auth_type_done", "config_type", `Authentication type`, []fs.OptionExample{{
			Value: "standard",
			Help:  "Standard authentication - use this if you're a normal Jottacloud user.",
		}, {
			Value: "legacy",
			Help:  "Legacy authentication - this is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
		}, {
			Value: "telia",
			Help:  "Telia Cloud authentication - use this if you are using Telia Cloud.",
		}})
	case "auth_type_done":
		// Jump to next state according to config chosen
		return fs.ConfigGoto(config.Result)
	case "standard": // configure a jottacloud backend using the modern JottaCli token based authentication
		m.Set("configVersion", fmt.Sprint(configVersion))
		return fs.ConfigInput("standard_token", "config_login_token", "Personal login token.\n\nGenerate here: https://www.jottacloud.com/web/secure")
	case "standard_token":
		loginToken := config.Result
		m.Set(configClientID, defaultClientID)
		m.Set(configClientSecret, "")

		srv := rest.NewClient(fshttp.NewClient(ctx))
		token, tokenEndpoint, err := doTokenAuth(ctx, srv, loginToken)
		if err != nil {
			return nil, errors.Wrap(err, "failed to get oauth token")
		}
		m.Set(configTokenURL, tokenEndpoint)
		err = oauthutil.PutToken(name, m, &token, true)
		if err != nil {
			return nil, errors.Wrap(err, "error while saving token")
		}
		return fs.ConfigGoto("choose_device")
	case "legacy": // configure a jottacloud backend using legacy authentication
		m.Set("configVersion", fmt.Sprint(legacyConfigVersion))
		return fs.ConfigConfirm("legacy_api", false, "config_machine_specific", `Do you want to create a machine specific API key?

Rclone has its own Jottacloud API KEY which works fine as long as one
only uses rclone on a single machine. When you want to use rclone with
this account on more than one machine it's recommended to create a
machine specific API key. These keys can NOT be shared between
machines.`)
	case "legacy_api":
		srv := rest.NewClient(fshttp.NewClient(ctx))
		if config.Result == "true" {
			deviceRegistration, err := registerDevice(ctx, srv)
			if err != nil {
				return nil, errors.Wrap(err, "failed to register device")
			}
			m.Set(configClientID, deviceRegistration.ClientID)
			m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
			fs.Debugf(nil, "Got clientID %q and clientSecret %q", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
		}
		return fs.ConfigInput("legacy_username", "config_username", "Username (e-mail address)")
	case "legacy_username":
		m.Set(configUsername, config.Result)
		return fs.ConfigPassword("legacy_password", "config_password", "Password (only used in setup, will not be stored)")
	case "legacy_password":
		m.Set("password", config.Result)
		m.Set("auth_code", "")
		return fs.ConfigGoto("legacy_do_auth")
	case "legacy_auth_code":
		authCode := strings.Replace(config.Result, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
		m.Set("auth_code", authCode)
		return fs.ConfigGoto("legacy_do_auth")
	case "legacy_do_auth":
		username, _ := m.Get(configUsername)
		password, _ := m.Get("password")
		password = obscure.MustReveal(password)
		authCode, _ := m.Get("auth_code")

		srv := rest.NewClient(fshttp.NewClient(ctx))
		clientID, ok := m.Get(configClientID)
		if !ok {
			clientID = legacyClientID
		}
		clientSecret, ok := m.Get(configClientSecret)
		if !ok {
			clientSecret = legacyEncryptedClientSecret
		}

		oauthConfig := &oauth2.Config{
			Endpoint: oauth2.Endpoint{
				AuthURL: legacyTokenURL,
			},
			ClientID:     clientID,
			ClientSecret: obscure.MustReveal(clientSecret),
		}
		token, err := doLegacyAuth(ctx, srv, oauthConfig, username, password, authCode)
		if err == errAuthCodeRequired {
			return fs.ConfigInput("legacy_auth_code", "config_auth_code", "Verification Code\nThis account uses 2 factor authentication you will receive a verification code via SMS.")
		}
		m.Set("password", "")
		m.Set("auth_code", "")
		if err != nil {
			return nil, errors.Wrap(err, "failed to get oauth token")
		}
		err = oauthutil.PutToken(name, m, &token, true)
		if err != nil {
			return nil, errors.Wrap(err, "error while saving token")
		}
		return fs.ConfigGoto("choose_device")
	case "telia": // telia cloud config
		m.Set("configVersion", fmt.Sprint(configVersion))
		m.Set(configClientID, teliaCloudClientID)
		m.Set(configTokenURL, teliaCloudTokenURL)
		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
			OAuth2Config: &oauth2.Config{
				Endpoint: oauth2.Endpoint{
					AuthURL:  teliaCloudAuthURL,
					TokenURL: teliaCloudTokenURL,
				},
				ClientID:    teliaCloudClientID,
				Scopes:      []string{"openid", "jotta-default", "offline_access"},
				RedirectURL: oauthutil.RedirectLocalhostURL,
			},
		})
	case "choose_device":
		return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", "Use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?")
	case "choose_device_query":
		if config.Result != "true" {
			m.Set(configDevice, "")
			m.Set(configMountpoint, "")
			return fs.ConfigGoto("end")
		}
		oAuthClient, _, err := getOAuthClient(ctx, name, m)
		if err != nil {
			return nil, err
		}
		srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
		apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)

		cust, err := getCustomerInfo(ctx, apiSrv)
		if err != nil {
			return nil, err
		}
		m.Set(configUsername, cust.Username)

		acc, err := getDriveInfo(ctx, srv, cust.Username)
		if err != nil {
			return nil, err
		}
		return fs.ConfigChoose("choose_device_result", "config_device", `Please select the device to use. Normally this will be Jotta`, len(acc.Devices), func(i int) (string, string) {
			return acc.Devices[i].Name, ""
		})
	case "choose_device_result":
		device := config.Result
		m.Set(configDevice, device)

		oAuthClient, _, err := getOAuthClient(ctx, name, m)
		if err != nil {
			return nil, err
		}
		srv := rest.NewClient(oAuthClient).SetRoot(rootURL)

		username, _ := m.Get(configUsername)
		dev, err := getDeviceInfo(ctx, srv, path.Join(username, device))
		if err != nil {
			return nil, err
		}
		return fs.ConfigChoose("choose_device_mountpoint", "config_mountpoint", `Please select the mountpoint to use. Normally this will be Archive.`, len(dev.MountPoints), func(i int) (string, string) {
			return dev.MountPoints[i].Name, ""
		})
	case "choose_device_mountpoint":
		mountpoint := config.Result
		m.Set(configMountpoint, mountpoint)
		return fs.ConfigGoto("end")
	case "end":
		// All the config flows end up here in case we need to carry on with something
		return nil, nil
	}
	return nil, fmt.Errorf("unknown state %q", config.State)
}

// Options defines the configuration for this backend
type Options struct {
	Device string `config:"device"`
@@ -164,6 +302,7 @@ type Options struct {
	MD5MemoryThreshold fs.SizeSuffix        `config:"md5_memory_limit"`
	TrashedOnly        bool                 `config:"trashed_only"`
	HardDelete         bool                 `config:"hard_delete"`
+	NoVersions         bool                 `config:"no_versions"`
	UploadThreshold    fs.SizeSuffix        `config:"upload_resume_limit"`
	Enc                encoder.MultiEncoder `config:"encoding"`
}
@@ -217,10 +356,21 @@ func (f *Fs) Features() *fs.Features {
	return f.features
}

-// parsePath parses a box 'url'
-func parsePath(path string) (root string) {
-	root = strings.Trim(path, "/")
-	return
+// joinPath joins two path/url elements
+//
+// Does not perform clean on the result like path.Join does,
+// which breaks urls by changing prefix "https://" into "https:/".
+func joinPath(base string, rel string) string {
+	if rel == "" {
+		return base
+	}
+	if strings.HasSuffix(base, "/") {
+		return base + strings.TrimPrefix(rel, "/")
+	}
+	if strings.HasPrefix(rel, "/") {
+		return strings.TrimSuffix(base, "/") + rel
+	}
+	return base + "/" + rel
}

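Why joinPath exists is easy to demonstrate: path.Join runs Clean on its result, which collapses the double slash in the URL scheme. A quick standalone check of the function from the diff:

package main

import (
	"fmt"
	"path"
	"strings"
)

func joinPath(base string, rel string) string {
	if rel == "" {
		return base
	}
	if strings.HasSuffix(base, "/") {
		return base + strings.TrimPrefix(rel, "/")
	}
	if strings.HasPrefix(rel, "/") {
		return strings.TrimSuffix(base, "/") + rel
	}
	return base + "/" + rel
}

func main() {
	fmt.Println(path.Join("https://www.jottacloud.com/", "share/abc"))
	// https:/www.jottacloud.com/share/abc - scheme broken by Clean
	fmt.Println(joinPath("https://www.jottacloud.com/", "share/abc"))
	// https://www.jottacloud.com/share/abc - scheme preserved
}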
// retryErrorCodes is a slice of error codes that we will retry
@@ -242,110 +392,6 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

-func teliaCloudConfig(ctx context.Context, name string, m configmap.Mapper) {
-	teliaCloudOauthConfig := &oauth2.Config{
-		Endpoint: oauth2.Endpoint{
-			AuthURL:  teliaCloudAuthURL,
-			TokenURL: teliaCloudTokenURL,
-		},
-		ClientID:    teliaCloudClientID,
-		Scopes:      []string{"openid", "jotta-default", "offline_access"},
-		RedirectURL: oauthutil.RedirectLocalhostURL,
-	}
-
-	err := oauthutil.Config(ctx, "jottacloud", name, m, teliaCloudOauthConfig, nil)
-	if err != nil {
-		log.Fatalf("Failed to configure token: %v", err)
-		return
-	}
-
-	fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
-	if config.Confirm(false) {
-		oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, teliaCloudOauthConfig)
-		if err != nil {
-			log.Fatalf("Failed to load oAuthClient: %s", err)
-		}
-
-		srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
-		apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
-
-		device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
-		if err != nil {
-			log.Fatalf("Failed to setup mountpoint: %s", err)
-		}
-		m.Set(configDevice, device)
-		m.Set(configMountpoint, mountpoint)
-	}
-
-	m.Set("configVersion", strconv.Itoa(configVersion))
-	m.Set(configClientID, teliaCloudClientID)
-	m.Set(configTokenURL, teliaCloudTokenURL)
-}
-
-// v1config configure a jottacloud backend using legacy authentication
-func v1config(ctx context.Context, name string, m configmap.Mapper) {
-	srv := rest.NewClient(fshttp.NewClient(ctx))
-
-	fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
-	if config.Confirm(false) {
-		deviceRegistration, err := registerDevice(ctx, srv)
-		if err != nil {
-			log.Fatalf("Failed to register device: %v", err)
-		}
-
-		m.Set(configClientID, deviceRegistration.ClientID)
-		m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
-		fs.Debugf(nil, "Got clientID '%s' and clientSecret '%s'", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
-	}
-
-	clientID, ok := m.Get(configClientID)
-	if !ok {
-		clientID = v1ClientID
-	}
-	clientSecret, ok := m.Get(configClientSecret)
-	if !ok {
-		clientSecret = v1EncryptedClientSecret
-	}
-	oauthConfig.ClientID = clientID
-	oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
-
-	oauthConfig.Endpoint.AuthURL = v1tokenURL
-	oauthConfig.Endpoint.TokenURL = v1tokenURL
-
-	fmt.Printf("Username> ")
-	username := config.ReadLine()
-	password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.")
-
-	token, err := doAuthV1(ctx, srv, username, password)
-	if err != nil {
-		log.Fatalf("Failed to get oauth token: %s", err)
-	}
-	err = oauthutil.PutToken(name, m, &token, true)
-	if err != nil {
-		log.Fatalf("Error while saving token: %s", err)
-	}
-
-	fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
-	if config.Confirm(false) {
-		oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
-		if err != nil {
-			log.Fatalf("Failed to load oAuthClient: %s", err)
-		}
-
-		srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
-		apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
-
-		device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
-		if err != nil {
-			log.Fatalf("Failed to setup mountpoint: %s", err)
-		}
-		m.Set(configDevice, device)
-		m.Set(configMountpoint, mountpoint)
-	}
-
-	m.Set("configVersion", strconv.Itoa(v1configVersion))
-}

// registerDevice registers a new device for use with the jottacloud API
func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegistrationResponse, err error) {
	// random generator to generate random device names
@@ -364,7 +410,7 @@ func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegis

	opts := rest.Opts{
		Method:       "POST",
-		RootURL:      v1registerURL,
+		RootURL:      legacyRegisterURL,
		ContentType:  "application/x-www-form-urlencoded",
		ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
		Parameters:   values,
@@ -375,8 +421,13 @@ func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegis
	return deviceRegistration, err
}

-// doAuthV1 runs the actual token request for V1 authentication
-func doAuthV1(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
+var errAuthCodeRequired = errors.New("auth code required")
+
+// doLegacyAuth runs the actual token request for V1 authentication
+//
+// Call this first with blank authCode. If errAuthCodeRequired is
+// returned then call it again with an authCode
+func doLegacyAuth(ctx context.Context, srv *rest.Client, oauthConfig *oauth2.Config, username, password, authCode string) (token oauth2.Token, err error) {
	// prepare our token request with username and password
	values := url.Values{}
	values.Set("grant_type", "PASSWORD")
@@ -390,22 +441,19 @@ func doAuthV1(ctx context.Context, srv *rest.Client, username, password string)
		ContentType: "application/x-www-form-urlencoded",
		Parameters:  values,
	}
+	if authCode != "" {
+		opts.ExtraHeaders = make(map[string]string)
+		opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
+	}

	// do the first request
	var jsonToken api.TokenJSON
	resp, err := srv.CallJSON(ctx, &opts, nil, &jsonToken)
-	if err != nil {
+	if err != nil && authCode == "" {
		// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
		if resp != nil {
			if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
-				fmt.Printf("This account uses 2 factor authentication you will receive a verification code via SMS.\n")
-				fmt.Printf("Enter verification code> ")
-				authCode := config.ReadLine()
-
-				authCode = strings.Replace(authCode, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
-				opts.ExtraHeaders = make(map[string]string)
-				opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
-				_, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
+				return token, errAuthCodeRequired
			}
		}
	}
@@ -417,51 +465,11 @@ func doAuthV1(ctx context.Context, srv *rest.Client, username, password string)
	return token, err
}

-// v2config configure a jottacloud backend using the modern JottaCli token based authentication
-func v2config(ctx context.Context, name string, m configmap.Mapper) {
-	srv := rest.NewClient(fshttp.NewClient(ctx))
-
-	fmt.Printf("Generate a personal login token here: https://www.jottacloud.com/web/secure\n")
-	fmt.Printf("Login Token> ")
-	loginToken := config.ReadLine()
-
-	m.Set(configClientID, "jottacli")
-	m.Set(configClientSecret, "")
-
-	token, err := doAuthV2(ctx, srv, loginToken, m)
-	if err != nil {
-		log.Fatalf("Failed to get oauth token: %s", err)
-	}
-	err = oauthutil.PutToken(name, m, &token, true)
-	if err != nil {
-		log.Fatalf("Error while saving token: %s", err)
-	}
-
-	fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
-	if config.Confirm(false) {
-		oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
-		if err != nil {
-			log.Fatalf("Failed to load oAuthClient: %s", err)
-		}
-
-		srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
-		apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
-		device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
-		if err != nil {
-			log.Fatalf("Failed to setup mountpoint: %s", err)
-		}
-		m.Set(configDevice, device)
-		m.Set(configMountpoint, mountpoint)
-	}
-
-	m.Set("configVersion", strconv.Itoa(configVersion))
-}
-
-// doAuthV2 runs the actual token request for V2 authentication
-func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m configmap.Mapper) (token oauth2.Token, err error) {
+// doTokenAuth runs the actual token request for V2 authentication
+func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 string) (token oauth2.Token, tokenEndpoint string, err error) {
	loginTokenBytes, err := base64.RawURLEncoding.DecodeString(loginTokenBase64)
	if err != nil {
-		return token, err
+		return token, "", err
	}

	// decode login token
@@ -469,7 +477,7 @@ func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m
	decoder := json.NewDecoder(bytes.NewReader(loginTokenBytes))
	err = decoder.Decode(&loginToken)
	if err != nil {
-		return token, err
+		return token, "", err
	}

	// retrieve endpoint urls
@@ -478,19 +486,14 @@ func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m
		RootURL: loginToken.WellKnownLink,
	}
	var wellKnown api.WellKnown
-	_, err = srv.CallJSON(ctx, &opts, nil, &wellKnown)
+	_, err = apiSrv.CallJSON(ctx, &opts, nil, &wellKnown)
	if err != nil {
-		return token, err
+		return token, "", err
	}

-	// save the tokenurl
-	oauthConfig.Endpoint.AuthURL = wellKnown.TokenEndpoint
-	oauthConfig.Endpoint.TokenURL = wellKnown.TokenEndpoint
-	m.Set(configTokenURL, wellKnown.TokenEndpoint)

	// prepare our token request with username and password
	values := url.Values{}
-	values.Set("client_id", "jottacli")
+	values.Set("client_id", defaultClientID)
	values.Set("grant_type", "password")
	values.Set("password", loginToken.AuthToken)
	values.Set("scope", "offline_access+openid")
@@ -498,68 +501,33 @@ func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m
	values.Encode()
	opts = rest.Opts{
		Method:      "POST",
-		RootURL:     oauthConfig.Endpoint.AuthURL,
+		RootURL:     wellKnown.TokenEndpoint,
		ContentType: "application/x-www-form-urlencoded",
		Body:        strings.NewReader(values.Encode()),
	}

	// do the first request
	var jsonToken api.TokenJSON
-	_, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
+	_, err = apiSrv.CallJSON(ctx, &opts, nil, &jsonToken)
	if err != nil {
-		return token, err
+		return token, "", err
	}

	token.AccessToken = jsonToken.AccessToken
	token.RefreshToken = jsonToken.RefreshToken
	token.TokenType = jsonToken.TokenType
	token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)
-	return token, err
-}
-
-// setupMountpoint sets up a custom device and mountpoint if desired by the user
-func setupMountpoint(ctx context.Context, srv *rest.Client, apiSrv *rest.Client) (device, mountpoint string, err error) {
-	cust, err := getCustomerInfo(ctx, apiSrv)
-	if err != nil {
-		return "", "", err
-	}
-
-	acc, err := getDriveInfo(ctx, srv, cust.Username)
-	if err != nil {
-		return "", "", err
-	}
-	var deviceNames []string
-	for i := range acc.Devices {
-		deviceNames = append(deviceNames, acc.Devices[i].Name)
-	}
-	fmt.Printf("Please select the device to use. Normally this will be Jotta\n")
-	device = config.Choose("Devices", deviceNames, nil, false)
-
-	dev, err := getDeviceInfo(ctx, srv, path.Join(cust.Username, device))
-	if err != nil {
-		return "", "", err
-	}
-	if len(dev.MountPoints) == 0 {
-		return "", "", errors.New("no mountpoints for selected device")
-	}
-	var mountpointNames []string
-	for i := range dev.MountPoints {
-		mountpointNames = append(mountpointNames, dev.MountPoints[i].Name)
-	}
-	fmt.Printf("Please select the mountpoint to user. Normally this will be Archive\n")
-	mountpoint = config.Choose("Mountpoints", mountpointNames, nil, false)
-
-	return device, mountpoint, err
+	return token, wellKnown.TokenEndpoint, err
}

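A hedged sketch of the first step doTokenAuth performs: the personal login token pasted by the user is base64url (no padding) wrapped JSON carrying the auth token and a well-known OpenID endpoint link. The field names below mirror the obvious JSON keys and are illustrative, as is the token content.

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

type loginToken struct {
	Username      string `json:"username"`
	AuthToken     string `json:"auth_token"`
	WellKnownLink string `json:"well_known_link"`
}

func main() {
	// A fake token for illustration only
	raw := `{"username":"user","auth_token":"secret","well_known_link":"https://id.jottacloud.com/.well-known/openid-configuration"}`
	encoded := base64.RawURLEncoding.EncodeToString([]byte(raw))

	decoded, err := base64.RawURLEncoding.DecodeString(encoded)
	if err != nil {
		panic(err)
	}
	var lt loginToken
	if err := json.Unmarshal(decoded, &lt); err != nil {
		panic(err)
	}
	fmt.Println(lt.WellKnownLink) // where the token endpoint is then discovered
}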
// getCustomerInfo queries general information about the account
-func getCustomerInfo(ctx context.Context, srv *rest.Client) (info *api.CustomerInfo, err error) {
+func getCustomerInfo(ctx context.Context, apiSrv *rest.Client) (info *api.CustomerInfo, err error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   "account/v1/customer",
	}

-	_, err = srv.CallJSON(ctx, &opts, nil, &info)
+	_, err = apiSrv.CallJSON(ctx, &opts, nil, &info)
	if err != nil {
		return nil, errors.Wrap(err, "couldn't get customer info")
	}
@@ -676,7 +644,7 @@ func (f *Fs) filePath(file string) string {
// This filter catches all refresh requests, reads the body,
// changes the case and then sends it on
func grantTypeFilter(req *http.Request) {
-	if v1tokenURL == req.URL.String() {
+	if legacyTokenURL == req.URL.String() {
		// read the entire body
		refreshBody, err := ioutil.ReadAll(req.Body)
		if err != nil {
@@ -692,53 +660,50 @@ func grantTypeFilter(req *http.Request) {
	}
}

-// NewFs constructs an Fs from the path, container:path
-func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
-	// Parse config into Options struct
-	opt := new(Options)
-	err := configstruct.Set(m, opt)
-	if err != nil {
-		return nil, err
-	}
-
+func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuthClient *http.Client, ts *oauthutil.TokenSource, err error) {
	// Check config version
	var ver int
	version, ok := m.Get("configVersion")
	if ok {
		ver, err = strconv.Atoi(version)
		if err != nil {
-			return nil, errors.New("Failed to parse config version")
+			return nil, nil, errors.New("Failed to parse config version")
		}
-		ok = (ver == configVersion) || (ver == v1configVersion)
+		ok = (ver == configVersion) || (ver == legacyConfigVersion)
	}
	if !ok {
-		return nil, errors.New("Outdated config - please reconfigure this backend")
+		return nil, nil, errors.New("Outdated config - please reconfigure this backend")
	}

	baseClient := fshttp.NewClient(ctx)

+	oauthConfig := &oauth2.Config{
+		Endpoint: oauth2.Endpoint{
+			AuthURL:  defaultTokenURL,
+			TokenURL: defaultTokenURL,
+		},
+	}
	if ver == configVersion {
-		oauthConfig.ClientID = "jottacli"
+		oauthConfig.ClientID = defaultClientID
		// if custom endpoints are set use them else stick with defaults
		if tokenURL, ok := m.Get(configTokenURL); ok {
			oauthConfig.Endpoint.TokenURL = tokenURL
			// jottacloud is weird. we need to use the tokenURL as authURL
			oauthConfig.Endpoint.AuthURL = tokenURL
		}
-	} else if ver == v1configVersion {
+	} else if ver == legacyConfigVersion {
		clientID, ok := m.Get(configClientID)
		if !ok {
-			clientID = v1ClientID
+			clientID = legacyClientID
		}
		clientSecret, ok := m.Get(configClientSecret)
		if !ok {
-			clientSecret = v1EncryptedClientSecret
+			clientSecret = legacyEncryptedClientSecret
		}
		oauthConfig.ClientID = clientID
		oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)

-		oauthConfig.Endpoint.TokenURL = v1tokenURL
-		oauthConfig.Endpoint.AuthURL = v1tokenURL
+		oauthConfig.Endpoint.TokenURL = legacyTokenURL
+		oauthConfig.Endpoint.AuthURL = legacyTokenURL

		// add the request filter to fix token refresh
		if do, ok := baseClient.Transport.(interface {
@@ -751,13 +716,29 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	}

	// Create OAuth Client
-	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
+	oAuthClient, ts, err = oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
	if err != nil {
-		return nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
+		return nil, nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
	}
+	return oAuthClient, ts, nil
+}

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

+	oAuthClient, ts, err := getOAuthClient(ctx, name, m)
+	if err != nil {
+		return nil, err
+	}

	rootIsDir := strings.HasSuffix(root, "/")
-	root = parsePath(root)
+	root = strings.Trim(root, "/")

	f := &Fs{
		name: name,
@@ -1295,8 +1276,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
	if result.PublicSharePath == "" {
		return "", errors.New("couldn't create public link - no link path received")
	}
-	link = path.Join(baseURL, result.PublicSharePath)
-	return link, nil
+	return joinPath(baseURL, result.PublicSharePath), nil
}

// About gets quota information
@@ -1520,6 +1500,20 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+	if o.fs.opt.NoVersions {
+		err := o.readMetaData(ctx, false)
+		if err == nil {
+			// if the object exists delete it
+			err = o.remove(ctx, true)
+			if err != nil {
+				return errors.Wrap(err, "failed to remove old object")
+			}
+		}
+		// if the object does not exist we can just continue but if the error is something different we should report that
+		if err != nil && err != fs.ErrorObjectNotFound {
+			return err
+		}
+	}
	o.fs.tokenRenewer.Start()
	defer o.fs.tokenRenewer.Stop()
	size := src.Size()
@@ -1610,8 +1604,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	return nil
}

-// Remove an object
-func (o *Object) Remove(ctx context.Context) error {
+func (o *Object) remove(ctx context.Context, hard bool) error {
	opts := rest.Opts{
		Method: "POST",
		Path:   o.filePath(),
@@ -1619,7 +1612,7 @@ func (o *Object) Remove(ctx context.Context) error {
		NoResponse: true,
	}

-	if o.fs.opt.HardDelete {
+	if hard {
		opts.Parameters.Set("rm", "true")
	} else {
		opts.Parameters.Set("dl", "true")
@@ -1631,6 +1624,11 @@ func (o *Object) Remove(ctx context.Context) error {
	})
}

+// Remove an object
+func (o *Object) Remove(ctx context.Context) error {
+	return o.remove(ctx, o.fs.opt.HardDelete)
+}

// Check the interfaces are satisfied
var (
	_ fs.Fs = (*Fs)(nil)

@@ -534,7 +534,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
	return nil
}

-// About reports space usage (with a MB precision)
+// About reports space usage (with a MiB precision)
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	mount, err := f.client.MountsDetails(f.mountID)
	if err != nil {

@@ -27,6 +27,7 @@ import (
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/file"
	"github.com/rclone/rclone/lib/readers"
+	"golang.org/x/text/unicode/norm"
)

// Constants
@@ -73,25 +74,34 @@ points, as you explicitly acknowledge that they should be skipped.`,
	Advanced: true,
}, {
	Name: "zero_size_links",
-	Help: `Assume the Stat size of links is zero (and read them instead)
+	Help: `Assume the Stat size of links is zero (and read them instead) (Deprecated)

-On some virtual filesystems (such as LucidLink), reading a link size via a Stat call always returns 0.
-However, on unix it reads as the length of the text in the link. This may cause errors like this when
-syncing:
+Rclone used to use the Stat size of links as the link size, but this fails in quite a few places

-    Failed to copy: corrupted on transfer: sizes differ 0 vs 13
+- Windows
+- On some virtual filesystems (such as LucidLink)
+- Android

-Setting this flag causes rclone to read the link and use that as the size of the link
-instead of 0 which in most cases fixes the problem.`,
+So rclone now always reads the link
+`,
	Default:  false,
	Advanced: true,
}, {
-	Name: "no_unicode_normalization",
-	Help: `Don't apply unicode normalization to paths and filenames (Deprecated)
+	Name: "unicode_normalization",
+	Help: `Apply unicode NFC normalization to paths and filenames

-This flag is deprecated now. Rclone no longer normalizes unicode file
-names, but it compares them with unicode normalization in the sync
-routine instead.`,
+This flag can be used to normalize file names into unicode NFC form
+that are read from the local filesystem.

+Rclone does not normally touch the encoding of file names it reads from
+the file system.

+This can be useful when using macOS as it normally provides decomposed (NFD)
+unicode which in some language (eg Korean) doesn't display properly on
+some OSes.

+Note that rclone compares filenames with unicode normalization in the sync
+routine so this flag shouldn't normally be used.`,
	Default:  false,
	Advanced: true,
}, {
@@ -196,8 +206,7 @@ type Options struct {
	FollowSymlinks    bool `config:"copy_links"`
	TranslateSymlinks bool `config:"links"`
	SkipSymlinks      bool `config:"skip_links"`
-	ZeroSizeLinks     bool `config:"zero_size_links"`
-	NoUTFNorm         bool `config:"no_unicode_normalization"`
+	UTFNorm           bool `config:"unicode_normalization"`
	NoCheckUpdated    bool `config:"no_check_updated"`
	NoUNC             bool `config:"nounc"`
	OneFileSystem     bool `config:"one_file_system"`
@@ -256,10 +265,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		return nil, errLinksAndCopyLinks
	}

-	if opt.NoUTFNorm {
-		fs.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
-	}

	f := &Fs{
		name: name,
		opt:  *opt,
@@ -462,6 +467,10 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
	for _, name := range names {
		namepath := filepath.Join(fsDirPath, name)
		fi, fierr := os.Lstat(namepath)
+		if os.IsNotExist(fierr) {
+			// skip entry removed by a concurrent goroutine
+			continue
+		}
		if fierr != nil {
			err = errors.Wrapf(err, "failed to read directory %q", namepath)
			fs.Errorf(dir, "%v", fierr)
@@ -522,6 +531,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}

func (f *Fs) cleanRemote(dir, filename string) (remote string) {
+	if f.opt.UTFNorm {
+		filename = norm.NFC.String(filename)
+	}
	remote = path.Join(dir, f.opt.Enc.ToStandardName(filename))

	if !utf8.ValidString(filename) {
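What norm.NFC.String does in cleanRemote is easy to demonstrate: macOS commonly reports decomposed (NFD) file names, and NFC recomposes them into the single precomposed code point. A standalone sketch using golang.org/x/text:

package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	nfd := "e\u0301" // "e" + combining acute accent, as macOS may report it
	nfc := norm.NFC.String(nfd)
	fmt.Println(len(nfd), len(nfc)) // 3 2 - recomposed to a single é
	fmt.Println(nfc == "\u00e9")    // true
}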
@@ -1267,9 +1279,13 @@ func (o *Object) setMetadata(info os.FileInfo) {
	o.modTime = info.ModTime()
	o.mode = info.Mode()
	o.fs.objectMetaMu.Unlock()
-	// On Windows links read as 0 size so set the correct size here
-	// Optionally, users can turn this feature on with the zero_size_links flag
-	if (runtime.GOOS == "windows" || o.fs.opt.ZeroSizeLinks) && o.translatedLink {
+	// Read the size of the link.
+	//
+	// The value in info.Size() is not always correct
+	// - Windows links read as 0 size
+	// - Some virtual filesystems (such as LucidLink) links read as 0 size
+	// - Android - some versions the links are larger than readlink suggests
+	if o.translatedLink {
		linkdst, err := os.Readlink(o.path)
		if err != nil {
			fs.Errorf(o, "Failed to read link size: %v", err)

@@ -6,8 +6,8 @@ import (
	"bufio"
	"bytes"
	"encoding/binary"
+	"fmt"
	"io"
-	"log"
	"time"

	"github.com/pkg/errors"
@@ -48,7 +48,7 @@ func (w *BinWriter) Reader() io.Reader {

// WritePu16 writes a short as unsigned varint
func (w *BinWriter) WritePu16(val int) {
	if val < 0 || val > 65535 {
-		log.Fatalf("Invalid UInt16 %v", val)
+		panic(fmt.Sprintf("Invalid UInt16 %v", val))
	}
	w.WritePu64(int64(val))
}
@@ -56,7 +56,7 @@ func (w *BinWriter) WritePu16(val int) {
// WritePu32 writes a signed long as unsigned varint
func (w *BinWriter) WritePu32(val int64) {
	if val < 0 || val > 4294967295 {
-		log.Fatalf("Invalid UInt32 %v", val)
+		panic(fmt.Sprintf("Invalid UInt32 %v", val))
	}
	w.WritePu64(val)
}
@@ -64,7 +64,7 @@ func (w *BinWriter) WritePu32(val int64) {
// WritePu64 writes an unsigned (actually, signed) long as unsigned varint
func (w *BinWriter) WritePu64(val int64) {
	if val < 0 {
-		log.Fatalf("Invalid UInt64 %v", val)
+		panic(fmt.Sprintf("Invalid UInt64 %v", val))
	}
	w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
}
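A quick illustration of the unsigned varint encoding WritePu64 emits, using only the standard library: values are written as little-endian base-128 groups with a continuation bit.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	var a [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(a[:], 300)
	fmt.Printf("%x\n", a[:n]) // ac02 - 300 encoded in two bytes
}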
@@ -123,7 +123,7 @@ func (r *BinReader) check(err error) bool {
		r.err = err
	}
	if err != io.EOF {
-		log.Fatalf("Error parsing response: %v", err)
+		panic(fmt.Sprintf("Error parsing response: %v", err))
	}
	return false
}

@@ -80,7 +80,7 @@ var oauthConfig = &oauth2.Config{

// Register with Fs
func init() {
-	MrHashType = hash.RegisterHash("MailruHash", 40, mrhash.New)
+	MrHashType = hash.RegisterHash("mailru", "MailruHash", 40, mrhash.New)
	fs.Register(&fs.RegInfo{
		Name:        "mailru",
		Description: "Mail.ru Cloud",

@@ -9,7 +9,6 @@ import (
	"encoding/json"
	"fmt"
	"io"
-	"log"
	"net/http"
	"net/url"
	"path"
@@ -52,8 +51,8 @@ const (
	driveTypePersonal   = "personal"
	driveTypeBusiness   = "business"
	driveTypeSharepoint = "documentLibrary"
-	defaultChunkSize    = 10 * fs.MebiByte
-	chunkSizeMultiple   = 320 * fs.KibiByte
+	defaultChunkSize    = 10 * fs.Mebi
+	chunkSizeMultiple   = 320 * fs.Kibi

	regionGlobal = "global"
	regionUS     = "us"
@@ -94,215 +93,12 @@ var (

// Register with Fs
func init() {
-	QuickXorHashType = hash.RegisterHash("QuickXorHash", 40, quickxorhash.New)
+	QuickXorHashType = hash.RegisterHash("quickxor", "QuickXorHash", 40, quickxorhash.New)
	fs.Register(&fs.RegInfo{
		Name:        "onedrive",
		Description: "Microsoft OneDrive",
		NewFs:       NewFs,
-		Config: func(ctx context.Context, name string, m configmap.Mapper) {
-			region, _ := m.Get("region")
-			graphURL := graphAPIEndpoint[region] + "/v1.0"
-			oauthConfig.Endpoint = oauth2.Endpoint{
-				AuthURL:  authEndpoint[region] + authPath,
-				TokenURL: authEndpoint[region] + tokenPath,
-			}
-			ci := fs.GetConfig(ctx)
-			err := oauthutil.Config(ctx, "onedrive", name, m, oauthConfig, nil)
-			if err != nil {
-				log.Fatalf("Failed to configure token: %v", err)
-				return
-			}
-
-			// Stop if we are running non-interactive config
-			if ci.AutoConfirm {
-				return
-			}
-
-			type driveResource struct {
-				DriveID   string `json:"id"`
-				DriveName string `json:"name"`
-				DriveType string `json:"driveType"`
-			}
-			type drivesResponse struct {
-				Drives []driveResource `json:"value"`
-			}
-
-			type siteResource struct {
-				SiteID   string `json:"id"`
-				SiteName string `json:"displayName"`
-				SiteURL  string `json:"webUrl"`
-			}
-			type siteResponse struct {
-				Sites []siteResource `json:"value"`
-			}
-
-			oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
-			if err != nil {
-				log.Fatalf("Failed to configure OneDrive: %v", err)
-			}
-			srv := rest.NewClient(oAuthClient)
-
-			var opts rest.Opts
-			var finalDriveID string
-			var siteID string
-			var relativePath string
-			switch config.Choose("Your choice",
-				[]string{"onedrive", "sharepoint", "url", "search", "driveid", "siteid", "path"},
-				[]string{
-					"OneDrive Personal or Business",
-					"Root Sharepoint site",
-					"Sharepoint site name or URL (e.g. mysite or https://contoso.sharepoint.com/sites/mysite)",
-					"Search for a Sharepoint site",
-					"Type in driveID (advanced)",
-					"Type in SiteID (advanced)",
-					"Sharepoint server-relative path (advanced, e.g. /teams/hr)",
-				},
-				false) {
-
-			case "onedrive":
-				opts = rest.Opts{
-					Method:  "GET",
-					RootURL: graphURL,
-					Path:    "/me/drives",
-				}
-			case "sharepoint":
-				opts = rest.Opts{
-					Method:  "GET",
-					RootURL: graphURL,
-					Path:    "/sites/root/drives",
-				}
-			case "driveid":
-				fmt.Printf("Paste your Drive ID here> ")
-				finalDriveID = config.ReadLine()
-			case "siteid":
-				fmt.Printf("Paste your Site ID here> ")
-				siteID = config.ReadLine()
-			case "url":
-				fmt.Println("Example: \"https://contoso.sharepoint.com/sites/mysite\" or \"mysite\"")
-				fmt.Printf("Paste your Site URL here> ")
-				siteURL := config.ReadLine()
-				re := regexp.MustCompile(`https://.*\.sharepoint.com/sites/(.*)`)
-				match := re.FindStringSubmatch(siteURL)
-				if len(match) == 2 {
-					relativePath = "/sites/" + match[1]
-				} else {
-					relativePath = "/sites/" + siteURL
-				}
-			case "path":
-				fmt.Printf("Enter server-relative URL here> ")
-				relativePath = config.ReadLine()
-			case "search":
-				fmt.Printf("What to search for> ")
-				searchTerm := config.ReadLine()
-				opts = rest.Opts{
-					Method:  "GET",
-					RootURL: graphURL,
-					Path:    "/sites?search=" + searchTerm,
-				}
-
-				sites := siteResponse{}
-				_, err := srv.CallJSON(ctx, &opts, nil, &sites)
-				if err != nil {
-					log.Fatalf("Failed to query available sites: %v", err)
-				}
-
-				if len(sites.Sites) == 0 {
-					log.Fatalf("Search for '%s' returned no results", searchTerm)
-				} else {
-					fmt.Printf("Found %d sites, please select the one you want to use:\n", len(sites.Sites))
-					for index, site := range sites.Sites {
-						fmt.Printf("%d: %s (%s) id=%s\n", index, site.SiteName, site.SiteURL, site.SiteID)
-					}
-					siteID = sites.Sites[config.ChooseNumber("Chose drive to use:", 0, len(sites.Sites)-1)].SiteID
-				}
-			}
// if we use server-relative URL for finding the drive
|
||||
if relativePath != "" {
|
||||
opts = rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: graphURL,
|
||||
Path: "/sites/root:" + relativePath,
|
||||
}
|
||||
site := siteResource{}
|
||||
_, err := srv.CallJSON(ctx, &opts, nil, &site)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to query available site by relative path: %v", err)
|
||||
}
|
||||
siteID = site.SiteID
|
||||
}
|
||||
|
||||
// if we have a siteID we need to ask for the drives
|
||||
if siteID != "" {
|
||||
opts = rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: graphURL,
|
||||
Path: "/sites/" + siteID + "/drives",
|
||||
}
|
||||
}
|
||||
|
||||
// We don't have the final ID yet?
|
||||
// query Microsoft Graph
|
||||
if finalDriveID == "" {
|
||||
drives := drivesResponse{}
|
||||
_, err := srv.CallJSON(ctx, &opts, nil, &drives)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to query available drives: %v", err)
|
||||
}
|
||||
|
||||
// Also call /me/drive as sometimes /me/drives doesn't return it #4068
|
||||
if opts.Path == "/me/drives" {
|
||||
opts.Path = "/me/drive"
|
||||
meDrive := driveResource{}
|
||||
_, err := srv.CallJSON(ctx, &opts, nil, &meDrive)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to query available drives: %v", err)
|
||||
}
|
||||
found := false
|
||||
for _, drive := range drives.Drives {
|
||||
if drive.DriveID == meDrive.DriveID {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
// add the me drive if not found already
|
||||
if !found {
|
||||
fs.Debugf(nil, "Adding %v to drives list from /me/drive", meDrive)
|
||||
drives.Drives = append(drives.Drives, meDrive)
|
||||
}
|
||||
}
|
||||
|
||||
if len(drives.Drives) == 0 {
|
||||
log.Fatalf("No drives found")
|
||||
} else {
|
||||
fmt.Printf("Found %d drives, please select the one you want to use:\n", len(drives.Drives))
|
||||
for index, drive := range drives.Drives {
|
||||
fmt.Printf("%d: %s (%s) id=%s\n", index, drive.DriveName, drive.DriveType, drive.DriveID)
|
||||
}
|
||||
finalDriveID = drives.Drives[config.ChooseNumber("Chose drive to use:", 0, len(drives.Drives)-1)].DriveID
|
||||
}
|
||||
}
|
||||
|
||||
// Test the driveID and get drive type
|
||||
opts = rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: graphURL,
|
||||
Path: "/drives/" + finalDriveID + "/root"}
|
||||
var rootItem api.Item
|
||||
_, err = srv.CallJSON(ctx, &opts, nil, &rootItem)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to query root for drive %s: %v", finalDriveID, err)
|
||||
}
|
||||
|
||||
fmt.Printf("Found drive '%s' of type '%s', URL: %s\nIs that okay?\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL)
|
||||
// This does not work, YET :)
|
||||
if !config.ConfirmWithConfig(ctx, m, "config_drive_ok", true) {
|
||||
log.Fatalf("Cancelled by user")
|
||||
}
|
||||
|
||||
m.Set(configDriveID, finalDriveID)
|
||||
m.Set(configDriveType, rootItem.ParentReference.DriveType)
|
||||
},
|
||||
Config: Config,
|
||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||
Name: "region",
|
||||
Help: "Choose national cloud region for OneDrive.",
|
||||
@@ -361,6 +157,11 @@ This will only work if you are copying between two OneDrive *Personal* drives AN
 the files to copy are already shared between them. In other cases, rclone will
 fall back to normal copy (which will be slightly slower).`,
 			Advanced: true,
 		}, {
+			Name:     "list_chunk",
+			Help:     "Size of listing chunk.",
+			Default:  1000,
+			Advanced: true,
+		}, {
 			Name:    "no_versions",
 			Default: false,
@@ -460,6 +261,266 @@ At the time of writing this only works with OneDrive personal paid accounts.
 	})
 }

+type driveResource struct {
+	DriveID   string `json:"id"`
+	DriveName string `json:"name"`
+	DriveType string `json:"driveType"`
+}
+type drivesResponse struct {
+	Drives []driveResource `json:"value"`
+}
+
+type siteResource struct {
+	SiteID   string `json:"id"`
+	SiteName string `json:"displayName"`
+	SiteURL  string `json:"webUrl"`
+}
+type siteResponse struct {
+	Sites []siteResource `json:"value"`
+}
+
+// Get the region and graphURL from the config
+func getRegionURL(m configmap.Mapper) (region, graphURL string) {
+	region, _ = m.Get("region")
+	graphURL = graphAPIEndpoint[region] + "/v1.0"
+	return region, graphURL
+}
+
+// Config for chooseDrive
+type chooseDriveOpt struct {
+	opts         rest.Opts
+	finalDriveID string
+	siteID       string
+	relativePath string
+}
+
+// chooseDrive returns a query to choose which drive the user is interested in
+func chooseDrive(ctx context.Context, name string, m configmap.Mapper, srv *rest.Client, opt chooseDriveOpt) (*fs.ConfigOut, error) {
+	_, graphURL := getRegionURL(m)
+
+	// if we use server-relative URL for finding the drive
+	if opt.relativePath != "" {
+		opt.opts = rest.Opts{
+			Method:  "GET",
+			RootURL: graphURL,
+			Path:    "/sites/root:" + opt.relativePath,
+		}
+		site := siteResource{}
+		_, err := srv.CallJSON(ctx, &opt.opts, nil, &site)
+		if err != nil {
+			return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available site by relative path: %v", err))
+		}
+		opt.siteID = site.SiteID
+	}
+
+	// if we have a siteID we need to ask for the drives
+	if opt.siteID != "" {
+		opt.opts = rest.Opts{
+			Method:  "GET",
+			RootURL: graphURL,
+			Path:    "/sites/" + opt.siteID + "/drives",
+		}
+	}
+
+	drives := drivesResponse{}
+
+	// We don't have the final ID yet?
+	// query Microsoft Graph
+	if opt.finalDriveID == "" {
+		_, err := srv.CallJSON(ctx, &opt.opts, nil, &drives)
+		if err != nil {
+			return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available drives: %v", err))
+		}
+
+		// Also call /me/drive as sometimes /me/drives doesn't return it #4068
+		if opt.opts.Path == "/me/drives" {
+			opt.opts.Path = "/me/drive"
+			meDrive := driveResource{}
+			_, err := srv.CallJSON(ctx, &opt.opts, nil, &meDrive)
+			if err != nil {
+				return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available drives: %v", err))
+			}
+			found := false
+			for _, drive := range drives.Drives {
+				if drive.DriveID == meDrive.DriveID {
+					found = true
+					break
+				}
+			}
+			// add the me drive if not found already
+			if !found {
+				fs.Debugf(nil, "Adding %v to drives list from /me/drive", meDrive)
+				drives.Drives = append(drives.Drives, meDrive)
+			}
+		}
+	} else {
+		drives.Drives = append(drives.Drives, driveResource{
+			DriveID:   opt.finalDriveID,
+			DriveName: "Chosen Drive ID",
+			DriveType: "drive",
+		})
+	}
+	if len(drives.Drives) == 0 {
+		return fs.ConfigError("choose_type", "No drives found")
+	}
+	return fs.ConfigChoose("driveid_final", "config_driveid", "Select drive you want to use", len(drives.Drives), func(i int) (string, string) {
+		drive := drives.Drives[i]
+		return drive.DriveID, fmt.Sprintf("%s (%s)", drive.DriveName, drive.DriveType)
+	})
+}
+
+// Config the backend
+func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
+	region, graphURL := getRegionURL(m)
+
+	if config.State == "" {
+		oauthConfig.Endpoint = oauth2.Endpoint{
+			AuthURL:  authEndpoint[region] + authPath,
+			TokenURL: authEndpoint[region] + tokenPath,
+		}
+		return oauthutil.ConfigOut("choose_type", &oauthutil.Options{
+			OAuth2Config: oauthConfig,
+		})
+	}
+
+	oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to configure OneDrive")
+	}
+	srv := rest.NewClient(oAuthClient)
+
+	switch config.State {
+	case "choose_type":
+		return fs.ConfigChooseFixed("choose_type_done", "config_type", "Type of connection", []fs.OptionExample{{
+			Value: "onedrive",
+			Help:  "OneDrive Personal or Business",
+		}, {
+			Value: "sharepoint",
+			Help:  "Root Sharepoint site",
+		}, {
+			Value: "url",
+			Help:  "Sharepoint site name or URL (e.g. mysite or https://contoso.sharepoint.com/sites/mysite)",
+		}, {
+			Value: "search",
+			Help:  "Search for a Sharepoint site",
+		}, {
+			Value: "driveid",
+			Help:  "Type in driveID (advanced)",
+		}, {
+			Value: "siteid",
+			Help:  "Type in SiteID (advanced)",
+		}, {
+			Value: "path",
+			Help:  "Sharepoint server-relative path (advanced, e.g. /teams/hr)",
+		}})
+	case "choose_type_done":
+		// Jump to next state according to config chosen
+		return fs.ConfigGoto(config.Result)
+	case "onedrive":
+		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
+			opts: rest.Opts{
+				Method:  "GET",
+				RootURL: graphURL,
+				Path:    "/me/drives",
+			},
+		})
+	case "sharepoint":
+		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
+			opts: rest.Opts{
+				Method:  "GET",
+				RootURL: graphURL,
+				Path:    "/sites/root/drives",
+			},
+		})
+	case "driveid":
+		return fs.ConfigInput("driveid_end", "config_driveid_fixed", "Drive ID")
+	case "driveid_end":
+		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
+			finalDriveID: config.Result,
+		})
+	case "siteid":
+		return fs.ConfigInput("siteid_end", "config_siteid", "Site ID")
+	case "siteid_end":
+		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
+			siteID: config.Result,
+		})
+	case "url":
+		return fs.ConfigInput("url_end", "config_site_url", `Site URL
+
+Example: "https://contoso.sharepoint.com/sites/mysite" or "mysite"
+`)
+	case "url_end":
+		siteURL := config.Result
+		re := regexp.MustCompile(`https://.*\.sharepoint.com/sites/(.*)`)
+		match := re.FindStringSubmatch(siteURL)
+		if len(match) == 2 {
+			return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
+				relativePath: "/sites/" + match[1],
+			})
+		}
+		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
+			relativePath: "/sites/" + siteURL,
+		})
+	case "path":
+		return fs.ConfigInput("path_end", "config_sharepoint_url", `Server-relative URL`)
+	case "path_end":
+		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
+			relativePath: config.Result,
+		})
+	case "search":
+		return fs.ConfigInput("search_end", "config_search_term", `Search term`)
+	case "search_end":
+		searchTerm := config.Result
+		opts := rest.Opts{
+			Method:  "GET",
+			RootURL: graphURL,
+			Path:    "/sites?search=" + searchTerm,
+		}
+
+		sites := siteResponse{}
+		_, err := srv.CallJSON(ctx, &opts, nil, &sites)
+		if err != nil {
+			return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available sites: %v", err))
+		}
+
+		if len(sites.Sites) == 0 {
+			return fs.ConfigError("choose_type", fmt.Sprintf("search for %q returned no results", searchTerm))
+		}
+		return fs.ConfigChoose("search_sites", "config_site", `Select the Site you want to use`, len(sites.Sites), func(i int) (string, string) {
+			site := sites.Sites[i]
+			return site.SiteID, fmt.Sprintf("%s (%s)", site.SiteName, site.SiteURL)
+		})
+	case "search_sites":
+		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
+			siteID: config.Result,
+		})
+	case "driveid_final":
+		finalDriveID := config.Result
+
+		// Test the driveID and get drive type
+		opts := rest.Opts{
+			Method:  "GET",
+			RootURL: graphURL,
+			Path:    "/drives/" + finalDriveID + "/root"}
+		var rootItem api.Item
+		_, err = srv.CallJSON(ctx, &opts, nil, &rootItem)
+		if err != nil {
+			return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query root for drive %q: %v", finalDriveID, err))
+		}
+
+		m.Set(configDriveID, finalDriveID)
+		m.Set(configDriveType, rootItem.ParentReference.DriveType)
+
+		return fs.ConfigConfirm("driveid_final_end", true, "config_drive_ok", fmt.Sprintf("Drive OK?\n\nFound drive %q of type %q\nURL: %s\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL))
+	case "driveid_final_end":
+		if config.Result == "true" {
+			return nil, nil
+		}
+		return fs.ConfigGoto("choose_type")
+	}
+	return nil, fmt.Errorf("unknown state %q", config.State)
+}

 // Options defines the configuration for this backend
 type Options struct {
 	Region string `config:"region"`
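The rewritten Config above follows rclone's state-machine convention: each call receives the previous state and answer in fs.ConfigIn, returns the next question as an *fs.ConfigOut, and finishes by returning nil, nil. A minimal sketch of the pattern (state and option names here are illustrative, not from the rclone source):

    func ExampleConfig(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
        switch config.State {
        case "": // first call - ask the question
            return fs.ConfigInput("thing_done", "config_thing", "Enter a thing")
        case "thing_done": // called again with the answer in config.Result
            m.Set("thing", config.Result)
            return nil, nil // nil, nil terminates the flow
        }
        return nil, fmt.Errorf("unknown state %q", config.State)
    }
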
@@ -468,6 +529,7 @@ type Options struct {
 	DriveType               string `config:"drive_type"`
 	ExposeOneNoteFiles      bool   `config:"expose_onenote_files"`
 	ServerSideAcrossConfigs bool   `config:"server_side_across_configs"`
+	ListChunk               int64  `config:"list_chunk"`
 	NoVersions              bool   `config:"no_versions"`
 	LinkScope               string `config:"link_scope"`
 	LinkType                string `config:"link_type"`
@@ -560,6 +622,9 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
 		if len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
 			retry = true
 			fs.Debugf(nil, "Should retry: %v", err)
+		} else if err != nil && strings.Contains(err.Error(), "Unable to initialize RPS") {
+			retry = true
+			fs.Debugf(nil, "HTTP 401: Unable to initialize RPS. Trying again.")
 		}
 	case 429: // Too Many Requests.
 		// see https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online
@@ -687,7 +752,7 @@ func errorHandler(resp *http.Response) error {
 }

 func checkUploadChunkSize(cs fs.SizeSuffix) error {
-	const minChunkSize = fs.Byte
+	const minChunkSize = fs.SizeSuffixBase
 	if cs%chunkSizeMultiple != 0 {
 		return errors.Errorf("%s is not a multiple of %s", cs, chunkSizeMultiple)
 	}
@@ -896,7 +961,7 @@ type listAllFn func(*api.Item) bool
 func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
 	// Top parameter asks for bigger pages of data
 	// https://dev.onedrive.com/odata/optional-query-parameters.htm
-	opts := f.newOptsCall(dirID, "GET", "/children?$top=1000")
+	opts := f.newOptsCall(dirID, "GET", fmt.Sprintf("/children?$top=%d", f.opt.ListChunk))
 OUTER:
 	for {
 		var result api.ListChildrenResponse
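The listing page size now comes from the list_chunk option added above instead of being hard-coded to 1000. Following rclone's usual mapping of backend options to flags, it should be settable per invocation, e.g. (remote name illustrative):

    rclone lsf onedrive: --onedrive-list-chunk 500
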
@@ -1423,7 +1488,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 		Password: f.opt.LinkPassword,
 	}

-	if expire < fs.Duration(time.Hour*24*365*100) {
+	if expire < fs.DurationOff {
 		expiry := time.Now().Add(time.Duration(expire))
 		share.Expiry = &expiry
 	}
@@ -1435,10 +1500,85 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		fmt.Println(err)
+		if resp != nil && resp.StatusCode == 400 && f.driveType != driveTypePersonal {
+			return "", errors.Errorf("%v (is making public links permitted by the org admin?)", err)
+		}
 		return "", err
 	}
-	return result.Link.WebURL, nil
+
+	shareURL := result.Link.WebURL
+
+	// Convert share link to direct download link if target is not a folder
+	// Not attempting to do the conversion for regional versions, just to be safe
+	if f.opt.Region != regionGlobal {
+		return shareURL, nil
+	}
+	if info.Folder != nil {
+		fs.Debugf(nil, "Can't convert share link for folder to direct link - returning the link as is")
+		return shareURL, nil
+	}
+
+	cnvFailMsg := "Don't know how to convert share link to direct link - returning the link as is"
+	directURL := ""
+	segments := strings.Split(shareURL, "/")
+	switch f.driveType {
+	case driveTypePersonal:
+		// Method: https://stackoverflow.com/questions/37951114/direct-download-link-to-onedrive-file
+		if len(segments) != 5 {
+			fs.Logf(f, cnvFailMsg)
+			return shareURL, nil
+		}
+		enc := base64.StdEncoding.EncodeToString([]byte(shareURL))
+		enc = strings.ReplaceAll(enc, "/", "_")
+		enc = strings.ReplaceAll(enc, "+", "-")
+		enc = strings.ReplaceAll(enc, "=", "")
+		directURL = fmt.Sprintf("https://api.onedrive.com/v1.0/shares/u!%s/root/content", enc)
+	case driveTypeBusiness:
+		// Method: https://docs.microsoft.com/en-us/sharepoint/dev/spfx/shorter-share-link-format
+		// Example:
+		//   https://{tenant}-my.sharepoint.com/:t:/g/personal/{user_email}/{Opaque_String}
+		//   --convert to->
+		//   https://{tenant}-my.sharepoint.com/personal/{user_email}/_layouts/15/download.aspx?share={Opaque_String}
+		if len(segments) != 8 {
+			fs.Logf(f, cnvFailMsg)
+			return shareURL, nil
+		}
+		directURL = fmt.Sprintf("https://%s/%s/%s/_layouts/15/download.aspx?share=%s",
+			segments[2], segments[5], segments[6], segments[7])
+	case driveTypeSharepoint:
+		// Method: Similar to driveTypeBusiness
+		// Example:
+		//   https://{tenant}.sharepoint.com/:t:/s/{site_name}/{Opaque_String}
+		//   --convert to->
+		//   https://{tenant}.sharepoint.com/sites/{site_name}/_layouts/15/download.aspx?share={Opaque_String}
+		//
+		//   https://{tenant}.sharepoint.com/:t:/t/{team_name}/{Opaque_String}
+		//   --convert to->
+		//   https://{tenant}.sharepoint.com/teams/{team_name}/_layouts/15/download.aspx?share={Opaque_String}
+		//
+		//   https://{tenant}.sharepoint.com/:t:/g/{Opaque_String}
+		//   --convert to->
+		//   https://{tenant}.sharepoint.com/_layouts/15/download.aspx?share={Opaque_String}
+		if len(segments) < 6 || len(segments) > 7 {
+			fs.Logf(f, cnvFailMsg)
+			return shareURL, nil
+		}
+		pathPrefix := ""
+		switch segments[4] {
+		case "s": // Site
+			pathPrefix = "/sites/" + segments[5]
+		case "t": // Team
+			pathPrefix = "/teams/" + segments[5]
+		case "g": // Root site
+		default:
+			fs.Logf(f, cnvFailMsg)
+			return shareURL, nil
+		}
+		directURL = fmt.Sprintf("https://%s%s/_layouts/15/download.aspx?share=%s",
+			segments[2], pathPrefix, segments[len(segments)-1])
+	}
+
+	return directURL, nil
 }

 // CleanUp deletes all the hidden files.
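A self-contained sketch of the personal-drive conversion above: the share URL is base64 encoded, made URL-safe, stripped of padding, prefixed with "u!" and wrapped in the shares API path (the sample URL below is illustrative only):

    package main

    import (
        "encoding/base64"
        "fmt"
        "strings"
    )

    func directLink(shareURL string) string {
        enc := base64.StdEncoding.EncodeToString([]byte(shareURL))
        enc = strings.ReplaceAll(enc, "/", "_")
        enc = strings.ReplaceAll(enc, "+", "-")
        enc = strings.ReplaceAll(enc, "=", "")
        return fmt.Sprintf("https://api.onedrive.com/v1.0/shares/u!%s/root/content", enc)
    }

    func main() {
        fmt.Println(directLink("https://1drv.ms/u/s!AexampleOpaqueString"))
    }
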
@@ -1851,7 +1991,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
 		fs.Debugf(o, "Cancelling multipart upload: %v", err)
 		cancelErr := o.cancelUploadSession(ctx, uploadURL)
 		if cancelErr != nil {
-			fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
+			fs.Logf(o, "Failed to cancel multipart upload: %v (upload failed due to: %v)", cancelErr, err)
 		}
 	})()

@@ -1876,11 +2016,11 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
 	return info, nil
 }

-// Update the content of a remote file within 4MB size in one single request
+// Update the content of a remote file within 4 MiB size in one single request
 // This function will set modtime after uploading, which will create a new version for the remote file
 func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, modTime time.Time, options ...fs.OpenOption) (info *api.Item, err error) {
 	if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
-		return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4MiB")
+		return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4 MiB")
 	}

 	fs.Debugf(o, "Starting singlepart upload")

@@ -88,7 +88,7 @@ func init() {

Note that these chunks are buffered in memory so increasing them will
increase memory use.`,
-			Default:  10 * fs.MebiByte,
+			Default:  10 * fs.Mebi,
 			Advanced: true,
 		}},
 	})

@@ -12,7 +12,6 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"log"
 	"net/http"
 	"net/url"
 	"path"
@@ -72,7 +71,7 @@ func init() {
 		Name:        "pcloud",
 		Description: "Pcloud",
 		NewFs:       NewFs,
-		Config: func(ctx context.Context, name string, m configmap.Mapper) {
+		Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
 			optc := new(Options)
 			err := configstruct.Set(m, optc)
 			if err != nil {
@@ -94,14 +93,11 @@ func init() {
 				fs.Debugf(nil, "pcloud: got hostname %q", hostname)
 				return nil
 			}
-			opt := oauthutil.Options{
+			return oauthutil.ConfigOut("", &oauthutil.Options{
 				OAuth2Config: oauthConfig,
 				CheckAuth:    checkAuth,
 				StateBlankOK: true, // pCloud seems to drop the state parameter now - see #4210
-			}
-			err = oauthutil.Config(ctx, "pcloud", name, m, oauthConfig, &opt)
-			if err != nil {
-				log.Fatalf("Failed to configure token: %v", err)
-			}
+			})
 		},
 		Options: append(oauthutil.SharedOptions, []fs.Option{{
 			Name: config.ConfigEncoding,

@@ -20,7 +20,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"log"
 	"net"
 	"net/http"
 	"net/url"
@@ -78,11 +77,10 @@ func init() {
 		Name:        "premiumizeme",
 		Description: "premiumize.me",
 		NewFs:       NewFs,
-		Config: func(ctx context.Context, name string, m configmap.Mapper) {
-			err := oauthutil.Config(ctx, "premiumizeme", name, m, oauthConfig, nil)
-			if err != nil {
-				log.Fatalf("Failed to configure token: %v", err)
-			}
+		Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
+			return oauthutil.ConfigOut("", &oauthutil.Options{
+				OAuth2Config: oauthConfig,
+			})
 		},
 		Options: []fs.Option{{
 			Name: "api_key",

@@ -2,7 +2,6 @@ package putio

 import (
 	"context"
-	"log"
 	"regexp"
 	"time"

@@ -35,7 +34,7 @@ const (
 	minSleep      = 10 * time.Millisecond
 	maxSleep      = 2 * time.Second
 	decayConstant = 2 // bigger for slower decay, exponential
-	defaultChunkSize = 48 * fs.MebiByte
+	defaultChunkSize = 48 * fs.Mebi
 )

 var (
@@ -60,14 +59,11 @@ func init() {
 		Name:        "putio",
 		Description: "Put.io",
 		NewFs:       NewFs,
-		Config: func(ctx context.Context, name string, m configmap.Mapper) {
-			opt := oauthutil.Options{
-				NoOffline: true,
-			}
-			err := oauthutil.Config(ctx, "putio", name, m, putioConfig, &opt)
-			if err != nil {
-				log.Fatalf("Failed to configure token: %v", err)
-			}
+		Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
+			return oauthutil.ConfigOut("", &oauthutil.Options{
+				OAuth2Config: putioConfig,
+				NoOffline:    true,
+			})
 		},
 		Options: []fs.Option{{
 			Name: config.ConfigEncoding,

@@ -80,7 +80,7 @@ func init() {
 			Help: `Cutoff for switching to chunked upload

Any files larger than this will be uploaded in chunks of chunk_size.
-The minimum is 0 and the maximum is 5GB.`,
+The minimum is 0 and the maximum is 5 GiB.`,
 			Default:  defaultUploadCutoff,
 			Advanced: true,
 		}, {

backend/s3/s3.go
@@ -26,7 +26,6 @@ import (
 	"github.com/aws/aws-sdk-go/aws/corehandlers"
 	"github.com/aws/aws-sdk-go/aws/credentials"
 	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
-	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
 	"github.com/aws/aws-sdk-go/aws/defaults"
 	"github.com/aws/aws-sdk-go/aws/ec2metadata"
 	"github.com/aws/aws-sdk-go/aws/endpoints"
@@ -59,7 +58,7 @@ import (
 func init() {
 	fs.Register(&fs.RegInfo{
 		Name: "s3",
-		Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, and Tencent COS",
+		Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, SeaweedFS, and Tencent COS",
 		NewFs:       NewFs,
 		CommandHelp: commandHelp,
 		Options: []fs.Option{{
@@ -92,6 +91,9 @@ func init() {
 		}, {
 			Value: "Scaleway",
 			Help:  "Scaleway Object Storage",
+		}, {
+			Value: "SeaweedFS",
+			Help:  "SeaweedFS S3",
 		}, {
 			Value: "StackPath",
 			Help:  "StackPath Object Storage",
@@ -428,6 +430,12 @@ func init() {
 			Help:     "Endpoint for OSS API.",
 			Provider: "Alibaba",
 			Examples: []fs.OptionExample{{
+				Value: "oss-accelerate.aliyuncs.com",
+				Help:  "Global Accelerate",
+			}, {
+				Value: "oss-accelerate-overseas.aliyuncs.com",
+				Help:  "Global Accelerate (outside mainland China)",
+			}, {
 				Value: "oss-cn-hangzhou.aliyuncs.com",
 				Help:  "East China 1 (Hangzhou)",
 			}, {
@@ -444,10 +452,22 @@ func init() {
 				Help: "North China 3 (Zhangjiakou)",
 			}, {
 				Value: "oss-cn-huhehaote.aliyuncs.com",
-				Help:  "North China 5 (Huhehaote)",
+				Help:  "North China 5 (Hohhot)",
+			}, {
+				Value: "oss-cn-wulanchabu.aliyuncs.com",
+				Help:  "North China 6 (Ulanqab)",
 			}, {
 				Value: "oss-cn-shenzhen.aliyuncs.com",
 				Help:  "South China 1 (Shenzhen)",
+			}, {
+				Value: "oss-cn-heyuan.aliyuncs.com",
+				Help:  "South China 2 (Heyuan)",
+			}, {
+				Value: "oss-cn-guangzhou.aliyuncs.com",
+				Help:  "South China 3 (Guangzhou)",
+			}, {
+				Value: "oss-cn-chengdu.aliyuncs.com",
+				Help:  "West China 1 (Chengdu)",
 			}, {
 				Value: "oss-cn-hongkong.aliyuncs.com",
 				Help:  "Hong Kong (Hong Kong)",
@@ -593,6 +613,10 @@ func init() {
 			Value:    "sgp1.digitaloceanspaces.com",
 			Help:     "Digital Ocean Spaces Singapore 1",
 			Provider: "DigitalOcean",
+		}, {
+			Value:    "localhost:8333",
+			Help:     "SeaweedFS S3 localhost",
+			Provider: "SeaweedFS",
 		}, {
 			Value: "s3.wasabisys.com",
 			Help:  "Wasabi US East endpoint",
@@ -1017,7 +1041,7 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
 			Help: `Cutoff for switching to chunked upload

Any files larger than this will be uploaded in chunks of chunk_size.
-The minimum is 0 and the maximum is 5GB.`,
+The minimum is 0 and the maximum is 5 GiB.`,
 			Default:  defaultUploadCutoff,
 			Advanced: true,
 		}, {
@@ -1039,9 +1063,9 @@ Rclone will automatically increase the chunk size when uploading a
large file of known size to stay below the 10,000 chunks limit.

Files of unknown size are uploaded with the configured
-chunk_size. Since the default chunk size is 5MB and there can be at
+chunk_size. Since the default chunk size is 5 MiB and there can be at
most 10,000 chunks, this means that by default the maximum size of
-a file you can stream upload is 48GB. If you wish to stream upload
+a file you can stream upload is 48 GiB. If you wish to stream upload
larger files then you will need to increase chunk_size.`,
 			Default:  minChunkSize,
 			Advanced: true,
@@ -1067,7 +1091,7 @@ large file of a known size to stay below this number of chunks limit.
Any files larger than this that need to be server-side copied will be
copied in chunks of this size.

-The minimum is 0 and the maximum is 5GB.`,
+The minimum is 0 and the maximum is 5 GiB.`,
 			Default:  fs.SizeSuffix(maxSizeForCopy),
 			Advanced: true,
 		}, {
@@ -1221,6 +1245,11 @@ very small even with this flag.
`,
 			Default:  false,
 			Advanced: true,
+		}, {
+			Name:     "no_head_object",
+			Help:     `If set, don't HEAD objects`,
+			Default:  false,
+			Advanced: true,
 		}, {
 			Name: config.ConfigEncoding,
 			Help: config.ConfigEncodingHelp,
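The new no_head_object option skips the per-object HEAD request rclone would otherwise issue, trading some metadata accuracy for fewer transactions. By rclone's usual option-to-flag convention it should be usable as, e.g. (remote and paths illustrative):

    rclone copy s3:bucket/dir /local/dir --s3-no-head-object
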
@@ -1271,7 +1300,7 @@ See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rcl
 const (
 	metaMtime   = "Mtime"     // the meta key to store mtime in - e.g. X-Amz-Meta-Mtime
 	metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
-	// The maximum size of object we can COPY - this should be 5GiB but is < 5GB for b2 compatibility
+	// The maximum size of object we can COPY - this should be 5 GiB but is < 5 GB for b2 compatibility
 	// See https://forum.rclone.org/t/copying-files-within-a-b2-bucket/16680/76
 	maxSizeForCopy = 4768 * 1024 * 1024
 	maxUploadParts = 10000 // maximum allowed number of parts in a multi-part upload
@@ -1319,6 +1348,7 @@ type Options struct {
 	ListChunk           int64                `config:"list_chunk"`
 	NoCheckBucket       bool                 `config:"no_check_bucket"`
 	NoHead              bool                 `config:"no_head"`
+	NoHeadObject        bool                 `config:"no_head_object"`
 	Enc                 encoder.MultiEncoder `config:"encoding"`
 	MemoryPoolFlushTime fs.Duration          `config:"memory_pool_flush_time"`
 	MemoryPoolUseMmap   bool                 `config:"memory_pool_use_mmap"`
@@ -1511,11 +1541,6 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
 			}),
 			ExpiryWindow: 3 * time.Minute,
 		},
-
-		// Pick up IAM role if we are in EKS
-		&stscreds.WebIdentityRoleProvider{
-			ExpiryWindow: 3 * time.Minute,
-		},
 	}
 	cred := credentials.NewChainCredentials(providers)

@@ -1693,7 +1718,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		GetTier:     true,
 		SlowModTime: true,
 	}).Fill(ctx, f)
-	if f.rootBucket != "" && f.rootDirectory != "" {
+	if f.rootBucket != "" && f.rootDirectory != "" && !opt.NoHeadObject && !strings.HasSuffix(root, "/") {
 		// Check to see if the (bucket,directory) is actually an existing file
 		oldRoot := f.root
 		newRoot, leaf := path.Split(oldRoot)
@@ -1730,7 +1755,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *s3.Obje
 		o.setMD5FromEtag(aws.StringValue(info.ETag))
 		o.bytes = aws.Int64Value(info.Size)
 		o.storageClass = aws.StringValue(info.StorageClass)
-	} else {
+	} else if !o.fs.opt.NoHeadObject {
 		err := o.readMetaData(ctx) // reads info and meta, returning an error
 		if err != nil {
 			return nil, err
@@ -2831,15 +2856,23 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 	if err != nil {
 		return err
 	}
+	if resp.LastModified == nil {
+		fs.Logf(o, "Failed to read last modified from HEAD: %v", err)
+	}
+	o.setMetaData(resp.ETag, resp.ContentLength, resp.LastModified, resp.Metadata, resp.ContentType, resp.StorageClass)
+	return nil
+}
+
+func (o *Object) setMetaData(etag *string, contentLength *int64, lastModified *time.Time, meta map[string]*string, mimeType *string, storageClass *string) {
 	var size int64
 	// Ignore missing Content-Length assuming it is 0
 	// Some versions of ceph do this due their apache proxies
-	if resp.ContentLength != nil {
-		size = *resp.ContentLength
+	if contentLength != nil {
+		size = *contentLength
 	}
-	o.setMD5FromEtag(aws.StringValue(resp.ETag))
+	o.setMD5FromEtag(aws.StringValue(etag))
 	o.bytes = size
-	o.meta = resp.Metadata
+	o.meta = meta
 	if o.meta == nil {
 		o.meta = map[string]*string{}
 	}
@@ -2854,15 +2887,13 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 			o.md5 = hex.EncodeToString(md5sumBytes)
 		}
 	}
-	o.storageClass = aws.StringValue(resp.StorageClass)
-	if resp.LastModified == nil {
-		fs.Logf(o, "Failed to read last modified from HEAD: %v", err)
+	o.storageClass = aws.StringValue(storageClass)
+	if lastModified == nil {
 		o.lastModified = time.Now()
 	} else {
-		o.lastModified = *resp.LastModified
+		o.lastModified = *lastModified
 	}
-	o.mimeType = aws.StringValue(resp.ContentType)
-	return nil
+	o.mimeType = aws.StringValue(mimeType)
 }

 // ModTime returns the modification time of the object

@@ -2972,6 +3003,26 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	if err != nil {
 		return nil, err
 	}
+	if resp.LastModified == nil {
+		fs.Logf(o, "Failed to read last modified: %v", err)
+	}
+	// read size from ContentLength or ContentRange
+	size := resp.ContentLength
+	if resp.ContentRange != nil {
+		var contentRange = *resp.ContentRange
+		slash := strings.IndexRune(contentRange, '/')
+		if slash >= 0 {
+			i, err := strconv.ParseInt(contentRange[slash+1:], 10, 64)
+			if err == nil {
+				size = &i
+			} else {
+				fs.Debugf(o, "Failed to find parse integer from in %q: %v", contentRange, err)
+			}
+		} else {
+			fs.Debugf(o, "Failed to find length in %q", contentRange)
+		}
+	}
+	o.setMetaData(resp.ETag, size, resp.LastModified, resp.Metadata, resp.ContentType, resp.StorageClass)
 	return resp.Body, nil
 }

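For a ranged GET the total object size is the number after the "/" in a header such as "bytes 0-1023/4096", which is what the hunk above extracts. A self-contained sketch of just that parsing step:

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    func totalSize(contentRange string) (int64, bool) {
        slash := strings.IndexRune(contentRange, '/')
        if slash < 0 {
            return 0, false
        }
        n, err := strconv.ParseInt(contentRange[slash+1:], 10, 64)
        return n, err == nil
    }

    func main() {
        fmt.Println(totalSize("bytes 0-1023/4096")) // 4096 true
    }
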
@@ -2997,9 +3048,9 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
 	// calculate size of parts
 	partSize := int(f.opt.ChunkSize)

 	// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
-	// buffers here (default 5MB). With a maximum number of parts (10,000) this will be a file of
-	// 48GB which seems like a not too unreasonable limit.
+	// buffers here (default 5 MiB). With a maximum number of parts (10,000) this will be a file of
+	// 48 GiB which seems like a not too unreasonable limit.
 	if size == -1 {
 		warnStreamUpload.Do(func() {
 			fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
@@ -3008,7 +3059,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
 	} else {
 		// Adjust partSize until the number of parts is small enough.
 		if size/int64(partSize) >= uploadParts {
-			// Calculate partition size rounded up to the nearest MB
+			// Calculate partition size rounded up to the nearest MiB
 			partSize = int((((size / uploadParts) >> 20) + 1) << 20)
 		}
 	}

@@ -296,86 +296,86 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 }

 // Config callback for 2FA
-func Config(ctx context.Context, name string, m configmap.Mapper) {
-	ci := fs.GetConfig(ctx)
+func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
 	serverURL, ok := m.Get(configURL)
 	if !ok || serverURL == "" {
 		// If there's no server URL, it means we're trying an operation at the backend level, like a "rclone authorize seafile"
-		fmt.Print("\nOperation not supported on this remote.\nIf you need a 2FA code on your account, use the command:\n\nrclone config reconnect <remote name>:\n\n")
-		return
-	}
-
-	// Stop if we are running non-interactive config
-	if ci.AutoConfirm {
-		return
+		return nil, errors.New("operation not supported on this remote. If you need a 2FA code on your account, use the command: rclone config reconnect <remote name>: ")
 	}

 	u, err := url.Parse(serverURL)
 	if err != nil {
-		fs.Errorf(nil, "Invalid server URL %s", serverURL)
-		return
+		return nil, errors.Errorf("invalid server URL %s", serverURL)
 	}

 	is2faEnabled, _ := m.Get(config2FA)
 	if is2faEnabled != "true" {
-		fmt.Println("Two-factor authentication is not enabled on this account.")
-		return
+		return nil, errors.New("two-factor authentication is not enabled on this account")
 	}

 	username, _ := m.Get(configUser)
 	if username == "" {
-		fs.Errorf(nil, "A username is required")
-		return
+		return nil, errors.New("a username is required")
 	}

 	password, _ := m.Get(configPassword)
 	if password != "" {
 		password, _ = obscure.Reveal(password)
 	}
-	// Just make sure we do have a password
-	for password == "" {
-		fmt.Print("Two-factor authentication: please enter your password (it won't be saved in the configuration)\npassword> ")
-		password = config.ReadPassword()
-	}
-
-	// Create rest client for getAuthorizationToken
-	url := u.String()
-	if !strings.HasPrefix(url, "/") {
-		url += "/"
-	}
-	srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(url)
-
-	// We loop asking for a 2FA code
-	for {
-		code := ""
-		for code == "" {
-			fmt.Print("Two-factor authentication: please enter your 2FA code\n2fa code> ")
-			code = config.ReadLine()
+	switch config.State {
+	case "":
+		// Just make sure we do have a password
+		if password == "" {
+			return fs.ConfigPassword("", "config_password", "Two-factor authentication: please enter your password (it won't be saved in the configuration)")
+		}
+		return fs.ConfigGoto("password")
+	case "password":
+		password = config.Result
+		if password == "" {
+			return fs.ConfigError("password", "Password can't be blank")
+		}
+		m.Set(configPassword, obscure.MustObscure(config.Result))
+		return fs.ConfigGoto("2fa")
+	case "2fa":
+		return fs.ConfigInput("2fa_do", "config_2fa", "Two-factor authentication: please enter your 2FA code")
+	case "2fa_do":
+		code := config.Result
+		if code == "" {
+			return fs.ConfigError("2fa", "2FA codes can't be blank")
 		}

+		// Create rest client for getAuthorizationToken
+		url := u.String()
+		if !strings.HasPrefix(url, "/") {
+			url += "/"
+		}
+		srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(url)
+
+		// We loop asking for a 2FA code
+		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+		defer cancel()
+
 		fmt.Println("Authenticating...")
 		token, err := getAuthorizationToken(ctx, srv, username, password, code)
 		if err != nil {
-			fmt.Printf("Authentication failed: %v\n", err)
-			tryAgain := strings.ToLower(config.ReadNonEmptyLine("Do you want to try again (y/n)?"))
-			if tryAgain != "y" && tryAgain != "yes" {
-				// The user is giving up, we're done here
-				break
-			}
+			return fs.ConfigConfirm("2fa_error", true, "config_retry", fmt.Sprintf("Authentication failed: %v\n\nTry Again?", err))
 		}
-		if token != "" {
-			fmt.Println("Success!")
-			// Let's save the token into the configuration
-			m.Set(configAuthToken, token)
-			// And delete any previous entry for password
-			m.Set(configPassword, "")
-			// And we're done here
-			break
+		if token == "" {
+			return fs.ConfigConfirm("2fa_error", true, "config_retry", "Authentication failed - no token returned.\n\nTry Again?")
 		}
+		// Let's save the token into the configuration
+		m.Set(configAuthToken, token)
+		// And delete any previous entry for password
+		m.Set(configPassword, "")
+		// And we're done here
+		return nil, nil
+	case "2fa_error":
+		if config.Result == "true" {
+			return fs.ConfigGoto("2fa")
+		}
+		return nil, errors.New("2fa authentication failed")
+	}
+	return nil, fmt.Errorf("unknown state %q", config.State)
 }

 // sets the AuthorizationToken up

@@ -16,6 +16,7 @@ import (
 	"strconv"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"

 	"github.com/pkg/errors"
@@ -223,6 +224,17 @@ have a server which returns
Then you may need to enable this flag.

If concurrent reads are disabled, the use_fstat option is ignored.
`,
 			Advanced: true,
 		}, {
+			Name:    "disable_concurrent_writes",
+			Default: false,
+			Help: `If set don't use concurrent writes
+
+Normally rclone uses concurrent writes to upload files. This improves
+the performance greatly, especially for distant servers.
+
+This option disables concurrent writes should that be necessary.
+`,
+			Advanced: true,
+		}, {
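With this option registered, rclone's usual convention applies: concurrent writes can be turned off either with disable_concurrent_writes = true in the remote's config section or, presumably, via the generated flag, e.g. (remote name illustrative):

    rclone copy /local/dir mysftp:dir --sftp-disable-concurrent-writes
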
@@ -243,29 +255,30 @@ Set to 0 to keep connections indefinitely.

 // Options defines the configuration for this backend
 type Options struct {
-	Host                   string      `config:"host"`
-	User                   string      `config:"user"`
-	Port                   string      `config:"port"`
-	Pass                   string      `config:"pass"`
-	KeyPem                 string      `config:"key_pem"`
-	KeyFile                string      `config:"key_file"`
-	KeyFilePass            string      `config:"key_file_pass"`
-	PubKeyFile             string      `config:"pubkey_file"`
-	KnownHostsFile         string      `config:"known_hosts_file"`
-	KeyUseAgent            bool        `config:"key_use_agent"`
-	UseInsecureCipher      bool        `config:"use_insecure_cipher"`
-	DisableHashCheck       bool        `config:"disable_hashcheck"`
-	AskPassword            bool        `config:"ask_password"`
-	PathOverride           string      `config:"path_override"`
-	SetModTime             bool        `config:"set_modtime"`
-	Md5sumCommand          string      `config:"md5sum_command"`
-	Sha1sumCommand         string      `config:"sha1sum_command"`
-	SkipLinks              bool        `config:"skip_links"`
-	Subsystem              string      `config:"subsystem"`
-	ServerCommand          string      `config:"server_command"`
-	UseFstat               bool        `config:"use_fstat"`
-	DisableConcurrentReads bool        `config:"disable_concurrent_reads"`
-	IdleTimeout            fs.Duration `config:"idle_timeout"`
+	Host                    string      `config:"host"`
+	User                    string      `config:"user"`
+	Port                    string      `config:"port"`
+	Pass                    string      `config:"pass"`
+	KeyPem                  string      `config:"key_pem"`
+	KeyFile                 string      `config:"key_file"`
+	KeyFilePass             string      `config:"key_file_pass"`
+	PubKeyFile              string      `config:"pubkey_file"`
+	KnownHostsFile          string      `config:"known_hosts_file"`
+	KeyUseAgent             bool        `config:"key_use_agent"`
+	UseInsecureCipher       bool        `config:"use_insecure_cipher"`
+	DisableHashCheck        bool        `config:"disable_hashcheck"`
+	AskPassword             bool        `config:"ask_password"`
+	PathOverride            string      `config:"path_override"`
+	SetModTime              bool        `config:"set_modtime"`
+	Md5sumCommand           string      `config:"md5sum_command"`
+	Sha1sumCommand          string      `config:"sha1sum_command"`
+	SkipLinks               bool        `config:"skip_links"`
+	Subsystem               string      `config:"subsystem"`
+	ServerCommand           string      `config:"server_command"`
+	UseFstat                bool        `config:"use_fstat"`
+	DisableConcurrentReads  bool        `config:"disable_concurrent_reads"`
+	DisableConcurrentWrites bool        `config:"disable_concurrent_writes"`
+	IdleTimeout             fs.Duration `config:"idle_timeout"`
 }

 // Fs stores the interface to the remote SFTP files

@@ -286,6 +299,7 @@ type Fs struct {
 	drain     *time.Timer // used to drain the pool when we stop using the connections
 	pacer     *fs.Pacer   // pacer for operations
 	savedpswd string
+	transfers int32 // count in use references
 }

 // Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
@@ -299,6 +313,13 @@ type Object struct {
 	sha1sum *string // Cached SHA1 checksum
 }

+// debugf calls fs.Debugf if --dump bodies or --dump headers is set
+func (f *Fs) debugf(o interface{}, text string, args ...interface{}) {
+	if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
+		fs.Debugf(o, text, args...)
+	}
+}
+
 // dial starts a client connection to the given SSH server. It is a
 // convenience function that connects to the given network address,
 // initiates the SSH handshake, and then sets up a Client.
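Because debugf is gated on the --dump flags, the per-call SFTP tracing added throughout this branch stays silent in normal runs. To see it, combine verbose output with one of the dump flags, e.g. (remote name illustrative):

    rclone lsd mysftp: -vv --dump headers
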
@@ -348,6 +369,23 @@ func (c *conn) closed() error {
 	return nil
 }

+// Show that we are doing an upload or download
+//
+// Call removeTransfer() when done
+func (f *Fs) addTransfer() {
+	atomic.AddInt32(&f.transfers, 1)
+}
+
+// Show the upload or download done
+func (f *Fs) removeTransfer() {
+	atomic.AddInt32(&f.transfers, -1)
+}
+
+// getTransfers shows whether there are any transfers in progress
+func (f *Fs) getTransfers() int32 {
+	return atomic.LoadInt32(&f.transfers)
+}
+
 // Open a new connection to the SFTP server.
 func (f *Fs) sftpConnection(ctx context.Context) (c *conn, err error) {
 	// Rate limit rate of new connections
@@ -396,8 +434,8 @@ func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.C
 	opts = append(opts,
 		sftp.UseFstat(f.opt.UseFstat),
 		sftp.UseConcurrentReads(!f.opt.DisableConcurrentReads),
+		sftp.UseConcurrentWrites(!f.opt.DisableConcurrentWrites),
 	)

 	return sftp.NewClientPipe(pr, pw, opts...)
 }

@@ -474,6 +512,13 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
 func (f *Fs) drainPool(ctx context.Context) (err error) {
 	f.poolMu.Lock()
 	defer f.poolMu.Unlock()
+	if transfers := f.getTransfers(); transfers != 0 {
+		fs.Debugf(f, "Not closing %d unused connections as %d transfers in progress", len(f.pool), transfers)
+		if f.opt.IdleTimeout > 0 {
+			f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer
+		}
+		return nil
+	}
 	if f.opt.IdleTimeout > 0 {
 		f.drain.Stop()
 	}
@@ -524,7 +569,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}

 	if opt.KnownHostsFile != "" {
-		hostcallback, err := knownhosts.New(opt.KnownHostsFile)
+		hostcallback, err := knownhosts.New(env.ShellExpand(opt.KnownHostsFile))
 		if err != nil {
 			return nil, errors.Wrap(err, "couldn't parse known_hosts_file")
 		}
@@ -726,7 +771,9 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
 	if err != nil {
 		return nil, errors.Wrap(err, "NewFs")
 	}
+	f.debugf(f, "> Getwd")
 	cwd, err := c.sftpClient.Getwd()
+	f.debugf(f, "< Getwd: %q, err=%#v", cwd, err)
 	f.putSftpConnection(&c, nil)
 	if err != nil {
 		fs.Debugf(f, "Failed to read current directory - using relative paths: %v", err)
@@ -807,7 +854,9 @@ func (f *Fs) dirExists(ctx context.Context, dir string) (bool, error) {
 	if err != nil {
 		return false, errors.Wrap(err, "dirExists")
 	}
+	f.debugf(f, "> Stat dirExists: %q", dir)
 	info, err := c.sftpClient.Stat(dir)
+	f.debugf(f, "< Stat dirExists: %#v, err=%#v", info, err)
 	f.putSftpConnection(&c, err)
 	if err != nil {
 		if os.IsNotExist(err) {
@@ -847,7 +896,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	if err != nil {
 		return nil, errors.Wrap(err, "List")
 	}
+	f.debugf(f, "> ReadDir: %q", sftpDir)
 	infos, err := c.sftpClient.ReadDir(sftpDir)
+	f.debugf(f, "< ReadDir: %#v, err=%#v", infos, err)
 	f.putSftpConnection(&c, err)
 	if err != nil {
 		return nil, errors.Wrapf(err, "error listing %q", dir)
@@ -938,7 +989,9 @@ func (f *Fs) mkdir(ctx context.Context, dirPath string) error {
 	if err != nil {
 		return errors.Wrap(err, "mkdir")
 	}
+	f.debugf(f, "> Mkdir: %q", dirPath)
 	err = c.sftpClient.Mkdir(dirPath)
+	f.debugf(f, "< Mkdir: err=%#v", err)
 	f.putSftpConnection(&c, err)
 	if err != nil {
 		return errors.Wrapf(err, "mkdir %q failed", dirPath)
@@ -969,7 +1022,9 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	if err != nil {
 		return errors.Wrap(err, "Rmdir")
 	}
+	f.debugf(f, "> Rmdir: %q", root)
 	err = c.sftpClient.RemoveDirectory(root)
+	f.debugf(f, "< Rmdir: err=%#v", err)
 	f.putSftpConnection(&c, err)
 	return err
 }
@@ -989,10 +1044,10 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	if err != nil {
 		return nil, errors.Wrap(err, "Move")
 	}
-	err = c.sftpClient.Rename(
-		srcObj.path(),
-		path.Join(f.absRoot, remote),
-	)
+	srcPath, dstPath := srcObj.path(), path.Join(f.absRoot, remote)
+	f.debugf(f, "> Rename file: src=%q, dst=%q", srcPath, dstPath)
+	err = c.sftpClient.Rename(srcPath, dstPath)
+	f.debugf(f, "< Rename file: err=%#v", err)
 	f.putSftpConnection(&c, err)
 	if err != nil {
 		return nil, errors.Wrap(err, "Move Rename failed")
@@ -1041,10 +1096,12 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	if err != nil {
 		return errors.Wrap(err, "DirMove")
 	}
+	f.debugf(f, "> Rename dir: src=%q, dst=%q", srcPath, dstPath)
 	err = c.sftpClient.Rename(
 		srcPath,
 		dstPath,
 	)
+	f.debugf(f, "< Rename dir: err=%#v", err)
 	f.putSftpConnection(&c, err)
 	if err != nil {
 		return errors.Wrapf(err, "DirMove Rename(%q,%q) failed", srcPath, dstPath)
@@ -1060,7 +1117,9 @@ func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
 	}
 	defer f.putSftpConnection(&c, err)

+	f.debugf(f, "> NewSession run")
 	session, err := c.sshClient.NewSession()
+	f.debugf(f, "< NewSession run: %#v, err=%#v", session, err)
 	if err != nil {
 		return nil, errors.Wrap(err, "run: get SFTP session")
 	}
@@ -1072,7 +1131,9 @@ func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
 	session.Stdout = &stdout
 	session.Stderr = &stderr

+	f.debugf(f, "> Run cmd: %q", cmd)
 	err = session.Run(cmd)
+	f.debugf(f, "< Run cmd: err=%#v", err)
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to run %q: %s", cmd, stderr.Bytes())
 	}
@@ -1219,7 +1280,9 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
 	if err != nil {
 		return "", errors.Wrap(err, "Hash get SFTP connection")
 	}
+	o.fs.debugf(o, "> NewSession hash")
 	session, err := c.sshClient.NewSession()
+	o.fs.debugf(o, "< NewSession hash: %#v, err=%#v", session, err)
 	o.fs.putSftpConnection(&c, err)
 	if err != nil {
 		return "", errors.Wrap(err, "Hash put SFTP connection")
@@ -1329,7 +1392,9 @@ func (f *Fs) stat(ctx context.Context, remote string) (info os.FileInfo, err err
 		return nil, errors.Wrap(err, "stat")
 	}
 	absPath := path.Join(f.absRoot, remote)
+	f.debugf(f, "> Stat file: %q", absPath)
 	info, err = c.sftpClient.Stat(absPath)
+	f.debugf(f, "< Stat file: %#v, err=%#v", info, err)
 	f.putSftpConnection(&c, err)
 	return info, err
 }
@@ -1361,7 +1426,9 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	if err != nil {
 		return errors.Wrap(err, "SetModTime")
 	}
+	o.fs.debugf(o, "> Chtimes: %q, %v", o.path(), modTime)
 	err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
+	o.fs.debugf(o, "< Chtimes: err=%#v", err)
 	o.fs.putSftpConnection(&c, err)
 	if err != nil {
 		return errors.Wrap(err, "SetModTime failed")
@@ -1380,18 +1447,22 @@ func (o *Object) Storable() bool {

 // objectReader represents a file open for reading on the SFTP server
 type objectReader struct {
+	f          *Fs
 	sftpFile   *sftp.File
 	pipeReader *io.PipeReader
 	done       chan struct{}
 }

-func newObjectReader(sftpFile *sftp.File) *objectReader {
+func (f *Fs) newObjectReader(sftpFile *sftp.File) *objectReader {
 	pipeReader, pipeWriter := io.Pipe()
 	file := &objectReader{
+		f:          f,
 		sftpFile:   sftpFile,
 		pipeReader: pipeReader,
 		done:       make(chan struct{}),
 	}
+	// Show connection in use
+	f.addTransfer()

 	go func() {
 		// Use sftpFile.WriteTo to pump data so that it gets a
@@ -1421,6 +1492,8 @@ func (file *objectReader) Close() (err error) {
 	_ = file.pipeReader.Close()
 	// Wait for the background process to finish
 	<-file.done
+	// Show connection no longer in use
+	file.f.removeTransfer()
 	return err
 }

@@ -1443,7 +1516,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	if err != nil {
 		return nil, errors.Wrap(err, "Open")
 	}
+	o.fs.debugf(o, "> Open read: %q", o.path())
 	sftpFile, err := c.sftpClient.Open(o.path())
+	o.fs.debugf(o, "< Open read: %#v, err=%#v", sftpFile, err)
 	o.fs.putSftpConnection(&c, err)
 	if err != nil {
 		return nil, errors.Wrap(err, "Open failed")
@@ -1454,12 +1529,27 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 			return nil, errors.Wrap(err, "Open Seek failed")
 		}
 	}
-	in = readers.NewLimitedReadCloser(newObjectReader(sftpFile), limit)
+	in = readers.NewLimitedReadCloser(o.fs.newObjectReader(sftpFile), limit)
 	return in, nil
 }

type sizeReader struct {
|
||||
io.Reader
|
||||
size int64
|
||||
}
|
||||
|
||||
// Size returns the expected size of the stream
|
||||
//
|
||||
// It is used in sftpFile.ReadFrom as a hint to work out the
|
||||
// concurrency needed
|
||||
func (sr *sizeReader) Size() int64 {
|
||||
return sr.size
|
||||
}
|
||||
|
||||
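Update (below) wraps the upload stream in sizeReader so that pkg/sftp's sftpFile.ReadFrom can discover the expected length and size its write concurrency accordingly. A sketch of how a consumer can pick up the hint; the sizer interface name and sizeHint helper are illustrative, not part of the diff:

    // sizer is the optional interface sizeReader satisfies; pkg/sftp type
    // asserts for a Size() int64 method on the source reader.
    type sizer interface {
        Size() int64
    }

    // sizeHint returns the expected stream length if the reader provides one.
    func sizeHint(in io.Reader) (size int64, ok bool) {
        s, ok := in.(sizer)
        if !ok {
            return 0, false // unknown size, fall back to default concurrency
        }
        return s.Size(), true
    }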
// Update a remote sftp file using the data <in> and ModTime from <src>
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	o.fs.addTransfer() // Show transfer in progress
	defer o.fs.removeTransfer()
	// Clear the hash cache since we are about to update the object
	o.md5sum = nil
	o.sha1sum = nil
@@ -1467,7 +1557,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	if err != nil {
		return errors.Wrap(err, "Update")
	}
	o.fs.debugf(o, "> OpenFile write: %q", o.path())
	file, err := c.sftpClient.OpenFile(o.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
	o.fs.debugf(o, "< OpenFile write: %#v, err=%#v", file, err)
	o.fs.putSftpConnection(&c, err)
	if err != nil {
		return errors.Wrap(err, "Update Create failed")
@@ -1479,7 +1571,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
			fs.Debugf(src, "Failed to open new SSH connection for delete: %v", removeErr)
			return
		}
		o.fs.debugf(o, "> Remove file: %q", o.path())
		removeErr = c.sftpClient.Remove(o.path())
		o.fs.debugf(o, "< Remove file: err=%#v", removeErr)
		o.fs.putSftpConnection(&c, removeErr)
		if removeErr != nil {
			fs.Debugf(src, "Failed to remove: %v", removeErr)
@@ -1487,7 +1581,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
			fs.Debugf(src, "Removed after failed upload: %v", err)
		}
	}
	_, err = file.ReadFrom(in)
	_, err = file.ReadFrom(&sizeReader{Reader: in, size: src.Size()})
	if err != nil {
		remove()
		return errors.Wrap(err, "Update ReadFrom failed")
@@ -1528,7 +1622,9 @@ func (o *Object) Remove(ctx context.Context) error {
	if err != nil {
		return errors.Wrap(err, "Remove")
	}
	o.fs.debugf(o, "> Remove: %q", o.path())
	err = c.sftpClient.Remove(o.path())
	o.fs.debugf(o, "< Remove: err=%#v", err)
	o.fs.putSftpConnection(&c, err)
	return err
}

@@ -77,7 +77,6 @@ import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"net/url"
	"path"
@@ -110,10 +109,10 @@ const (
	decayConstant = 2               // bigger for slower decay, exponential
	apiPath       = "/sf/v3"        // add to endpoint to get API path
	tokenPath     = "/oauth/token"  // add to endpoint to get Token path
	minChunkSize        = 256 * fs.KibiByte
	maxChunkSize        = 2 * fs.GibiByte
	defaultChunkSize    = 64 * fs.MebiByte
	defaultUploadCutoff = 128 * fs.MebiByte
	minChunkSize        = 256 * fs.Kibi
	maxChunkSize        = 2 * fs.Gibi
	defaultChunkSize    = 64 * fs.Mebi
	defaultUploadCutoff = 128 * fs.Mebi
)

// Generate a new oauth2 config which we will update when we know the TokenURL
@@ -136,7 +135,7 @@ func init() {
	Name:        "sharefile",
	Description: "Citrix Sharefile",
	NewFs:       NewFs,
	Config: func(ctx context.Context, name string, m configmap.Mapper) {
	Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
		oauthConfig := newOauthConfig("")
		checkAuth := func(oauthConfig *oauth2.Config, auth *oauthutil.AuthResult) error {
			if auth == nil || auth.Form == nil {
@@ -152,13 +151,10 @@ func init() {
			oauthConfig.Endpoint.TokenURL = endpoint + tokenPath
			return nil
		}
		opt := oauthutil.Options{
			CheckAuth: checkAuth,
		}
		err := oauthutil.Config(ctx, "sharefile", name, m, oauthConfig, &opt)
		if err != nil {
			log.Fatalf("Failed to configure token: %v", err)
		}
		return oauthutil.ConfigOut("", &oauthutil.Options{
			OAuth2Config: oauthConfig,
			CheckAuth:    checkAuth,
		})
	},
	Options: []fs.Option{{
		Name: "upload_cutoff",

@@ -16,7 +16,6 @@ import (
	"context"
	"fmt"
	"io"
	"log"
	"net/http"
	"net/url"
	"path"
@@ -76,50 +75,63 @@ func init() {
	Name:        "sugarsync",
	Description: "Sugarsync",
	NewFs:       NewFs,
	Config: func(ctx context.Context, name string, m configmap.Mapper) {
	Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
		opt := new(Options)
		err := configstruct.Set(m, opt)
		if err != nil {
			log.Fatalf("Failed to read options: %v", err)
			return nil, errors.Wrap(err, "failed to read options")
		}

		if opt.RefreshToken != "" {
			fmt.Printf("Already have a token - refresh?\n")
			if !config.ConfirmWithConfig(ctx, m, "config_refresh_token", true) {
				return
		switch config.State {
		case "":
			if opt.RefreshToken == "" {
				return fs.ConfigGoto("username")
			}
		}
		fmt.Printf("Username (email address)> ")
		username := config.ReadLine()
		password := config.GetPassword("Your Sugarsync password is only required during setup and will not be stored.")
			return fs.ConfigConfirm("refresh", true, "config_refresh", "Already have a token - refresh?")
		case "refresh":
			if config.Result == "false" {
				return nil, nil
			}
			return fs.ConfigGoto("username")
		case "username":
			return fs.ConfigInput("password", "config_username", "username (email address)")
		case "password":
			m.Set("username", config.Result)
			return fs.ConfigPassword("auth", "config_password", "Your Sugarsync password.\n\nOnly required during setup and will not be stored.")
		case "auth":
			username, _ := m.Get("username")
			m.Set("username", "")
			password := config.Result

		authRequest := api.AppAuthorization{
			Username:         username,
			Password:         password,
			Application:      withDefault(opt.AppID, appID),
			AccessKeyID:      withDefault(opt.AccessKeyID, accessKeyID),
			PrivateAccessKey: withDefault(opt.PrivateAccessKey, obscure.MustReveal(encryptedPrivateAccessKey)),
		}
			authRequest := api.AppAuthorization{
				Username:         username,
				Password:         password,
				Application:      withDefault(opt.AppID, appID),
				AccessKeyID:      withDefault(opt.AccessKeyID, accessKeyID),
				PrivateAccessKey: withDefault(opt.PrivateAccessKey, obscure.MustReveal(encryptedPrivateAccessKey)),
			}

		var resp *http.Response
		opts := rest.Opts{
			Method: "POST",
			Path:   "/app-authorization",
		}
		srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(rootURL) // FIXME
			var resp *http.Response
			opts := rest.Opts{
				Method: "POST",
				Path:   "/app-authorization",
			}
			srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(rootURL) // FIXME

		// FIXME
		//err = f.pacer.Call(func() (bool, error) {
		resp, err = srv.CallXML(context.Background(), &opts, &authRequest, nil)
		// return shouldRetry(ctx, resp, err)
		//})
		if err != nil {
			log.Fatalf("Failed to get token: %v", err)
			// FIXME
			//err = f.pacer.Call(func() (bool, error) {
			resp, err = srv.CallXML(context.Background(), &opts, &authRequest, nil)
			// return shouldRetry(ctx, resp, err)
			//})
			if err != nil {
				return nil, errors.Wrap(err, "failed to get token")
			}
			opt.RefreshToken = resp.Header.Get("Location")
			m.Set("refresh_token", opt.RefreshToken)
			return nil, nil
		}
		opt.RefreshToken = resp.Header.Get("Location")
		m.Set("refresh_token", opt.RefreshToken)
	},
	Options: []fs.Option{{
		return nil, fmt.Errorf("unknown state %q", config.State)
	}, Options: []fs.Option{{
		Name: "app_id",
		Help: "Sugarsync App ID.\n\nLeave blank to use rclone's.",
	}, {

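The sugarsync conversion above is the clearest example of the new question-by-question config flow in this commit range: rclone calls Config repeatedly, handing back the previous state name and answer in fs.ConfigIn, and each call returns the next question as a *fs.ConfigOut. A minimal sketch of the pattern, using only the fs helpers seen in the hunks above; the "enabled" state and "example_option" key are invented for illustration, and imports (context, fmt, fs, configmap) are assumed as in the diffs:

    // exampleConfig sketches the new state machine style Config function.
    func exampleConfig(ctx context.Context, name string, m configmap.Mapper, in fs.ConfigIn) (*fs.ConfigOut, error) {
        switch in.State {
        case "":
            // First call: ask a yes/no question, then continue in state "enabled".
            return fs.ConfigConfirm("enabled", true, "config_example", "Enable the example option?")
        case "enabled":
            // in.Result carries the answer given for the previous state.
            m.Set("example_option", in.Result)
            return nil, nil // returning nil, nil ends the config sequence
        }
        return nil, fmt.Errorf("unknown state %q", in.State)
    }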
@@ -36,7 +36,7 @@ import (
const (
	directoryMarkerContentType = "application/directory" // content type of directory marker objects
	listChunks                 = 1000                    // chunk size to read directory listings
	defaultChunkSize           = 5 * fs.GibiByte
	defaultChunkSize           = 5 * fs.Gibi
	minSleep                   = 10 * time.Millisecond // In case of error, start at 10ms sleep.
)

@@ -46,7 +46,7 @@ var SharedOptions = []fs.Option{{
	Help: `Above this size files will be chunked into a _segments container.

Above this size files will be chunked into a _segments container. The
default for this is 5GB which is its maximum value.`,
default for this is 5 GiB which is its maximum value.`,
	Default:  defaultChunkSize,
	Advanced: true,
}, {
@@ -56,7 +56,7 @@ default for this is 5GB which is its maximum value.`,
When doing streaming uploads (e.g. using rcat or mount) setting this
flag will cause the swift backend to not upload chunked files.

This will limit the maximum upload size to 5GB. However non chunked
This will limit the maximum upload size to 5 GiB. However non chunked
files are easier to deal with and have an MD5SUM.

Rclone will still chunk files bigger than chunk_size when doing normal
@@ -419,7 +419,7 @@ func swiftConnection(ctx context.Context, opt *Options, name string) (*swift.Con
}

func checkUploadChunkSize(cs fs.SizeSuffix) error {
	const minChunkSize = fs.Byte
	const minChunkSize = fs.SizeSuffixBase
	if cs < minChunkSize {
		return errors.Errorf("%s is less than %s", cs, minChunkSize)
	}

@@ -87,7 +87,7 @@ func (f *Fs) testWithChunk(t *testing.T) {
	preConfChunkSize := f.opt.ChunkSize
	preConfChunk := f.opt.NoChunk
	f.opt.NoChunk = false
	f.opt.ChunkSize = 1024 * fs.Byte
	f.opt.ChunkSize = 1024 * fs.SizeSuffixBase
	defer func() {
		//restore old config after test
		f.opt.ChunkSize = preConfChunkSize
@@ -117,7 +117,7 @@ func (f *Fs) testWithChunkFail(t *testing.T) {
	preConfChunkSize := f.opt.ChunkSize
	preConfChunk := f.opt.NoChunk
	f.opt.NoChunk = false
	f.opt.ChunkSize = 1024 * fs.Byte
	f.opt.ChunkSize = 1024 * fs.SizeSuffixBase
	segmentContainer := f.root + "_segments"
	defer func() {
		//restore config
@@ -159,7 +159,7 @@ func (f *Fs) testCopyLargeObject(t *testing.T) {
	preConfChunkSize := f.opt.ChunkSize
	preConfChunk := f.opt.NoChunk
	f.opt.NoChunk = false
	f.opt.ChunkSize = 1024 * fs.Byte
	f.opt.ChunkSize = 1024 * fs.SizeSuffixBase
	defer func() {
		//restore old config after test
		f.opt.ChunkSize = preConfChunkSize

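The renames above (fs.KibiByte to fs.Kibi, fs.Byte to fs.SizeSuffixBase, and so on) suggest the size-suffix constants follow the usual binary prefixes, each step a factor of 1024. A plausible sketch of the declarations; the real ones in the fs package may differ in type and comments:

    // SizeSuffix is assumed to be an integer byte count, as elsewhere in rclone.
    type SizeSuffix int64

    const (
        SizeSuffixBase SizeSuffix = 1 << (iota * 10) // 1
        Kibi                                         // 1024
        Mebi                                         // 1024 * 1024
        Gibi                                         // 1024 * 1024 * 1024
    )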
@@ -7,7 +7,6 @@ import (
	"context"
	"fmt"
	"io"
	"log"
	"path"
	"strings"
	"time"
@@ -42,19 +41,19 @@ func init() {
	Name:        "tardigrade",
	Description: "Tardigrade Decentralized Cloud Storage",
	NewFs:       NewFs,
	Config: func(ctx context.Context, name string, configMapper configmap.Mapper) {
		provider, _ := configMapper.Get(fs.ConfigProvider)
	Config: func(ctx context.Context, name string, m configmap.Mapper, configIn fs.ConfigIn) (*fs.ConfigOut, error) {
		provider, _ := m.Get(fs.ConfigProvider)

		config.FileDeleteKey(name, fs.ConfigProvider)

		if provider == newProvider {
			satelliteString, _ := configMapper.Get("satellite_address")
			apiKey, _ := configMapper.Get("api_key")
			passphrase, _ := configMapper.Get("passphrase")
			satelliteString, _ := m.Get("satellite_address")
			apiKey, _ := m.Get("api_key")
			passphrase, _ := m.Get("passphrase")

			// satelliteString contains always default and passphrase can be empty
			if apiKey == "" {
				return
				return nil, nil
			}

			satellite, found := satMap[satelliteString]
@@ -64,22 +63,23 @@ func init() {

			access, err := uplink.RequestAccessWithPassphrase(context.TODO(), satellite, apiKey, passphrase)
			if err != nil {
				log.Fatalf("Couldn't create access grant: %v", err)
				return nil, errors.Wrap(err, "couldn't create access grant")
			}

			serializedAccess, err := access.Serialize()
			if err != nil {
				log.Fatalf("Couldn't serialize access grant: %v", err)
				return nil, errors.Wrap(err, "couldn't serialize access grant")
			}
			configMapper.Set("satellite_address", satellite)
			configMapper.Set("access_grant", serializedAccess)
			m.Set("satellite_address", satellite)
			m.Set("access_grant", serializedAccess)
		} else if provider == existingProvider {
			config.FileDeleteKey(name, "satellite_address")
			config.FileDeleteKey(name, "api_key")
			config.FileDeleteKey(name, "passphrase")
		} else {
			log.Fatalf("Invalid provider type: %s", provider)
			return nil, errors.Errorf("invalid provider type: %s", provider)
		}
		return nil, nil
	},
	Options: []fs.Option{
		{

@@ -148,13 +148,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (_ io.ReadC
		case s && !e:
			offset = opt.Start
		case !s && e:
			object, err := o.fs.project.StatObject(ctx, bucketName, bucketPath)
			if err != nil {
				return nil, err
			}

			offset = object.System.ContentLength - opt.End
			length = opt.End
			offset = -opt.End
		}
	case *fs.SeekOption:
		offset = opt.Offset

backend/uptobox/api/types.go (new file, 170 lines)
@@ -0,0 +1,170 @@
package api

import "fmt"

// Error contains the error code and message returned by the API
type Error struct {
	Success    bool   `json:"success,omitempty"`
	StatusCode int    `json:"statusCode,omitempty"`
	Message    string `json:"message,omitempty"`
	Data       string `json:"data,omitempty"`
}

// Error returns a string for the error and satisfies the error interface
func (e Error) Error() string {
	out := fmt.Sprintf("api error %d", e.StatusCode)
	if e.Message != "" {
		out += ": " + e.Message
	}
	if e.Data != "" {
		out += ": " + e.Data
	}
	return out
}

// FolderEntry represents a Uptobox subfolder when listing folder contents
type FolderEntry struct {
	FolderID    uint64 `json:"fld_id"`
	Description string `json:"fld_descr"`
	Password    string `json:"fld_password"`
	FullPath    string `json:"fullPath"`
	Path        string `json:"fld_name"`
	Name        string `json:"name"`
	Hash        string `json:"hash"`
}

// FolderInfo represents the current folder when listing folder contents
type FolderInfo struct {
	FolderID      uint64 `json:"fld_id"`
	Hash          string `json:"hash"`
	FileCount     uint64 `json:"fileCount"`
	TotalFileSize int64  `json:"totalFileSize"`
}

// FileInfo represents a file when listing folder contents
type FileInfo struct {
	Name         string `json:"file_name"`
	Description  string `json:"file_descr"`
	Created      string `json:"file_created"`
	Size         int64  `json:"file_size"`
	Downloads    uint64 `json:"file_downloads"`
	Code         string `json:"file_code"`
	Password     string `json:"file_password"`
	Public       int    `json:"file_public"`
	LastDownload string `json:"file_last_download"`
	ID           uint64 `json:"id"`
}

// ReadMetadataResponse is the response when listing folder contents
type ReadMetadataResponse struct {
	StatusCode int    `json:"statusCode"`
	Message    string `json:"message"`
	Data       struct {
		CurrentFolder  FolderInfo    `json:"currentFolder"`
		Folders        []FolderEntry `json:"folders"`
		Files          []FileInfo    `json:"files"`
		PageCount      int           `json:"pageCount"`
		TotalFileCount int           `json:"totalFileCount"`
		TotalFileSize  int64         `json:"totalFileSize"`
	} `json:"data"`
}

// UploadInfo is the response when initiating an upload
type UploadInfo struct {
	StatusCode int    `json:"statusCode"`
	Message    string `json:"message"`
	Data       struct {
		UploadLink string `json:"uploadLink"`
		MaxUpload  string `json:"maxUpload"`
	} `json:"data"`
}

// UploadResponse is the response to a successful upload
type UploadResponse struct {
	Files []struct {
		Name      string `json:"name"`
		Size      int64  `json:"size"`
		URL       string `json:"url"`
		DeleteURL string `json:"deleteUrl"`
	} `json:"files"`
}

// UpdateResponse is a generic response to various actions on files (rename/copy/move)
type UpdateResponse struct {
	Message    string `json:"message"`
	StatusCode int    `json:"statusCode"`
}

// Download is the response when requesting a download link
type Download struct {
	StatusCode int    `json:"statusCode"`
	Message    string `json:"message"`
	Data       struct {
		DownloadLink string `json:"dlLink"`
	} `json:"data"`
}

// MetadataRequestOptions represents all the options when listing folder contents
type MetadataRequestOptions struct {
	Limit       uint64
	Offset      uint64
	SearchField string
	Search      string
}

// CreateFolderRequest is used for creating a folder
type CreateFolderRequest struct {
	Token string `json:"token"`
	Path  string `json:"path"`
	Name  string `json:"name"`
}

// DeleteFolderRequest is used for deleting a folder
type DeleteFolderRequest struct {
	Token    string `json:"token"`
	FolderID uint64 `json:"fld_id"`
}

// CopyMoveFileRequest is used for moving/copying a file
type CopyMoveFileRequest struct {
	Token               string `json:"token"`
	FileCodes           string `json:"file_codes"`
	DestinationFolderID uint64 `json:"destination_fld_id"`
	Action              string `json:"action"`
}

// MoveFolderRequest is used for moving a folder
type MoveFolderRequest struct {
	Token               string `json:"token"`
	FolderID            uint64 `json:"fld_id"`
	DestinationFolderID uint64 `json:"destination_fld_id"`
	Action              string `json:"action"`
}

// RenameFolderRequest is used for renaming a folder
type RenameFolderRequest struct {
	Token    string `json:"token"`
	FolderID uint64 `json:"fld_id"`
	NewName  string `json:"new_name"`
}

// UpdateFileInformation is used for renaming a file
type UpdateFileInformation struct {
	Token       string `json:"token"`
	FileCode    string `json:"file_code"`
	NewName     string `json:"new_name,omitempty"`
	Description string `json:"description,omitempty"`
	Password    string `json:"password,omitempty"`
	Public      string `json:"public,omitempty"`
}

// RemoveFileRequest is used for deleting a file
type RemoveFileRequest struct {
	Token     string `json:"token"`
	FileCodes string `json:"file_codes"`
}

// Token represents the authentication token
type Token struct {
	Token string `json:"token"`
}
backend/uptobox/uptobox.go (new file, 1053 lines; diff suppressed because it is too large)
backend/uptobox/uptobox_test.go (new file, 21 lines)
@@ -0,0 +1,21 @@
// Test Uptobox filesystem interface
package uptobox_test

import (
	"testing"

	"github.com/rclone/rclone/backend/uptobox"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	if *fstest.RemoteName == "" {
		*fstest.RemoteName = "TestUptobox:"
	}
	fstests.Run(t, &fstests.Opt{
		RemoteName: *fstest.RemoteName,
		NilObject:  (*uptobox.Object)(nil),
	})
}

@@ -125,7 +125,7 @@ func (ca *CookieAuth) getSPCookie(conf *SharepointSuccessResponse) (*CookieRespo
		return nil, errors.Wrap(err, "Error while constructing endpoint URL")
	}

	u, err := url.Parse("https://" + spRoot.Host + "/_forms/default.aspx?wa=wsignin1.0")
	u, err := url.Parse(spRoot.Scheme + "://" + spRoot.Host + "/_forms/default.aspx?wa=wsignin1.0")
	if err != nil {
		return nil, errors.Wrap(err, "Error while constructing login URL")
	}

@@ -113,6 +113,21 @@ func init() {
		Name:     config.ConfigEncoding,
		Help:     configEncodingHelp,
		Advanced: true,
	}, {
		Name: "headers",
		Help: `Set HTTP headers for all transactions

Use this to set additional HTTP headers for all transactions

The input format is comma separated list of key,value pairs. Standard
[CSV encoding](https://godoc.org/encoding/csv) may be used.

For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.

You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
`,
		Default:  fs.CommaSepList{},
		Advanced: true,
	}},
	})
}
@@ -126,6 +141,7 @@ type Options struct {
	BearerToken        string               `config:"bearer_token"`
	BearerTokenCommand string               `config:"bearer_token_command"`
	Enc                encoder.MultiEncoder `config:"encoding"`
	Headers            fs.CommaSepList      `config:"headers"`
}

// Fs represents a remote webdav
@@ -359,6 +375,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	if err != nil {
		return nil, err
	}

	if len(opt.Headers)%2 != 0 {
		return nil, errors.New("odd number of headers supplied")
	}
	fs.Debugf(nil, "found headers: %v", opt.Headers)

	rootIsDir := strings.HasSuffix(root, "/")
	root = strings.Trim(root, "/")

@@ -428,6 +450,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
			return nil, err
		}
	}
	if opt.Headers != nil {
		f.addHeaders(opt.Headers)
	}
	f.srv.SetErrorHandler(errorHandler)
	err = f.setQuirks(ctx, opt.Vendor)
	if err != nil {
@@ -487,6 +512,15 @@ func (f *Fs) fetchBearerToken(cmd string) (string, error) {
	return stdoutString, nil
}

// Adds the configured headers to the request if any
func (f *Fs) addHeaders(headers fs.CommaSepList) {
	for i := 0; i < len(headers); i += 2 {
		key := f.opt.Headers[i]
		value := f.opt.Headers[i+1]
		f.srv.SetHeader(key, value)
	}
}

// fetch the bearer token and set it if successful
func (f *Fs) fetchAndSetBearerToken() error {
	if f.opt.BearerTokenCommand == "" {

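The headers support above is split across two hunks: NewFs rejects an odd-length list and addHeaders applies the remaining pairs in order. A standalone sketch combining both steps; applyHeaders and the setHeader callback are illustrative names (the real code calls f.srv.SetHeader directly), and the stdlib errors package is assumed:

    // applyHeaders validates and applies a flattened key,value list, as the
    // webdav changes above do across NewFs and addHeaders.
    func applyHeaders(headers []string, setHeader func(key, value string)) error {
        if len(headers)%2 != 0 {
            return errors.New("odd number of headers supplied")
        }
        for i := 0; i < len(headers); i += 2 {
            setHeader(headers[i], headers[i+1])
        }
        return nil
    }

The internal test that follows shows the matching user-facing form: the option value is simply the pairs joined with commas, e.g. "X-Potato,sausage,X-Rhubarb,cucumber".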
backend/webdav/webdav_internal_test.go (new file, 74 lines)
@@ -0,0 +1,74 @@
package webdav_test

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"

	"github.com/rclone/rclone/backend/webdav"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configfile"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

var (
	remoteName = "TestWebDAV"
	headers    = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
)

// prepareServer prepares the test server and returns a function to tidy it up
// afterwards; with each request the headers option tests are executed
func prepareServer(t *testing.T) (configmap.Simple, func()) {
	// file server
	fileServer := http.FileServer(http.Dir(""))

	// test the headers are there then pass on to fileServer
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
		assert.Equal(t, headers[1], r.Header.Get(headers[0]), what+headers[0])
		assert.Equal(t, headers[3], r.Header.Get(headers[2]), what+headers[2])
		fileServer.ServeHTTP(w, r)
	})

	// Make the test server
	ts := httptest.NewServer(handler)

	// Configure the remote
	configfile.Install()

	m := configmap.Simple{
		"type": "webdav",
		"url":  ts.URL,
		// add headers to test the headers option
		"headers": strings.Join(headers, ","),
	}

	// return a function to tidy up
	return m, ts.Close
}

// prepare the test server and return a function to tidy it up afterwards
func prepare(t *testing.T) (fs.Fs, func()) {
	m, tidy := prepareServer(t)

	// Instantiate the WebDAV server
	f, err := webdav.NewFs(context.Background(), remoteName, "", m)
	require.NoError(t, err)

	return f, tidy
}

// TestHeaders checks that the headers option is applied - any request will do
func TestHeaders(t *testing.T) {
	f, tidy := prepare(t)
	defer tidy()

	// any request will do
	_, err := f.Features().About(context.Background())
	require.NoError(t, err)
}

@@ -60,12 +60,10 @@ func init() {
	Name:        "yandex",
	Description: "Yandex Disk",
	NewFs:       NewFs,
	Config: func(ctx context.Context, name string, m configmap.Mapper) {
		err := oauthutil.Config(ctx, "yandex", name, m, oauthConfig, nil)
		if err != nil {
			log.Fatalf("Failed to configure token: %v", err)
			return
		}
	Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
		return oauthutil.ConfigOut("", &oauthutil.Options{
			OAuth2Config: oauthConfig,
		})
	},
	Options: append(oauthutil.SharedOptions, []fs.Option{{
		Name: config.ConfigEncoding,
@@ -251,22 +249,22 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

	token, err := oauthutil.GetToken(name, m)
	if err != nil {
		log.Fatalf("Couldn't read OAuth token (this should never happen).")
		return nil, errors.Wrap(err, "couldn't read OAuth token")
	}
	if token.RefreshToken == "" {
		log.Fatalf("Unable to get RefreshToken. If you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend.")
		return nil, errors.New("unable to get RefreshToken. If you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
	}
	if token.TokenType != "OAuth" {
		token.TokenType = "OAuth"
		err = oauthutil.PutToken(name, m, token, false)
		if err != nil {
			log.Fatalf("Couldn't save OAuth token (this should never happen).")
			return nil, errors.Wrap(err, "couldn't save OAuth token")
		}
		log.Printf("Automatically upgraded OAuth config.")
	}
	oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
	if err != nil {
		log.Fatalf("Failed to configure Yandex: %v", err)
		return nil, errors.Wrap(err, "failed to configure Yandex")
	}

	ci := fs.GetConfig(ctx)

@@ -7,7 +7,6 @@ import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"net/url"
	"path"
@@ -73,36 +72,97 @@ func init() {
	Name:        "zoho",
	Description: "Zoho",
	NewFs:       NewFs,
	Config: func(ctx context.Context, name string, m configmap.Mapper) {
	Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
		// Need to setup region before configuring oauth
		setupRegion(m)
		opt := oauthutil.Options{
			// No refresh token unless ApprovalForce is set
			OAuth2Opts: []oauth2.AuthCodeOption{oauth2.ApprovalForce},
		}
		if err := oauthutil.Config(ctx, "zoho", name, m, oauthConfig, &opt); err != nil {
			log.Fatalf("Failed to configure token: %v", err)
		}
		// We need to rewrite the token type to "Zoho-oauthtoken" because Zoho wants
		// it's own custom type
		token, err := oauthutil.GetToken(name, m)
		err := setupRegion(m)
		if err != nil {
			log.Fatalf("Failed to read token: %v", err)
			return nil, err
		}
		if token.TokenType != "Zoho-oauthtoken" {
			token.TokenType = "Zoho-oauthtoken"
			err = oauthutil.PutToken(name, m, token, false)
		getSrvs := func() (authSrv, apiSrv *rest.Client, err error) {
			oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
				return nil, nil, errors.Wrap(err, "failed to load oAuthClient")
			}
			authSrv = rest.NewClient(oAuthClient).SetRoot(accountsURL)
			apiSrv = rest.NewClient(oAuthClient).SetRoot(rootURL)
			return authSrv, apiSrv, nil
		}
		if err = setupRoot(ctx, name, m); err != nil {
			log.Fatalf("Failed to configure root directory: %v", err)

		switch config.State {
		case "":
			return oauthutil.ConfigOut("teams", &oauthutil.Options{
				OAuth2Config: oauthConfig,
				// No refresh token unless ApprovalForce is set
				OAuth2Opts: []oauth2.AuthCodeOption{oauth2.ApprovalForce},
			})
		case "teams":
			// We need to rewrite the token type to "Zoho-oauthtoken" because Zoho wants
			// it's own custom type
			token, err := oauthutil.GetToken(name, m)
			if err != nil {
				return nil, errors.Wrap(err, "failed to read token")
			}
			if token.TokenType != "Zoho-oauthtoken" {
				token.TokenType = "Zoho-oauthtoken"
				err = oauthutil.PutToken(name, m, token, false)
				if err != nil {
					return nil, errors.Wrap(err, "failed to configure token")
				}
			}

			authSrv, apiSrv, err := getSrvs()
			if err != nil {
				return nil, err
			}

			// Get the user Info
			opts := rest.Opts{
				Method: "GET",
				Path:   "/oauth/user/info",
			}
			var user api.User
			_, err = authSrv.CallJSON(ctx, &opts, nil, &user)
			if err != nil {
				return nil, err
			}

			// Get the teams
			teams, err := listTeams(ctx, user.ZUID, apiSrv)
			if err != nil {
				return nil, err
			}
			return fs.ConfigChoose("workspace", "config_team_drive_id", "Team Drive ID", len(teams), func(i int) (string, string) {
				team := teams[i]
				return team.ID, team.Attributes.Name
			})
		case "workspace":
			_, apiSrv, err := getSrvs()
			if err != nil {
				return nil, err
			}
			teamID := config.Result
			workspaces, err := listWorkspaces(ctx, teamID, apiSrv)
			if err != nil {
				return nil, err
			}
			return fs.ConfigChoose("workspace_end", "config_workspace", "Workspace ID", len(workspaces), func(i int) (string, string) {
				workspace := workspaces[i]
				return workspace.ID, workspace.Attributes.Name
			})
		case "workspace_end":
			worksspaceID := config.Result
			m.Set(configRootID, worksspaceID)
			return nil, nil
		}
		return nil, fmt.Errorf("unknown state %q", config.State)
	},
	Options: append(oauthutil.SharedOptions, []fs.Option{{
		Name: "region",
		Help: "Zoho region to connect to. You'll have to use the region you organization is registered in.",
		Help: `Zoho region to connect to.

You'll have to use the region your organization is registered in. If
not sure use the same top level domain as you connect to in your
browser.`,
		Examples: []fs.OptionExample{{
			Value: "com",
			Help:  "United states / Global",
@@ -159,15 +219,16 @@ type Object struct {

// ------------------------------------------------------------

func setupRegion(m configmap.Mapper) {
func setupRegion(m configmap.Mapper) error {
	region, ok := m.Get("region")
	if !ok {
		log.Fatalf("No region set\n")
	if !ok || region == "" {
		return errors.New("no region set")
	}
	rootURL = fmt.Sprintf("https://workdrive.zoho.%s/api/v1", region)
	accountsURL = fmt.Sprintf("https://accounts.zoho.%s", region)
	oauthConfig.Endpoint.AuthURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/auth", region)
	oauthConfig.Endpoint.TokenURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/token", region)
	return nil
}

// ------------------------------------------------------------
@@ -200,49 +261,6 @@ func listWorkspaces(ctx context.Context, teamID string, srv *rest.Client) ([]api
	return workspaceList.TeamWorkspace, nil
}

func setupRoot(ctx context.Context, name string, m configmap.Mapper) error {
	oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
	if err != nil {
		log.Fatalf("Failed to load oAuthClient: %s", err)
	}
	authSrv := rest.NewClient(oAuthClient).SetRoot(accountsURL)
	opts := rest.Opts{
		Method: "GET",
		Path:   "/oauth/user/info",
	}

	var user api.User
	_, err = authSrv.CallJSON(ctx, &opts, nil, &user)
	if err != nil {
		return err
	}

	apiSrv := rest.NewClient(oAuthClient).SetRoot(rootURL)
	teams, err := listTeams(ctx, user.ZUID, apiSrv)
	if err != nil {
		return err
	}
	var teamIDs, teamNames []string
	for _, team := range teams {
		teamIDs = append(teamIDs, team.ID)
		teamNames = append(teamNames, team.Attributes.Name)
	}
	teamID := config.Choose("Enter a Team Drive ID", teamIDs, teamNames, true)

	workspaces, err := listWorkspaces(ctx, teamID, apiSrv)
	if err != nil {
		return err
	}
	var workspaceIDs, workspaceNames []string
	for _, workspace := range workspaces {
		workspaceIDs = append(workspaceIDs, workspace.ID)
		workspaceNames = append(workspaceNames, workspace.Attributes.Name)
	}
	worksspaceID := config.Choose("Enter a Workspace ID", workspaceIDs, workspaceNames, true)
	m.Set(configRootID, worksspaceID)
	return nil
}

// --------------------------------------------------------------

// retryErrorCodes is a slice of error codes that we will retry
@@ -372,7 +390,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	if err := configstruct.Set(m, opt); err != nil {
		return nil, err
	}
	setupRegion(m)
	err := setupRegion(m)
	if err != nil {
		return nil, err
	}

	root = parsePath(root)
	oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)

@@ -1,3 +1,5 @@
# Email addresses to ignore in the git log when making the authors.md file
<nick@raig-wood.com>
<anaghk.dos@gmail.com>
<33207650+sp31415t1@users.noreply.github.com>
<unknown>

bin/config.py (new executable file, 203 lines)
@@ -0,0 +1,203 @@
#!/usr/bin/env python3
"""
Test program to demonstrate the remote config interfaces in
rclone.

This program can simulate

    rclone config create
    rclone config update
    rclone config password - NOT implemented yet
    rclone authorize - NOT implemented yet

Pass the desired action as the first argument then any parameters.

This assumes passwords will be passed in the clear.
"""

import argparse
import subprocess
import json
from pprint import pprint

sep = "-"*60

def rpc(args, command, params):
    """
    Run the command. This could be either over the CLI or the API.

    Here we run over the API either using `rclone rc --loopback` which
    is useful for making sure state is saved properly or to an
    existing rclone rcd if `--rc` is used on the command line.
    """
    if args.rc:
        import requests
        kwargs = {
            "json": params,
        }
        if args.user:
            kwargs["auth"] = (args.user, args.password)
        r = requests.post('http://localhost:5572/'+command, **kwargs)
        if r.status_code != 200:
            raise ValueError(f"RC command failed: Error {r.status_code}: {r.text}")
        return r.json()
    cmd = ["rclone", "-vv", "rc", "--loopback", command, "--json", json.dumps(params)]
    result = subprocess.run(cmd, stdout=subprocess.PIPE, check=True)
    return json.loads(result.stdout)

def parse_parameters(parameters):
    """
    Parse the incoming key=value parameters into a dict
    """
    d = {}
    for param in parameters:
        parts = param.split("=", 1)
        if len(parts) != 2:
            raise ValueError("bad format for parameter need name=value")
        d[parts[0]] = parts[1]
    return d

def ask(opt):
    """
    Ask the user to enter the option

    This is the user interface for asking a user a question.

    If there are examples they should be presented.
    """
    while True:
        if opt["IsPassword"]:
            print("*** Inputting a password")
        print(opt['Help'])
        examples = opt.get("Examples", ())
        or_number = ""
        if len(examples) > 0:
            or_number = " or choice number"
            for i, example in enumerate(examples):
                print(f"{i:3} value: {example['Value']}")
                print(f"    help: {example['Help']}")
        print(f"Enter a {opt['Type']} value{or_number}. Press Enter for the default ('{opt['DefaultStr']}')")
        print(f"{opt['Name']}> ", end='')
        s = input()
        if s == "":
            return opt["DefaultStr"]
        try:
            i = int(s)
            if i >= 0 and i < len(examples):
                return examples[i]["Value"]
        except ValueError:
            pass
        if opt["Exclusive"]:
            for example in examples:
                if s == example["Value"]:
                    return s
            # Exclusive is set but the value isn't one of the accepted
            # ones so continue
            print("Value isn't one of the acceptable values")
        else:
            return s
    return s

def create_or_update(what, args):
    """
    Run the equivalent of rclone config create
    or rclone config update

    what should either be "create" or "update"
"""
|
||||
print(what, args)
|
||||
params = parse_parameters(args.parameters)
|
||||
inp = {
|
||||
"name": args.name,
|
||||
"parameters": params,
|
||||
"opt": {
|
||||
"nonInteractive": True,
|
||||
"all": args.all,
|
||||
"noObscure": args.obscured_passwords,
|
||||
"obscure": not args.obscured_passwords,
|
||||
},
|
||||
}
|
||||
if what == "create":
|
||||
inp["type"] = args.type
|
||||
while True:
|
||||
print(sep)
|
||||
print("Input to API")
|
||||
pprint(inp)
|
||||
print(sep)
|
||||
out = rpc(args, "config/"+what, inp)
|
||||
print(sep)
|
||||
print("Output from API")
|
||||
pprint(out)
|
||||
print(sep)
|
||||
if out["State"] == "":
|
||||
return
|
||||
if out["Error"]:
|
||||
print("Error", out["Error"])
|
||||
result = ask(out["Option"])
|
||||
inp["opt"]["state"] = out["State"]
|
||||
inp["opt"]["result"] = result
|
||||
inp["opt"]["continue"] = True
|
||||
|
||||
def create(args):
|
||||
"""Run the equivalent of rclone config create"""
|
||||
create_or_update("create", args)
|
||||
|
||||
def update(args):
|
||||
"""Run the equivalent of rclone config update"""
|
||||
create_or_update("update", args)
|
||||
|
||||
def password(args):
|
||||
"""Run the equivalent of rclone config password"""
|
||||
print("password", args)
|
||||
raise NotImplementedError()
|
||||
|
||||
def authorize(args):
|
||||
"""Run the equivalent of rclone authorize"""
|
||||
print("authorize", args)
|
||||
raise NotImplementedError()
|
||||
|
||||
def main():
|
||||
"""
|
||||
Make the command line parser and dispatch
|
||||
"""
|
||||
parser = argparse.ArgumentParser(
|
||||
description=__doc__,
|
||||
formatter_class=argparse.RawDescriptionHelpFormatter,
|
||||
)
|
||||
parser.add_argument("-a", "--all", action='store_true',
|
||||
help="Ask all the config questions if set")
|
||||
parser.add_argument("-o", "--obscured-passwords", action='store_true',
|
||||
help="If set assume the passwords are obscured")
|
||||
parser.add_argument("--rc", action='store_true',
|
||||
help="If set use the rc (you'll need to start an rclone rcd)")
|
||||
parser.add_argument("--user", type=str, default="",
|
||||
help="Username for use with --rc")
|
||||
parser.add_argument("--pass", type=str, default="", dest='password',
|
||||
help="Password for use with --rc")
|
||||
|
||||
subparsers = parser.add_subparsers(dest='command', required=True)
|
||||
|
||||
subparser = subparsers.add_parser('create')
|
||||
subparser.add_argument("name", type=str, help="Name of remote to create")
|
||||
subparser.add_argument("type", type=str, help="Type of remote to create")
|
||||
subparser.add_argument("parameters", type=str, nargs='*', help="Config parameters name=value name=value")
|
||||
subparser.set_defaults(func=create)
|
||||
|
||||
subparser = subparsers.add_parser('update')
|
||||
subparser.add_argument("name", type=str, help="Name of remote to update")
|
||||
subparser.add_argument("parameters", type=str, nargs='*', help="Config parameters name=value name=value")
|
||||
subparser.set_defaults(func=update)
|
||||
|
||||
subparser = subparsers.add_parser('password')
|
||||
subparser.add_argument("name", type=str, help="Name of remote to update")
|
||||
subparser.add_argument("parameters", type=str, nargs='*', help="Config parameters name=value name=value")
|
||||
subparser.set_defaults(func=password)
|
||||
|
||||
subparser = subparsers.add_parser('authorize')
|
||||
subparser.set_defaults(func=authorize)
|
||||
|
||||
args = parser.parse_args()
|
||||
args.func(args)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -23,6 +23,7 @@ docs = [
    "rc.md",
    "overview.md",
    "flags.md",
    "docker.md",

    # Keep these alphabetical by full name
    "fichier.md",
@@ -62,6 +63,7 @@ docs = [
    "sftp.md",
    "sugarsync.md",
    "tardigrade.md",
    "uptobox.md",
    "union.md",
    "webdav.md",
    "yandex.md",

@@ -44,10 +44,10 @@ var commandDefinition = &cobra.Command{
	Use:   "about remote:",
	Short: `Get quota information from the remote.`,
	Long: `
` + "`rclone about`" + `prints quota information about a remote to standard
` + "`rclone about`" + ` prints quota information about a remote to standard
output. The output is typically used, free, quota and trash contents.

E.g. Typical output from` + "`rclone about remote:`" + `is:
E.g. Typical output from ` + "`rclone about remote:`" + ` is:

    Total:   17G
    Used:    7.444G
@@ -75,7 +75,7 @@ Applying a ` + "`--full`" + ` flag to the command prints the bytes in full, e.g.
    Trashed: 104857602
    Other:   8849156022

A ` + "`--json`" + `flag generates conveniently computer readable output, e.g.
A ` + "`--json`" + ` flag generates conveniently computer readable output, e.g.

    {
        "total": 18253611008,

@@ -10,6 +10,7 @@ import (
	_ "github.com/rclone/rclone/cmd/cachestats"
	_ "github.com/rclone/rclone/cmd/cat"
	_ "github.com/rclone/rclone/cmd/check"
	_ "github.com/rclone/rclone/cmd/checksum"
	_ "github.com/rclone/rclone/cmd/cleanup"
	_ "github.com/rclone/rclone/cmd/cmount"
	_ "github.com/rclone/rclone/cmd/config"
@@ -18,7 +19,6 @@ import (
	_ "github.com/rclone/rclone/cmd/copyurl"
	_ "github.com/rclone/rclone/cmd/cryptcheck"
	_ "github.com/rclone/rclone/cmd/cryptdecode"
	_ "github.com/rclone/rclone/cmd/dbhashsum"
	_ "github.com/rclone/rclone/cmd/dedupe"
	_ "github.com/rclone/rclone/cmd/delete"
	_ "github.com/rclone/rclone/cmd/deletefile"
@@ -54,6 +54,7 @@ import (
	_ "github.com/rclone/rclone/cmd/size"
	_ "github.com/rclone/rclone/cmd/sync"
	_ "github.com/rclone/rclone/cmd/test"
	_ "github.com/rclone/rclone/cmd/test/changenotify"
	_ "github.com/rclone/rclone/cmd/test/histogram"
	_ "github.com/rclone/rclone/cmd/test/info"
	_ "github.com/rclone/rclone/cmd/test/makefiles"

@@ -2,6 +2,7 @@ package check

import (
	"context"
	"fmt"
	"io"
	"os"
	"strings"
@@ -9,6 +10,7 @@ import (
	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
@@ -16,20 +18,22 @@ import (

// Globals
var (
	download     = false
	oneway       = false
	combined     = ""
	missingOnSrc = ""
	missingOnDst = ""
	match        = ""
	differ       = ""
	errFile      = ""
	download          = false
	oneway            = false
	combined          = ""
	missingOnSrc      = ""
	missingOnDst      = ""
	match             = ""
	differ            = ""
	errFile           = ""
	checkFileHashType = ""
)

func init() {
	cmd.Root.AddCommand(commandDefinition)
	cmdFlags := commandDefinition.Flags()
	flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by downloading rather than with hash.")
	flags.StringVarP(cmdFlags, &checkFileHashType, "checkfile", "C", checkFileHashType, "Treat source:path as a SUM file with hashes of given type")
	AddFlags(cmdFlags)
}

@@ -125,7 +129,6 @@ func GetCheckOpt(fsrc, fdst fs.Fs) (opt *operations.CheckOpt, close func(), err
	}

	return opt, close, nil

}

var commandDefinition = &cobra.Command{
@@ -143,20 +146,50 @@ If you supply the |--download| flag, it will download the data from
both remotes and check them against each other on the fly. This can
be useful for remotes that don't support hashes or if you really want
to check all the data.

If you supply the |--checkfile HASH| flag with a valid hash name,
the |source:path| must point to a text file in the SUM format.
`, "|", "`") + FlagsHelp,
	Run: func(command *cobra.Command, args []string) {
	RunE: func(command *cobra.Command, args []string) error {
		cmd.CheckArgs(2, 2, command, args)
		fsrc, fdst := cmd.NewFsSrcDst(args)
		var (
			fsrc, fdst fs.Fs
			hashType   hash.Type
			fsum       fs.Fs
			sumFile    string
		)
		if checkFileHashType != "" {
			if err := hashType.Set(checkFileHashType); err != nil {
				fmt.Println(hash.HelpString(0))
				return err
			}
			fsum, sumFile, fsrc = cmd.NewFsSrcFileDst(args)
		} else {
			fsrc, fdst = cmd.NewFsSrcDst(args)
		}

		cmd.Run(false, true, command, func() error {
			opt, close, err := GetCheckOpt(fsrc, fdst)
			if err != nil {
				return err
			}
			defer close()

			if checkFileHashType != "" {
				return operations.CheckSum(context.Background(), fsrc, fsum, sumFile, hashType, opt, download)
			}

			if download {
				return operations.CheckDownload(context.Background(), opt)
			}
			hashType := fsrc.Hashes().Overlap(fdst.Hashes()).GetOne()
			if hashType == hash.None {
				fs.Errorf(nil, "No common hash found - not using a hash for checks")
			} else {
				fs.Infof(nil, "Using %v for hash comparisons", hashType)
			}
			return operations.Check(context.Background(), opt)
		})
		return nil
	},
}

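For reference, the SUM file that --checkfile and the new checksum command read is, as far as these hunks show, the familiar md5sum/sha1sum layout: one line per file, hash then whitespace then path (the exact whitespace handling lives in the operations package and is not confirmed here). A made-up illustration with placeholder MD5 values:

    0bee89b07a248e27c83fc3d5951213c1  file1.txt
    b1946ac92492d2347c6235b4d2611184  subdir/file2.txt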
cmd/checksum/checksum.go (new file, 57 lines)
@@ -0,0 +1,57 @@
package checksum

import (
	"context"
	"fmt"
	"strings"

	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/cmd/check" // for common flags
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
	"github.com/spf13/cobra"
)

var download = false

func init() {
	cmd.Root.AddCommand(commandDefinition)
	cmdFlags := commandDefinition.Flags()
	flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by hashing the contents.")
	check.AddFlags(cmdFlags)
}

var commandDefinition = &cobra.Command{
	Use:   "checksum <hash> sumfile src:path",
	Short: `Checks the files in the source against a SUM file.`,
	Long: strings.ReplaceAll(`
Checks that hashsums of source files match the SUM file.
It compares hashes (MD5, SHA1, etc) and logs a report of files which
don't match. It doesn't alter the file system.

If you supply the |--download| flag, it will download the data from remote
and calculate the contents hash on the fly. This can be useful for remotes
that don't support hashes or if you really want to check all the data.
`, "|", "`") + check.FlagsHelp,
	RunE: func(command *cobra.Command, args []string) error {
		cmd.CheckArgs(3, 3, command, args)
		var hashType hash.Type
		if err := hashType.Set(args[0]); err != nil {
			fmt.Println(hash.HelpString(0))
			return err
		}
		fsum, sumFile, fsrc := cmd.NewFsSrcFileDst(args[1:])

		cmd.Run(false, true, command, func() error {
			opt, close, err := check.GetCheckOpt(nil, fsrc)
			if err != nil {
				return err
			}
			defer close()

			return operations.CheckSum(context.Background(), fsrc, fsum, sumFile, hashType, opt, download)
		})
		return nil
	},
}
cmd/cmd.go (54 changed lines)
@@ -37,6 +37,7 @@ import (
	"github.com/rclone/rclone/fs/rc/rcserver"
	"github.com/rclone/rclone/lib/atexit"
	"github.com/rclone/rclone/lib/buildinfo"
	"github.com/rclone/rclone/lib/exitcode"
	"github.com/rclone/rclone/lib/random"
	"github.com/rclone/rclone/lib/terminal"
	"github.com/spf13/cobra"
@@ -49,7 +50,7 @@ var (
	cpuProfile      = flags.StringP("cpuprofile", "", "", "Write cpu profile to file")
	memProfile      = flags.StringP("memprofile", "", "", "Write memory profile to file")
	statsInterval   = flags.DurationP("stats", "", time.Minute*1, "Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable)")
	dataRateUnit    = flags.StringP("stats-unit", "", "bytes", "Show data rate in stats as either 'bits' or 'bytes'/s")
	dataRateUnit    = flags.StringP("stats-unit", "", "bytes", "Show data rate in stats as either 'bits' or 'bytes' per second")
	version         bool
	retries         = flags.IntP("retries", "", 3, "Retry operations this many times if they fail")
	retriesInterval = flags.DurationP("retries-sleep", "", 0, "Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)")
@@ -60,23 +61,21 @@ var (
	errorTooManyArguments = errors.New("too many arguments")
)

const (
	exitCodeSuccess = iota
	exitCodeUsageError
	exitCodeUncategorizedError
	exitCodeDirNotFound
	exitCodeFileNotFound
	exitCodeRetryError
	exitCodeNoRetryError
	exitCodeFatalError
	exitCodeTransferExceeded
	exitCodeNoFilesTransferred
)

// ShowVersion prints the version to stdout
func ShowVersion() {
	osVersion, osKernel := buildinfo.GetOSVersion()
	if osVersion == "" {
		osVersion = "unknown"
	}
	if osKernel == "" {
		osKernel = "unknown"
	}

	linking, tagString := buildinfo.GetLinkingAndTags()

	fmt.Printf("rclone %s\n", fs.Version)
	fmt.Printf("- os/version: %s\n", osVersion)
	fmt.Printf("- os/kernel: %s\n", osKernel)
	fmt.Printf("- os/type: %s\n", runtime.GOOS)
	fmt.Printf("- os/arch: %s\n", runtime.GOARCH)
	fmt.Printf("- go/version: %s\n", runtime.Version())
@@ -389,7 +388,7 @@ func initConfig() {
	configflags.SetFlags(ci)

	// Load the config
	configfile.LoadConfig(ctx)
	configfile.Install()

	// Start accounting
	accounting.Start(ctx)
@@ -473,31 +472,31 @@ func resolveExitCode(err error) {
	if err == nil {
		if ci.ErrorOnNoTransfer {
			if accounting.GlobalStats().GetTransfers() == 0 {
				os.Exit(exitCodeNoFilesTransferred)
				os.Exit(exitcode.NoFilesTransferred)
			}
		}
		os.Exit(exitCodeSuccess)
		os.Exit(exitcode.Success)
	}

	_, unwrapped := fserrors.Cause(err)

	switch {
	case unwrapped == fs.ErrorDirNotFound:
		os.Exit(exitCodeDirNotFound)
		os.Exit(exitcode.DirNotFound)
	case unwrapped == fs.ErrorObjectNotFound:
		os.Exit(exitCodeFileNotFound)
		os.Exit(exitcode.FileNotFound)
	case unwrapped == errorUncategorized:
		os.Exit(exitCodeUncategorizedError)
		os.Exit(exitcode.UncategorizedError)
	case unwrapped == accounting.ErrorMaxTransferLimitReached:
		os.Exit(exitCodeTransferExceeded)
		os.Exit(exitcode.TransferExceeded)
	case fserrors.ShouldRetry(err):
		os.Exit(exitCodeRetryError)
		os.Exit(exitcode.RetryError)
	case fserrors.IsNoRetryError(err):
		os.Exit(exitCodeNoRetryError)
		os.Exit(exitcode.NoRetryError)
	case fserrors.IsFatalError(err):
		os.Exit(exitCodeFatalError)
		os.Exit(exitcode.FatalError)
	default:
		os.Exit(exitCodeUsageError)
		os.Exit(exitcode.UsageError)
	}
}

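The resolveExitCode hunk replaces the local exitCode* constants with a shared lib/exitcode package. A sketch of what that package presumably exports, inferred from the removed iota block above (same names and order, so the numeric values stay unchanged; the real file may differ in comments):

    // Package exitcode exports rclone's exit status numbers.
    package exitcode

    // Exit status values rclone returns to the shell, mirroring the
    // removed const block in cmd/cmd.go.
    const (
        Success = iota // 0
        UsageError
        UncategorizedError
        DirNotFound
        FileNotFound
        RetryError
        NoRetryError
        FatalError
        TransferExceeded
        NoFilesTransferred
    )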
@@ -528,7 +527,8 @@ func AddBackendFlags() {
|
||||
if opt.IsPassword {
|
||||
help += " (obscured)"
|
||||
}
|
||||
flag := flags.VarPF(pflag.CommandLine, opt, name, opt.ShortOpt, help)
|
||||
flag := pflag.CommandLine.VarPF(opt, name, opt.ShortOpt, help)
|
||||
flags.SetDefaultFromEnv(pflag.CommandLine, name)
|
||||
if _, isBool := opt.Default.(bool); isBool {
|
||||
flag.NoOptDefVal = "true"
|
||||
}
|
||||
@@ -553,7 +553,7 @@ func Main() {
|
||||
setupRootCommand(Root)
|
||||
AddBackendFlags()
|
||||
if err := Root.Execute(); err != nil {
|
||||
if strings.HasPrefix(err.Error(), "unknown command") {
|
||||
if strings.HasPrefix(err.Error(), "unknown command") && selfupdateEnabled {
|
||||
Root.PrintErrf("You could use '%s selfupdate' to get latest features.\n\n", Root.CommandPath())
|
||||
}
|
||||
log.Fatalf("Fatal error: %v", err)
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
	"github.com/rclone/rclone/cmd/mountlib"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/atexit"
	"github.com/rclone/rclone/lib/buildinfo"
	"github.com/rclone/rclone/vfs"
)

@@ -35,6 +36,7 @@ func init() {
		cmd.Aliases = append(cmd.Aliases, "cmount")
	}
	mountlib.AddRc("cmount", mount)
	buildinfo.Tags = append(buildinfo.Tags, "cmount")
}

// Find the option string in the current options
@@ -22,6 +22,7 @@ func init() {
	cmd.Root.AddCommand(configCommand)
	configCommand.AddCommand(configEditCommand)
	configCommand.AddCommand(configFileCommand)
	configCommand.AddCommand(configTouchCommand)
	configCommand.AddCommand(configShowCommand)
	configCommand.AddCommand(configDumpCommand)
	configCommand.AddCommand(configProvidersCommand)

@@ -41,9 +42,9 @@ var configCommand = &cobra.Command{
remotes and manage existing ones. You may also set or remove a
password to protect your configuration.
`,
	Run: func(command *cobra.Command, args []string) {
	RunE: func(command *cobra.Command, args []string) error {
		cmd.CheckArgs(0, 0, command, args)
		config.EditConfig(context.Background())
		return config.EditConfig(context.Background())
	},
}

@@ -63,6 +64,15 @@ var configFileCommand = &cobra.Command{
	},
}

var configTouchCommand = &cobra.Command{
	Use:   "touch",
	Short: `Ensure configuration file exists.`,
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(0, 0, command, args)
		config.SaveConfig()
	},
}

var configShowCommand = &cobra.Command{
	Use:   "show [<remote>]",
	Short: `Print (decrypted) config file, or the config for a single remote.`,

@@ -95,12 +105,14 @@ var configProvidersCommand = &cobra.Command{
	},
}

var (
	configObscure   bool
	configNoObscure bool
)
var updateRemoteOpt config.UpdateRemoteOpt

var configPasswordHelp = strings.ReplaceAll(`
Note that if the config process would normally ask a question the
default is taken (unless |--non-interactive| is used). Each time
that happens rclone will print or DEBUG a message saying how to
affect the value taken.

const configPasswordHelp = `
If any of the parameters passed is a password field, then rclone will
automatically obscure them if they aren't already obscured before
putting them in the config file.

@@ -109,84 +121,170 @@ putting them in the config file.
consists only of base64 characters then rclone can get confused about
whether the password is already obscured or not and put unobscured
passwords into the config file. If you want to be 100% certain that
the passwords get obscured then use the "--obscure" flag, or if you
the passwords get obscured then use the |--obscure| flag, or if you
are 100% certain you are already passing obscured passwords then use
"--no-obscure". You can also set obscured passwords using the
"rclone config password" command.
`
|--no-obscure|. You can also set obscured passwords using the
|rclone config password| command.

The flag |--non-interactive| is for use by applications that wish to
configure rclone themselves, rather than using rclone's text based
configuration questions. If this flag is set, and rclone needs to ask
the user a question, a JSON blob will be returned with the question in
it.

This will look something like (some irrelevant detail removed):

|||
{
    "State": "*oauth-islocal,teamdrive,,",
    "Option": {
        "Name": "config_is_local",
        "Help": "Use auto config?\n * Say Y if not sure\n * Say N if you are working on a remote or headless machine\n",
        "Default": true,
        "Examples": [
            {
                "Value": "true",
                "Help": "Yes"
            },
            {
                "Value": "false",
                "Help": "No"
            }
        ],
        "Required": false,
        "IsPassword": false,
        "Type": "bool",
        "Exclusive": true,
    },
    "Error": "",
}
|||

The format of |Option| is the same as returned by |rclone config
providers|. The question should be asked to the user and returned to
rclone as the |--result| option along with the |--state| parameter.

The keys of |Option| are used as follows:

- |Name| - name of variable - show to user
- |Help| - help text. Hard wrapped at 80 chars. Any URLs should be clicky.
- |Default| - default value - return this if the user just wants the default.
- |Examples| - the user should be able to choose one of these
- |Required| - the value should be non-empty
- |IsPassword| - the value is a password and should be edited as such
- |Type| - type of value, e.g. |bool|, |string|, |int| and others
- |Exclusive| - if set no free-form entry allowed only the |Examples|
- Irrelevant keys |Provider|, |ShortOpt|, |Hide|, |NoPrefix|, |Advanced|

If |Error| is set then it should be shown to the user at the same
time as the question.

    rclone config update name --continue --state "*oauth-islocal,teamdrive,," --result "true"

Note that when using |--continue| all passwords should be passed in
the clear (not obscured). Any default config values should be passed
in with each invocation of |--continue|.

At the end of the non-interactive process, rclone will return a result
with |State| as empty string.

If |--all| is passed then rclone will ask all the config questions,
not just the post config questions. Any parameters are used as
defaults for questions as usual.

Note that |bin/config.py| in the rclone source implements this protocol
as a readable demonstration.
`, "|", "`")
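
To make the protocol above concrete, here is a minimal, hypothetical client that drives the non-interactive flow by shelling out to rclone and answering every question with its default. The remote name `myremote`, the `drive` backend, and the struct layout are illustrative; only the flags and the JSON shape documented above are taken from the help text, and the rclone binary is assumed to be on PATH:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os/exec"
)

// configOut mirrors the JSON blob shown above; only the fields this
// sketch needs are declared.
type configOut struct {
	State  string
	Error  string
	Option *struct {
		Name    string
		Help    string
		Default interface{}
	}
}

func main() {
	// Start the flow; rclone answers with a JSON question blob.
	args := []string{"config", "create", "myremote", "drive", "--non-interactive"}
	for {
		raw, err := exec.Command("rclone", args...).Output()
		if err != nil {
			log.Fatal(err)
		}
		var out configOut
		if err := json.Unmarshal(raw, &out); err != nil {
			log.Fatal(err)
		}
		if out.Error != "" {
			fmt.Println("error:", out.Error)
		}
		if out.State == "" { // an empty State marks the end of the process
			break
		}
		if out.Option == nil {
			log.Fatal("state returned without a question")
		}
		// Answer with the default; a real client would ask the user here.
		answer := fmt.Sprint(out.Option.Default)
		args = []string{"config", "update", "myremote",
			"--non-interactive", "--continue",
			"--state", out.State, "--result", answer}
	}
}
```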

var configCreateCommand = &cobra.Command{
	Use:   "create `name` `type` [`key` `value`]*",
	Short: `Create a new remote with name, type and options.`,
	Long: `
Create a new remote of ` + "`name`" + ` with ` + "`type`" + ` and options. The options
should be passed in pairs of ` + "`key` `value`" + `.
	Long: strings.ReplaceAll(`
Create a new remote of |name| with |type| and options. The options
should be passed in pairs of |key| |value| or as |key=value|.

For example to make a swift remote of name myremote using auto config
you would do:

    rclone config create myremote swift env_auth true
    rclone config create myremote swift env_auth=true

Note that if the config process would normally ask a question the
default is taken. Each time that happens rclone will print a message
saying how to affect the value taken.
` + configPasswordHelp + `
So for example if you wanted to configure a Google Drive remote but
using remote authorization you would do this:

    rclone config create mydrive drive config_is_local false
`,
    rclone config create mydrive drive config_is_local=false
`, "|", "`") + configPasswordHelp,
	RunE: func(command *cobra.Command, args []string) error {
		cmd.CheckArgs(2, 256, command, args)
		in, err := argsToMap(args[2:])
		if err != nil {
			return err
		}
		err = config.CreateRemote(context.Background(), args[0], args[1], in, configObscure, configNoObscure)
		return doConfig(args[0], in, func(opts config.UpdateRemoteOpt) (*fs.ConfigOut, error) {
			return config.CreateRemote(context.Background(), args[0], args[1], in, opts)
		})
	},
}

func doConfig(name string, in rc.Params, do func(config.UpdateRemoteOpt) (*fs.ConfigOut, error)) error {
	out, err := do(updateRemoteOpt)
	if err != nil {
		return err
	}
	if !(updateRemoteOpt.NonInteractive || updateRemoteOpt.Continue) {
		config.ShowRemote(name)
	} else {
		if out == nil {
			out = &fs.ConfigOut{}
		}
		outBytes, err := json.MarshalIndent(out, "", "\t")
		if err != nil {
			return err
		}
		config.ShowRemote(args[0])
		return nil
	},
		_, _ = os.Stdout.Write(outBytes)
		_, _ = os.Stdout.WriteString("\n")
	}
	return nil
}

func init() {
	for _, cmdFlags := range []*pflag.FlagSet{configCreateCommand.Flags(), configUpdateCommand.Flags()} {
		flags.BoolVarP(cmdFlags, &configObscure, "obscure", "", false, "Force any passwords to be obscured.")
		flags.BoolVarP(cmdFlags, &configNoObscure, "no-obscure", "", false, "Force any passwords not to be obscured.")
		flags.BoolVarP(cmdFlags, &updateRemoteOpt.Obscure, "obscure", "", false, "Force any passwords to be obscured.")
		flags.BoolVarP(cmdFlags, &updateRemoteOpt.NoObscure, "no-obscure", "", false, "Force any passwords not to be obscured.")
		flags.BoolVarP(cmdFlags, &updateRemoteOpt.NonInteractive, "non-interactive", "", false, "Don't interact with user and return questions.")
		flags.BoolVarP(cmdFlags, &updateRemoteOpt.Continue, "continue", "", false, "Continue the configuration process with an answer.")
		flags.BoolVarP(cmdFlags, &updateRemoteOpt.All, "all", "", false, "Ask the full set of config questions.")
		flags.StringVarP(cmdFlags, &updateRemoteOpt.State, "state", "", "", "State - use with --continue.")
		flags.StringVarP(cmdFlags, &updateRemoteOpt.Result, "result", "", "", "Result - use with --continue.")
	}
}

var configUpdateCommand = &cobra.Command{
	Use:   "update `name` [`key` `value`]+",
	Short: `Update options in an existing remote.`,
	Long: `
	Long: strings.ReplaceAll(`
Update an existing remote's options. The options should be passed in
in pairs of ` + "`key` `value`" + `.
pairs of |key| |value| or as |key=value|.

For example to update the env_auth field of a remote of name myremote
you would do:

    rclone config update myremote swift env_auth true
` + configPasswordHelp + `
    rclone config update myremote env_auth true
    rclone config update myremote env_auth=true

If the remote uses OAuth the token will be updated, if you don't
require this add an extra parameter thus:

    rclone config update myremote swift env_auth true config_refresh_token false
`,
    rclone config update myremote env_auth=true config_refresh_token=false
`, "|", "`") + configPasswordHelp,
	RunE: func(command *cobra.Command, args []string) error {
		cmd.CheckArgs(3, 256, command, args)
		cmd.CheckArgs(1, 256, command, args)
		in, err := argsToMap(args[1:])
		if err != nil {
			return err
		}
		err = config.UpdateRemote(context.Background(), args[0], in, configObscure, configNoObscure)
		if err != nil {
			return err
		}
		config.ShowRemote(args[0])
		return nil
		return doConfig(args[0], in, func(opts config.UpdateRemoteOpt) (*fs.ConfigOut, error) {
			return config.UpdateRemote(context.Background(), args[0], in, opts)
		})
	},
}

@@ -202,19 +300,21 @@ var configDeleteCommand = &cobra.Command{
var configPasswordCommand = &cobra.Command{
	Use:   "password `name` [`key` `value`]+",
	Short: `Update password in an existing remote.`,
	Long: `
	Long: strings.ReplaceAll(`
Update an existing remote's password. The password
should be passed in pairs of ` + "`key` `value`" + `.
should be passed in pairs of |key| |password| or as |key=password|.
The |password| should be passed in in clear (unobscured).

For example to set password of a remote of name myremote you would do:

    rclone config password myremote fieldname mypassword
    rclone config password myremote fieldname=mypassword

This command is obsolete now that "config update" and "config create"
both support obscuring passwords directly.
`,
`, "|", "`"),
	RunE: func(command *cobra.Command, args []string) error {
		cmd.CheckArgs(3, 256, command, args)
		cmd.CheckArgs(1, 256, command, args)
		in, err := argsToMap(args[1:])
		if err != nil {
			return err

@@ -228,16 +328,24 @@ both support obscuring passwords directly.
	},
}

// This takes a list of arguments in key value key value form and
// converts it into a map
// This takes a list of arguments in key value key value form, or
// key=value key=value form and converts it into a map
func argsToMap(args []string) (out rc.Params, err error) {
	if len(args)%2 != 0 {
		return nil, errors.New("found key without value")
	}
	out = rc.Params{}
	// Set the config
	for i := 0; i < len(args); i += 2 {
		out[args[i]] = args[i+1]
	for i := 0; i < len(args); i++ {
		key := args[i]
		equals := strings.IndexRune(key, '=')
		var value string
		if equals >= 0 {
			key, value = key[:equals], key[equals+1:]
		} else {
			i++
			if i >= len(args) {
				return nil, errors.New("found key without value")
			}
			value = args[i]
		}
		out[key] = value
	}
	return out, nil
}
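
Since the rewritten loop above advances its index in two different ways, a standalone sketch may help; this is the same parsing logic extracted into a runnable program (hypothetical demo, not part of the diff), showing that `key value` pairs and `key=value` tokens can be mixed freely:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// argsToMap accepts "key value" pairs and "key=value" tokens, mixed.
func argsToMap(args []string) (map[string]string, error) {
	out := map[string]string{}
	for i := 0; i < len(args); i++ {
		key := args[i]
		if equals := strings.IndexRune(key, '='); equals >= 0 {
			// key=value form: split on the first '='.
			out[key[:equals]] = key[equals+1:]
			continue
		}
		// key value form: the next argument is the value.
		i++
		if i >= len(args) {
			return nil, errors.New("found key without value")
		}
		out[key] = args[i]
	}
	return out, nil
}

func main() {
	m, err := argsToMap([]string{"env_auth=true", "user", "alice"})
	fmt.Println(m, err) // map[env_auth:true user:alice] <nil>
}
```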

@@ -255,15 +363,11 @@ This normally means going through the interactive oauth flow again.
	RunE: func(command *cobra.Command, args []string) error {
		ctx := context.Background()
		cmd.CheckArgs(1, 1, command, args)
		fsInfo, configName, _, config, err := fs.ConfigFs(args[0])
		fsInfo, configName, _, m, err := fs.ConfigFs(args[0])
		if err != nil {
			return err
		}
		if fsInfo.Config == nil {
			return errors.Errorf("%s: doesn't support Reconnect", configName)
		}
		fsInfo.Config(ctx, configName, config)
		return nil
		return config.PostConfig(ctx, configName, m, fsInfo)
	},
}

cmd/config/config_test.go (new file, 59 lines)
@@ -0,0 +1,59 @@
package config

import (
	"fmt"
	"testing"

	"github.com/rclone/rclone/fs/rc"
	"github.com/stretchr/testify/assert"
)

func TestArgsToMap(t *testing.T) {
	for _, test := range []struct {
		args    []string
		want    rc.Params
		wantErr bool
	}{
		{
			args: []string{},
			want: rc.Params{},
		},
		{
			args: []string{"hello", "42"},
			want: rc.Params{"hello": "42"},
		},
		{
			args: []string{"hello", "42", "bye", "43"},
			want: rc.Params{"hello": "42", "bye": "43"},
		},
		{
			args: []string{"hello=42", "bye", "43"},
			want: rc.Params{"hello": "42", "bye": "43"},
		},
		{
			args: []string{"hello", "42", "bye=43"},
			want: rc.Params{"hello": "42", "bye": "43"},
		},
		{
			args: []string{"hello=42", "bye=43"},
			want: rc.Params{"hello": "42", "bye": "43"},
		},
		{
			args:    []string{"hello", "42", "bye", "43", "unused"},
			wantErr: true,
		},
		{
			args:    []string{"hello=42", "bye=43", "unused"},
			wantErr: true,
		},
	} {
		what := fmt.Sprintf("args = %#v", test.args)
		got, err := argsToMap(test.args)
		if test.wantErr {
			assert.Error(t, err, what)
		} else {
			assert.NoError(t, err, what)
			assert.Equal(t, test.want, got, what)
		}
	}
}
@@ -36,9 +36,9 @@ var commandDefinition = &cobra.Command{
Download a URL's content and copy it to the destination without saving
it in temporary storage.

Setting ` + "`--auto-filename`" + `will cause the file name to be retrieved from
the from URL (after any redirections) and used in the destination
path. With ` + "`--print-filename`" + ` in addition, the resuling file name will
Setting ` + "`--auto-filename`" + ` will cause the file name to be retrieved from
the URL (after any redirections) and used in the destination
path. With ` + "`--print-filename`" + ` in addition, the resulting file name will
be printed.

Setting ` + "`--no-clobber`" + ` will prevent overwriting file on the

@@ -1,51 +0,0 @@
package dbhashsum

import (
	"context"

	"github.com/rclone/rclone/backend/dropbox"
	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/cmd/hashsum"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/operations"
	"github.com/spf13/cobra"
)

func init() {
	cmd.Root.AddCommand(commandDefinition)
	cmdFlags := commandDefinition.Flags()
	hashsum.AddHashFlags(cmdFlags)
}

var commandDefinition = &cobra.Command{
	Use:   "dbhashsum remote:path",
	Short: `Produces a Dropbox hash file for all the objects in the path.`,
	Long: `
Produces a Dropbox hash file for all the objects in the path. The
hashes are calculated according to [Dropbox content hash
rules](https://www.dropbox.com/developers/reference/content-hash).
The output is in the same format as md5sum and sha1sum.

By default, the hash is requested from the remote. If Dropbox hash is
not supported by the remote, no hash will be returned. With the
download flag, the file will be downloaded from the remote and
hashed locally enabling Dropbox hash for any remote.
`,
	Hidden: true,
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(1, 1, command, args)
		fsrc := cmd.NewFsSrc(args)
		fs.Logf(nil, `"rclone dbhashsum" is deprecated, use "rclone hashsum %v %s" instead`, dropbox.DbHashType, args[0])
		cmd.Run(false, false, command, func() error {
			if hashsum.HashsumOutfile == "" {
				return operations.HashLister(context.Background(), dropbox.DbHashType, hashsum.OutputBase64, hashsum.DownloadFlag, fsrc, nil)
			}
			output, close, err := hashsum.GetHashsumOutput(hashsum.HashsumOutfile)
			if err != nil {
				return err
			}
			defer close()
			return operations.HashLister(context.Background(), dropbox.DbHashType, hashsum.OutputBase64, hashsum.DownloadFlag, fsrc, output)
		})
	},
}

@@ -36,8 +36,8 @@ If you supply the |--rmdirs| flag, it will remove all empty directories along wi
You can also use the separate command |rmdir| or |rmdirs| to
delete empty directories only.

For example, to delete all files bigger than 100MBytes, you may first want to check what
would be deleted (use either):
For example, to delete all files bigger than 100 MiB, you may first want to
check what would be deleted (use either):

    rclone --min-size 100M lsl remote:path
    rclone --dry-run --min-size 100M delete remote:path

@@ -46,8 +46,8 @@ Then proceed with the actual delete:

    rclone --min-size 100M delete remote:path

That reads "delete everything with a minimum size of 100 MB", hence
delete all files bigger than 100MBytes.
That reads "delete everything with a minimum size of 100 MiB", hence
delete all files bigger than 100 MiB.

**Important**: Since this can cause data loss, test first with the
|--dry-run| or the |--interactive|/|-i| flag.
@@ -15,11 +15,12 @@ import (
	"github.com/spf13/pflag"
)

// Global hashsum flags for reuse in md5sum, sha1sum, and dbhashsum
// Global hashsum flags for reuse in hashsum, md5sum, sha1sum
var (
	OutputBase64   = false
	DownloadFlag   = false
	HashsumOutfile = ""
	ChecksumFile   = ""
)

func init() {

@@ -28,10 +29,11 @@ func init() {
	AddHashFlags(cmdFlags)
}

// AddHashFlags is a convenience function to add the command flags OutputBase64 and DownloadFlag to hashsum, md5sum, sha1sum, and dbhashsum
// AddHashFlags is a convenience function to add the command flags OutputBase64 and DownloadFlag to hashsum, md5sum, sha1sum
func AddHashFlags(cmdFlags *pflag.FlagSet) {
	flags.BoolVarP(cmdFlags, &OutputBase64, "base64", "", OutputBase64, "Output base64 encoded hashsum")
	flags.StringVarP(cmdFlags, &HashsumOutfile, "output-file", "", HashsumOutfile, "Output hashsums to a file rather than the terminal")
	flags.StringVarP(cmdFlags, &ChecksumFile, "checkfile", "C", ChecksumFile, "Validate hashes against a given SUM file instead of printing them")
	flags.BoolVarP(cmdFlags, &DownloadFlag, "download", "", DownloadFlag, "Download the file and hash it locally; if this flag is not specified, the hash is requested from the remote")
}

@@ -69,23 +71,17 @@ hashed locally enabling any hash for any remote.
Run without a hash to see the list of all supported hashes, e.g.

    $ rclone hashsum
    Supported hashes are:
      * MD5
      * SHA-1
      * DropboxHash
      * QuickXorHash

` + hash.HelpString(4) + `
Then

    $ rclone hashsum MD5 remote:path

Note that hash names are case insensitive.
`,
	RunE: func(command *cobra.Command, args []string) error {
		cmd.CheckArgs(0, 2, command, args)
		if len(args) == 0 {
			fmt.Printf("Supported hashes are:\n")
			for _, ht := range hash.Supported().Array() {
				fmt.Printf("  * %v\n", ht)
			}
			fmt.Print(hash.HelpString(0))
			return nil
		} else if len(args) == 1 {
			return errors.New("need hash type and remote")

@@ -93,11 +89,16 @@ Then
		var ht hash.Type
		err := ht.Set(args[0])
		if err != nil {
			fmt.Println(hash.HelpString(0))
			return err
		}
		fsrc := cmd.NewFsSrc(args[1:])

		cmd.Run(false, false, command, func() error {
			if ChecksumFile != "" {
				fsum, sumFile := cmd.NewFsFile(ChecksumFile)
				return operations.CheckSum(context.Background(), fsrc, fsum, sumFile, ht, nil, DownloadFlag)
			}
			if HashsumOutfile == "" {
				return operations.HashLister(context.Background(), ht, OutputBase64, DownloadFlag, fsrc, nil)
			}
@@ -3,7 +3,6 @@ package link
import (
	"context"
	"fmt"
	"time"

	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/fs"

@@ -13,7 +12,7 @@ import (
)

var (
	expire = fs.Duration(time.Hour * 24 * 365 * 100)
	expire = fs.DurationOff
	unlink = false
)

@@ -32,6 +32,10 @@ hashed locally enabling MD5 for any remote.
	cmd.CheckArgs(1, 1, command, args)
	fsrc := cmd.NewFsSrc(args)
	cmd.Run(false, false, command, func() error {
		if hashsum.ChecksumFile != "" {
			fsum, sumFile := cmd.NewFsFile(hashsum.ChecksumFile)
			return operations.CheckSum(context.Background(), fsrc, fsum, sumFile, hash.MD5, nil, hashsum.DownloadFlag)
		}
		if hashsum.HashsumOutfile == "" {
			return operations.HashLister(context.Background(), hash.MD5, hashsum.OutputBase64, hashsum.DownloadFlag, fsrc, nil)
		}

@@ -1,6 +1,6 @@
// Daemonization interface for non-Unix variants only

// +build windows
// +build windows plan9 js

package mountlib

@@ -1,6 +1,6 @@
// Daemonization interface for Unix variants only

// +build !windows
// +build !windows,!plan9,!js

package mountlib

cmd/mountlib/help.go (new file, 302 lines)
@@ -0,0 +1,302 @@
package mountlib

// "@" will be replaced by the command name, "|" will be replaced by backticks
var mountHelp = `
rclone @ allows Linux, FreeBSD, macOS and Windows to
mount any of Rclone's cloud storage systems as a file system with
FUSE.

First set up your remote using |rclone config|. Check it works with |rclone ls| etc.

On Linux and OSX, you can either run mount in foreground mode or background (daemon) mode.
Mount runs in foreground mode by default, use the |--daemon| flag to specify background mode.
You can only run mount in foreground mode on Windows.

On Linux/macOS/FreeBSD start the mount like this, where |/path/to/local/mount|
is an **empty** **existing** directory:

    rclone @ remote:path/to/files /path/to/local/mount

On Windows you can start a mount in different ways. See [below](#mounting-modes-on-windows)
for details. The following examples will mount to an automatically assigned drive,
to specific drive letter |X:|, to path |C:\path\parent\mount|
(where parent directory or drive must exist, and mount must **not** exist,
and is not supported when [mounting as a network drive](#mounting-modes-on-windows)), and
the last example will mount as network share |\\cloud\remote| and map it to an
automatically assigned drive:

    rclone @ remote:path/to/files *
    rclone @ remote:path/to/files X:
    rclone @ remote:path/to/files C:\path\parent\mount
    rclone @ remote:path/to/files \\cloud\remote

When the program ends while in foreground mode, either via Ctrl+C or receiving
a SIGINT or SIGTERM signal, the mount should be automatically stopped.

When running in background mode the user will have to stop the mount manually:

    # Linux
    fusermount -u /path/to/local/mount
    # OS X
    umount /path/to/local/mount

The umount operation can fail, for example when the mountpoint is busy.
When that happens, it is the user's responsibility to stop the mount manually.

The size of the mounted file system will be set according to information retrieved
from the remote, the same as returned by the [rclone about](https://rclone.org/commands/rclone_about/)
command. Remotes with unlimited storage may report the used size only,
then an additional 1 PiB of free space is assumed. If the remote does not
[support](https://rclone.org/overview/#optional-features) the about feature
at all, then 1 PiB is set as both the total and the free size.

**Note**: As of |rclone| 1.52.2, |rclone mount| now requires Go version 1.13
or newer on some platforms depending on the underlying FUSE library in use.

### Installing on Windows

To run rclone @ on Windows, you will need to
download and install [WinFsp](http://www.secfs.net/winfsp/).

[WinFsp](https://github.com/billziss-gh/winfsp) is an open source
Windows File System Proxy which makes it easy to write user space file
systems for Windows. It provides a FUSE emulation layer which rclone
uses in combination with [cgofuse](https://github.com/billziss-gh/cgofuse).
Both of these packages are by Bill Zissimopoulos who was very helpful
during the implementation of rclone @ for Windows.

#### Mounting modes on windows

Unlike other operating systems, Microsoft Windows provides a different filesystem
type for network and fixed drives. It optimises access on the assumption fixed
disk drives are fast and reliable, while network drives have relatively high latency
and less reliability. Some settings can also be differentiated between the two types,
for example that Windows Explorer should just display icons and not create preview
thumbnails for image and video files on network drives.

In most cases, rclone will mount the remote as a normal, fixed disk drive by default.
However, you can also choose to mount it as a remote network drive, often described
as a network share. If you mount an rclone remote using the default, fixed drive mode
and experience unexpected program errors, freezes or other issues, consider mounting
as a network drive instead.

When mounting as a fixed disk drive you can either mount to an unused drive letter,
or to a path representing a **non-existent** subdirectory of an **existing** parent
directory or drive. Using the special value |*| will tell rclone to
automatically assign the next available drive letter, starting with Z: and moving backward.
Examples:

    rclone @ remote:path/to/files *
    rclone @ remote:path/to/files X:
    rclone @ remote:path/to/files C:\path\parent\mount
    rclone @ remote:path/to/files X:

Option |--volname| can be used to set a custom volume name for the mounted
file system. The default is to use the remote name and path.

To mount as network drive, you can add option |--network-mode|
to your @ command. Mounting to a directory path is not supported in
this mode, it is a limitation Windows imposes on junctions, so the remote must always
be mounted to a drive letter.

    rclone @ remote:path/to/files X: --network-mode

A volume name specified with |--volname| will be used to create the network share path.
A complete UNC path, such as |\\cloud\remote|, optionally with path
|\\cloud\remote\madeup\path|, will be used as is. Any other
string will be used as the share part, after a default prefix |\\server\|.
If no volume name is specified then |\\server\share| will be used.
You must make sure the volume name is unique when you are mounting more than one drive,
or else the mount command will fail. The share name will be treated as the volume label for
the mapped drive, shown in Windows Explorer etc, while the complete
|\\server\share| will be reported as the remote UNC path by
|net use| etc, just like a normal network drive mapping.

If you specify a full network share UNC path with |--volname|, this will implicitly
set the |--network-mode| option, so the following two examples have same result:

    rclone @ remote:path/to/files X: --network-mode
    rclone @ remote:path/to/files X: --volname \\server\share

You may also specify the network share UNC path as the mountpoint itself. Then rclone
will automatically assign a drive letter, same as with |*| and use that as
mountpoint, and instead use the UNC path specified as the volume name, as if it were
specified with the |--volname| option. This will also implicitly set
the |--network-mode| option. This means the following two examples have same result:

    rclone @ remote:path/to/files \\cloud\remote
    rclone @ remote:path/to/files * --volname \\cloud\remote

There is yet another way to enable network mode, and to set the share path,
and that is to pass the "native" libfuse/WinFsp option directly:
|--fuse-flag --VolumePrefix=\server\share|. Note that the path
must be with just a single backslash prefix in this case.


*Note:* In previous versions of rclone this was the only supported method.

[Read more about drive mapping](https://en.wikipedia.org/wiki/Drive_mapping)

See also [Limitations](#limitations) section below.

#### Windows filesystem permissions

The FUSE emulation layer on Windows must convert between the POSIX-based
permission model used in FUSE, and the permission model used in Windows,
based on access-control lists (ACL).

The mounted filesystem will normally get three entries in its access-control list (ACL),
representing permissions for the POSIX permission scopes: Owner, group and others.
By default, the owner and group will be taken from the current user, and the built-in
group "Everyone" will be used to represent others. The user/group can be customized
with FUSE options "UserName" and "GroupName",
e.g. |-o UserName=user123 -o GroupName="Authenticated Users"|.

The permissions on each entry will be set according to
[options](#options) |--dir-perms| and |--file-perms|,
which takes a value in traditional [numeric notation](https://en.wikipedia.org/wiki/File-system_permissions#Numeric_notation),
where the default corresponds to |--file-perms 0666 --dir-perms 0777|.

Note that the mapping of permissions is not always trivial, and the result
you see in Windows Explorer may not be exactly like you expected.
For example, when setting a value that includes write access, this will be
mapped to individual permissions "write attributes", "write data" and "append data",
but not "write extended attributes". Windows will then show this as basic
permission "Special" instead of "Write", because "Write" includes the
"write extended attributes" permission.

If you set POSIX permissions for only allowing access to the owner, using
|--file-perms 0600 --dir-perms 0700|, the user group and the built-in "Everyone"
group will still be given some special permissions, such as "read attributes"
and "read permissions", in Windows. This is done for compatibility reasons,
e.g. to allow users without additional permissions to be able to read basic
metadata about files like in UNIX. One case that may arise is that other programs
(incorrectly) interprets this as the file being accessible by everyone. For example
an SSH client may warn about "unprotected private key file".

WinFsp 2021 (version 1.9) introduces a new FUSE option "FileSecurity",
that allows the complete specification of file security descriptors using
[SDDL](https://docs.microsoft.com/en-us/windows/win32/secauthz/security-descriptor-string-format).
With this you can work around issues such as the mentioned "unprotected private key file"
by specifying |-o FileSecurity="D:P(A;;FA;;;OW)"|, for file all access (FA) to the owner (OW).

#### Windows caveats

Drives created as Administrator are not visible to other accounts,
not even an account that was elevated to Administrator with the
User Account Control (UAC) feature. A result of this is that if you mount
to a drive letter from a Command Prompt run as Administrator, and then try
to access the same drive from Windows Explorer (which does not run as
Administrator), you will not be able to see the mounted drive.

If you don't need to access the drive from applications running with
administrative privileges, the easiest way around this is to always
create the mount from a non-elevated command prompt.

To make mapped drives available to the user account that created them
regardless if elevated or not, there is a special Windows setting called
[linked connections](https://docs.microsoft.com/en-us/troubleshoot/windows-client/networking/mapped-drives-not-available-from-elevated-command#detail-to-configure-the-enablelinkedconnections-registry-entry)
that can be enabled.

It is also possible to make a drive mount available to everyone on the system,
by running the process creating it as the built-in SYSTEM account.
There are several ways to do this: One is to use the command-line
utility [PsExec](https://docs.microsoft.com/en-us/sysinternals/downloads/psexec),
from Microsoft's Sysinternals suite, which has option |-s| to start
processes as the SYSTEM account. Another alternative is to run the mount
command from a Windows Scheduled Task, or a Windows Service, configured
to run as the SYSTEM account. A third alternative is to use the
[WinFsp.Launcher infrastructure](https://github.com/billziss-gh/winfsp/wiki/WinFsp-Service-Architecture).
Note that when running rclone as another user, it will not use
the configuration file from your profile unless you tell it to
with the [|--config|](https://rclone.org/docs/#config-config-file) option.
Read more in the [install documentation](https://rclone.org/install/).

Note that mapping to a directory path, instead of a drive letter,
does not suffer from the same limitations.

### Limitations

Without the use of |--vfs-cache-mode| this can only write files
sequentially, it can only seek when reading. This means that many
applications won't work with their files on an rclone mount without
|--vfs-cache-mode writes| or |--vfs-cache-mode full|.
See the [VFS File Caching](#vfs-file-caching) section for more info.

The bucket based remotes (e.g. Swift, S3, Google Compute Storage, B2,
Hubic) do not support the concept of empty directories, so empty
directories will have a tendency to disappear once they fall out of
the directory cache.

Only supported on Linux, FreeBSD, OS X and Windows at the moment.

### rclone @ vs rclone sync/copy

File systems expect things to be 100% reliable, whereas cloud storage
systems are a long way from 100% reliable. The rclone sync/copy
commands cope with this with lots of retries. However rclone @
can't use retries in the same way without making local copies of the
uploads. Look at the [VFS File Caching](#vfs-file-caching)
for solutions to make @ more reliable.

### Attribute caching

You can use the flag |--attr-timeout| to set the time the kernel caches
the attributes (size, modification time, etc.) for directory entries.

The default is |1s| which caches files just long enough to avoid
too many callbacks to rclone from the kernel.

In theory 0s should be the correct value for filesystems which can
change outside the control of the kernel. However this causes quite a
few problems such as
[rclone using too much memory](https://github.com/rclone/rclone/issues/2157),
[rclone not serving files to samba](https://forum.rclone.org/t/rclone-1-39-vs-1-40-mount-issue/5112)
and [excessive time listing directories](https://github.com/rclone/rclone/issues/2095#issuecomment-371141147).

The kernel can cache the info about a file for the time given by
|--attr-timeout|. You may see corruption if the remote file changes
length during this window. It will show up as either a truncated file
or a file with garbage on the end. With |--attr-timeout 1s| this is
very unlikely but not impossible. The higher you set |--attr-timeout|
the more likely it is. The default setting of "1s" is the lowest
setting which mitigates the problems above.

If you set it higher (|10s| or |1m| say) then the kernel will call
back to rclone less often making it more efficient, however there is
more chance of the corruption issue above.

If files don't change on the remote outside of the control of rclone
then there is no chance of corruption.

This is the same as setting the attr_timeout option in mount.fuse.

### Filters

Note that all the rclone filters can be used to select a subset of the
files to be visible in the mount.

### systemd

When running rclone @ as a systemd service, it is possible
to use Type=notify. In this case the service will enter the started state
after the mountpoint has been successfully set up.
Units having the rclone @ service specified as a requirement
will see all files and folders immediately in this mode.

### chunked reading

|--vfs-read-chunk-size| will enable reading the source objects in parts.
This can reduce the used download quota for some remotes by requesting only chunks
from the remote that are actually read at the cost of an increased number of requests.

When |--vfs-read-chunk-size-limit| is also specified and greater than
|--vfs-read-chunk-size|, the chunk size for each open file will get doubled
for each chunk read, until the specified value is reached. A value of |-1| will disable
the limit and the chunk size will grow indefinitely.

With |--vfs-read-chunk-size 100M| and |--vfs-read-chunk-size-limit 0|
the following parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on.
When |--vfs-read-chunk-size-limit 500M| is specified, the result would be
0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.
`
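
The doubling behaviour described under "chunked reading" can be checked with a small calculation. The sketch below is illustrative only (not rclone's implementation); it reproduces the documented 0-100M, 100M-300M, 300M-700M, ... sequence for the 100M chunk size with a 500M limit:

```go
package main

import "fmt"

func main() {
	const MiB = int64(1 << 20)
	chunkSize := 100 * MiB // --vfs-read-chunk-size 100M
	limit := 500 * MiB     // --vfs-read-chunk-size-limit 500M

	offset := int64(0)
	for i := 0; i < 5; i++ {
		fmt.Printf("%vM-%vM\n", offset/MiB, (offset+chunkSize)/MiB)
		offset += chunkSize
		// The chunk size doubles after every read until it reaches the
		// limit; a limit of -1 would remove the cap entirely.
		if chunkSize*2 <= limit {
			chunkSize *= 2
		} else {
			chunkSize = limit
		}
	}
	// Output: 0M-100M 100M-300M 300M-700M 700M-1200M 1200M-1700M
}
```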
|
||||
@@ -1,19 +1,14 @@
|
||||
package mountlib
|
||||
|
||||
import (
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
sysdnotify "github.com/iguanesolutions/go-systemd/v5/notify"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/cmd"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
@@ -21,7 +16,11 @@ import (
|
||||
"github.com/rclone/rclone/fs/rc"
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
"github.com/rclone/rclone/vfs"
|
||||
"github.com/rclone/rclone/vfs/vfscommon"
|
||||
"github.com/rclone/rclone/vfs/vfsflags"
|
||||
|
||||
sysdnotify "github.com/iguanesolutions/go-systemd/v5/notify"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
@@ -63,6 +62,19 @@ type (
|
||||
MountFn func(VFS *vfs.VFS, mountpoint string, opt *Options) (<-chan error, func() error, error)
|
||||
)
|
||||
|
||||
// MountPoint represents a mount with options and runtime state
|
||||
type MountPoint struct {
|
||||
MountPoint string
|
||||
MountedOn time.Time
|
||||
MountOpt Options
|
||||
VFSOpt vfscommon.Options
|
||||
Fs fs.Fs
|
||||
VFS *vfs.VFS
|
||||
MountFn MountFn
|
||||
UnmountFn UnmountFn
|
||||
ErrChan <-chan error
|
||||
}
|
||||
|
||||
// Global constants
|
||||
const (
|
||||
MaxLeafSize = 1024 // don't pass file names longer than this
|
||||
@@ -106,405 +118,37 @@ func AddFlags(flagSet *pflag.FlagSet) {
|
||||
flags.BoolVarP(flagSet, &Opt.NetworkMode, "network-mode", "", Opt.NetworkMode, "Mount as remote network drive, instead of fixed disk drive. Supported on Windows only")
|
||||
}
|
||||
|
||||
// Check if folder is empty
|
||||
func checkMountEmpty(mountpoint string) error {
|
||||
fp, fpErr := os.Open(mountpoint)
|
||||
|
||||
if fpErr != nil {
|
||||
return errors.Wrap(fpErr, "Can not open: "+mountpoint)
|
||||
}
|
||||
defer fs.CheckClose(fp, &fpErr)
|
||||
|
||||
_, fpErr = fp.Readdirnames(1)
|
||||
|
||||
// directory is not empty
|
||||
if fpErr != io.EOF {
|
||||
var e error
|
||||
var errorMsg = "Directory is not empty: " + mountpoint + " If you want to mount it anyway use: --allow-non-empty option"
|
||||
if fpErr == nil {
|
||||
e = errors.New(errorMsg)
|
||||
} else {
|
||||
e = errors.Wrap(fpErr, errorMsg)
|
||||
}
|
||||
return e
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check the root doesn't overlap the mountpoint
|
||||
func checkMountpointOverlap(root, mountpoint string) error {
|
||||
abs := func(x string) string {
|
||||
if absX, err := filepath.EvalSymlinks(x); err == nil {
|
||||
x = absX
|
||||
}
|
||||
if absX, err := filepath.Abs(x); err == nil {
|
||||
x = absX
|
||||
}
|
||||
x = filepath.ToSlash(x)
|
||||
if !strings.HasSuffix(x, "/") {
|
||||
x += "/"
|
||||
}
|
||||
return x
|
||||
}
|
||||
rootAbs, mountpointAbs := abs(root), abs(mountpoint)
|
||||
if strings.HasPrefix(rootAbs, mountpointAbs) || strings.HasPrefix(mountpointAbs, rootAbs) {
|
||||
return errors.Errorf("mount point %q and directory to be mounted %q mustn't overlap", mountpoint, root)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewMountCommand makes a mount command with the given name and Mount function
|
||||
func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Command {
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: commandName + " remote:path /path/to/mountpoint",
|
||||
Hidden: hidden,
|
||||
Short: `Mount the remote as file system on a mountpoint.`,
|
||||
// Warning! "|" will be replaced by backticks below
|
||||
// "@" will be replaced by the command name
|
||||
Long: strings.ReplaceAll(strings.ReplaceAll(`
|
||||
rclone @ allows Linux, FreeBSD, macOS and Windows to
|
||||
mount any of Rclone's cloud storage systems as a file system with
|
||||
FUSE.
|
||||
|
||||
First set up your remote using |rclone config|. Check it works with |rclone ls| etc.
|
||||
|
||||
On Linux and OSX, you can either run mount in foreground mode or background (daemon) mode.
|
||||
Mount runs in foreground mode by default, use the |--daemon| flag to specify background mode.
|
||||
You can only run mount in foreground mode on Windows.
|
||||
|
||||
On Linux/macOS/FreeBSD start the mount like this, where |/path/to/local/mount|
|
||||
is an **empty** **existing** directory:
|
||||
|
||||
rclone @ remote:path/to/files /path/to/local/mount
|
||||
|
||||
On Windows you can start a mount in different ways. See [below](#mounting-modes-on-windows)
|
||||
for details. The following examples will mount to an automatically assigned drive,
|
||||
to specific drive letter |X:|, to path |C:\path\parent\mount|
|
||||
(where parent directory or drive must exist, and mount must **not** exist,
|
||||
and is not supported when [mounting as a network drive](#mounting-modes-on-windows)), and
|
||||
the last example will mount as network share |\\cloud\remote| and map it to an
|
||||
automatically assigned drive:
|
||||
|
||||
rclone @ remote:path/to/files *
|
||||
rclone @ remote:path/to/files X:
|
||||
rclone @ remote:path/to/files C:\path\parent\mount
|
||||
rclone @ remote:path/to/files \\cloud\remote
|
||||
|
||||
When the program ends while in foreground mode, either via Ctrl+C or receiving
|
||||
a SIGINT or SIGTERM signal, the mount should be automatically stopped.
|
||||
|
||||
When running in background mode the user will have to stop the mount manually:
|
||||
|
||||
# Linux
|
||||
fusermount -u /path/to/local/mount
|
||||
# OS X
|
||||
umount /path/to/local/mount
|
||||
|
||||
The umount operation can fail, for example when the mountpoint is busy.
|
||||
When that happens, it is the user's responsibility to stop the mount manually.
|
||||
|
||||
The size of the mounted file system will be set according to information retrieved
|
||||
from the remote, the same as returned by the [rclone about](https://rclone.org/commands/rclone_about/)
|
||||
command. Remotes with unlimited storage may report the used size only,
|
||||
then an additional 1PB of free space is assumed. If the remote does not
|
||||
[support](https://rclone.org/overview/#optional-features) the about feature
|
||||
at all, then 1PB is set as both the total and the free size.
|
||||
|
||||
**Note**: As of |rclone| 1.52.2, |rclone mount| now requires Go version 1.13
|
||||
or newer on some platforms depending on the underlying FUSE library in use.
|
||||
|
||||
### Installing on Windows
|
||||
|
||||
To run rclone @ on Windows, you will need to
|
||||
download and install [WinFsp](http://www.secfs.net/winfsp/).
|
||||
|
||||
[WinFsp](https://github.com/billziss-gh/winfsp) is an open source
|
||||
Windows File System Proxy which makes it easy to write user space file
|
||||
systems for Windows. It provides a FUSE emulation layer which rclone
|
||||
uses combination with [cgofuse](https://github.com/billziss-gh/cgofuse).
|
||||
Both of these packages are by Bill Zissimopoulos who was very helpful
|
||||
during the implementation of rclone @ for Windows.
|
||||
|
||||
#### Mounting modes on windows
|
||||
|
||||
Unlike other operating systems, Microsoft Windows provides a different filesystem
|
||||
type for network and fixed drives. It optimises access on the assumption fixed
|
||||
disk drives are fast and reliable, while network drives have relatively high latency
|
||||
and less reliability. Some settings can also be differentiated between the two types,
|
||||
for example that Windows Explorer should just display icons and not create preview
|
||||
thumbnails for image and video files on network drives.
|
||||
|
||||
In most cases, rclone will mount the remote as a normal, fixed disk drive by default.
|
||||
However, you can also choose to mount it as a remote network drive, often described
|
||||
as a network share. If you mount an rclone remote using the default, fixed drive mode
|
||||
and experience unexpected program errors, freezes or other issues, consider mounting
|
||||
as a network drive instead.
|
||||
|
||||
When mounting as a fixed disk drive you can either mount to an unused drive letter,
|
||||
or to a path representing a **non-existent** subdirectory of an **existing** parent
|
||||
directory or drive. Using the special value |*| will tell rclone to
|
||||
automatically assign the next available drive letter, starting with Z: and moving backward.
|
||||
Examples:
|
||||
|
||||
rclone @ remote:path/to/files *
|
||||
rclone @ remote:path/to/files X:
|
||||
rclone @ remote:path/to/files C:\path\parent\mount
|
||||
rclone @ remote:path/to/files X:
|
||||
|
||||
Option |--volname| can be used to set a custom volume name for the mounted
|
||||
file system. The default is to use the remote name and path.
|
||||
|
||||
To mount as network drive, you can add option |--network-mode|
|
||||
to your @ command. Mounting to a directory path is not supported in
|
||||
this mode, it is a limitation Windows imposes on junctions, so the remote must always
|
||||
be mounted to a drive letter.
|
||||
|
||||
rclone @ remote:path/to/files X: --network-mode
|
||||
|
||||
A volume name specified with |--volname| will be used to create the network share path.
|
||||
A complete UNC path, such as |\\cloud\remote|, optionally with path
|
||||
|\\cloud\remote\madeup\path|, will be used as is. Any other
|
||||
string will be used as the share part, after a default prefix |\\server\|.
|
||||
If no volume name is specified then |\\server\share| will be used.
|
||||
You must make sure the volume name is unique when you are mounting more than one drive,
|
||||
or else the mount command will fail. The share name will treated as the volume label for
|
||||
the mapped drive, shown in Windows Explorer etc, while the complete
|
||||
|\\server\share| will be reported as the remote UNC path by
|
||||
|net use| etc, just like a normal network drive mapping.
|
||||
|
||||
If you specify a full network share UNC path with |--volname|, this will implicitely
|
||||
set the |--network-mode| option, so the following two examples have same result:
|
||||
|
||||
rclone @ remote:path/to/files X: --network-mode
|
||||
rclone @ remote:path/to/files X: --volname \\server\share
|
||||
|
||||
You may also specify the network share UNC path as the mountpoint itself. Then rclone
|
||||
will automatically assign a drive letter, same as with |*| and use that as
|
||||
mountpoint, and instead use the UNC path specified as the volume name, as if it were
|
||||
specified with the |--volname| option. This will also implicitely set
|
||||
the |--network-mode| option. This means the following two examples have same result:
|
||||
|
||||
rclone @ remote:path/to/files \\cloud\remote
|
||||
rclone @ remote:path/to/files * --volname \\cloud\remote
|
||||
|
||||
There is yet another way to enable network mode, and to set the share path,
|
||||
and that is to pass the "native" libfuse/WinFsp option directly:
|
||||
|--fuse-flag --VolumePrefix=\server\share|. Note that the path
|
||||
must be with just a single backslash prefix in this case.
|
||||
|
||||
|
||||
*Note:* In previous versions of rclone this was the only supported method.
|
||||
|
||||
[Read more about drive mapping](https://en.wikipedia.org/wiki/Drive_mapping)
|
||||
|
||||
See also [Limitations](#limitations) section below.
|
||||
|
||||
#### Windows filesystem permissions
|
||||
|
||||
The FUSE emulation layer on Windows must convert between the POSIX-based
|
||||
permission model used in FUSE, and the permission model used in Windows,
|
||||
based on access-control lists (ACL).
|
||||
|
||||
The mounted filesystem will normally get three entries in its access-control list (ACL),
|
||||
representing permissions for the POSIX permission scopes: Owner, group and others.
|
||||
By default, the owner and group will be taken from the current user, and the built-in
|
||||
group "Everyone" will be used to represent others. The user/group can be customized
|
||||
with FUSE options "UserName" and "GroupName",
|
||||
e.g. |-o UserName=user123 -o GroupName="Authenticated Users"|.
|
||||
|
||||
The permissions on each entry will be set according to
|
||||
[options](#options) |--dir-perms| and |--file-perms|,
|
||||
which takes a value in traditional [numeric notation](https://en.wikipedia.org/wiki/File-system_permissions#Numeric_notation),
|
||||
where the default corresponds to |--file-perms 0666 --dir-perms 0777|.
|
||||
|
||||
Note that the mapping of permissions is not always trivial, and the result
|
||||
you see in Windows Explorer may not be exactly like you expected.
|
||||
For example, when setting a value that includes write access, this will be
|
||||
mapped to individual permissions "write attributes", "write data" and "append data",
|
||||
but not "write extended attributes". Windows will then show this as basic
|
||||
permission "Special" instead of "Write", because "Write" includes the
|
||||
"write extended attributes" permission.
|
||||
|
||||
If you set POSIX permissions for only allowing access to the owner, using
|
||||
|--file-perms 0600 --dir-perms 0700|, the user group and the built-in "Everyone"
|
||||
group will still be given some special permissions, such as "read attributes"
|
||||
and "read permissions", in Windows. This is done for compatibility reasons,
|
||||
e.g. to allow users without additional permissions to be able to read basic
|
||||
metadata about files like in UNIX. One case that may arise is that other programs
|
||||
(incorrectly) interprets this as the file being accessible by everyone. For example
|
||||
an SSH client may warn about "unprotected private key file".
|
||||
|
||||
WinFsp 2021 (version 1.9, still in beta) introduces a new FUSE option "FileSecurity",
|
||||
that allows the complete specification of file security descriptors using
|
||||
[SDDL](https://docs.microsoft.com/en-us/windows/win32/secauthz/security-descriptor-string-format).
|
||||
With this you can work around issues such as the mentioned "unprotected private key file"
|
||||
by specifying |-o FileSecurity="D:P(A;;FA;;;OW)"|, for file all access (FA) to the owner (OW).
|
||||
|
||||
#### Windows caveats
|
||||
|
||||
Note that drives created as Administrator are not visible by other
|
||||
accounts (including the account that was elevated as
|
||||
Administrator). So if you start a Windows drive from an Administrative
|
||||
Command Prompt and then try to access the same drive from Explorer
|
||||
(which does not run as Administrator), you will not be able to see the
|
||||
new drive.
|
||||
|
||||
The easiest way around this is to start the drive from a normal
|
||||
command prompt. It is also possible to start a drive from the SYSTEM
|
||||
account (using [the WinFsp.Launcher
|
||||
infrastructure](https://github.com/billziss-gh/winfsp/wiki/WinFsp-Service-Architecture))
|
||||
which creates drives accessible for everyone on the system or
|
||||
alternatively using [the nssm service manager](https://nssm.cc/usage).
|
||||
|
||||

### Limitations

Without the use of |--vfs-cache-mode| this can only write files
sequentially; it can only seek when reading. This means that many
applications won't work with their files on an rclone mount without
|--vfs-cache-mode writes| or |--vfs-cache-mode full|.
See the [VFS File Caching](#vfs-file-caching) section for more info.

The bucket-based remotes (e.g. Swift, S3, Google Cloud Storage, B2,
Hubic) do not support the concept of empty directories, so empty
directories will have a tendency to disappear once they fall out of
the directory cache.

Only supported on Linux, FreeBSD, OS X and Windows at the moment.

### rclone @ vs rclone sync/copy

File systems expect things to be 100% reliable, whereas cloud storage
systems are a long way from 100% reliable. The rclone sync/copy
commands cope with this with lots of retries. However rclone @
can't use retries in the same way without making local copies of the
uploads. Look at the [VFS File Caching](#vfs-file-caching) section
for solutions to make @ more reliable.

### Attribute caching

You can use the flag |--attr-timeout| to set the time the kernel caches
the attributes (size, modification time, etc.) for directory entries.

The default is |1s| which caches files just long enough to avoid
too many callbacks to rclone from the kernel.

In theory |0s| should be the correct value for filesystems which can
change outside the control of the kernel. However, this causes quite a
few problems, such as
[rclone using too much memory](https://github.com/rclone/rclone/issues/2157),
[rclone not serving files to samba](https://forum.rclone.org/t/rclone-1-39-vs-1-40-mount-issue/5112)
and [excessive time listing directories](https://github.com/rclone/rclone/issues/2095#issuecomment-371141147).

The kernel can cache the info about a file for the time given by
|--attr-timeout|. You may see corruption if the remote file changes
length during this window. It will show up as either a truncated file
or a file with garbage on the end. With |--attr-timeout 1s| this is
very unlikely but not impossible. The higher you set |--attr-timeout|
the more likely it is. The default setting of |1s| is the lowest
setting which mitigates the problems above.

If you set it higher (|10s| or |1m| say) then the kernel will call
back to rclone less often, making it more efficient; however there is
more chance of the corruption issue described above.

If files don't change on the remote outside of the control of rclone
then there is no chance of corruption.

This is the same as setting the attr_timeout option in mount.fuse.
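
For example, to trade slightly staler metadata for fewer kernel
callbacks, you could mount with a longer timeout (the remote name and
mountpoint below are placeholders):

    rclone @ remote: /mnt/remote --attr-timeout 10s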

### Filters

Note that all the rclone filters can be used to select a subset of the
files to be visible in the mount.

### systemd

When running rclone @ as a systemd service, it is possible
to use Type=notify. In this case the service will enter the started state
after the mountpoint has been successfully set up.
Units having the rclone @ service specified as a requirement
will see all files and folders immediately in this mode.
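
For illustration, a minimal unit file might look like the sketch below;
the remote name, mountpoint and binary path are placeholders rather than
anything prescribed by rclone:

    [Unit]
    Description=Example rclone @ of remote:
    After=network-online.target

    [Service]
    Type=notify
    ExecStart=/usr/bin/rclone @ remote: /mnt/remote
    ExecStop=/bin/fusermount -u /mnt/remote

    [Install]
    WantedBy=multi-user.target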

### chunked reading

|--vfs-read-chunk-size| will enable reading the source objects in parts.
This can reduce the used download quota for some remotes by requesting only chunks
from the remote that are actually read, at the cost of an increased number of requests.

When |--vfs-read-chunk-size-limit| is also specified and greater than
|--vfs-read-chunk-size|, the chunk size for each open file will get doubled
for each chunk read, until the specified value is reached. A value of |-1| will disable
the limit and the chunk size will grow indefinitely.

With |--vfs-read-chunk-size 100M| and |--vfs-read-chunk-size-limit 0|
the following parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on.
When |--vfs-read-chunk-size-limit 500M| is specified, the result would be
0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.
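
The progression above is plain doubling capped at the limit. A minimal
illustrative Go sketch of that arithmetic (not rclone's actual
implementation) reproduces the |--vfs-read-chunk-size-limit 500M|
sequence:

    package main

    import "fmt"

    func main() {
        const mb = int64(1 << 20)
        chunkSize := 100 * mb // --vfs-read-chunk-size 100M
        limit := 500 * mb     // --vfs-read-chunk-size-limit 500M
        var offset int64
        for i := 0; i < 5; i++ {
            fmt.Printf("%dM-%dM\n", offset/mb, (offset+chunkSize)/mb)
            offset += chunkSize
            if chunkSize*2 <= limit {
                chunkSize *= 2
            } else {
                chunkSize = limit
            }
        }
    }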
`, "|", "`"), "@", commandName) + vfs.Help,
|
||||
Long: strings.ReplaceAll(strings.ReplaceAll(mountHelp, "|", "`"), "@", commandName) + vfs.Help,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(2, 2, command, args)
|
||||
opt := Opt // make a copy of the options
|
||||
|
||||
if opt.Daemon {
|
||||
if Opt.Daemon {
|
||||
config.PassConfigKeyForDaemonization = true
|
||||
}
|
||||
|
||||
mountpoint := args[1]
|
||||
fdst := cmd.NewFsDir(args)
|
||||
if fdst.Name() == "" || fdst.Name() == "local" {
|
||||
err := checkMountpointOverlap(fdst.Root(), mountpoint)
|
||||
if err != nil {
|
||||
log.Fatalf("Fatal error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Show stats if the user has specifically requested them
|
||||
if cmd.ShowStats() {
|
||||
defer cmd.StartStats()()
|
||||
}
|
||||
|
||||
// Inform about ignored flags on Windows,
|
||||
// and if not on Windows and not --allow-non-empty flag is used
|
||||
// verify that mountpoint is empty.
|
||||
if runtime.GOOS == "windows" {
|
||||
if opt.AllowNonEmpty {
|
||||
fs.Logf(nil, "--allow-non-empty flag does nothing on Windows")
|
||||
}
|
||||
if opt.AllowRoot {
|
||||
fs.Logf(nil, "--allow-root flag does nothing on Windows")
|
||||
}
|
||||
if opt.AllowOther {
|
||||
fs.Logf(nil, "--allow-other flag does nothing on Windows")
|
||||
}
|
||||
} else if !opt.AllowNonEmpty {
|
||||
err := checkMountEmpty(mountpoint)
|
||||
if err != nil {
|
||||
log.Fatalf("Fatal error: %v", err)
|
||||
}
|
||||
mnt := &MountPoint{
|
||||
MountFn: mount,
|
||||
MountPoint: args[1],
|
||||
Fs: cmd.NewFsDir(args),
|
||||
MountOpt: Opt,
|
||||
VFSOpt: vfsflags.Opt,
|
||||
}
|
||||
|
||||
// Work out the volume name, removing special
|
||||
// characters from it if necessary
|
||||
if opt.VolumeName == "" {
|
||||
opt.VolumeName = fdst.Name() + ":" + fdst.Root()
|
||||
daemonized, err := mnt.Mount()
|
||||
if !daemonized && err == nil {
|
||||
err = mnt.Wait()
|
||||
}
|
||||
opt.VolumeName = strings.Replace(opt.VolumeName, ":", " ", -1)
|
||||
opt.VolumeName = strings.Replace(opt.VolumeName, "/", " ", -1)
|
||||
opt.VolumeName = strings.TrimSpace(opt.VolumeName)
|
||||
if runtime.GOOS == "windows" && len(opt.VolumeName) > 32 {
|
||||
opt.VolumeName = opt.VolumeName[:32]
|
||||
}
|
||||
|
||||
// Start background task if --background is specified
|
||||
if opt.Daemon {
|
||||
daemonized := startBackgroundMode()
|
||||
if daemonized {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
VFS := vfs.New(fdst, &vfsflags.Opt)
|
||||
err := Mount(VFS, mountpoint, mount, &opt)
|
||||
if err != nil {
|
||||
log.Fatalf("Fatal error: %v", err)
|
||||
}
|
||||
@@ -522,49 +166,94 @@ When |--vfs-read-chunk-size-limit 500M| is specified, the result would be
|
||||
return commandDefinition
|
||||
}
|
||||
|
||||
// ClipBlocks clips the blocks pointed to the OS max
|
||||
func ClipBlocks(b *uint64) {
|
||||
var max uint64
|
||||
switch runtime.GOOS {
|
||||
case "windows":
|
||||
if runtime.GOARCH == "386" {
|
||||
max = (1 << 32) - 1
|
||||
} else {
|
||||
max = (1 << 43) - 1
|
||||
// Mount the remote at mountpoint
|
||||
func (m *MountPoint) Mount() (daemonized bool, err error) {
|
||||
if err = m.CheckOverlap(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if err = m.CheckAllowings(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
m.SetVolumeName(m.MountOpt.VolumeName)
|
||||
|
||||
// Start background task if --daemon is specified
|
||||
if m.MountOpt.Daemon {
|
||||
daemonized = startBackgroundMode()
|
||||
if daemonized {
|
||||
return true, nil
|
||||
}
|
||||
case "darwin":
|
||||
// OSX FUSE only supports 32 bit number of blocks
|
||||
// https://github.com/osxfuse/osxfuse/issues/396
|
||||
max = (1 << 32) - 1
|
||||
default:
|
||||
// no clipping
|
||||
return
|
||||
}
|
||||
if *b > max {
|
||||
*b = max
|
||||
|
||||
m.VFS = vfs.New(m.Fs, &m.VFSOpt)
|
||||
|
||||
m.ErrChan, m.UnmountFn, err = m.MountFn(m.VFS, m.MountPoint, &m.MountOpt)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "failed to mount FUSE fs")
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Mount mounts the remote at mountpoint.
|
||||
//
|
||||
// If noModTime is set then it
|
||||
func Mount(VFS *vfs.VFS, mountpoint string, mount MountFn, opt *Options) error {
|
||||
if opt == nil {
|
||||
opt = &DefaultOpt
|
||||
// CheckOverlap checks that root doesn't overlap with mountpoint
|
||||
func (m *MountPoint) CheckOverlap() error {
|
||||
name := m.Fs.Name()
|
||||
if name != "" && name != "local" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Mount it
|
||||
errChan, unmount, err := mount(VFS, mountpoint, opt)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to mount FUSE fs")
|
||||
rootAbs := absPath(m.Fs.Root())
|
||||
mountpointAbs := absPath(m.MountPoint)
|
||||
if strings.HasPrefix(rootAbs, mountpointAbs) || strings.HasPrefix(mountpointAbs, rootAbs) {
|
||||
const msg = "mount point %q and directory to be mounted %q mustn't overlap"
|
||||
return errors.Errorf(msg, m.MountPoint, m.Fs.Root())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// absPath is a helper function for MountPoint.CheckOverlap
|
||||
func absPath(path string) string {
|
||||
if abs, err := filepath.EvalSymlinks(path); err == nil {
|
||||
path = abs
|
||||
}
|
||||
if abs, err := filepath.Abs(path); err == nil {
|
||||
path = abs
|
||||
}
|
||||
path = filepath.ToSlash(path)
|
||||
if !strings.HasSuffix(path, "/") {
|
||||
path += "/"
|
||||
}
|
||||
return path
}
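
// Illustrative sketch, not part of the diff: the trailing slash added by
// absPath is what keeps CheckOverlap's prefix test from matching sibling
// directories. For hypothetical inputs:
//
//	absPath("/mnt/data")     == "/mnt/data/"
//	absPath("/mnt/database") == "/mnt/database/"
//
// Neither result is a prefix of the other, so the paths do not overlap,
// whereas "/mnt/" is a prefix of "/mnt/data/" and would be rejected.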

// CheckAllowings informs about ignored flags on Windows. If not on Windows
// and not --allow-non-empty flag is used, verify that mountpoint is empty.
func (m *MountPoint) CheckAllowings() error {
opt := &m.MountOpt
if runtime.GOOS == "windows" {
if opt.AllowNonEmpty {
fs.Logf(nil, "--allow-non-empty flag does nothing on Windows")
}
if opt.AllowRoot {
fs.Logf(nil, "--allow-root flag does nothing on Windows")
}
if opt.AllowOther {
fs.Logf(nil, "--allow-other flag does nothing on Windows")
}
return nil
}
if !opt.AllowNonEmpty {
return CheckMountEmpty(m.MountPoint)
}
return nil
}

// Wait for mount end
func (m *MountPoint) Wait() error {
// Unmount on exit
var finaliseOnce sync.Once
finalise := func() {
finaliseOnce.Do(func() {
_ = sysdnotify.Stopping()
_ = unmount()
_ = m.UnmountFn()
})
}
fnHandle := atexit.Register(finalise)
@@ -577,19 +266,20 @@ func Mount(VFS *vfs.VFS, mountpoint string, mount MountFn, opt *Options) error {

// Reload VFS cache on SIGHUP
sigHup := make(chan os.Signal, 1)
signal.Notify(sigHup, syscall.SIGHUP)
NotifyOnSigHup(sigHup)
var err error

waitloop:
for {
waiting := true
for waiting {
select {
// umount triggered outside the app
case err = <-errChan:
break waitloop
case err = <-m.ErrChan:
waiting = false
// user sent SIGHUP to clear the cache
case <-sigHup:
root, err := VFS.Root()
root, err := m.VFS.Root()
if err != nil {
fs.Errorf(VFS.Fs(), "Error reading root: %v", err)
fs.Errorf(m.VFS.Fs(), "Error reading root: %v", err)
} else {
root.ForgetAll()
}
@@ -601,6 +291,29 @@ waitloop:
if err != nil {
return errors.Wrap(err, "failed to umount FUSE fs")
}

return nil
}

// Unmount the specified mountpoint
func (m *MountPoint) Unmount() (err error) {
return m.UnmountFn()
}

// SetVolumeName with sensible default
func (m *MountPoint) SetVolumeName(vol string) {
if vol == "" {
vol = m.Fs.Name() + ":" + m.Fs.Root()
}
m.MountOpt.SetVolumeName(vol)
}

// SetVolumeName removes special characters from volume name if necessary
func (opt *Options) SetVolumeName(vol string) {
vol = strings.ReplaceAll(vol, ":", " ")
vol = strings.ReplaceAll(vol, "/", " ")
vol = strings.TrimSpace(vol)
if runtime.GOOS == "windows" && len(vol) > 32 {
vol = vol[:32]
}
opt.VolumeName = vol
}

@@ -11,29 +11,33 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/rclone/rclone/vfs/vfsflags"
)

// MountInfo defines the configuration for a mount
type MountInfo struct {
unmountFn UnmountFn
MountPoint string `json:"MountPoint"`
MountedOn time.Time `json:"MountedOn"`
Fs string `json:"Fs"`
MountOpt *Options
VFSOpt *vfscommon.Options
}

var (
// mutex to protect all the variables in this block
mountMu sync.Mutex
// Mount functions available
mountFns = map[string]MountFn{}
// Map of mounted path => MountInfo
liveMounts = map[string]MountInfo{}
liveMounts = map[string]*MountPoint{}
// Supported mount types
supportedMountTypes = []string{"mount", "cmount", "mount2"}
)

// ResolveMountMethod returns mount function by name
func ResolveMountMethod(mountType string) (string, MountFn) {
if mountType != "" {
return mountType, mountFns[mountType]
}
for _, mountType := range supportedMountTypes {
if mountFns[mountType] != nil {
return mountType, mountFns[mountType]
}
}
return "", nil
}

// AddRc adds mount and unmount functionality to rc
func AddRc(mountUtilName string, mountFunction MountFn) {
mountMu.Lock()
@@ -99,14 +103,12 @@ func mountRc(ctx context.Context, in rc.Params) (out rc.Params, err error) {
mountMu.Lock()
defer mountMu.Unlock()

if err != nil || mountType == "" {
if mountFns["mount"] != nil {
mountType = "mount"
} else if mountFns["cmount"] != nil {
mountType = "cmount"
} else if mountFns["mount2"] != nil {
mountType = "mount2"
}
if err != nil {
mountType = ""
}
mountType, mountFn := ResolveMountMethod(mountType)
if mountFn == nil {
return nil, errors.New("Mount Option specified is not registered, or is invalid")
}

// Get Fs.fs to be mounted from fs parameter in the params
@@ -115,28 +117,26 @@ func mountRc(ctx context.Context, in rc.Params) (out rc.Params, err error) {
return nil, err
}

if mountFns[mountType] != nil {
VFS := vfs.New(fdst, &vfsOpt)
_, unmountFn, err := mountFns[mountType](VFS, mountPoint, &mountOpt)

if err != nil {
log.Printf("mount FAILED: %v", err)
return nil, err
}
// Add mount to list if mount point was successfully created
liveMounts[mountPoint] = MountInfo{
unmountFn: unmountFn,
MountedOn: time.Now(),
Fs: fdst.Name(),
MountPoint: mountPoint,
VFSOpt: &vfsOpt,
MountOpt: &mountOpt,
}

fs.Debugf(nil, "Mount for %s created at %s using %s", fdst.String(), mountPoint, mountType)
return nil, nil
VFS := vfs.New(fdst, &vfsOpt)
_, unmountFn, err := mountFn(VFS, mountPoint, &mountOpt)
if err != nil {
log.Printf("mount FAILED: %v", err)
return nil, err
}
return nil, errors.New("Mount Option specified is not registered, or is invalid")

// Add mount to list if mount point was successfully created
liveMounts[mountPoint] = &MountPoint{
MountPoint: mountPoint,
MountedOn: time.Now(),
MountFn: mountFn,
UnmountFn: unmountFn,
MountOpt: mountOpt,
VFSOpt: vfsOpt,
Fs: fdst,
}

fs.Debugf(nil, "Mount for %s created at %s using %s", fdst.String(), mountPoint, mountType)
return nil, nil
}

func init() {
@@ -169,10 +169,14 @@ func unMountRc(_ context.Context, in rc.Params) (out rc.Params, err error) {
}
mountMu.Lock()
defer mountMu.Unlock()
err = performUnMount(mountPoint)
if err != nil {
mountInfo, found := liveMounts[mountPoint]
if !found {
return nil, errors.New("mount not found")
}
if err = mountInfo.Unmount(); err != nil {
return nil, err
}
delete(liveMounts, mountPoint)
return nil, nil
}

@@ -231,16 +235,34 @@ Eg
})
}

// listMountsRc returns a list of current mounts
// MountInfo is a transitional structure for json marshaling
type MountInfo struct {
Fs string `json:"Fs"`
MountPoint string `json:"MountPoint"`
MountedOn time.Time `json:"MountedOn"`
}

// listMountsRc returns a list of current mounts sorted by mount path
func listMountsRc(_ context.Context, in rc.Params) (out rc.Params, err error) {
var mountTypes = []MountInfo{}
mountMu.Lock()
defer mountMu.Unlock()
for _, a := range liveMounts {
mountTypes = append(mountTypes, a)
var keys []string
for key := range liveMounts {
keys = append(keys, key)
}
sort.Strings(keys)
mountPoints := []MountInfo{}
for _, k := range keys {
m := liveMounts[k]
info := MountInfo{
Fs: m.Fs.Name(),
MountPoint: m.MountPoint,
MountedOn: m.MountedOn,
}
mountPoints = append(mountPoints, info)
}
return rc.Params{
"mountPoints": mountTypes,
"mountPoints": mountPoints,
}, nil
}

@@ -265,27 +287,12 @@ Eg
func unmountAll(_ context.Context, in rc.Params) (out rc.Params, err error) {
mountMu.Lock()
defer mountMu.Unlock()
for key, mountInfo := range liveMounts {
err = performUnMount(mountInfo.MountPoint)
if err != nil {
fs.Debugf(nil, "Couldn't unmount : %s", key)
for mountPoint, mountInfo := range liveMounts {
if err = mountInfo.Unmount(); err != nil {
fs.Debugf(nil, "Couldn't unmount : %s", mountPoint)
return nil, err
}
delete(liveMounts, mountPoint)
}
return nil, nil
}

// performUnMount unmounts the specified mountPoint
func performUnMount(mountPoint string) (err error) {
mountInfo, ok := liveMounts[mountPoint]
if ok {
err := mountInfo.unmountFn()
if err != nil {
return err
}
delete(liveMounts, mountPoint)
} else {
return errors.New("mount not found")
}
return nil
}

@@ -13,6 +13,7 @@ import (
_ "github.com/rclone/rclone/cmd/cmount"
_ "github.com/rclone/rclone/cmd/mount"
_ "github.com/rclone/rclone/cmd/mount2"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/fs/rc"
"github.com/stretchr/testify/assert"
@@ -21,7 +22,7 @@ import (

func TestRc(t *testing.T) {
ctx := context.Background()
configfile.LoadConfig(ctx)
configfile.Install()
mount := rc.Calls.Get("mount/mount")
assert.NotNil(t, mount)
unmount := rc.Calls.Get("mount/unmount")
@@ -95,6 +96,22 @@ func TestRc(t *testing.T) {
assert.Equal(t, os.FileMode(0400), fi.Mode())
}

// check mount point list
checkMountList := func() []mountlib.MountInfo {
listCall := rc.Calls.Get("mount/listmounts")
require.NotNil(t, listCall)
listReply, err := listCall.Fn(ctx, rc.Params{})
require.NoError(t, err)
mountPointsReply, err := listReply.Get("mountPoints")
require.NoError(t, err)
mountPoints, ok := mountPointsReply.([]mountlib.MountInfo)
require.True(t, ok)
return mountPoints
}
mountPoints := checkMountList()
require.Equal(t, 1, len(mountPoints))
require.Equal(t, mountPoint, mountPoints[0].MountPoint)

// FIXME the OS sometimes appears to be using the mount
// immediately after it appears so wait a moment
time.Sleep(100 * time.Millisecond)
@@ -102,6 +119,7 @@ func TestRc(t *testing.T) {
t.Run("Unmount", func(t *testing.T) {
_, err := unmount.Fn(ctx, in)
require.NoError(t, err)
assert.Equal(t, 0, len(checkMountList()))
})
})
}

cmd/mountlib/sighup.go (new file, 14 lines)
@@ -0,0 +1,14 @@
// +build !plan9,!js

package mountlib

import (
"os"
"os/signal"
"syscall"
)

// NotifyOnSigHup makes SIGHUP notify given channel on supported systems
func NotifyOnSigHup(sighupChan chan os.Signal) {
signal.Notify(sighupChan, syscall.SIGHUP)
}

cmd/mountlib/sighup_unsupported.go (new file, 10 lines)
@@ -0,0 +1,10 @@
// +build plan9 js

package mountlib

import (
"os"
)

// NotifyOnSigHup makes SIGHUP notify given channel on supported systems
func NotifyOnSigHup(sighupChan chan os.Signal) {}

cmd/mountlib/utils.go (new file, 55 lines)
@@ -0,0 +1,55 @@
package mountlib

import (
"io"
"os"
"runtime"

"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)

// CheckMountEmpty checks if folder is empty
func CheckMountEmpty(mountpoint string) error {
fp, fpErr := os.Open(mountpoint)

if fpErr != nil {
return errors.Wrap(fpErr, "Can not open: "+mountpoint)
}
defer fs.CheckClose(fp, &fpErr)

_, fpErr = fp.Readdirnames(1)

if fpErr == io.EOF {
return nil
}

msg := "Directory is not empty: " + mountpoint + " If you want to mount it anyway use: --allow-non-empty option"
if fpErr == nil {
return errors.New(msg)
}
return errors.Wrap(fpErr, msg)
}

// ClipBlocks clips the blocks pointed to the OS max
func ClipBlocks(b *uint64) {
var max uint64
switch runtime.GOOS {
case "windows":
if runtime.GOARCH == "386" {
max = (1 << 32) - 1
} else {
max = (1 << 43) - 1
}
case "darwin":
// OSX FUSE only supports 32 bit number of blocks
// https://github.com/osxfuse/osxfuse/issues/396
max = (1 << 32) - 1
default:
// no clipping
return
}
if *b > max {
*b = max
}
}
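
// Usage sketch, not part of the diff: ClipBlocks takes a pointer so the
// caller's value is clamped in place, e.g.
//
//	blocks := uint64(1) << 50 // implausibly large block count
//	ClipBlocks(&blocks)       // on darwin this clamps blocks to (1<<32)-1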

@@ -373,7 +373,7 @@ func (u *UI) Draw() error {
extras := ""
if u.showCounts {
if count > 0 {
extras += fmt.Sprintf("%8v ", fs.SizeSuffix(count))
extras += fmt.Sprintf("%8v ", fs.CountSuffix(count))
} else {
extras += "         "
}
@@ -385,9 +385,9 @@ func (u *UI) Draw() error {
}
if u.showDirAverageSize {
if averageSize > 0 {
extras += fmt.Sprintf("%8v ", fs.SizeSuffix(int64(averageSize)))
extras += fmt.Sprintf("%9v ", fs.SizeSuffix(int64(averageSize)))
} else {
extras += "         "
extras += "          "
}

}
@@ -406,7 +406,7 @@ func (u *UI) Draw() error {
}
extras += "[" + graph[graphBars-bars:2*graphBars-bars] + "] "
}
Linef(0, y, w, fg, bg, ' ', "%c %8v %s%c%s%s", fileFlag, fs.SizeSuffix(size), extras, mark, path.Base(entry.Remote()), message)
Linef(0, y, w, fg, bg, ' ', "%c %9v %s%c%s%s", fileFlag, fs.SizeSuffix(size), extras, mark, path.Base(entry.Remote()), message)
y++
}
}
@@ -485,11 +485,15 @@ func (u *UI) removeEntry(pos int) {

// delete the entry at the current position
func (u *UI) delete() {
if u.d == nil || len(u.entries) == 0 {
return
}
ctx := context.Background()
dirPos := u.sortPerm[u.dirPosMap[u.path].entry]
entry := u.entries[dirPos]
cursorPos := u.dirPosMap[u.path]
dirPos := u.sortPerm[cursorPos.entry]
dirEntry := u.entries[dirPos]
u.boxMenu = []string{"cancel", "confirm"}
if obj, isFile := entry.(fs.Object); isFile {
if obj, isFile := dirEntry.(fs.Object); isFile {
u.boxMenuHandler = func(f fs.Fs, p string, o int) (string, error) {
if o != 1 {
return "Aborted!", nil
@@ -499,27 +503,33 @@ func (u *UI) delete() {
return "", err
}
u.removeEntry(dirPos)
if cursorPos.entry >= len(u.entries) {
u.move(-1) // move back onto a valid entry
}
return "Successfully deleted file!", nil
}
u.popupBox([]string{
"Delete this file?",
u.fsName + entry.String()})
u.fsName + dirEntry.String()})
} else {
u.boxMenuHandler = func(f fs.Fs, p string, o int) (string, error) {
if o != 1 {
return "Aborted!", nil
}
err := operations.Purge(ctx, f, entry.String())
err := operations.Purge(ctx, f, dirEntry.String())
if err != nil {
return "", err
}
u.removeEntry(dirPos)
if cursorPos.entry >= len(u.entries) {
u.move(-1) // move back onto a valid entry
}
return "Successfully purged folder!", nil
}
u.popupBox([]string{
"Purge this directory?",
"ALL files in it will be deleted",
u.fsName + entry.String()})
u.fsName + dirEntry.String()})
}
}

@@ -7,12 +7,19 @@ import (
"time"

"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)

var (
size = int64(-1)
)

func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.Int64VarP(cmdFlags, &size, "size", "", size, "File size hint to preallocate")
}

var commandDefinition = &cobra.Command{
@@ -37,6 +44,13 @@ must fit into RAM. The cutoff needs to be small enough to adhere
the limits of your remote, please see there. Generally speaking,
setting this cutoff too high will decrease your performance.

Use the |--size| flag to preallocate the file in advance at the remote end
and actually stream it, even if remote backend doesn't support streaming.

|--size| should be the exact size of the input stream in bytes. If the
size of the stream is different in length to the |--size| passed in
then the transfer will likely fail.

Note that the upload can also not be retried because the data is
not kept around until the upload succeeds. If you need to transfer
a lot of data, you're better off caching locally and then
@@ -51,7 +65,7 @@ a lot of data, you're better off caching locally and then

fdst, dstFileName := cmd.NewFsDstFile(args)
cmd.Run(false, false, command, func() error {
_, err := operations.Rcat(context.Background(), fdst, dstFileName, os.Stdin, time.Now())
_, err := operations.RcatSize(context.Background(), fdst, dstFileName, os.Stdin, size, time.Now())
return err
})
},

@@ -1,3 +1,5 @@
// +build !noselfupdate

package selfupdate

// Note: "|" will be replaced by backticks in the help string below
@@ -27,7 +29,7 @@ If the old version contains only dots and digits (for example |v1.54.0|)
then it's a stable release so you won't need the |--beta| flag. Beta releases
have an additional information similar to |v1.54.0-beta.5111.06f1c0c61|.
(if you are a developer and use a locally built rclone, the version number
will end with |-DEV|, you will have to rebuild it as it obvisously can't
will end with |-DEV|, you will have to rebuild it as it obviously can't
be distributed).

If you previously installed rclone via a package manager, the package may