Mirror of https://github.com/rclone/rclone.git, synced 2026-01-09 03:53:17 +00:00

Compare commits: v1.53.4...fix-4883-c (412 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 544757a33b | |
| | bfcf6baf93 | |
| | b2b5b7598c | |
| | 5f943aabc8 | |
| | 84c785bc36 | |
| | 993146375e | |
| | bbe791a886 | |
| | 1545ace8f2 | |
| | bcac8fdc83 | |
| | 15e1a6bee7 | |
| | 9710ded60f | |
| | 5f3672102c | |
| | 644cc69108 | |
| | 1415666074 | |
| | bae550c71e | |
| | beff081abb | |
| | 7f5ee5d81f | |
| | 8b41dfa50a | |
| | 0d8bcc08da | |
| | d3b7f14b66 | |
| | f66928a846 | |
| | 3b1122c888 | |
| | 463a18aa07 | |
| | 0a932dc1f2 | |
| | 8856e0e559 | |
| | 3b6df71838 | |
| | 31de631b22 | |
| | 189ef5f257 | |
| | 2f67681e3b | |
| | 41127965b0 | |
| | 8171671d82 | |
| | 75617c0c3b | |
| | 8b9d23916b | |
| | e43b79e33d | |
| | 459cc70a50 | |
| | 20578f3f89 | |
| | 15da53696e | |
| | 2bddba118e | |
| | c7e5976e11 | |
| | f0bf9cfda1 | |
| | 671dd047f7 | |
| | 6272ca74bc | |
| | f5af761466 | |
| | 06f1c0c61c | |
| | e6a9f005d6 | |
| | 8f6f4b053c | |
| | fe15a2eeeb | |
| | 019667170f | |
| | 7a496752f3 | |
| | b569dc11a0 | |
| | df4e6079f1 | |
| | 6156f90601 | |
| | cdaea62932 | |
| | 78afe01d15 | |
| | 4eac88babf | |
| | b4217fabd3 | |
| | 92b9dabf3c | |
| | 4323ff8a63 | |
| | 3e188495f5 | |
| | acb9e17eb3 | |
| | c8ab4f1d02 | |
| | e776a1b122 | |
| | c57af26de9 | |
| | 7d89912666 | |
| | cd075f1703 | |
| | 35b2ca642c | |
| | 127f48e8ad | |
| | 3e986cdf54 | |
| | b80d498304 | |
| | 757e696a6b | |
| | e3979131f2 | |
| | a774f6bfdb | |
| | d7cd35e2ca | |
| | 38e70f1797 | |
| | 3b49440c25 | |
| | 7c0287b824 | |
| | f97c2c85bd | |
| | 14c0d8a93e | |
| | 768ad4de2a | |
| | 817987dfc4 | |
| | eb090d3544 | |
| | 4daf8b7083 | |
| | 0be69018b8 | |
| | 9b9ab5f3e8 | |
| | 072464cbdb | |
| | b0491dec88 | |
| | ccfefedb47 | |
| | 2fffcf9e7f | |
| | a39a5d261c | |
| | 45b57822d5 | |
| | d8984cd37f | |
| | 80e63af470 | |
| | db2c38b21b | |
| | cef51d58ac | |
| | e0b5a13a13 | |
| | de21356154 | |
| | 35a4de2030 | |
| | 847625822f | |
| | 3877df4e62 | |
| | ec73d2fb9a | |
| | a7689d7023 | |
| | 847a44e7ad | |
| | b3710c962e | |
| | 35ccfe1721 | |
| | ef2bfb9718 | |
| | a97effa27c | |
| | 01adee7554 | |
| | 78a76b0d29 | |
| | e775328523 | |
| | 50344e7792 | |
| | d58fdb10db | |
| | feaacfd226 | |
| | e3c238ac95 | |
| | 752997c5e8 | |
| | 71edc75ca6 | |
| | 768e4c4735 | |
| | c553ad5158 | |
| | c66b901320 | |
| | dd67a3d5f5 | |
| | e972f2c98a | |
| | acbcb1ea9d | |
| | d4444375ac | |
| | 00bf40a8ef | |
| | 5d1f947f32 | |
| | b594cb9430 | |
| | add7a35e55 | |
| | 2af7b61fc3 | |
| | cb97c2b0d3 | |
| | 35da38e93f | |
| | 963c0f28b9 | |
| | b3815dc0c2 | |
| | 8053fc4e16 | |
| | 66c3f2f31f | |
| | 62c9074132 | |
| | a854cb9617 | |
| | fbf9942fe7 | |
| | f425950a52 | |
| | 1d40bc1901 | |
| | ba51409c3c | |
| | a64fc05385 | |
| | 4d54454900 | |
| | 5601652d65 | |
| | b218bc5bed | |
| | 65eee674b9 | |
| | 72eb74e94a | |
| | 6bfec25165 | |
| | 1c61d51448 | |
| | f7fe1d766b | |
| | 55aec19389 | |
| | 9db51117dc | |
| | a9c9467210 | |
| | f50e15c77c | |
| | e3191d096f | |
| | 07c40780b3 | |
| | 67b82b4a28 | |
| | 5f47e1e034 | |
| | e92cb9e8f8 | |
| | 9ea990d5a2 | |
| | 08b9ede217 | |
| | 6342499c47 | |
| | f347a198f7 | |
| | 060642ad14 | |
| | 629c0d0f65 | |
| | f7404f52e7 | |
| | 74a321e156 | |
| | fce885c0cd | |
| | 4028a245b0 | |
| | c5b07a6714 | |
| | b0965bf34f | |
| | 1eaca9fb45 | |
| | d833e49db9 | |
| | 3aee544cee | |
| | 9e87f5090f | |
| | c8cfa43ccc | |
| | ed7af3f370 | |
| | be19d6a403 | |
| | 46858ee6fe | |
| | a94e4d803b | |
| | dcbe62ab0a | |
| | 121b981b49 | |
| | 73bb9322f5 | |
| | bdc2278a30 | |
| | ea8d13d841 | |
| | e45716cac2 | |
| | c98dd8755c | |
| | 5ae5e1dd56 | |
| | 4f8ee736b1 | |
| | 816e68a274 | |
| | 6ab6c8eefa | |
| | cb16f42075 | |
| | 7ae84a3c91 | |
| | 2fd543c989 | |
| | 50cf97fc72 | |
| | 4acd68188b | |
| | b81b6da3fc | |
| | 56ad6aac4d | |
| | 1efb8ea280 | |
| | 9cfc01f791 | |
| | 86014cebd7 | |
| | 507f861c67 | |
| | e073720a8f | |
| | ce7cdadb71 | |
| | a223b78872 | |
| | d5181118cc | |
| | 886b3abac1 | |
| | 250f8d9371 | |
| | 8a429d12cf | |
| | 8bf4697dc2 | |
| | 584523672c | |
| | a9585efd64 | |
| | f6b1f05e0f | |
| | cc8538e0d1 | |
| | f7d9b15707 | |
| | 83406bc473 | |
| | 1cfce703b2 | |
| | 3b24a4cada | |
| | 135adb426e | |
| | 987dac9fe5 | |
| | 7fde48a805 | |
| | ce9028bb5b | |
| | 52688a63c6 | |
| | 8904e81cdf | |
| | bcbe393af3 | |
| | 47aada16a0 | |
| | c22d04aa30 | |
| | 354b4f19ec | |
| | 0ed1857fa9 | |
| | dfadd98969 | |
| | 19a8b66cee | |
| | 07dee18d6b | |
| | 70e8b11805 | |
| | 9d574c0d63 | |
| | 2e21c58e6a | |
| | 506342317b | |
| | 979bb07c86 | |
| | dfeae0e70a | |
| | f43a9ac17e | |
| | c3ac9319f4 | |
| | 76ee3060d1 | |
| | 4bb241c435 | |
| | a06f4c2514 | |
| | 53aa03cc44 | |
| | 1ce0b45965 | |
| | 7078311a84 | |
| | ef9b717961 | |
| | 09246ed9d5 | |
| | 33ea55efed | |
| | 79474b2e4c | |
| | fb001b6c01 | |
| | 2896f51a22 | |
| | 5b9115d87a | |
| | 211b08f771 | |
| | f0905499e3 | |
| | 7985df3768 | |
| | 095c7bd801 | |
| | 23469c9c7c | |
| | 2347762b0d | |
| | 636fb5344a | |
| | aaa8b7738a | |
| | bc4282e49e | |
| | 2812816142 | |
| | ceeac84cfe | |
| | 83d48f65b6 | |
| | 95d0410baa | |
| | 2708a7569e | |
| | 45e8bea8d0 | |
| | f980f230c5 | |
| | e204f89685 | |
| | f7efce594b | |
| | 1fb6ad700f | |
| | e3fe31f7cb | |
| | 8b96933e58 | |
| | d69b96a94c | |
| | d846210978 | |
| | 30c8b1b84f | |
| | 43e0929339 | |
| | 6c70c42577 | |
| | cd2c06f2a7 | |
| | af55a74bd2 | |
| | d00c126cef | |
| | bedf6e90d2 | |
| | e8c84d8b53 | |
| | f89ff3872d | |
| | 127f0fc64c | |
| | 0cfa89f316 | |
| | bfcd4113c3 | |
| | 0e7fc7613f | |
| | 8ac2f52b6e | |
| | 1973fc1ecc | |
| | 7c39a13281 | |
| | c5c503cbbe | |
| | d09488b829 | |
| | 0a6196716c | |
| | 8bc9b2b883 | |
| | a15f50254a | |
| | 5d4f77a022 | |
| | a089de0964 | |
| | 3068ae8447 | |
| | 67ff153b0c | |
| | 3e1cb8302a | |
| | e4a87f772f | |
| | d4f38d45a5 | |
| | bbe7eb35f1 | |
| | 87e54f2dde | |
| | 3f3afe489f | |
| | 70b21d9c87 | |
| | e00bf3d723 | |
| | 605f2b819a | |
| | bf2b975359 | |
| | 00a5086ff2 | |
| | be6a888e50 | |
| | dad8447423 | |
| | 65ff109065 | |
| | b7253fc1c1 | |
| | d143f576c6 | |
| | a152351a71 | |
| | a2fa1370c5 | |
| | bed83b0b64 | |
| | cf0bdad5de | |
| | 85d35ef03c | |
| | 514d10b314 | |
| | 5164c3d2d0 | |
| | ffdd0719e7 | |
| | 4e2b5389d7 | |
| | dc4e63631f | |
| | 275bf456d3 | |
| | 7dfa871095 | |
| | 70cc88de22 | |
| | 4bc0f46955 | |
| | 5b09599a23 | |
| | f4dd8e3fe8 | |
| | d0888edc0a | |
| | 51a230d7fd | |
| | fc5b14b620 | |
| | bbddadbd04 | |
| | 7428e47ebc | |
| | 72083c65ad | |
| | 70f92fd6b3 | |
| | a86cedbc24 | |
| | 0906f8dd3b | |
| | 664213cedb | |
| | 75a7226174 | |
| | 9e925becb6 | |
| | e3a5bb9b48 | |
| | b7eeb0e260 | |
| | 84d64ddabc | |
| | 6c9f92aee6 | |
| | 893297760b | |
| | c5c56cda02 | |
| | 2295123cad | |
| | ff0280c0cb | |
| | 64d736a57b | |
| | 5f1d5a1897 | |
| | aac2406e19 | |
| | 6dc28ef50a | |
| | 66def93373 | |
| | c58023a9ba | |
| | 3edc9ff0b0 | |
| | 8e8ae1edc7 | |
| | 20b00db390 | |
| | db4bbf9521 | |
| | 2b7994e739 | |
| | e7fbdac8e0 | |
| | 41ec712aa9 | |
| | 17acae2b00 | |
| | 57261c7e97 | |
| | d8239e0194 | |
| | 004c3796de | |
| | 18c7549770 | |
| | e5190f14ce | |
| | 433b73a5a8 | |
| | ab88a3341f | |
| | 181da3ce9b | |
| | b14a58c9b8 | |
| | 60cc2cba1f | |
| | c797494d88 | |
| | e2a57182be | |
| | 8928441466 | |
| | 0e8965060f | |
| | f3cf6fcdd7 | |
| | 18ccf0f871 | |
| | 313647bcf3 | |
| | 61fe068c90 | |
| | 5c49096e11 | |
| | a73c78545d | |
| | e0fd560711 | |
| | 6a56ac1032 | |
| | 96299629b4 | |
| | 75de30cfa8 | |
| | 233bed6a73 | |
| | b3964efe4d | |
| | 575f061629 | |
| | 640d7d3b4e | |
| | e92294b482 | |
| | 22937e8982 | |
| | c3d1474eb9 | |
| | e2426ea87b | |
| | e58a61175f | |
| | 05bea46c3e | |
| | c8a719ae0d | |
| | c3884aafd9 | |
| | 0a9785a4ff | |
| | 8140f67092 | |
| | 4a001b8a02 | |
| | 525433e6dd | |
| | f71f6c57d7 | |
| | e35623c72e | |
| | 344bce7e2a | |
| | 3a4322a7ba | |
| | 27b9ae4fc3 | |
| | 7e2488af10 | |
| | 41ecb586c4 | |
.github/ISSUE_TEMPLATE/Bug.md (vendored, 8 changes)

```diff
@@ -33,18 +33,18 @@ The Rclone Developers
 
 
 
-#### Which OS you are using and how many bits (eg Windows 7, 64 bit)
+#### Which OS you are using and how many bits (e.g. Windows 7, 64 bit)
 
 
 
-#### Which cloud storage system are you using? (eg Google Drive)
+#### Which cloud storage system are you using? (e.g. Google Drive)
 
 
 
-#### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
+#### The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
 
 
 
-#### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
+#### A log from the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
 
 
```
.github/workflows/build.yml (vendored, 28 changes)

```diff
@@ -19,12 +19,12 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'go1.11', 'go1.12', 'go1.13', 'go1.14']
+        job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'go1.13', 'go1.14', 'go1.15']
 
         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '1.15.x'
+            go: '1.16.0-rc1'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -34,7 +34,7 @@ jobs:
 
           - job_name: mac
             os: macOS-latest
-            go: '1.15.x'
+            go: '1.16.0-rc1'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -43,7 +43,7 @@ jobs:
 
           - job_name: windows_amd64
             os: windows-latest
-            go: '1.15.x'
+            go: '1.16.0-rc1'
             gotags: cmount
             build_flags: '-include "^windows/amd64" -cgo'
             build_args: '-buildmode exe'
@@ -53,7 +53,7 @@ jobs:
 
           - job_name: windows_386
             os: windows-latest
-            go: '1.15.x'
+            go: '1.16.0-rc1'
             gotags: cmount
             goarch: '386'
             cgo: '1'
@@ -64,21 +64,11 @@ jobs:
 
           - job_name: other_os
             os: ubuntu-latest
-            go: '1.15.x'
+            go: '1.16.0-rc1'
             build_flags: '-exclude "^(windows/|darwin/amd64|linux/)"'
             compile_all: true
             deploy: true
 
-          - job_name: go1.11
-            os: ubuntu-latest
-            go: '1.11.x'
-            quicktest: true
-
-          - job_name: go1.12
-            os: ubuntu-latest
-            go: '1.12.x'
-            quicktest: true
-
           - job_name: go1.13
             os: ubuntu-latest
             go: '1.13.x'
@@ -90,6 +80,12 @@ jobs:
             quicktest: true
             racequicktest: true
 
+          - job_name: go1.15
+            os: ubuntu-latest
+            go: '1.15.x'
+            quicktest: true
+            racequicktest: true
+
     name: ${{ matrix.job_name }}
 
     runs-on: ${{ matrix.os }}
```
````diff
@@ -12,10 +12,10 @@ When filing an issue, please include the following information if
 possible as well as a description of the problem. Make sure you test
 with the [latest beta of rclone](https://beta.rclone.org/):
 
-  * Rclone version (eg output from `rclone -V`)
-  * Which OS you are using and how many bits (eg Windows 7, 64 bit)
-  * The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
-  * A log of the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
+  * Rclone version (e.g. output from `rclone -V`)
+  * Which OS you are using and how many bits (e.g. Windows 7, 64 bit)
+  * The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
+  * A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
   * if the log contains secrets then edit the file with a text editor first to obscure them
 
 ## Submitting a pull request ##
@@ -48,7 +48,7 @@ When ready - run the unit tests for the code you changed
 
     go test -v
 
-Note that you may need to make a test remote, eg `TestSwift` for some
+Note that you may need to make a test remote, e.g. `TestSwift` for some
 of the unit tests.
 
 Note the top level Makefile targets
@@ -86,7 +86,7 @@ git reset --soft HEAD~2 # This squashes the 2 latest commits together.
 git status # Check what will happen, if you made a mistake resetting, you can run git reset 'HEAD@{1}' to undo.
 git commit # Add a new commit message.
 git push --force # Push the squashed commit to your GitHub repo.
-# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also reccommends wizardzines.com
+# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also recommends wizardzines.com
 ```
 
 ## CI for your fork ##
@@ -170,7 +170,7 @@ with modules beneath.
   * log - logging facilities
   * march - iterates directories in lock step
   * object - in memory Fs objects
-  * operations - primitives for sync, eg Copy, Move
+  * operations - primitives for sync, e.g. Copy, Move
   * sync - sync directories
   * walk - walk a directory
 * fstest - provides integration test framework
@@ -178,7 +178,7 @@ with modules beneath.
   * mockdir - mocks an fs.Directory
   * mockobject - mocks an fs.Object
 * test_all - Runs integration tests for everything
-* graphics - the images used in the website etc
+* graphics - the images used in the website, etc.
 * lib - libraries used by the backend
   * atexit - register functions to run when rclone exits
   * dircache - directory ID to name caching
@@ -202,12 +202,12 @@ for the flag help, the remainder is shown to the user in `rclone
 config` and is added to the docs with `make backenddocs`.
 
 The only documentation you need to edit are the `docs/content/*.md`
-files. The MANUAL.*, rclone.1, web site etc are all auto generated
+files. The MANUAL.*, rclone.1, web site, etc. are all auto generated
 from those during the release process. See the `make doc` and `make
 website` targets in the Makefile if you are interested in how. You
 don't need to run these when adding a feature.
 
-Documentation for rclone sub commands is with their code, eg
+Documentation for rclone sub commands is with their code, e.g.
 `cmd/ls/ls.go`.
 
 Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
@@ -364,7 +364,7 @@ See the [testing](#testing) section for more information on integration tests.
 
 Add your fs to the docs - you'll need to pick an icon for it from
 [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
-alphabetical order of full name of remote (eg `drive` is ordered as
+alphabetical order of full name of remote (e.g. `drive` is ordered as
 `Google Drive`) but with the local file system last.
 
 * `README.md` - main GitHub page
````
```diff
@@ -11,7 +11,7 @@ Current active maintainers of rclone are:
 | Fabian Möller | @B4dM4n | |
 | Alex Chen | @Cnly | onedrive backend |
 | Sandeep Ummadi | @sandeepkru | azureblob backend |
-| Sebastian Bünger | @buengese | jottacloud & yandex backends |
+| Sebastian Bünger | @buengese | jottacloud, yandex & compress backends |
 | Ivan Andreev | @ivandeex | chunker & mailru backends |
 | Max Sum | @Max-Sum | union backend |
 | Fred | @creativeprojects | seafile backend |
@@ -37,7 +37,7 @@ Rclone uses the labels like this:
 * `good first issue` - mark these if you find a small self contained issue - these get shown to new visitors to the project
 * `help` wanted - mark these if you find a self contained issue - these get shown to new visitors to the project
 * `IMPORTANT` - note to maintainers not to forget to fix this for the release
-* `maintenance` - internal enhancement, code re-organisation etc
+* `maintenance` - internal enhancement, code re-organisation, etc.
 * `Needs Go 1.XX` - waiting for that version of Go to be released
 * `question` - not a `bug` or `enhancement` - direct to the forum for next time
 * `Remote: XXX` - which rclone backend this affects
@@ -45,7 +45,7 @@ Rclone uses the labels like this:
 
 If it turns out to be a bug or an enhancement it should be tagged as such, with the appropriate other tags. Don't forget the "good first issue" tag to give new contributors something easy to do to get going.
 
-When a ticket is tagged it should be added to a milestone, either the next release, the one after, Soon or Help Wanted. Bugs can be added to the "Known Bugs" milestone if they aren't planned to be fixed or need to wait for something (eg the next go release).
+When a ticket is tagged it should be added to a milestone, either the next release, the one after, Soon or Help Wanted. Bugs can be added to the "Known Bugs" milestone if they aren't planned to be fixed or need to wait for something (e.g. the next go release).
 
 The milestones have these meanings:
 
```
MANUAL.html (generated, 3566 changes): file diff suppressed because it is too large.

MANUAL.txt (generated, 4612 changes): file diff suppressed because it is too large.
Makefile (5 changes)

```diff
@@ -93,8 +93,7 @@ build_dep:
 
 # Get the release dependencies we only install on linux
 release_dep_linux:
-	go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
-	go run bin/get-github-release.go -extract github-release aktau/github-release 'linux-amd64-github-release.tar.bz2'
+	cd /tmp && go get github.com/goreleaser/nfpm/v2/...
 
 # Get the release dependencies we only install on Windows
 release_dep_windows:
@@ -233,7 +232,7 @@ tag: retag doc
 	@echo "Edit the new changelog in docs/content/changelog.md"
 	@echo "Then commit all the changes"
 	@echo git commit -m \"Version $(VERSION)\" -a -v
-	@echo "And finally run make retag before make cross etc"
+	@echo "And finally run make retag before make cross, etc."
 
 retag:
 	@echo "Version is $(VERSION)"
```
```diff
@@ -30,11 +30,13 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
   * DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
   * Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
   * Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
+  * Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
   * FTP [:page_facing_up:](https://rclone.org/ftp/)
   * GetSky [:page_facing_up:](https://rclone.org/jottacloud/)
   * Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
   * Google Drive [:page_facing_up:](https://rclone.org/drive/)
   * Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
+  * HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
   * HTTP [:page_facing_up:](https://rclone.org/http/)
   * Hubic [:page_facing_up:](https://rclone.org/hubic/)
   * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
@@ -68,6 +70,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
   * Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
   * WebDAV [:page_facing_up:](https://rclone.org/webdav/)
   * Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
+  * Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
   * The local filesystem [:page_facing_up:](https://rclone.org/local/)
 
 Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
@@ -82,6 +85,7 @@ Please see [the full list of all storage providers and their features](https://r
   * [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
   * Can sync to and from network, e.g. two different cloud accounts
   * Optional large file chunking ([Chunker](https://rclone.org/chunker/))
+  * Optional transparent compression ([Compress](https://rclone.org/compress/))
   * Optional encryption ([Crypt](https://rclone.org/crypt/))
   * Optional cache ([Cache](https://rclone.org/cache/))
   * Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
```
```diff
@@ -4,7 +4,7 @@ This file describes how to make the various kinds of releases
 
 ## Extra required software for making a release
 
-  * [github-release](https://github.com/aktau/github-release) for uploading packages
+  * [gh the github cli](https://github.com/cli/cli) for uploading packages
   * pandoc for making the html and man pages
 
 ## Making a release
@@ -21,7 +21,7 @@ This file describes how to make the various kinds of releases
   * git status - to check for new man pages - git add them
   * git commit -a -v -m "Version v1.XX.0"
   * make retag
-  * git push --tags origin master
+  * git push --follow-tags origin
   * # Wait for the GitHub builds to complete then...
   * make fetch_binaries
   * make tarball
@@ -48,8 +48,8 @@ If rclone needs a point release due to some horrendous bug:
 
 Set vars
 
-  * BASE_TAG=v1.XX # eg v1.52
-  * NEW_TAG=${BASE_TAG}.Y # eg v1.52.1
+  * BASE_TAG=v1.XX # e.g. v1.52
+  * NEW_TAG=${BASE_TAG}.Y # e.g. v1.52.1
   * echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1
 
 First make the release branch. If this is a second point release then
```
```diff
@@ -1,6 +1,7 @@
 package alias
 
 import (
+    "context"
     "errors"
     "strings"
 
@@ -34,7 +35,7 @@ type Options struct {
 // NewFs constructs an Fs from the path.
 //
 // The returned Fs is the actual Fs, referenced by remote in the config
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
     // Parse config into Options struct
     opt := new(Options)
     err := configstruct.Set(m, opt)
@@ -47,5 +48,5 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     if strings.HasPrefix(opt.Remote, name+":") {
         return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
     }
-    return cache.Get(fspath.JoinRootPath(opt.Remote, root))
+    return cache.Get(ctx, fspath.JoinRootPath(opt.Remote, root))
 }
```
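
The alias change above is the simplest instance of the pattern that runs through this whole branch: `fs.NewFs` and the backend constructors now take a `context.Context` as their first argument. A minimal sketch of what a caller looks like after the change, assuming a remote named `myalias:` (hypothetical) has already been configured:

```go
package main

import (
	"context"
	"fmt"
	"log"

	_ "github.com/rclone/rclone/backend/alias" // register backends (see backend/all/all.go below)
	_ "github.com/rclone/rclone/backend/local"

	"github.com/rclone/rclone/fs"
)

func main() {
	ctx := context.Background()
	// The context is now supplied at construction time...
	f, err := fs.NewFs(ctx, "myalias:") // "myalias:" is a hypothetical configured remote
	if err != nil {
		log.Fatal(err)
	}
	// ...and again per operation, as List already required.
	entries, err := f.List(ctx, "")
	if err != nil {
		log.Fatal(err)
	}
	for _, entry := range entries {
		fmt.Println(entry.Remote())
	}
}
```

The test changes in the next diff exercise exactly this calling convention, `fs.NewFs(context.Background(), ...)`.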
```diff
@@ -19,7 +19,7 @@ var (
 )
 
 func prepare(t *testing.T, root string) {
-    config.LoadConfig()
+    config.LoadConfig(context.Background())
 
     // Configure the remote
     config.FileSet(remoteName, "type", "alias")
@@ -54,21 +54,22 @@ func TestNewFS(t *testing.T) {
             {"four/under four.txt", 9, false},
         }},
         {"four", "..", "", true, []testEntry{
-            {"four", -1, true},
-            {"one%.txt", 6, false},
-            {"three", -1, true},
-            {"two.html", 7, false},
+            {"five", -1, true},
+            {"under four.txt", 9, false},
         }},
-        {"four", "../three", "", true, []testEntry{
+        {"", "../../three", "", true, []testEntry{
             {"underthree.txt", 9, false},
         }},
+        {"four", "../../five", "", true, []testEntry{
+            {"underfive.txt", 6, false},
+        }},
     } {
         what := fmt.Sprintf("test %d remoteRoot=%q, fsRoot=%q, fsList=%q", testi, test.remoteRoot, test.fsRoot, test.fsList)
 
         remoteRoot, err := filepath.Abs(filepath.FromSlash(path.Join("test/files", test.remoteRoot)))
         require.NoError(t, err, what)
         prepare(t, remoteRoot)
-        f, err := fs.NewFs(fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
+        f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
         require.NoError(t, err, what)
         gotEntries, err := f.List(context.Background(), test.fsList)
         require.NoError(t, err, what)
@@ -90,7 +91,7 @@ func TestNewFS(t *testing.T) {
 
 func TestNewFSNoRemote(t *testing.T) {
     prepare(t, "")
-    f, err := fs.NewFs(fmt.Sprintf("%s:", remoteName))
+    f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:", remoteName))
 
     require.Error(t, err)
     require.Nil(t, f)
@@ -98,7 +99,7 @@ func TestNewFSNoRemote(t *testing.T) {
 
 func TestNewFSInvalidRemote(t *testing.T) {
     prepare(t, "not_existing_test_remote:")
-    f, err := fs.NewFs(fmt.Sprintf("%s:", remoteName))
+    f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:", remoteName))
 
     require.Error(t, err)
     require.Nil(t, f)
```
```diff
@@ -9,13 +9,16 @@ import (
     _ "github.com/rclone/rclone/backend/box"
     _ "github.com/rclone/rclone/backend/cache"
     _ "github.com/rclone/rclone/backend/chunker"
+    _ "github.com/rclone/rclone/backend/compress"
     _ "github.com/rclone/rclone/backend/crypt"
     _ "github.com/rclone/rclone/backend/drive"
     _ "github.com/rclone/rclone/backend/dropbox"
     _ "github.com/rclone/rclone/backend/fichier"
+    _ "github.com/rclone/rclone/backend/filefabric"
     _ "github.com/rclone/rclone/backend/ftp"
     _ "github.com/rclone/rclone/backend/googlecloudstorage"
     _ "github.com/rclone/rclone/backend/googlephotos"
+    _ "github.com/rclone/rclone/backend/hdfs"
     _ "github.com/rclone/rclone/backend/http"
     _ "github.com/rclone/rclone/backend/hubic"
     _ "github.com/rclone/rclone/backend/jottacloud"
@@ -40,4 +43,5 @@ import (
     _ "github.com/rclone/rclone/backend/union"
     _ "github.com/rclone/rclone/backend/webdav"
     _ "github.com/rclone/rclone/backend/yandex"
+    _ "github.com/rclone/rclone/backend/zoho"
 )
```
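
These blank imports exist purely for their side effects: importing a backend package runs its `init`, which calls `fs.Register`, and that is what makes a backend such as the newly added `compress` or `zoho` reachable from `rclone config` and `fs.NewFs`. A sketch of the registration an imported backend performs; the `mybackend` names here are illustrative, not rclone code:

```go
// Package mybackend is a hypothetical minimal backend, sketched to
// show why the blank imports in backend/all/all.go are sufficient.
package mybackend

import (
	"context"
	"errors"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

// init runs as a side effect of the blank import and registers the
// backend with the global registry.
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "mybackend", // what users write before the colon in mybackend:path
		Description: "Illustrative example backend",
		NewFs:       NewFs,
	})
}

// NewFs uses the context-aware constructor signature this branch
// introduces (compare the alias and Amazon Drive diffs).
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	return nil, errors.New("sketch only, not implemented")
}
```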
```diff
@@ -70,8 +70,8 @@ func init() {
         Prefix:      "acd",
         Description: "Amazon Drive",
         NewFs:       NewFs,
-        Config: func(name string, m configmap.Mapper) {
-            err := oauthutil.Config("amazon cloud drive", name, m, acdConfig, nil)
+        Config: func(ctx context.Context, name string, m configmap.Mapper) {
+            err := oauthutil.Config(ctx, "amazon cloud drive", name, m, acdConfig, nil)
             if err != nil {
                 log.Fatalf("Failed to configure token: %v", err)
             }
@@ -144,6 +144,7 @@ type Fs struct {
     name         string         // name of this remote
     features     *fs.Features   // optional features
     opt          Options        // options for this Fs
+    ci           *fs.ConfigInfo // global config
     c            *acd.Client    // the connection to the acd server
     noAuthClient *http.Client   // unauthenticated http client
     root         string         // the path we are working on
@@ -239,8 +240,7 @@ func filterRequest(req *http.Request) {
 }
 
 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-    ctx := context.Background()
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
     // Parse config into Options struct
     opt := new(Options)
     err := configstruct.Set(m, opt)
@@ -248,7 +248,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
         return nil, err
     }
     root = parsePath(root)
-    baseClient := fshttp.NewClient(fs.Config)
+    baseClient := fshttp.NewClient(ctx)
     if do, ok := baseClient.Transport.(interface {
         SetRequestFilter(f func(req *http.Request))
     }); ok {
@@ -256,25 +256,27 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     } else {
         fs.Debugf(name+":", "Couldn't add request filter - large file downloads will fail")
     }
-    oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, acdConfig, baseClient)
+    oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, acdConfig, baseClient)
     if err != nil {
         return nil, errors.Wrap(err, "failed to configure Amazon Drive")
     }
 
     c := acd.NewClient(oAuthClient)
+    ci := fs.GetConfig(ctx)
     f := &Fs{
         name:         name,
         root:         root,
         opt:          *opt,
+        ci:           ci,
         c:            c,
-        pacer:        fs.NewPacer(pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
-        noAuthClient: fshttp.NewClient(fs.Config),
+        pacer:        fs.NewPacer(ctx, pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
+        noAuthClient: fshttp.NewClient(ctx),
     }
     f.features = (&fs.Features{
         CaseInsensitive:         true,
         ReadMimeType:            true,
         CanHaveEmptyDirectories: true,
-    }).Fill(f)
+    }).Fill(ctx, f)
 
     // Renew the token in the background
     f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
@@ -502,7 +504,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
     if err != nil {
         return nil, err
     }
-    maxTries := fs.Config.LowLevelRetries
+    maxTries := f.ci.LowLevelRetries
     var iErr error
     for tries := 1; tries <= maxTries; tries++ {
         entries = nil
@@ -523,7 +525,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
             }
             entries = append(entries, o)
         default:
-            // ignore ASSET etc
+            // ignore ASSET, etc.
         }
         return false
     })
@@ -680,7 +682,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
     return err
 }
 
-// Move src to this remote using server side move operations.
+// Move src to this remote using server-side move operations.
 //
 // This is stored with the remote path given
 //
@@ -717,7 +719,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
         dstObj         fs.Object
         srcErr, dstErr error
     )
-    for i := 1; i <= fs.Config.LowLevelRetries; i++ {
+    for i := 1; i <= f.ci.LowLevelRetries; i++ {
         _, srcErr = srcObj.fs.NewObject(ctx, srcObj.remote) // try reading the object
         if srcErr != nil && srcErr != fs.ErrorObjectNotFound {
             // exit if error on source
@@ -732,7 +734,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
             // finished if src not found and dst found
             break
         }
-        fs.Debugf(src, "Wait for directory listing to update after move %d/%d", i, fs.Config.LowLevelRetries)
+        fs.Debugf(src, "Wait for directory listing to update after move %d/%d", i, f.ci.LowLevelRetries)
         time.Sleep(1 * time.Second)
     }
     return dstObj, dstErr
@@ -745,7 +747,7 @@ func (f *Fs) DirCacheFlush() {
 }
 
 // DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -893,7 +895,7 @@ func (f *Fs) Hashes() hash.Set {
     return hash.Set(hash.MD5)
 }
 
-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
 //
 // This is stored with the remote path given
 //
```
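
Besides threading `ctx` into `NewFs`, the Amazon Drive diff shows the second recurring change on this branch: reads of the `fs.Config` global become reads of a `*fs.ConfigInfo` snapshot captured once with `fs.GetConfig(ctx)` and stored on the `Fs`. A compressed sketch of that pattern, with `retryListing` as an illustrative stand-in for the retry loops in `List` and `Move` above:

```go
package example

import (
	"context"

	"github.com/rclone/rclone/fs"
)

type exampleFs struct {
	ci *fs.ConfigInfo // global config, captured once at construction
}

func newExampleFs(ctx context.Context) *exampleFs {
	// Take the config carried by the context instead of reading the
	// fs.Config package global.
	return &exampleFs{ci: fs.GetConfig(ctx)}
}

// retryListing stands in for the loops above that now consult
// f.ci.LowLevelRetries rather than fs.Config.LowLevelRetries.
func (f *exampleFs) retryListing(list func() error) (err error) {
	for tries := 1; tries <= f.ci.LowLevelRetries; tries++ {
		if err = list(); err == nil {
			return nil
		}
	}
	return err
}
```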
@@ -1,17 +1,17 @@
|
|||||||
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
|
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
|
||||||
|
|
||||||
// +build !plan9,!solaris,!js,go1.13
|
// +build !plan9,!solaris,!js,go1.14
|
||||||
|
|
||||||
package azureblob
|
package azureblob
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"context"
|
"context"
|
||||||
"crypto/md5"
|
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"path"
|
"path"
|
||||||
@@ -21,9 +21,9 @@ import (
|
|||||||
|
|
||||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||||
"github.com/Azure/azure-storage-blob-go/azblob"
|
"github.com/Azure/azure-storage-blob-go/azblob"
|
||||||
|
"github.com/Azure/go-autorest/autorest/adal"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/accounting"
|
|
||||||
"github.com/rclone/rclone/fs/config"
|
"github.com/rclone/rclone/fs/config"
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
"github.com/rclone/rclone/fs/config/configstruct"
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
@@ -33,10 +33,9 @@ import (
|
|||||||
"github.com/rclone/rclone/fs/walk"
|
"github.com/rclone/rclone/fs/walk"
|
||||||
"github.com/rclone/rclone/lib/bucket"
|
"github.com/rclone/rclone/lib/bucket"
|
||||||
"github.com/rclone/rclone/lib/encoder"
|
"github.com/rclone/rclone/lib/encoder"
|
||||||
|
"github.com/rclone/rclone/lib/env"
|
||||||
"github.com/rclone/rclone/lib/pacer"
|
"github.com/rclone/rclone/lib/pacer"
|
||||||
"github.com/rclone/rclone/lib/pool"
|
"github.com/rclone/rclone/lib/pool"
|
||||||
"github.com/rclone/rclone/lib/readers"
|
|
||||||
"golang.org/x/sync/errgroup"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -47,15 +46,12 @@ const (
|
|||||||
modTimeKey = "mtime"
|
modTimeKey = "mtime"
|
||||||
timeFormatIn = time.RFC3339
|
timeFormatIn = time.RFC3339
|
||||||
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
|
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
|
||||||
maxTotalParts = 50000 // in multipart upload
|
|
||||||
storageDefaultBaseURL = "blob.core.windows.net"
|
storageDefaultBaseURL = "blob.core.windows.net"
|
||||||
// maxUncommittedSize = 9 << 30 // can't upload bigger than this
|
defaultChunkSize = 4 * fs.MebiByte
|
||||||
defaultChunkSize = 4 * fs.MebiByte
|
maxChunkSize = 100 * fs.MebiByte
|
||||||
maxChunkSize = 100 * fs.MebiByte
|
uploadConcurrency = 4
|
||||||
defaultUploadCutoff = 256 * fs.MebiByte
|
defaultAccessTier = azblob.AccessTierNone
|
||||||
maxUploadCutoff = 256 * fs.MebiByte
|
maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
|
||||||
defaultAccessTier = azblob.AccessTierNone
|
|
||||||
maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
|
|
||||||
// Default storage account, key and blob endpoint for emulator support,
|
// Default storage account, key and blob endpoint for emulator support,
|
||||||
// though it is a base64 key checked in here, it is publicly available secret.
|
// though it is a base64 key checked in here, it is publicly available secret.
|
||||||
emulatorAccount = "devstoreaccount1"
|
emulatorAccount = "devstoreaccount1"
|
||||||
@@ -65,6 +61,10 @@ const (
|
|||||||
memoryPoolUseMmap = false
|
memoryPoolUseMmap = false
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
errCantUpdateArchiveTierBlobs = fserrors.NoRetryError(errors.New("can't update archive tier blob without --azureblob-archive-tier-delete"))
|
||||||
|
)
|
||||||
|
|
||||||
// Register with Fs
|
// Register with Fs
|
||||||
func init() {
|
func init() {
|
||||||
fs.Register(&fs.RegInfo{
|
fs.Register(&fs.RegInfo{
|
||||||
@@ -74,12 +74,51 @@ func init() {
|
|||||||
Options: []fs.Option{{
|
Options: []fs.Option{{
|
||||||
Name: "account",
|
Name: "account",
|
||||||
Help: "Storage Account Name (leave blank to use SAS URL or Emulator)",
|
Help: "Storage Account Name (leave blank to use SAS URL or Emulator)",
|
||||||
|
}, {
|
||||||
|
Name: "service_principal_file",
|
||||||
|
Help: `Path to file containing credentials for use with a service principal.
|
||||||
|
|
||||||
|
Leave blank normally. Needed only if you want to use a service principal instead of interactive login.
|
||||||
|
|
||||||
|
$ az sp create-for-rbac --name "<name>" \
|
||||||
|
--role "Storage Blob Data Owner" \
|
||||||
|
--scopes "/subscriptions/<subscription>/resourceGroups/<resource-group>/providers/Microsoft.Storage/storageAccounts/<storage-account>/blobServices/default/containers/<container>" \
|
||||||
|
> azure-principal.json
|
||||||
|
|
||||||
|
See [Use Azure CLI to assign an Azure role for access to blob and queue data](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli)
|
||||||
|
for more details.
|
||||||
|
`,
|
||||||
}, {
|
}, {
|
||||||
Name: "key",
|
Name: "key",
|
||||||
Help: "Storage Account Key (leave blank to use SAS URL or Emulator)",
|
Help: "Storage Account Key (leave blank to use SAS URL or Emulator)",
|
||||||
}, {
|
}, {
|
||||||
Name: "sas_url",
|
Name: "sas_url",
|
||||||
Help: "SAS URL for container level access only\n(leave blank if using account/key or Emulator)",
|
Help: "SAS URL for container level access only\n(leave blank if using account/key or Emulator)",
|
||||||
|
}, {
|
||||||
|
Name: "use_msi",
|
||||||
|
Help: `Use a managed service identity to authenticate (only works in Azure)
|
||||||
|
|
||||||
|
When true, use a [managed service identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/)
|
||||||
|
to authenticate to Azure Storage instead of a SAS token or account key.
|
||||||
|
|
||||||
|
If the VM(SS) on which this program is running has a system-assigned identity, it will
|
||||||
|
be used by default. If the resource has no system-assigned but exactly one user-assigned identity,
|
||||||
|
the user-assigned identity will be used by default. If the resource has multiple user-assigned
|
||||||
|
identities, the identity to use must be explicitly specified using exactly one of the msi_object_id,
|
||||||
|
msi_client_id, or msi_mi_res_id parameters.`,
|
||||||
|
Default: false,
|
||||||
|
}, {
|
||||||
|
Name: "msi_object_id",
|
||||||
|
Help: "Object ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_mi_res_id specified.",
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "msi_client_id",
|
||||||
|
Help: "Object ID of the user-assigned MSI to use, if any. Leave blank if msi_object_id or msi_mi_res_id specified.",
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "msi_mi_res_id",
|
||||||
|
Help: "Azure resource ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_object_id specified.",
|
||||||
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "use_emulator",
|
Name: "use_emulator",
|
||||||
Help: "Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint)",
|
Help: "Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint)",
|
||||||
@@ -90,8 +129,7 @@ func init() {
|
|||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "upload_cutoff",
|
Name: "upload_cutoff",
|
||||||
Help: "Cutoff for switching to chunked upload (<= 256MB).",
|
Help: "Cutoff for switching to chunked upload (<= 256MB). (Deprecated)",
|
||||||
Default: defaultUploadCutoff,
|
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "chunk_size",
|
Name: "chunk_size",
|
||||||
@@ -129,6 +167,24 @@ If blobs are in "archive tier" at remote, trying to perform data transfer
|
|||||||
operations from remote will not be allowed. User should first restore by
|
operations from remote will not be allowed. User should first restore by
|
||||||
tiering blob to "Hot" or "Cool".`,
|
tiering blob to "Hot" or "Cool".`,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "archive_tier_delete",
|
||||||
|
Default: false,
|
||||||
|
Help: fmt.Sprintf(`Delete archive tier blobs before overwriting.
|
||||||
|
|
||||||
|
Archive tier blobs cannot be updated. So without this flag, if you
|
||||||
|
attempt to update an archive tier blob, then rclone will produce the
|
||||||
|
error:
|
||||||
|
|
||||||
|
%v
|
||||||
|
|
||||||
|
With this flag set then before rclone attempts to overwrite an archive
|
||||||
|
tier blob, it will delete the existing blob before uploading its
|
||||||
|
replacement. This has the potential for data loss if the upload fails
|
||||||
|
(unlike updating a normal blob) and also may cost more since deleting
|
||||||
|
archive tier blobs early may be chargable.
|
||||||
|
`, errCantUpdateArchiveTierBlobs),
|
||||||
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "disable_checksum",
|
Name: "disable_checksum",
|
||||||
Help: `Don't store MD5 checksum with object metadata.
|
Help: `Don't store MD5 checksum with object metadata.
|
||||||
@@ -167,19 +223,24 @@ This option controls how often unused buffers will be removed from the pool.`,
|
|||||||
|
|
||||||
// Options defines the configuration for this backend
|
// Options defines the configuration for this backend
|
||||||
type Options struct {
|
type Options struct {
|
||||||
Account string `config:"account"`
|
Account string `config:"account"`
|
||||||
Key string `config:"key"`
|
ServicePrincipalFile string `config:"service_principal_file"`
|
||||||
Endpoint string `config:"endpoint"`
|
Key string `config:"key"`
|
||||||
SASURL string `config:"sas_url"`
|
UseMSI bool `config:"use_msi"`
|
||||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
MSIObjectID string `config:"msi_object_id"`
|
||||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
MSIClientID string `config:"msi_client_id"`
|
||||||
ListChunkSize uint `config:"list_chunk"`
|
MSIResourceID string `config:"msi_mi_res_id"`
|
||||||
AccessTier string `config:"access_tier"`
|
Endpoint string `config:"endpoint"`
|
||||||
UseEmulator bool `config:"use_emulator"`
|
SASURL string `config:"sas_url"`
|
||||||
DisableCheckSum bool `config:"disable_checksum"`
|
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||||
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
|
ListChunkSize uint `config:"list_chunk"`
|
||||||
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
|
AccessTier string `config:"access_tier"`
|
||||||
Enc encoder.MultiEncoder `config:"encoding"`
|
ArchiveTierDelete bool `config:"archive_tier_delete"`
|
||||||
|
UseEmulator bool `config:"use_emulator"`
|
||||||
|
DisableCheckSum bool `config:"disable_checksum"`
|
||||||
|
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
|
||||||
|
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
|
||||||
|
Enc encoder.MultiEncoder `config:"encoding"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fs represents a remote azure server
|
// Fs represents a remote azure server
|
||||||
@@ -187,6 +248,7 @@ type Fs struct {
 	name          string             // name of this remote
 	root          string             // the path we are working on if any
 	opt           Options            // parsed config options
+	ci            *fs.ConfigInfo     // global config
 	features      *fs.Features       // optional features
 	client        *http.Client       // http client we are using
 	svcURL        *azblob.ServiceURL // reference to serviceURL
@@ -197,6 +259,7 @@ type Fs struct {
 	isLimited     bool                  // if limited to one container
 	cache         *bucket.Cache         // cache for container creation status
 	pacer         *fs.Pacer             // To pace and retry the API calls
+	imdsPacer     *fs.Pacer             // Same but for IMDS
 	uploadToken   *pacer.TokenDispenser // control concurrency
 	pool          *pool.Pool            // memory pool
 }
@@ -274,7 +337,7 @@ func validateAccessTier(tier string) bool {
 
 // retryErrorCodes is a slice of error codes that we will retry
 var retryErrorCodes = []int{
-	401, // Unauthorized (eg "Token has expired")
+	401, // Unauthorized (e.g. "Token has expired")
 	408, // Request Timeout
 	429, // Rate exceeded.
 	500, // Get occasional 500 Internal Server Error
@@ -299,6 +362,8 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
 				return true, err
 			}
 		}
+	} else if httpErr, ok := err.(httpError); ok {
+		return fserrors.ShouldRetryHTTP(httpErr.Response, retryErrorCodes), err
 	}
 	return fserrors.ShouldRetry(err), err
 }
@@ -322,21 +387,6 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
 	return
 }
 
-func checkUploadCutoff(cs fs.SizeSuffix) error {
-	if cs > maxUploadCutoff {
-		return errors.Errorf("%v must be less than or equal to %v", cs, maxUploadCutoff)
-	}
-	return nil
-}
-
-func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
-	err = checkUploadCutoff(cs)
-	if err == nil {
-		old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
-	}
-	return
-}
-
 // httpClientFactory creates a Factory object that sends HTTP requests
 // to an rclone's http.Client.
 //
@@ -353,6 +403,50 @@ func httpClientFactory(client *http.Client) pipeline.Factory {
 	})
 }
 
+type servicePrincipalCredentials struct {
+	AppID    string `json:"appId"`
+	Password string `json:"password"`
+	Tenant   string `json:"tenant"`
+}
+
+const azureActiveDirectoryEndpoint = "https://login.microsoftonline.com/"
+const azureStorageEndpoint = "https://storage.azure.com/"
+
+// newServicePrincipalTokenRefresher takes the client ID and secret, and returns a refresh-able access token.
+func newServicePrincipalTokenRefresher(ctx context.Context, credentialsData []byte) (azblob.TokenRefresher, error) {
+	var spCredentials servicePrincipalCredentials
+	if err := json.Unmarshal(credentialsData, &spCredentials); err != nil {
+		return nil, errors.Wrap(err, "error parsing credentials from JSON file")
+	}
+	oauthConfig, err := adal.NewOAuthConfig(azureActiveDirectoryEndpoint, spCredentials.Tenant)
+	if err != nil {
+		return nil, errors.Wrap(err, "error creating oauth config")
+	}
+
+	// Create service principal token for Azure Storage.
+	servicePrincipalToken, err := adal.NewServicePrincipalToken(
+		*oauthConfig,
+		spCredentials.AppID,
+		spCredentials.Password,
+		azureStorageEndpoint)
+	if err != nil {
+		return nil, errors.Wrap(err, "error creating service principal token")
+	}
+
+	// Wrap token inside a refresher closure.
+	var tokenRefresher azblob.TokenRefresher = func(credential azblob.TokenCredential) time.Duration {
+		if err := servicePrincipalToken.Refresh(); err != nil {
+			panic(err)
+		}
+		refreshedToken := servicePrincipalToken.Token()
+		credential.SetToken(refreshedToken.AccessToken)
+		exp := refreshedToken.Expires().Sub(time.Now().Add(2 * time.Minute))
+		return exp
+	}
+
+	return tokenRefresher, nil
+}
+
 // newPipeline creates a Pipeline using the specified credentials and options.
 //
 // this code was copied from azblob.NewPipeline
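
A minimal usage sketch for the refresher above, assuming a credentials file in the appId/password/tenant shape that servicePrincipalCredentials parses (the path and error handling are illustrative, not part of the change):

    // Sketch: build an azblob credential from a service principal file.
    credsData, err := ioutil.ReadFile("/path/to/azure-principal.json") // illustrative path
    if err != nil {
        return nil, errors.Wrap(err, "error opening service principal credentials file")
    }
    tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, credsData)
    if err != nil {
        return nil, err
    }
    // The initial token is empty; the refresher fills it in on first use.
    credential := azblob.NewTokenCredential("", tokenRefresher)
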
@@ -379,8 +473,7 @@ func (f *Fs) setRoot(root string) {
 }
 
 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-	ctx := context.Background()
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
@@ -388,10 +481,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		return nil, err
 	}
 
-	err = checkUploadCutoff(opt.UploadCutoff)
-	if err != nil {
-		return nil, errors.Wrap(err, "azure: upload cutoff")
-	}
 	err = checkUploadChunkSize(opt.ChunkSize)
 	if err != nil {
 		return nil, errors.Wrap(err, "azure: chunk size")
@@ -410,21 +499,25 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 			string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
 	}
 
+	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name:        name,
 		opt:         *opt,
-		pacer:       fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
-		uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
-		client:      fshttp.NewClient(fs.Config),
+		ci:          ci,
+		pacer:       fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		imdsPacer:   fs.NewPacer(ctx, pacer.NewAzureIMDS()),
+		uploadToken: pacer.NewTokenDispenser(ci.Transfers),
+		client:      fshttp.NewClient(ctx),
 		cache:       bucket.NewCache(),
 		cntURLcache: make(map[string]*azblob.ContainerURL, 1),
 		pool: pool.New(
 			time.Duration(opt.MemoryPoolFlushTime),
 			int(opt.ChunkSize),
-			fs.Config.Transfers,
+			ci.Transfers,
 			opt.MemoryPoolUseMmap,
 		),
 	}
+	f.imdsPacer.SetRetries(5) // per IMDS documentation
 	f.setRoot(root)
 	f.features = (&fs.Features{
 		ReadMimeType: true,
@@ -433,7 +526,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		BucketBasedRootOK: true,
 		SetTier:           true,
 		GetTier:           true,
-	}).Fill(f)
+	}).Fill(ctx, f)
 
 	var (
 		u *url.URL
@@ -451,6 +544,76 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		}
 		pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
 		serviceURL = azblob.NewServiceURL(*u, pipeline)
+	case opt.UseMSI:
+		var token adal.Token
+		var userMSI *userMSI = &userMSI{}
+		if len(opt.MSIClientID) > 0 || len(opt.MSIObjectID) > 0 || len(opt.MSIResourceID) > 0 {
+			// Specifying a user-assigned identity. Exactly one of the above IDs must be specified.
+			// Validate and ensure exactly one is set. (To do: better validation.)
+			if len(opt.MSIClientID) > 0 {
+				if len(opt.MSIObjectID) > 0 || len(opt.MSIResourceID) > 0 {
+					return nil, errors.New("more than one user-assigned identity ID is set")
+				}
+				userMSI.Type = msiClientID
+				userMSI.Value = opt.MSIClientID
+			}
+			if len(opt.MSIObjectID) > 0 {
+				if len(opt.MSIClientID) > 0 || len(opt.MSIResourceID) > 0 {
+					return nil, errors.New("more than one user-assigned identity ID is set")
+				}
+				userMSI.Type = msiObjectID
+				userMSI.Value = opt.MSIObjectID
+			}
+			if len(opt.MSIResourceID) > 0 {
+				if len(opt.MSIClientID) > 0 || len(opt.MSIObjectID) > 0 {
+					return nil, errors.New("more than one user-assigned identity ID is set")
+				}
+				userMSI.Type = msiResourceID
+				userMSI.Value = opt.MSIResourceID
+			}
+		} else {
+			userMSI = nil
+		}
+		err = f.imdsPacer.Call(func() (bool, error) {
+			// Retry as specified by the documentation:
+			// https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token#retry-guidance
+			token, err = GetMSIToken(ctx, userMSI)
+			return f.shouldRetry(err)
+		})
+
+		if err != nil {
+			return nil, errors.Wrapf(err, "Failed to acquire MSI token")
+		}
+
+		u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
+		}
+		credential := azblob.NewTokenCredential(token.AccessToken, func(credential azblob.TokenCredential) time.Duration {
+			fs.Debugf(f, "Token refresher called.")
+			var refreshedToken adal.Token
+			err := f.imdsPacer.Call(func() (bool, error) {
+				refreshedToken, err = GetMSIToken(ctx, userMSI)
+				return f.shouldRetry(err)
+			})
+			if err != nil {
+				// Failed to refresh.
+				return 0
+			}
+			credential.SetToken(refreshedToken.AccessToken)
+			now := time.Now().UTC()
+			// Refresh one minute before expiry.
+			refreshAt := refreshedToken.Expires().UTC().Add(-1 * time.Minute)
+			fs.Debugf(f, "Acquired new token that expires at %v; refreshing in %d s", refreshedToken.Expires(),
+				int(refreshAt.Sub(now).Seconds()))
+			if now.After(refreshAt) {
+				// Acquired a causality violation.
+				return 0
+			}
+			return refreshAt.Sub(now)
+		})
+		pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
+		serviceURL = azblob.NewServiceURL(*u, pipeline)
 	case opt.Account != "" && opt.Key != "":
 		credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
 		if err != nil {
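
For reference, a sketch of fetching a token directly with the helpers that imds.go (further below) introduces, assuming a user-assigned identity selected by client ID (the GUID is illustrative):

    id := &userMSI{Type: msiClientID, Value: "00000000-0000-0000-0000-000000000000"}
    token, err := GetMSIToken(ctx, id) // pass nil to use the system-assigned identity
    if err != nil {
        return nil, errors.Wrap(err, "failed to acquire MSI token")
    }
    fs.Debugf(nil, "token expires at %v", token.Expires())
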
@@ -482,8 +645,27 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		} else {
 			serviceURL = azblob.NewServiceURL(*u, pipeline)
 		}
+	case opt.ServicePrincipalFile != "":
+		// Create a standard URL.
+		u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
+		}
+		// Try loading service principal credentials from file.
+		loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServicePrincipalFile))
+		if err != nil {
+			return nil, errors.Wrap(err, "error opening service principal credentials file")
+		}
+		// Create a token refresher from service principal credentials.
+		tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, loadedCreds)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to create a service principal token")
+		}
+		options := azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}}
+		pipe := f.newPipeline(azblob.NewTokenCredential("", tokenRefresher), options)
+		serviceURL = azblob.NewServiceURL(*u, pipe)
 	default:
-		return nil, errors.New("Need account+key or connectionString or sasURL")
+		return nil, errors.New("No authentication method configured")
 	}
 	f.svcURL = &serviceURL
 
@@ -524,7 +706,7 @@ func (f *Fs) cntURL(container string) (containerURL *azblob.ContainerURL) {
 // Return an Object from a path
 //
 // If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) newObjectWithInfo(remote string, info *azblob.BlobItem) (fs.Object, error) {
+func (f *Fs) newObjectWithInfo(remote string, info *azblob.BlobItemInternal) (fs.Object, error) {
 	o := &Object{
 		fs:     f,
 		remote: remote,
@@ -581,7 +763,7 @@ func isDirectoryMarker(size int64, metadata azblob.Metadata, remote string) bool
 }
 
 // listFn is called from list to handle an object
-type listFn func(remote string, object *azblob.BlobItem, isDirectory bool) error
+type listFn func(remote string, object *azblob.BlobItemInternal, isDirectory bool) error
 
 // list lists the objects into the function supplied from
 // the container and root supplied
@@ -680,7 +862,7 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
 }
 
 // Convert a list item into a DirEntry
-func (f *Fs) itemToDirEntry(remote string, object *azblob.BlobItem, isDirectory bool) (fs.DirEntry, error) {
+func (f *Fs) itemToDirEntry(remote string, object *azblob.BlobItemInternal, isDirectory bool) (fs.DirEntry, error) {
 	if isDirectory {
 		d := fs.NewDir(remote, time.Time{})
 		return d, nil
@@ -692,9 +874,27 @@ func (f *Fs) itemToDirEntry(remote string, object *azblob.BlobItem, isDirectory
 	return o, nil
 }
 
+// Check to see if this is a limited container and the container is not found
+func (f *Fs) containerOK(container string) bool {
+	if !f.isLimited {
+		return true
+	}
+	f.cntURLcacheMu.Lock()
+	defer f.cntURLcacheMu.Unlock()
+	for limitedContainer := range f.cntURLcache {
+		if container == limitedContainer {
+			return true
+		}
+	}
+	return false
+}
+
 // listDir lists a single directory
 func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
-	err = f.list(ctx, container, directory, prefix, addContainer, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
+	if !f.containerOK(container) {
+		return nil, fs.ErrorDirNotFound
+	}
+	err = f.list(ctx, container, directory, prefix, addContainer, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItemInternal, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(remote, object, isDirectory)
 		if err != nil {
 			return err
@@ -775,7 +975,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 	container, directory := f.split(dir)
 	list := walk.NewListRHelper(callback)
 	listR := func(container, directory, prefix string, addContainer bool) error {
-		return f.list(ctx, container, directory, prefix, addContainer, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
+		return f.list(ctx, container, directory, prefix, addContainer, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItemInternal, isDirectory bool) error {
 			entry, err := f.itemToDirEntry(remote, object, isDirectory)
 			if err != nil {
 				return err
@@ -802,6 +1002,9 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 			f.cache.MarkOK(container)
 		}
 	} else {
+		if !f.containerOK(container) {
+			return fs.ErrorDirNotFound
+		}
 		err = listR(container, directory, f.rootDirectory, f.rootContainer == "")
 		if err != nil {
 			return err
@@ -903,7 +1106,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
 // isEmpty checks to see if a given (container, directory) is empty and returns an error if not
 func (f *Fs) isEmpty(ctx context.Context, container, directory string) (err error) {
 	empty := true
-	err = f.list(ctx, container, directory, f.rootDirectory, f.rootContainer == "", true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
+	err = f.list(ctx, container, directory, f.rootDirectory, f.rootContainer == "", true, 1, func(remote string, object *azblob.BlobItemInternal, isDirectory bool) error {
 		empty = false
 		return nil
 	})
@@ -976,7 +1179,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 	return f.deleteContainer(ctx, container)
 }
 
-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
 //
 // This is stored with the remote path given
 //
@@ -1008,7 +1211,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	var startCopy *azblob.BlobStartCopyFromURLResponse
 
 	err = f.pacer.Call(func() (bool, error) {
-		startCopy, err = dstBlobURL.StartCopyFromURL(ctx, *source, nil, azblob.ModifiedAccessConditions{}, options)
+		startCopy, err = dstBlobURL.StartCopyFromURL(ctx, *source, nil, azblob.ModifiedAccessConditions{}, options, azblob.AccessTierType(f.opt.AccessTier), nil)
 		return f.shouldRetry(err)
 	})
 	if err != nil {
@@ -1018,7 +1221,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	copyStatus := startCopy.CopyStatus()
 	for copyStatus == azblob.CopyStatusPending {
 		time.Sleep(1 * time.Second)
-		getMetadata, err := dstBlobURL.GetProperties(ctx, options)
+		getMetadata, err := dstBlobURL.GetProperties(ctx, options, azblob.ClientProvidedKeyOptions{})
 		if err != nil {
 			return nil, err
 		}
@@ -1036,7 +1239,7 @@ func (f *Fs) getMemoryPool(size int64) *pool.Pool {
 	return pool.New(
 		time.Duration(f.opt.MemoryPoolFlushTime),
 		int(size),
-		fs.Config.Transfers,
+		f.ci.Transfers,
 		f.opt.MemoryPoolUseMmap,
 	)
 }
@@ -1123,7 +1326,7 @@ func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetProper
 	return nil
 }
 
-func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItem) (err error) {
+func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItemInternal) (err error) {
 	metadata := info.Metadata
 	size := *info.Properties.ContentLength
 	if isDirectoryMarker(size, metadata, o.remote) {
@@ -1169,7 +1372,7 @@ func (o *Object) readMetaData() (err error) {
 	ctx := context.Background()
 	var blobProperties *azblob.BlobGetPropertiesResponse
 	err = o.fs.pacer.Call(func() (bool, error) {
-		blobProperties, err = blob.GetProperties(ctx, options)
+		blobProperties, err = blob.GetProperties(ctx, options, azblob.ClientProvidedKeyOptions{})
 		return o.fs.shouldRetry(err)
 	})
 	if err != nil {
@@ -1204,7 +1407,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 
 	blob := o.getBlobReference()
 	err := o.fs.pacer.Call(func() (bool, error) {
-		_, err := blob.SetMetadata(ctx, o.meta, azblob.BlobAccessConditions{})
+		_, err := blob.SetMetadata(ctx, o.meta, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
 		return o.fs.shouldRetry(err)
 	})
 	if err != nil {
@@ -1245,15 +1448,15 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	}
 	blob := o.getBlobReference()
 	ac := azblob.BlobAccessConditions{}
-	var dowloadResponse *azblob.DownloadResponse
+	var downloadResponse *azblob.DownloadResponse
 	err = o.fs.pacer.Call(func() (bool, error) {
-		dowloadResponse, err = blob.Download(ctx, offset, count, ac, false)
+		downloadResponse, err = blob.Download(ctx, offset, count, ac, false, azblob.ClientProvidedKeyOptions{})
 		return o.fs.shouldRetry(err)
 	})
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to open for download")
 	}
-	in = dowloadResponse.Body(azblob.RetryReaderOptions{})
+	in = downloadResponse.Body(azblob.RetryReaderOptions{})
 	return in, nil
 }
 
@@ -1278,12 +1481,6 @@ func init() {
 	}
 }
 
-// readSeeker joins an io.Reader and an io.Seeker
-type readSeeker struct {
-	io.Reader
-	io.Seeker
-}
-
 // increment the slice passed in as LSB binary
 func increment(xs []byte) {
 	for i, digit := range xs {
@@ -1296,153 +1493,69 @@ func increment(xs []byte) {
 	}
 }
 
-var warnStreamUpload sync.Once
-
-// uploadMultipart uploads a file using multipart upload
-//
-// Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
-func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, blob *azblob.BlobURL, httpHeaders *azblob.BlobHTTPHeaders) (err error) {
-	// Calculate correct chunkSize
-	chunkSize := int64(o.fs.opt.ChunkSize)
-	totalParts := -1
-
-	// Note that the max size of file is 4.75 TB (100 MB X 50,000
-	// blocks) and this is bigger than the max uncommitted block
-	// size (9.52 TB) so we do not need to part commit block lists
-	// or garbage collect uncommitted blocks.
-	//
-	// See: https://docs.microsoft.com/en-gb/rest/api/storageservices/put-block
-
-	// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
-	// buffers here (default 4MB). With a maximum number of parts (50,000) this will be a file of
-	// 195GB which seems like a not too unreasonable limit.
-	if size == -1 {
-		warnStreamUpload.Do(func() {
-			fs.Logf(o, "Streaming uploads using chunk size %v will have maximum file size of %v",
-				o.fs.opt.ChunkSize, fs.SizeSuffix(chunkSize*maxTotalParts))
-		})
-	} else {
-		// Adjust partSize until the number of parts is small enough.
-		if size/chunkSize >= maxTotalParts {
-			// Calculate partition size rounded up to the nearest MB
-			chunkSize = (((size / maxTotalParts) >> 20) + 1) << 20
-		}
-		if chunkSize > int64(maxChunkSize) {
-			return errors.Errorf("can't upload as it is too big %v - takes more than %d chunks of %v", fs.SizeSuffix(size), totalParts, fs.SizeSuffix(chunkSize/2))
-		}
-		totalParts = int(size / chunkSize)
-		if size%chunkSize != 0 {
-			totalParts++
-		}
-	}
-
-	fs.Debugf(o, "Multipart upload session started for %d parts of size %v", totalParts, fs.SizeSuffix(chunkSize))
-
-	// unwrap the accounting from the input, we use wrap to put it
-	// back on after the buffering
-	in, wrap := accounting.UnWrap(in)
-
-	// Upload the chunks
-	var (
-		g, gCtx       = errgroup.WithContext(ctx)
-		remaining     = size                          // remaining size in file for logging only, -1 if size < 0
-		position      = int64(0)                      // position in file
-		memPool       = o.fs.getMemoryPool(chunkSize) // pool to get memory from
-		finished      = false                         // set when we have read EOF
-		blocks        []string                        // list of blocks for finalize
-		blockBlobURL  = blob.ToBlockBlobURL()         // Get BlockBlobURL, we will use default pipeline here
-		ac            = azblob.LeaseAccessConditions{} // Use default lease access conditions
-		binaryBlockID = make([]byte, 8)               // block counter as LSB first 8 bytes
-	)
-	for part := 0; !finished; part++ {
-		// Get a block of memory from the pool and a token which limits concurrency
-		o.fs.uploadToken.Get()
-		buf := memPool.Get()
-
-		free := func() {
-			memPool.Put(buf)       // return the buf
-			o.fs.uploadToken.Put() // return the token
-		}
-
-		// Fail fast, in case an errgroup managed function returns an error
-		// gCtx is cancelled. There is no point in uploading all the other parts.
-		if gCtx.Err() != nil {
-			free()
-			break
-		}
-
-		// Read the chunk
-		n, err := readers.ReadFill(in, buf) // this can never return 0, nil
-		if err == io.EOF {
-			if n == 0 { // end if no data
-				free()
-				break
-			}
-			finished = true
-		} else if err != nil {
-			free()
-			return errors.Wrap(err, "multipart upload failed to read source")
-		}
-		buf = buf[:n]
-
-		// increment the blockID and save the blocks for finalize
-		increment(binaryBlockID)
-		blockID := base64.StdEncoding.EncodeToString(binaryBlockID)
-		blocks = append(blocks, blockID)
-
-		// Transfer the chunk
-		fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, totalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
-		g.Go(func() (err error) {
-			defer free()
-
-			// Upload the block, with MD5 for check
-			md5sum := md5.Sum(buf)
-			transactionalMD5 := md5sum[:]
-			err = o.fs.pacer.Call(func() (bool, error) {
-				bufferReader := bytes.NewReader(buf)
-				wrappedReader := wrap(bufferReader)
-				rs := readSeeker{wrappedReader, bufferReader}
-				_, err = blockBlobURL.StageBlock(ctx, blockID, &rs, ac, transactionalMD5)
-				return o.fs.shouldRetry(err)
-			})
-			if err != nil {
-				return errors.Wrap(err, "multipart upload failed to upload part")
-			}
-			return nil
-		})
-
-		// ready for next block
-		if size >= 0 {
-			remaining -= chunkSize
-		}
-		position += chunkSize
-	}
-	err = g.Wait()
-	if err != nil {
-		return err
-	}
-
-	// Finalise the upload session
-	err = o.fs.pacer.Call(func() (bool, error) {
-		_, err := blockBlobURL.CommitBlockList(ctx, blocks, *httpHeaders, o.meta, azblob.BlobAccessConditions{})
-		return o.fs.shouldRetry(err)
-	})
-	if err != nil {
-		return errors.Wrap(err, "multipart upload failed to finalize")
-	}
-	return nil
-}
+// poolWrapper wraps a pool.Pool as an azblob.TransferManager
+type poolWrapper struct {
+	pool     *pool.Pool
+	bufToken chan struct{}
+	runToken chan struct{}
+}
+
+// newPoolWrapper creates an azblob.TransferManager that will use a
+// pool.Pool with maximum concurrency as specified.
+func (f *Fs) newPoolWrapper(concurrency int) azblob.TransferManager {
+	return &poolWrapper{
+		pool:     f.pool,
+		bufToken: make(chan struct{}, concurrency),
+		runToken: make(chan struct{}, concurrency),
+	}
+}
+
+// Get implements TransferManager.Get().
+func (pw *poolWrapper) Get() []byte {
+	pw.bufToken <- struct{}{}
+	return pw.pool.Get()
+}
+
+// Put implements TransferManager.Put().
+func (pw *poolWrapper) Put(b []byte) {
+	pw.pool.Put(b)
+	<-pw.bufToken
+}
+
+// Run implements TransferManager.Run().
+func (pw *poolWrapper) Run(f func()) {
+	pw.runToken <- struct{}{}
+	go func() {
+		f()
+		<-pw.runToken
+	}()
+}
+
+// Close implements TransferManager.Close().
+func (pw *poolWrapper) Close() {
+}
 
 // Update the object with the contents of the io.Reader, modTime and size
 //
 // The new object may have been created if an error is returned
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+	if o.accessTier == azblob.AccessTierArchive {
+		if o.fs.opt.ArchiveTierDelete {
+			fs.Debugf(o, "deleting archive tier blob before updating")
+			err = o.Remove(ctx)
+			if err != nil {
+				return errors.Wrap(err, "failed to delete archive blob before updating")
+			}
+		} else {
+			return errCantUpdateArchiveTierBlobs
+		}
+	}
 	container, _ := o.split()
 	err = o.fs.makeContainer(ctx, container)
 	if err != nil {
 		return err
 	}
-	size := src.Size()
 	// Update Mod time
 	o.updateMetadataWithModTime(src.ModTime(ctx))
 	if err != nil {
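
The poolWrapper above bounds both buffers and worker goroutines with the usual buffered-channel semaphore: a send into a channel of capacity N acquires one of N slots and a receive releases it. A self-contained sketch of the pattern (names and workload are illustrative, not part of the diff):

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        sem := make(chan struct{}, 4) // counting semaphore: at most 4 tasks in flight
        var wg sync.WaitGroup
        for i := 0; i < 10; i++ {
            i := i
            sem <- struct{}{} // acquire a slot (blocks while 4 tasks are running)
            wg.Add(1)
            go func() {
                defer wg.Done()
                defer func() { <-sem }() // release the slot
                fmt.Println("task", i)
            }()
        }
        wg.Wait()
    }
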
@@ -1451,11 +1564,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 
 	blob := o.getBlobReference()
 	httpHeaders := azblob.BlobHTTPHeaders{}
-	httpHeaders.ContentType = fs.MimeType(ctx, o)
-	// Compute the Content-MD5 of the file, for multiparts uploads it
+	httpHeaders.ContentType = fs.MimeType(ctx, src)
+	// Compute the Content-MD5 of the file. As we stream all uploads it
 	// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
-	// Note: If multipart, an MD5 checksum will also be computed for each uploaded block
-	// in order to validate its integrity during transport
 	if !o.fs.opt.DisableCheckSum {
 		if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
 			sourceMD5bytes, err := hex.DecodeString(sourceMD5)
@@ -1469,30 +1581,17 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 
 	putBlobOptions := azblob.UploadStreamToBlockBlobOptions{
 		BufferSize:      int(o.fs.opt.ChunkSize),
-		MaxBuffers:      4,
+		MaxBuffers:      uploadConcurrency,
 		Metadata:        o.meta,
 		BlobHTTPHeaders: httpHeaders,
-	}
-	// FIXME Until https://github.com/Azure/azure-storage-blob-go/pull/75
-	// is merged the SDK can't upload a single blob of exactly the chunk
-	// size, so upload with a multpart upload to work around.
-	// See: https://github.com/rclone/rclone/issues/2653
-	multipartUpload := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
-	if size == int64(o.fs.opt.ChunkSize) {
-		multipartUpload = true
-		fs.Debugf(o, "Setting multipart upload for file of chunk size (%d) to work around SDK bug", size)
+		TransferManager: o.fs.newPoolWrapper(uploadConcurrency),
 	}
 
 	// Don't retry, return a retry error instead
 	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-		if multipartUpload {
-			// If a large file upload in chunks
-			err = o.uploadMultipart(ctx, in, size, &blob, &httpHeaders)
-		} else {
-			// Write a small blob in one transaction
-			blockBlobURL := blob.ToBlockBlobURL()
-			_, err = azblob.UploadStreamToBlockBlob(ctx, in, blockBlobURL, putBlobOptions)
-		}
+		// Stream contents of the reader object to the given blob URL
+		blockBlobURL := blob.ToBlockBlobURL()
+		_, err = azblob.UploadStreamToBlockBlob(ctx, in, blockBlobURL, putBlobOptions)
 		return o.fs.shouldRetry(err)
 	})
 	if err != nil {
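
Taken together, the hunks above replace rclone's hand-rolled multipart uploader with the SDK's streaming path: every upload now goes through azblob.UploadStreamToBlockBlob, with memory bounded by uploadConcurrency buffers of ChunkSize bytes handed out by the poolWrapper TransferManager. That is why checkUploadCutoff, setUploadCutoff, and the readSeeker helper could all be deleted earlier in this diff.
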

@@ -1,4 +1,4 @@
-// +build !plan9,!solaris,!js,go1.13
+// +build !plan9,!solaris,!js,go1.14
 
 package azureblob
 

@@ -1,14 +1,16 @@
 // Test AzureBlob filesystem interface
 
-// +build !plan9,!solaris,!js,go1.13
+// +build !plan9,!solaris,!js,go1.14
 
 package azureblob
 
 import (
+	"context"
 	"testing"
 
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/stretchr/testify/assert"
 )
 
 // TestIntegration runs integration tests against the remote
@@ -27,11 +29,36 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
 	return f.setUploadChunkSize(cs)
 }
 
-func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
-	return f.setUploadCutoff(cs)
-}
-
 var (
 	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
-	_ fstests.SetUploadCutoffer   = (*Fs)(nil)
 )
+
+// TestServicePrincipalFileSuccess checks that, given a proper JSON file, we can create a token.
+func TestServicePrincipalFileSuccess(t *testing.T) {
+	ctx := context.TODO()
+	credentials := `
+{
+	"appId": "my application (client) ID",
+	"password": "my secret",
+	"tenant": "my active directory tenant ID"
+}
+`
+	tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
+	if assert.NoError(t, err) {
+		assert.NotNil(t, tokenRefresher)
+	}
+}
+
+// TestServicePrincipalFileFailure checks that, given a JSON file with a missing secret, it returns an error.
+func TestServicePrincipalFileFailure(t *testing.T) {
+	ctx := context.TODO()
+	credentials := `
+{
+	"appId": "my application (client) ID",
+	"tenant": "my active directory tenant ID"
+}
+`
+	_, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
+	assert.Error(t, err)
+	assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
+}

@@ -1,6 +1,6 @@
 // Build for azureblob for unsupported platforms to stop go complaining
 // about "no buildable Go source files "
 
-// +build plan9 solaris js !go1.13
+// +build plan9 solaris js !go1.14
 
 package azureblob

backend/azureblob/imds.go (new file, 137 lines)
@@ -0,0 +1,137 @@
+// +build !plan9,!solaris,!js,go1.14
+
+package azureblob
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+
+	"github.com/Azure/go-autorest/autorest/adal"
+	"github.com/pkg/errors"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/fshttp"
+)
+
+const (
+	azureResource      = "https://storage.azure.com"
+	imdsAPIVersion     = "2018-02-01"
+	msiEndpointDefault = "http://169.254.169.254/metadata/identity/oauth2/token"
+)
+
+// This custom type is used to add the port the test server has bound to
+// to the request context.
+type testPortKey string
+
+type msiIdentifierType int
+
+const (
+	msiClientID msiIdentifierType = iota
+	msiObjectID
+	msiResourceID
+)
+
+type userMSI struct {
+	Type  msiIdentifierType
+	Value string
+}
+
+type httpError struct {
+	Response *http.Response
+}
+
+func (e httpError) Error() string {
+	return fmt.Sprintf("HTTP error %v (%v)", e.Response.StatusCode, e.Response.Status)
+}
+
+// GetMSIToken attempts to obtain an MSI token from the Azure Instance
+// Metadata Service.
+func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
+	// Attempt to get an MSI token; silently continue if unsuccessful.
+	// This code has been lovingly stolen from azcopy's OAuthTokenManager.
+	result := adal.Token{}
+	req, err := http.NewRequestWithContext(ctx, "GET", msiEndpointDefault, nil)
+	if err != nil {
+		fs.Debugf(nil, "Failed to create request: %v", err)
+		return result, err
+	}
+	params := req.URL.Query()
+	params.Set("resource", azureResource)
+	params.Set("api-version", imdsAPIVersion)
+
+	// Specify user-assigned identity if requested.
+	if identity != nil {
+		switch identity.Type {
+		case msiClientID:
+			params.Set("client_id", identity.Value)
+		case msiObjectID:
+			params.Set("object_id", identity.Value)
+		case msiResourceID:
+			params.Set("mi_res_id", identity.Value)
+		default:
+			// If this happens, the calling function and this one don't agree on
+			// what valid ID types exist.
+			return result, fmt.Errorf("unknown MSI identity type specified")
+		}
+	}
+	req.URL.RawQuery = params.Encode()
+
+	// The Metadata header is required by all calls to IMDS.
+	req.Header.Set("Metadata", "true")
+
+	// If this function is run in a test, query the test server instead of IMDS.
+	testPort, isTest := ctx.Value(testPortKey("testPort")).(int)
+	if isTest {
+		req.URL.Host = fmt.Sprintf("localhost:%d", testPort)
+		req.Host = req.URL.Host
+	}
+
+	// Send request
+	httpClient := fshttp.NewClient(ctx)
+	resp, err := httpClient.Do(req)
+	if err != nil {
+		return result, errors.Wrap(err, "MSI is not enabled on this VM")
+	}
+	defer func() { // resp and Body should not be nil
+		_, err = io.Copy(ioutil.Discard, resp.Body)
+		if err != nil {
+			fs.Debugf(nil, "Unable to drain IMDS response: %v", err)
+		}
+		err = resp.Body.Close()
+		if err != nil {
+			fs.Debugf(nil, "Unable to close IMDS response: %v", err)
+		}
+	}()
+	// Check if the status code indicates success
+	// The request returns 200 currently, add 201 and 202 as well for possible extension.
+	switch resp.StatusCode {
+	case 200, 201, 202:
+		break
+	default:
+		body, _ := ioutil.ReadAll(resp.Body)
+		fs.Errorf(nil, "Couldn't obtain OAuth token from IMDS; server returned status code %d and body: %v", resp.StatusCode, string(body))
+		return result, httpError{Response: resp}
+	}
+
+	b, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return result, errors.Wrap(err, "Couldn't read IMDS response")
+	}
+	// Remove BOM, if any. azcopy does this so I'm following along.
+	b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
+
+	// This would be a good place to persist the token if a large number of rclone
+	// invocations are being made in a short amount of time. If the token is
+	// persisted, the azureblob code will need to check for expiry before every
+	// storage API call.
+	err = json.Unmarshal(b, &result)
+	if err != nil {
+		return result, errors.Wrap(err, "Couldn't unmarshal IMDS response")
+	}
+
+	return result, nil
+}
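
Concretely, the request that GetMSIToken above assembles (outside of tests) looks roughly like this; Metadata: true is the one required header, and the identity parameter (client_id here) is present only for a user-assigned identity:

    GET http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=https://storage.azure.com&client_id=<GUID>
    Metadata: true
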
backend/azureblob/imds_test.go (new file, 117 lines)
@@ -0,0 +1,117 @@
+// +build !plan9,!solaris,!js,go1.14
+
+package azureblob
+
+import (
+	"context"
+	"encoding/json"
+	"net/http"
+	"net/http/httptest"
+	"strconv"
+	"strings"
+	"testing"
+
+	"github.com/Azure/go-autorest/autorest/adal"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func handler(t *testing.T, actual *map[string]string) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		err := r.ParseForm()
+		require.NoError(t, err)
+		parameters := r.URL.Query()
+		(*actual)["path"] = r.URL.Path
+		(*actual)["Metadata"] = r.Header.Get("Metadata")
+		(*actual)["method"] = r.Method
+		for paramName := range parameters {
+			(*actual)[paramName] = parameters.Get(paramName)
+		}
+		// Make response.
+		response := adal.Token{}
+		responseBytes, err := json.Marshal(response)
+		require.NoError(t, err)
+		_, err = w.Write(responseBytes)
+		require.NoError(t, err)
+	}
+}
+
+func TestManagedIdentity(t *testing.T) {
+	// test user-assigned identity specifiers to use
+	testMSIClientID := "d859b29f-5c9c-42f8-a327-ec1bc6408d79"
+	testMSIObjectID := "9ffeb650-3ca0-4278-962b-5a38d520591a"
+	testMSIResourceID := "/subscriptions/fe714c49-b8a4-4d49-9388-96a20daa318f/resourceGroups/somerg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/someidentity"
+	tests := []struct {
+		identity              *userMSI
+		identityParameterName string
+		expectedAbsent        []string
+	}{
+		{&userMSI{msiClientID, testMSIClientID}, "client_id", []string{"object_id", "mi_res_id"}},
+		{&userMSI{msiObjectID, testMSIObjectID}, "object_id", []string{"client_id", "mi_res_id"}},
+		{&userMSI{msiResourceID, testMSIResourceID}, "mi_res_id", []string{"object_id", "client_id"}},
+		{nil, "(default)", []string{"object_id", "client_id", "mi_res_id"}},
+	}
+	alwaysExpected := map[string]string{
+		"path":        "/metadata/identity/oauth2/token",
+		"resource":    "https://storage.azure.com",
+		"Metadata":    "true",
+		"api-version": "2018-02-01",
+		"method":      "GET",
+	}
+	for _, test := range tests {
+		actual := make(map[string]string, 10)
+		testServer := httptest.NewServer(handler(t, &actual))
+		defer testServer.Close()
+		testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
+		require.NoError(t, err)
+		ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
+		_, err = GetMSIToken(ctx, test.identity)
+		require.NoError(t, err)
+
+		// Validate expected query parameters present
+		expected := make(map[string]string)
+		for k, v := range alwaysExpected {
+			expected[k] = v
+		}
+		if test.identity != nil {
+			expected[test.identityParameterName] = test.identity.Value
+		}
+
+		for key := range expected {
+			value, exists := actual[key]
+			if assert.Truef(t, exists, "test of %s: query parameter %s was not passed",
+				test.identityParameterName, key) {
+				assert.Equalf(t, expected[key], value,
+					"test of %s: parameter %s has incorrect value", test.identityParameterName, key)
+			}
+		}
+
+		// Validate unexpected query parameters absent
+		for _, key := range test.expectedAbsent {
+			_, exists := actual[key]
+			assert.Falsef(t, exists, "query parameter %s was unexpectedly passed", key)
+		}
+	}
+}
+
+func errorHandler(resultCode int) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		http.Error(w, "Test error generated", resultCode)
+	}
+}
+
+func TestIMDSErrors(t *testing.T) {
+	errorCodes := []int{404, 429, 500}
+	for _, code := range errorCodes {
+		testServer := httptest.NewServer(errorHandler(code))
+		defer testServer.Close()
+		testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
+		require.NoError(t, err)
+		ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
+		_, err = GetMSIToken(ctx, nil)
+		require.Error(t, err)
+		httpErr, ok := err.(httpError)
+		require.Truef(t, ok, "HTTP error %d did not result in an httpError object", code)
+		assert.Equalf(t, httpErr.Response.StatusCode, code, "desired error %d but didn't get it", code)
+	}
+}
backend/b2/b2.go (149 lines changed)
@@ -44,8 +44,10 @@ const (
 	timeHeader       = headerPrefix + timeKey
 	sha1Key          = "large_file_sha1"
 	sha1Header       = "X-Bz-Content-Sha1"
-	sha1InfoHeader   = headerPrefix + sha1Key
 	testModeHeader   = "X-Bz-Test-Mode"
+	idHeader         = "X-Bz-File-Id"
+	nameHeader       = "X-Bz-File-Name"
+	timestampHeader  = "X-Bz-Upload-Timestamp"
 	retryAfterHeader = "Retry-After"
 	minSleep         = 10 * time.Millisecond
 	maxSleep         = 5 * time.Minute
@@ -121,7 +123,7 @@ This value should be set no larger than 4.657GiB (== 5GB).`,
 			Name: "copy_cutoff",
 			Help: `Cutoff for switching to multipart copy
 
-Any files larger than this that need to be server side copied will be
+Any files larger than this that need to be server-side copied will be
 copied in chunks of this size.
 
 The minimum is 0 and the maximum is 4.6GB.`,
@@ -153,7 +155,9 @@ to start uploading.`,
 
 This is usually set to a Cloudflare CDN URL as Backblaze offers
 free egress for data downloaded through the Cloudflare network.
-This is probably only useful for a public bucket.
+Rclone works with private buckets by sending an "Authorization" header.
+If the custom endpoint rewrites the requests for authentication,
+e.g., in Cloudflare Workers, this header needs to be handled properly.
 Leave blank if you want to use the endpoint provided by Backblaze.`,
 			Advanced: true,
 		}, {
@@ -214,6 +218,7 @@ type Fs struct {
 	name            string         // name of this remote
 	root            string         // the path we are working on if any
 	opt             Options        // parsed config options
+	ci              *fs.ConfigInfo // global config
 	features        *fs.Features   // optional features
 	srv             *rest.Client   // the connection to the b2 server
 	rootBucket      string         // bucket part of root (if any)
@@ -290,7 +295,7 @@ func (o *Object) split() (bucket, bucketPath string) {
 
 // retryErrorCodes is a slice of error codes that we will retry
 var retryErrorCodes = []int{
-	401, // Unauthorized (eg "Token has expired")
+	401, // Unauthorized (e.g. "Token has expired")
 	408, // Request Timeout
 	429, // Rate exceeded.
 	500, // Get occasional 500 Internal Server Error
@@ -391,14 +396,17 @@ func (f *Fs) setRoot(root string) {
 }
 
 // NewFs constructs an Fs from the path, bucket:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-	ctx := context.Background()
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
 	if err != nil {
 		return nil, err
 	}
+	if opt.UploadCutoff < opt.ChunkSize {
+		opt.UploadCutoff = opt.ChunkSize
+		fs.Infof(nil, "b2: raising upload cutoff to chunk size: %v", opt.UploadCutoff)
+	}
 	err = checkUploadCutoff(opt, opt.UploadCutoff)
 	if err != nil {
 		return nil, errors.Wrap(err, "b2: upload cutoff")
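
With illustrative numbers: if chunk_size is set to 200M while upload_cutoff stays at 100M, the new guard silently raises the cutoff to 200M (logging an Info note) before checkUploadCutoff runs, preserving the invariant that the cutoff is never smaller than the chunk size, which the rest of the upload path assumes.
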
@@ -416,20 +424,22 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
if opt.Endpoint == "" {
|
if opt.Endpoint == "" {
|
||||||
opt.Endpoint = defaultEndpoint
|
opt.Endpoint = defaultEndpoint
|
||||||
}
|
}
|
||||||
|
ci := fs.GetConfig(ctx)
|
||||||
f := &Fs{
|
f := &Fs{
|
||||||
name: name,
|
name: name,
|
||||||
opt: *opt,
|
opt: *opt,
|
||||||
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
|
ci: ci,
|
||||||
|
srv: rest.NewClient(fshttp.NewClient(ctx)).SetErrorHandler(errorHandler),
|
||||||
cache: bucket.NewCache(),
|
cache: bucket.NewCache(),
|
||||||
_bucketID: make(map[string]string, 1),
|
_bucketID: make(map[string]string, 1),
|
||||||
_bucketType: make(map[string]string, 1),
|
_bucketType: make(map[string]string, 1),
|
||||||
uploads: make(map[string][]*api.GetUploadURLResponse),
|
uploads: make(map[string][]*api.GetUploadURLResponse),
|
||||||
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||||
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
|
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
|
||||||
pool: pool.New(
|
pool: pool.New(
|
||||||
time.Duration(opt.MemoryPoolFlushTime),
|
time.Duration(opt.MemoryPoolFlushTime),
|
||||||
int(opt.ChunkSize),
|
int(opt.ChunkSize),
|
||||||
fs.Config.Transfers,
|
ci.Transfers,
|
||||||
opt.MemoryPoolUseMmap,
|
opt.MemoryPoolUseMmap,
|
||||||
),
|
),
|
||||||
}
|
}
|
||||||
@@ -439,7 +449,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		WriteMimeType:     true,
 		BucketBased:       true,
 		BucketBasedRootOK: true,
-	}).Fill(f)
+	}).Fill(ctx, f)
 	// Set the test flag if required
 	if opt.TestMode != "" {
 		testMode := strings.TrimSpace(opt.TestMode)
@@ -702,7 +712,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 		remote := file.Name[len(prefix):]
 		// Check for directory
 		isDirectory := remote == "" || strings.HasSuffix(remote, "/")
-		if isDirectory {
+		if isDirectory && len(remote) > 1 {
 			remote = remote[:len(remote)-1]
 		}
 		if addBucket {
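The added len(remote) > 1 guard is a correctness fix, not a cleanup: when a listing entry equals the directory prefix itself, remote is empty and the old unconditional remote[:len(remote)-1] slices out of range, while a bare "/" would collapse to the empty name. A standalone reproduction of the guarded behaviour:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        for _, remote := range []string{"dir/", "/", ""} {
            isDirectory := remote == "" || strings.HasSuffix(remote, "/")
            if isDirectory && len(remote) > 1 { // the guard added in this change
                remote = remote[:len(remote)-1]
            }
            fmt.Printf("%q\n", remote) // "dir", then "/", then ""
        }
    }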
@@ -1168,10 +1178,10 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
 	}
 
 	// Delete Config.Transfers in parallel
-	toBeDeleted := make(chan *api.File, fs.Config.Transfers)
+	toBeDeleted := make(chan *api.File, f.ci.Transfers)
 	var wg sync.WaitGroup
-	wg.Add(fs.Config.Transfers)
-	for i := 0; i < fs.Config.Transfers; i++ {
+	wg.Add(f.ci.Transfers)
+	for i := 0; i < f.ci.Transfers; i++ {
 		go func() {
 			defer wg.Done()
 			for object := range toBeDeleted {
@@ -1183,7 +1193,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
 				tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
 				err = f.deleteByID(ctx, object.ID, object.Name)
 				checkErr(err)
-				tr.Done(err)
+				tr.Done(ctx, err)
 			}
 		}()
 	}
@@ -1211,7 +1221,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
 				toBeDeleted <- object
 			}
 			last = remote
-			tr.Done(nil)
+			tr.Done(ctx, nil)
 		}
 		return nil
 	}))
@@ -1234,7 +1244,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 	return f.purge(ctx, "", true)
 }
 
-// copy does a server side copy from dstObj <- srcObj
+// copy does a server-side copy from dstObj <- srcObj
 //
 // If newInfo is nil then the metadata will be copied otherwise it
 // will be replaced with newInfo
@@ -1291,7 +1301,7 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
 	return dstObj.decodeMetaDataFileInfo(&response)
 }
 
-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
 //
 // This is stored with the remote path given
 //
@@ -1440,7 +1450,7 @@ func (o *Object) Size() int64 {
 // Make sure it is lower case
 //
 // Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
-// Some tools (eg Cyberduck) use this
+// Some tools (e.g. Cyberduck) use this
 func cleanSHA1(sha1 string) (out string) {
 	out = strings.ToLower(sha1)
 	const unverified = "unverified:"
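From the two visible lines the normalisation is simple to restate; a self-contained version (the TrimPrefix step is inferred from the unverified constant and may differ in detail from the backend's exact body):

    package main

    import (
        "fmt"
        "strings"
    )

    func cleanSHA1(sha1 string) string {
        out := strings.ToLower(sha1)
        const unverified = "unverified:"
        return strings.TrimPrefix(out, unverified)
    }

    func main() {
        fmt.Println(cleanSHA1("UNVERIFIED:3F786850E387550FDAB836ED7E6DC881DE23001B"))
        // prints: 3f786850e387550fdab836ed7e6dc881de23001b
    }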
@@ -1494,8 +1504,11 @@ func (o *Object) decodeMetaDataFileInfo(info *api.FileInfo) (err error) {
 	return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
 }
 
-// getMetaData gets the metadata from the object unconditionally
-func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
+// getMetaDataListing gets the metadata from the object unconditionally from the listing
+//
+// Note that listing is a class C transaction which costs more than
+// the B transaction used in getMetaData
+func (o *Object) getMetaDataListing(ctx context.Context) (info *api.File, err error) {
 	bucket, bucketPath := o.split()
 	maxSearched := 1
 	var timestamp api.Timestamp
@@ -1528,6 +1541,19 @@ func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
 	return info, nil
 }
 
+// getMetaData gets the metadata from the object unconditionally
+func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
+	// If using versions and have a version suffix, need to list the directory to find the correct versions
+	if o.fs.opt.Versions {
+		timestamp, _ := api.RemoveVersion(o.remote)
+		if !timestamp.IsZero() {
+			return o.getMetaDataListing(ctx)
+		}
+	}
+	_, info, err = o.getOrHead(ctx, "HEAD", nil)
+	return info, err
+}
+
 // readMetaData gets the metadata if it hasn't already been fetched
 //
 // Sets
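Backblaze prices listing calls (class C transactions) higher than single-file reads such as HEAD (class B), which is what the new split optimises for: plain objects are now stat-ed with one HEAD via getOrHead, and only version-suffixed names, which a HEAD by name cannot resolve, fall back to the listing path. Restated as a sketch, with hasVersionSuffix as a hypothetical stand-in for the api.RemoveVersion check above:

    // fetchMetadata prefers the cheap class B HEAD and pays for a
    // class C listing only when a version suffix forces it.
    func fetchMetadata(ctx context.Context, o *Object) (*api.File, error) {
        if o.fs.opt.Versions && hasVersionSuffix(o.remote) { // hypothetical helper
            return o.getMetaDataListing(ctx) // class C: directory listing
        }
        _, info, err := o.getOrHead(ctx, "HEAD", nil) // class B: single HEAD
        return info, err
    }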
@@ -1657,12 +1683,11 @@ func (file *openFile) Close() (err error) {
 // Check it satisfies the interfaces
 var _ io.ReadCloser = &openFile{}
 
-// Open an object for read
-func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
-	fs.FixRangeOption(options, o.size)
+func (o *Object) getOrHead(ctx context.Context, method string, options []fs.OpenOption) (resp *http.Response, info *api.File, err error) {
 	opts := rest.Opts{
-		Method:  "GET",
+		Method:     method,
 		Options:    options,
+		NoResponse: method == "HEAD",
 	}
 
 	// Use downloadUrl from backblaze if downloadUrl is not set
@@ -1680,37 +1705,67 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		bucket, bucketPath := o.split()
 		opts.Path += "/file/" + urlEncode(o.fs.opt.Enc.FromStandardName(bucket)) + "/" + urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath))
 	}
-	var resp *http.Response
 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.Call(ctx, &opts)
 		return o.fs.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to open for download")
+		// 404 for files, 400 for directories
+		if resp != nil && (resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusBadRequest) {
+			return nil, nil, fs.ErrorObjectNotFound
+		}
+		return nil, nil, errors.Wrapf(err, "failed to %s for download", method)
 	}
 
-	// Parse the time out of the headers if possible
-	err = o.parseTimeString(resp.Header.Get(timeHeader))
+	// NB resp may be Open here - don't return err != nil without closing
+
+	// Convert the Headers into an api.File
+	var uploadTimestamp api.Timestamp
+	err = uploadTimestamp.UnmarshalJSON([]byte(resp.Header.Get(timestampHeader)))
+	if err != nil {
+		fs.Debugf(o, "Bad "+timestampHeader+" header: %v", err)
+	}
+	var Info = make(map[string]string)
+	for k, vs := range resp.Header {
+		k = strings.ToLower(k)
+		for _, v := range vs {
+			if strings.HasPrefix(k, headerPrefix) {
+				Info[k[len(headerPrefix):]] = v
+			}
+		}
+	}
+	info = &api.File{
+		ID:              resp.Header.Get(idHeader),
+		Name:            resp.Header.Get(nameHeader),
+		Action:          "upload",
+		Size:            resp.ContentLength,
+		UploadTimestamp: uploadTimestamp,
+		SHA1:            resp.Header.Get(sha1Header),
+		ContentType:     resp.Header.Get("Content-Type"),
+		Info:            Info,
+	}
+	return resp, info, nil
+}
+
+// Open an object for read
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+	fs.FixRangeOption(options, o.size)
+
+	resp, info, err := o.getOrHead(ctx, "GET", options)
+	if err != nil {
+		return nil, err
+	}
+
+	// Don't check length or hash or metadata on partial content
+	if resp.StatusCode == http.StatusPartialContent {
+		return resp.Body, nil
+	}
+
+	err = o.decodeMetaData(info)
 	if err != nil {
 		_ = resp.Body.Close()
 		return nil, err
 	}
-	// Read sha1 from header if it isn't set
-	if o.sha1 == "" {
-		o.sha1 = resp.Header.Get(sha1Header)
-		fs.Debugf(o, "Reading sha1 from header - %q", o.sha1)
-		// if sha1 header is "none" (in big files), then need
-		// to read it from the metadata
-		if o.sha1 == "none" {
-			o.sha1 = resp.Header.Get(sha1InfoHeader)
-			fs.Debugf(o, "Reading sha1 from info - %q", o.sha1)
-		}
-		o.sha1 = cleanSHA1(o.sha1)
-	}
-	// Don't check length or hash on partial content
-	if resp.StatusCode == http.StatusPartialContent {
-		return resp.Body, nil
-	}
 	return newOpenFile(o, resp), nil
 }
 
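One consequence of routing Open through getOrHead is visible above: a ranged request returns 206 Partial Content and the body is handed straight back, skipping the metadata, length and hash checks that could never match a partial body. A hedged usage sketch against the generic rclone fs.Object interface (not b2-specific; fs.RangeOption is the real option type, error handling trimmed):

    package example

    import (
        "context"
        "io/ioutil"

        "github.com/rclone/rclone/fs"
    )

    // readFirstKB fetches the first kilobyte of an object with a ranged Open.
    func readFirstKB(ctx context.Context, o fs.Object) ([]byte, error) {
        in, err := o.Open(ctx, &fs.RangeOption{Start: 0, End: 1023})
        if err != nil {
            return nil, err
        }
        defer in.Close()
        return ioutil.ReadAll(in)
    }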
backend/box/box.go
@@ -84,20 +84,20 @@ func init() {
 		Name:        "box",
 		Description: "Box",
 		NewFs:       NewFs,
-		Config: func(name string, m configmap.Mapper) {
+		Config: func(ctx context.Context, name string, m configmap.Mapper) {
 			jsonFile, ok := m.Get("box_config_file")
 			boxSubType, boxSubTypeOk := m.Get("box_sub_type")
 			boxAccessToken, boxAccessTokenOk := m.Get("access_token")
 			var err error
 			// If using box config.json, use JWT auth
 			if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
-				err = refreshJWTToken(jsonFile, boxSubType, name, m)
+				err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
 				if err != nil {
 					log.Fatalf("Failed to configure token with jwt authentication: %v", err)
 				}
 				// Else, if not using an access token, use oauth2
 			} else if boxAccessToken == "" || !boxAccessTokenOk {
-				err = oauthutil.Config("box", name, m, oauthConfig, nil)
+				err = oauthutil.Config(ctx, "box", name, m, oauthConfig, nil)
 				if err != nil {
 					log.Fatalf("Failed to configure token with oauth authentication: %v", err)
 				}
@@ -153,7 +153,7 @@ func init() {
 	})
 }
 
-func refreshJWTToken(jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
+func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
 	jsonFile = env.ShellExpand(jsonFile)
 	boxConfig, err := getBoxConfig(jsonFile)
 	if err != nil {
@@ -169,7 +169,7 @@ func refreshJWTToken(jsonFile string, boxSubType string, name string, m configma
 	}
 	signingHeaders := getSigningHeaders(boxConfig)
 	queryParams := getQueryParams(boxConfig)
-	client := fshttp.NewClient(fs.Config)
+	client := fshttp.NewClient(ctx)
 	err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
 	return err
 }
@@ -339,7 +339,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
 	}
 
 	found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
-		if item.Name == leaf {
+		if strings.EqualFold(item.Name, leaf) {
 			info = item
 			return true
 		}
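Box is a case-insensitive store, so leaf lookups now use strings.EqualFold rather than ==. EqualFold performs per-rune Unicode case folding without allocating a lowered copy, which differs subtly from comparing ToLower results:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        fmt.Println(strings.EqualFold("Report.PDF", "report.pdf")) // true
        fmt.Println("Report.PDF" == "report.pdf")                  // false: the old exact comparison
        fmt.Println(strings.EqualFold("straße", "STRASSE"))        // false: folding is per-rune, not full case mapping
    }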
@@ -372,8 +372,7 @@ func errorHandler(resp *http.Response) error {
 }
 
 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-	ctx := context.Background()
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
@@ -387,28 +386,29 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 
 	root = parsePath(root)
 
-	client := fshttp.NewClient(fs.Config)
+	client := fshttp.NewClient(ctx)
 	var ts *oauthutil.TokenSource
 	// If not using an accessToken, create an oauth client and tokensource
 	if opt.AccessToken == "" {
-		client, ts, err = oauthutil.NewClient(name, m, oauthConfig)
+		client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to configure Box")
 		}
 	}
 
+	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name:        name,
 		root:        root,
 		opt:         *opt,
 		srv:         rest.NewClient(client).SetRoot(rootURL),
-		pacer:       fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
-		uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
+		pacer:       fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		uploadToken: pacer.NewTokenDispenser(ci.Transfers),
 	}
 	f.features = (&fs.Features{
 		CaseInsensitive:         true,
 		CanHaveEmptyDirectories: true,
-	}).Fill(f)
+	}).Fill(ctx, f)
 	f.srv.SetErrorHandler(errorHandler)
 
 	// If using an accessToken, set the Authorization header
@@ -424,7 +424,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// should do so whether there are uploads pending or not.
 	if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
 		f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
-			err := refreshJWTToken(jsonFile, boxSubType, name, m)
+			err := refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
 			return err
 		})
 		f.tokenRenewer.Start()
@@ -463,7 +463,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		}
 		return nil, err
 	}
-	f.features.Fill(&tempF)
+	f.features.Fill(ctx, &tempF)
 	// XXX: update the old f here instead of returning tempF, since
 	// `features` were already filled with functions having *f as a receiver.
 	// See https://github.com/rclone/rclone/issues/2182
@@ -514,7 +514,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
 	// Find the leaf in pathID
 	found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
-		if item.Name == leaf {
+		if strings.EqualFold(item.Name, leaf) {
 			pathIDOut = item.ID
 			return true
 		}
@@ -791,7 +791,7 @@ func (f *Fs) Precision() time.Duration {
 	return time.Second
 }
 
-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
 //
 // This is stored with the remote path given
 //
@@ -909,7 +909,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 	return usage, nil
 }
 
-// Move src to this remote using server side move operations.
+// Move src to this remote using server-side move operations.
 //
 // This is stored with the remote path given
 //
@@ -945,7 +945,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 }
 
 // DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -1013,7 +1013,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 	return info.SharedLink.URL, err
 }
 
-// deletePermanently permenently deletes a trashed file
+// deletePermanently permanently deletes a trashed file
 func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
 	opts := rest.Opts{
 		Method: "DELETE",

backend/box/upload.go
@@ -1,4 +1,4 @@
-// multpart upload for box
+// multipart upload for box
 
 package box
 
backend/cache/cache.go (33 changes, vendored)
@@ -68,7 +68,7 @@ func init() {
 		CommandHelp: commandHelp,
 		Options: []fs.Option{{
 			Name:     "remote",
-			Help:     "Remote to cache.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
+			Help:     "Remote to cache.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
 			Required: true,
 		}, {
 			Name: "plex_url",
@@ -109,7 +109,7 @@ will need to be cleared or unexpected EOF errors will occur.`,
 			}},
 		}, {
 			Name: "info_age",
-			Help: `How long to cache file structure information (directory listings, file size, times etc).
+			Help: `How long to cache file structure information (directory listings, file size, times, etc.).
 If all write operations are done through the cache then you can safely make
 this value very large as the cache store will also be updated in real time.`,
 			Default: DefCacheInfoAge,
@@ -340,7 +340,7 @@ func parseRootPath(path string) (string, error) {
 }
 
 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
+func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
@@ -362,7 +362,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
 	}
 
 	remotePath := fspath.JoinRootPath(opt.Remote, rootPath)
-	wrappedFs, wrapErr := cache.Get(remotePath)
+	wrappedFs, wrapErr := cache.Get(ctx, remotePath)
 	if wrapErr != nil && wrapErr != fs.ErrorIsFile {
 		return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
 	}
@@ -479,7 +479,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
 		return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
 	}
 	f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
-	f.tempFs, err = cache.Get(f.opt.TempWritePath)
+	f.tempFs, err = cache.Get(ctx, f.opt.TempWritePath)
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
 	}
@@ -506,13 +506,13 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
 	if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
 		pollInterval := make(chan time.Duration, 1)
 		pollInterval <- time.Duration(f.opt.ChunkCleanInterval)
-		doChangeNotify(context.Background(), f.receiveChangeNotify, pollInterval)
+		doChangeNotify(ctx, f.receiveChangeNotify, pollInterval)
 	}
 
 	f.features = (&fs.Features{
 		CanHaveEmptyDirectories: true,
 		DuplicateFiles:          false, // storage doesn't permit this
-	}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
+	}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
 	// override only those features that use a temp fs and it doesn't support them
 	//f.features.ChangeNotify = f.ChangeNotify
 	if f.opt.TempWritePath != "" {
@@ -581,7 +581,7 @@ Some valid examples are:
 "0:10" -> the first ten chunks
 
 Any parameter with a key that starts with "file" can be used to
-specify files to fetch, eg
+specify files to fetch, e.g.
 
     rclone rc cache/fetch chunks=0 file=hello file2=home/goodbye
 
@@ -1236,7 +1236,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 }
 
 // DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
 func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 	fs.Debugf(f, "move dir '%s'/'%s' -> '%s'/'%s'", src.Root(), srcRemote, f.Root(), dstRemote)
 
@@ -1517,7 +1517,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
 	return f.put(ctx, in, src, options, do)
 }
 
-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
 func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	fs.Debugf(f, "copy obj '%s' -> '%s'", src, remote)
 
@@ -1594,7 +1594,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	return co, nil
 }
 
-// Move src to this remote using server side move operations.
+// Move src to this remote using server-side move operations.
 func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	fs.Debugf(f, "moving obj '%s' -> %s", src, remote)
 
@@ -1895,6 +1895,16 @@ func (f *Fs) Disconnect(ctx context.Context) error {
 	return do(ctx)
 }
 
+// Shutdown the backend, closing any background tasks and any
+// cached connections.
+func (f *Fs) Shutdown(ctx context.Context) error {
+	do := f.Fs.Features().Shutdown
+	if do == nil {
+		return nil
+	}
+	return do(ctx)
+}
+
 var commandHelp = []fs.CommandHelp{
 	{
 		Name: "stats",
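Both the cache and chunker backends gain this same boilerplate: a wrapping backend implements Shutdown only by delegating to its wrapped Fs when that one opts in. A minimal sketch of the optional-interface pattern outside rclone (the interface and types here are illustrative):

    package main

    import (
        "context"
        "fmt"
    )

    // Shutdowner mirrors the optional-interface style of rclone features:
    // a backend advertises Shutdown support by implementing it.
    type Shutdowner interface {
        Shutdown(ctx context.Context) error
    }

    type wrapper struct {
        inner interface{} // wrapped backend, which may or may not shut down
    }

    // Shutdown delegates if and only if the wrapped backend supports it.
    func (w *wrapper) Shutdown(ctx context.Context) error {
        if s, ok := w.inner.(Shutdowner); ok {
            return s.Shutdown(ctx)
        }
        return nil // nothing to do for backends without background state
    }

    func main() {
        fmt.Println((&wrapper{inner: struct{}{}}).Shutdown(context.Background())) // <nil>
    }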
@@ -1939,4 +1949,5 @@ var (
 	_ fs.Disconnecter = (*Fs)(nil)
 	_ fs.Commander    = (*Fs)(nil)
 	_ fs.MergeDirser  = (*Fs)(nil)
+	_ fs.Shutdowner   = (*Fs)(nil)
 )
backend/cache/cache_internal_test.go (5 changes, vendored)
@@ -925,14 +925,15 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 	boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
 	require.NoError(t, err)
 
-	fs.Config.LowLevelRetries = 1
+	ci := fs.GetConfig(context.Background())
+	ci.LowLevelRetries = 1
 
 	// Instantiate root
 	if purge {
 		boltDb.PurgeTempUploads()
 		_ = os.RemoveAll(path.Join(runInstance.tmpUploadDir, id))
 	}
-	f, err := cache.NewFs(remote, id, m)
+	f, err := cache.NewFs(context.Background(), remote, id, m)
 	require.NoError(t, err)
 	cfs, err := r.getCacheFs(f)
 	require.NoError(t, err)
backend/chunker/chunker.go
@@ -42,7 +42,7 @@ import (
 // used mostly for consistency checks (lazily for performance reasons).
 // Other formats can be developed that use an external meta store
 // free of these limitations, but this needs some support from
-// rclone core (eg. metadata store interfaces).
+// rclone core (e.g. metadata store interfaces).
 //
 // The following types of chunks are supported:
 // data and control, active and temporary.
@@ -97,7 +97,8 @@ var (
 //
 // And still chunker's primary function is to chunk large files
 // rather than serve as a generic metadata container.
-const maxMetadataSize = 255
+const maxMetadataSize = 1023
+const maxMetadataSizeWritten = 255
 
 // Current/highest supported metadata format.
 const metadataVersion = 1
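Splitting the constant separates what chunker will read from what it will write: anything up to 1023 bytes is still probed as possible metadata, leaving headroom for future formats, while metadata produced by this version must stay under the old 255-byte bound, enforced again in marshalSimpleJSON further down. A compressed restatement of the two checks, illustrative only:

    package example

    import "errors"

    const (
        maxMetadataSize        = 1023 // accept this much when probing for metadata
        maxMetadataSizeWritten = 255  // never emit more than this ourselves
    )

    // mayBeMetadata gates the read path: larger objects are assumed to be data.
    func mayBeMetadata(size int64) bool { return size <= maxMetadataSize }

    // validateWritten gates the write path, mirroring marshalSimpleJSON below.
    func validateWritten(data []byte) error {
        if len(data) >= maxMetadataSizeWritten {
            return errors.New("metadata can't be this big, please report to rclone developers")
        }
        return nil
    }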
@@ -142,7 +143,7 @@ func init() {
 			Name:     "remote",
 			Required: true,
 			Help: `Remote to chunk/unchunk.
-Normally should contain a ':' and a path, eg "myremote:path/to/dir",
+Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
 "myremote:bucket" or maybe "myremote:" (not recommended).`,
 		}, {
 			Name: "chunk_size",
@@ -152,6 +153,7 @@ Normally should contain a ':' and a path, eg "myremote:path/to/dir",
 		}, {
 			Name:     "name_format",
 			Advanced: true,
+			Hide:     fs.OptionHideCommandLine,
 			Default:  `*.rclone_chunk.###`,
 			Help: `String format of chunk file names.
 The two placeholders are: base file name (*) and chunk number (#...).
@@ -162,12 +164,14 @@ Possible chunk files are ignored if their name does not match given format.`,
 		}, {
 			Name:     "start_from",
 			Advanced: true,
+			Hide:     fs.OptionHideCommandLine,
 			Default:  1,
 			Help: `Minimum valid chunk number. Usually 0 or 1.
 By default chunk numbers start from 1.`,
 		}, {
 			Name:     "meta_format",
 			Advanced: true,
+			Hide:     fs.OptionHideCommandLine,
 			Default:  "simplejson",
 			Help: `Format of the metadata object or "none". By default "simplejson".
 Metadata is a small JSON file named after the composite file.`,
@@ -225,7 +229,7 @@ It has the following fields: ver, size, nchunks, md5, sha1.`,
 }
 
 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
+func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
@@ -250,12 +254,12 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 	}
 	// Look for a file first
 	remotePath := fspath.JoinRootPath(basePath, rpath)
-	baseFs, err := cache.Get(baseName + remotePath)
+	baseFs, err := cache.Get(ctx, baseName+remotePath)
 	if err != fs.ErrorIsFile && err != nil {
 		return nil, errors.Wrapf(err, "failed to make remote %q to wrap", baseName+remotePath)
 	}
 	if !operations.CanServerSideMove(baseFs) {
-		return nil, errors.New("can't use chunker on a backend which doesn't support server side move or copy")
+		return nil, errors.New("can't use chunker on a backend which doesn't support server-side move or copy")
 	}
 
 	f := &Fs{
@@ -278,7 +282,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 	// (yet can't satisfy fstest.CheckListing, will ignore)
 	if err == nil && !f.useMeta && strings.Contains(rpath, "/") {
 		firstChunkPath := f.makeChunkName(remotePath, 0, "", "")
-		_, testErr := cache.Get(baseName + firstChunkPath)
+		_, testErr := cache.Get(ctx, baseName+firstChunkPath)
 		if testErr == fs.ErrorIsFile {
 			err = testErr
 		}
@@ -291,12 +295,12 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 	f.features = (&fs.Features{
 		CaseInsensitive:         true,
 		DuplicateFiles:          true,
-		ReadMimeType:            true,
+		ReadMimeType:            false, // Object.MimeType not supported
 		WriteMimeType:           true,
 		BucketBased:             true,
 		CanHaveEmptyDirectories: true,
 		ServerSideAcrossConfigs: true,
-	}).Fill(f).Mask(baseFs).WrapsFs(f, baseFs)
+	}).Fill(ctx, f).Mask(ctx, baseFs).WrapsFs(f, baseFs)
 
 	f.features.Disable("ListR") // Recursive listing may cause chunker skip files
 
@@ -466,7 +470,7 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
 // filePath can be name, relative or absolute path of main file.
 //
 // chunkNo must be a zero based index of data chunk.
-// Negative chunkNo eg. -1 indicates a control chunk.
+// Negative chunkNo e.g. -1 indicates a control chunk.
 // ctrlType is type of control chunk (must be valid).
 // ctrlType must be "" for data chunks.
 //
@@ -725,6 +729,9 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
 				fs.Infof(f, "ignore non-data chunk %q", remote)
 			}
 			// need to read metadata to ensure actual object type
+			// no need to read if metaobject is too big or absent,
+			// use the fact that before calling validate()
+			// the `size` field caches metaobject size, if any
 			if f.useMeta && mainObject != nil && mainObject.size <= maxMetadataSize {
 				mainObject.unsure = true
 			}
@@ -802,9 +809,10 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
 	}
 
 	var (
 		o        *Object
 		baseObj  fs.Object
 		err      error
+		sameMain bool
 	)
 
 	if f.useMeta {
@@ -818,6 +826,7 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
 		// as a hard limit. Anything larger than that is treated as a
 		// non-chunked file without even checking its contents, so it's
 		// paramount to prevent metadata from exceeding the maximum size.
+		// Anything smaller is additionally checked for format.
 		o = f.newObject("", baseObj, nil)
 		if o.size > maxMetadataSize {
 			return o, nil
@@ -847,18 +856,27 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
 		return nil, errors.Wrap(err, "can't detect composite file")
 	}
 
+	caseInsensitive := f.features.CaseInsensitive
 	for _, dirOrObject := range entries {
 		entry, ok := dirOrObject.(fs.Object)
 		if !ok {
 			continue
 		}
 		entryRemote := entry.Remote()
-		if !strings.Contains(entryRemote, remote) {
+		if !caseInsensitive && !strings.Contains(entryRemote, remote) {
 			continue // bypass regexp to save cpu
 		}
 		mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(entryRemote)
-		if mainRemote == "" || mainRemote != remote {
-			continue // skip non-conforming chunks
+		if mainRemote == "" {
+			continue // skip non-chunks
+		}
+		if caseInsensitive {
+			sameMain = strings.EqualFold(mainRemote, remote)
+		} else {
+			sameMain = mainRemote == remote
+		}
+		if !sameMain {
+			continue // skip alien chunks
 		}
 		if ctrlType != "" || xactID != "" {
 			if f.useMeta {
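Under the default name format *.rclone_chunk.### with start_from 1, the names this loop classifies decompose as in the toy parser below. It is a sketch for plain data chunks only; the real parseChunkName is generated from the configurable name_format and also recognises control and temporary chunks:

    package main

    import (
        "fmt"
        "regexp"
        "strconv"
        "strings"
    )

    // Matches the default data chunk format `*.rclone_chunk.###` only.
    var chunkRe = regexp.MustCompile(`^(.+)\.rclone_chunk\.([0-9]{3,})$`)

    func parseDataChunk(name string, startFrom int) (mainRemote string, chunkNo int, ok bool) {
        m := chunkRe.FindStringSubmatch(name)
        if m == nil {
            return "", -1, false
        }
        n, _ := strconv.Atoi(m[2])
        return m[1], n - startFrom, true // chunkNo is zero-based internally
    }

    func main() {
        mainRemote, no, _ := parseDataChunk("video.avi.rclone_chunk.002", 1)
        fmt.Println(mainRemote, no) // video.avi 1
        // On a case-insensitive base, the main name must be compared folded:
        fmt.Println(strings.EqualFold(mainRemote, "Video.AVI")) // true
    }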
@@ -906,11 +924,22 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
 	return o, nil
 }
 
+// readMetadata reads composite object metadata and caches results,
+// in case of critical errors metadata is not cached.
+// Returns ErrMetaUnknown if an unsupported metadata format is detected.
+// If object is not chunked but marked by List or NewObject for recheck,
+// readMetadata will attempt to parse object as composite with fallback
+// to non-chunked representation if the attempt fails.
 func (o *Object) readMetadata(ctx context.Context) error {
+	// return quickly if metadata is absent or has been already cached
+	if !o.f.useMeta {
+		o.isFull = true
+	}
 	if o.isFull {
 		return nil
 	}
-	if !o.f.useMeta || (!o.isComposite() && !o.unsure) {
+	if !o.isComposite() && !o.unsure {
+		// this for sure is a non-chunked standalone file
 		o.isFull = true
 		return nil
 	}
@@ -928,6 +957,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
 		return ErrMetaTooBig
 	}
 
+	// size is within limits, perform consistency checks
 	reader, err := metaObject.Open(ctx)
 	if err != nil {
 		return err
@@ -965,7 +995,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
 		o.sha1 = metaInfo.sha1
 	}
 
-	o.isFull = true
+	o.isFull = true // cache results
 	return nil
 }
 
@@ -974,11 +1004,14 @@ func (f *Fs) put(
 	ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options []fs.OpenOption,
 	basePut putFn, action string, target fs.Object) (obj fs.Object, err error) {
 
+	// Perform consistency checks
 	if err := f.forbidChunk(src, remote); err != nil {
 		return nil, errors.Wrap(err, action+" refused")
 	}
 	if target == nil {
 		// Get target object with a quick directory scan
+		// skip metadata check if target object does not exist.
+		// ignore not-chunked objects, skip chunk size checks.
 		if obj, err := f.scanObject(ctx, remote, true); err == nil {
 			target = obj
 		}
@@ -991,6 +1024,7 @@ func (f *Fs) put(
 		}
 	}
 
+	// Prepare to upload
 	c := f.newChunkingReader(src)
 	wrapIn := c.wrapStream(ctx, in, src)
 
@@ -1197,6 +1231,12 @@ func (c *chunkingReader) wrapStream(ctx context.Context, in io.Reader, src fs.Ob
 
 	switch {
 	case c.fs.useMD5:
+		srcObj := fs.UnWrapObjectInfo(src)
+		if srcObj != nil && srcObj.Fs().Features().SlowHash {
+			fs.Debugf(src, "skip slow MD5 on source file, hashing in-transit")
+			c.hasher = md5.New()
+			break
+		}
 		if c.md5, _ = src.Hash(ctx, hash.MD5); c.md5 == "" {
 			if c.fs.hashFallback {
 				c.sha1, _ = src.Hash(ctx, hash.SHA1)
@@ -1205,6 +1245,12 @@ func (c *chunkingReader) wrapStream(ctx context.Context, in io.Reader, src fs.Ob
 			}
 		}
 	case c.fs.useSHA1:
+		srcObj := fs.UnWrapObjectInfo(src)
+		if srcObj != nil && srcObj.Fs().Features().SlowHash {
+			fs.Debugf(src, "skip slow SHA1 on source file, hashing in-transit")
+			c.hasher = sha1.New()
+			break
+		}
 		if c.sha1, _ = src.Hash(ctx, hash.SHA1); c.sha1 == "" {
 			if c.fs.hashFallback {
 				c.md5, _ = src.Hash(ctx, hash.MD5)
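The SlowHash escape hatch exists for sources whose Features().SlowHash flag says a checksum can only be produced by re-reading the whole file. Rather than paying that extra read pass, c.hasher computes the digest while the data streams through the chunking reader. The core of that idea in stdlib terms, as a minimal sketch:

    package main

    import (
        "crypto/md5"
        "encoding/hex"
        "fmt"
        "io"
        "io/ioutil"
        "strings"
    )

    func main() {
        src := strings.NewReader("some file contents")
        hasher := md5.New()
        // Every byte the uploader consumes also passes through the hasher,
        // so the checksum costs no extra read pass over a slow-hash source.
        in := io.TeeReader(src, hasher)
        uploaded, _ := ioutil.ReadAll(in) // stands in for the chunked upload
        fmt.Println(len(uploaded), hex.EncodeToString(hasher.Sum(nil)))
    }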
@@ -1238,7 +1284,7 @@ func (c *chunkingReader) Read(buf []byte) (bytesRead int, err error) {
 	if c.chunkLimit <= 0 {
 		// Chunk complete - switch to next one.
 		// Note #1:
-		// We might not get here because some remotes (eg. box multi-uploader)
+		// We might not get here because some remotes (e.g. box multi-uploader)
 		// read the specified size exactly and skip the concluding EOF Read.
 		// Then a check in the put loop will kick in.
 		// Note #2:
@@ -1277,7 +1323,7 @@ func (c *chunkingReader) accountBytes(bytesRead int64) {
 	}
 }
 
-// dummyRead updates accounting, hashsums etc by simulating reads
+// dummyRead updates accounting, hashsums, etc. by simulating reads
 func (c *chunkingReader) dummyRead(in io.Reader, size int64) error {
 	if c.hasher == nil && c.readCount+size > maxMetadataSize {
 		c.accountBytes(size)
@@ -1429,7 +1475,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 // However, if rclone dies unexpectedly, it can leave hidden temporary
 // chunks, which cannot be discovered using the `list` command.
 // Remove does not try to search for such chunks or to delete them.
-// Sometimes this can lead to strange results eg. when `list` shows that
+// Sometimes this can lead to strange results e.g. when `list` shows that
 // directory is empty but `rmdir` refuses to remove it because on the
 // level of wrapped remote it's actually *not* empty.
 // As a workaround users can use `purge` to forcibly remove it.
@@ -1581,6 +1627,8 @@ func (f *Fs) okForServerSide(ctx context.Context, src fs.Object, opName string)
 		diff = "chunk sizes"
 	case f.opt.NameFormat != obj.f.opt.NameFormat:
 		diff = "chunk name formats"
+	case f.opt.StartFrom != obj.f.opt.StartFrom:
+		diff = "chunk numbering"
 	case f.opt.MetaFormat != obj.f.opt.MetaFormat:
 		diff = "meta formats"
 	}
@@ -1624,7 +1672,7 @@ func (f *Fs) okForServerSide(ctx context.Context, src fs.Object, opName string)
 	return
 }
 
-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
 //
 // This is stored with the remote path given
 //
@@ -1645,7 +1693,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	return f.copyOrMove(ctx, obj, remote, baseCopy, md5, sha1, "copy")
 }
 
-// Move src to this remote using server side move operations.
+// Move src to this remote using server-side move operations.
 //
 // This is stored with the remote path given
 //
@@ -1690,7 +1738,7 @@ func (f *Fs) baseMove(ctx context.Context, src fs.Object, remote string, delMode
 }
 
 // DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -1770,6 +1818,16 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
 	do(ctx, wrappedNotifyFunc, pollIntervalChan)
 }
 
+// Shutdown the backend, closing any background tasks and any
+// cached connections.
+func (f *Fs) Shutdown(ctx context.Context) error {
+	do := f.base.Features().Shutdown
+	if do == nil {
+		return nil
+	}
+	return do(ctx)
+}
+
 // Object represents a composite file wrapping one or more data chunks
 type Object struct {
 	remote string
@@ -1799,6 +1857,9 @@ func (o *Object) addChunk(chunk fs.Object, chunkNo int) error {
 		copy(newChunks, o.chunks)
 		o.chunks = newChunks
 	}
+	if o.chunks[chunkNo] != nil {
+		return fmt.Errorf("duplicate chunk number %d", chunkNo+o.f.opt.StartFrom)
+	}
 	o.chunks[chunkNo] = chunk
 	return nil
 }
@@ -2226,15 +2287,17 @@ func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1 s
 		SHA1: sha1,
 	}
 	data, err := json.Marshal(&metadata)
-	if err == nil && data != nil && len(data) >= maxMetadataSize {
+	if err == nil && data != nil && len(data) >= maxMetadataSizeWritten {
 		// be a nitpicker, never produce something you can't consume
 		return nil, errors.New("metadata can't be this big, please report to rclone developers")
 	}
 	return data, err
 }
 
-// unmarshalSimpleJSON
+// unmarshalSimpleJSON parses metadata.
 //
+// In case of errors returns a flag telling whether input has been
+// produced by incompatible version of rclone vs wasn't metadata at all.
 // Only metadata format version 1 is supported atm.
 // Future releases will transparently migrate older metadata objects.
 // New format will have a higher version number and cannot be correctly
@@ -2244,7 +2307,7 @@ func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1 s
 func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) {
 	// Be strict about JSON format
 	// to reduce possibility that a random small file resembles metadata.
-	if data != nil && len(data) > maxMetadataSize {
+	if data != nil && len(data) > maxMetadataSizeWritten {
 		return nil, false, ErrMetaTooBig
 	}
 	if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
@@ -2346,6 +2409,7 @@ var (
 	_ fs.Abouter         = (*Fs)(nil)
 	_ fs.Wrapper         = (*Fs)(nil)
 	_ fs.ChangeNotifier  = (*Fs)(nil)
+	_ fs.Shutdowner      = (*Fs)(nil)
 	_ fs.ObjectInfo      = (*ObjectInfo)(nil)
 	_ fs.Object          = (*Object)(nil)
 	_ fs.ObjectUnWrapper = (*Object)(nil)
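For reference, a well-formed version-1 simplejson metadata object for a nine-byte file in three chunks would look roughly like the line below, with the field set as documented in the meta_format option help (ver, size, nchunks, md5, sha1; the hash here is illustrative):

    {"ver":1,"size":9,"nchunks":3,"sha1":"0707f2970043f9f7c22029482db27733deaec029"}

At well under 255 bytes this passes both the read-side probe limit and the stricter write-side limit discussed earlier.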
backend/chunker/chunker_internal_test.go
@@ -13,6 +13,7 @@ import (
 
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/object"
 	"github.com/rclone/rclone/fs/operations"
 	"github.com/rclone/rclone/fstest"
 	"github.com/rclone/rclone/fstest/fstests"
@@ -663,6 +664,80 @@ func testMetadataInput(t *testing.T, f *Fs) {
     runSubtest(futureMeta, "future")
 }
 
+// test that chunker refuses to change on objects with future/unknown metadata
+func testFutureProof(t *testing.T, f *Fs) {
+    if f.opt.MetaFormat == "none" {
+        t.Skip("this test requires metadata support")
+    }
+
+    saveOpt := f.opt
+    ctx := context.Background()
+    f.opt.FailHard = true
+    const dir = "future"
+    const file = dir + "/test"
+    defer func() {
+        f.opt.FailHard = false
+        _ = operations.Purge(ctx, f.base, dir)
+        f.opt = saveOpt
+    }()
+
+    modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
+    putPart := func(name string, part int, data, msg string) {
+        if part > 0 {
+            name = f.makeChunkName(name, part-1, "", "")
+        }
+        item := fstest.Item{Path: name, ModTime: modTime}
+        _, obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true)
+        assert.NotNil(t, obj, msg)
+    }
+
+    // simulate chunked object from future
+    meta := `{"ver":999,"nchunks":3,"size":9,"garbage":"litter","sha1":"0707f2970043f9f7c22029482db27733deaec029"}`
+    putPart(file, 0, meta, "metaobject")
+    putPart(file, 1, "abc", "chunk1")
+    putPart(file, 2, "def", "chunk2")
+    putPart(file, 3, "ghi", "chunk3")
+
+    // List should succeed
+    ls, err := f.List(ctx, dir)
+    assert.NoError(t, err)
+    assert.Equal(t, 1, len(ls))
+    assert.Equal(t, int64(9), ls[0].Size())
+
+    // NewObject should succeed
+    obj, err := f.NewObject(ctx, file)
+    assert.NoError(t, err)
+    assert.Equal(t, file, obj.Remote())
+    assert.Equal(t, int64(9), obj.Size())
+
+    // Hash must fail
+    _, err = obj.Hash(ctx, hash.SHA1)
+    assert.Equal(t, ErrMetaUnknown, err)
+
+    // Move must fail
+    mobj, err := operations.Move(ctx, f, nil, file+"2", obj)
+    assert.Nil(t, mobj)
+    assert.Error(t, err)
+    if err != nil {
+        assert.Contains(t, err.Error(), "please upgrade rclone")
+    }
+
+    // Put must fail
+    oi := object.NewStaticObjectInfo(file, modTime, 3, true, nil, nil)
+    buf := bytes.NewBufferString("abc")
+    _, err = f.Put(ctx, buf, oi)
+    assert.Error(t, err)
+
+    // Rcat must fail
+    in := ioutil.NopCloser(bytes.NewBufferString("abc"))
+    robj, err := operations.Rcat(ctx, f, file, in, modTime)
+    assert.Nil(t, robj)
+    assert.NotNil(t, err)
+    if err != nil {
+        assert.Contains(t, err.Error(), "please upgrade rclone")
+    }
+}
+
 // InternalTest dispatches all internal tests
 func (f *Fs) InternalTest(t *testing.T) {
     t.Run("PutLarge", func(t *testing.T) {
@@ -686,6 +761,9 @@ func (f *Fs) InternalTest(t *testing.T) {
     t.Run("MetadataInput", func(t *testing.T) {
         testMetadataInput(t, f)
     })
+    t.Run("FutureProof", func(t *testing.T) {
+        testFutureProof(t, f)
+    })
 }
 
 var _ fstests.InternalTester = (*Fs)(nil)

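The new test plants a metadata object with "ver":999 and checks that read-only operations (List, NewObject) still work while anything that would rewrite the object fails with a "please upgrade rclone" error. A minimal sketch of that version gate, under the assumption (stated by the comments in the hunk above) that only format version 1 is currently understood:

package main

import (
    "encoding/json"
    "errors"
    "fmt"
)

const currentMetaVersion = 1

var errMetaUnknown = errors.New("unsupported metadata, please upgrade rclone")

// checkVersion parses just the version field and refuses to
// interpret metadata written by a future rclone.
func checkVersion(data []byte) error {
    var probe struct {
        Version int `json:"ver"`
    }
    if err := json.Unmarshal(data, &probe); err != nil {
        return err // not metadata at all
    }
    if probe.Version > currentMetaVersion {
        return errMetaUnknown
    }
    return nil
}

func main() {
    future := []byte(`{"ver":999,"nchunks":3,"size":9}`)
    fmt.Println(checkVersion(future)) // unsupported metadata, please upgrade rclone
}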
@@ -15,10 +15,10 @@ import (
 
 // Command line flags
 var (
-    // Invalid characters are not supported by some remotes, eg. Mailru.
+    // Invalid characters are not supported by some remotes, e.g. Mailru.
     // We enable testing with invalid characters when -remote is not set, so
     // chunker overlays a local directory, but invalid characters are disabled
-    // by default when -remote is set, eg. when test_all runs backend tests.
+    // by default when -remote is set, e.g. when test_all runs backend tests.
     // You can still test with invalid characters using the below flag.
     UseBadChars = flag.Bool("bad-chars", false, "Set to test bad characters in file names when -remote is set")
 )

backend/compress/.gitignore (new file, vendored, 1 line)
@@ -0,0 +1 @@
+test

backend/compress/compress.go (new file, 1416 lines)
File diff suppressed because it is too large.

backend/compress/compress_test.go (new file, 65 lines)
@@ -0,0 +1,65 @@
+// Test Compress filesystem interface
+package compress
+
+import (
+    "os"
+    "path/filepath"
+    "testing"
+
+    _ "github.com/rclone/rclone/backend/drive"
+    _ "github.com/rclone/rclone/backend/local"
+    _ "github.com/rclone/rclone/backend/s3"
+    _ "github.com/rclone/rclone/backend/swift"
+    "github.com/rclone/rclone/fstest"
+    "github.com/rclone/rclone/fstest/fstests"
+)
+
+// TestIntegration runs integration tests against the remote
+func TestIntegration(t *testing.T) {
+    opt := fstests.Opt{
+        RemoteName: *fstest.RemoteName,
+        NilObject:  (*Object)(nil),
+        UnimplementableFsMethods: []string{
+            "OpenWriterAt",
+            "MergeDirs",
+            "DirCacheFlush",
+            "PutUnchecked",
+            "PutStream",
+            "UserInfo",
+            "Disconnect",
+        },
+        TiersToTest:                  []string{"STANDARD", "STANDARD_IA"},
+        UnimplementableObjectMethods: []string{}}
+    fstests.Run(t, &opt)
+}
+
+// TestRemoteGzip tests GZIP compression
+func TestRemoteGzip(t *testing.T) {
+    if *fstest.RemoteName != "" {
+        t.Skip("Skipping as -remote set")
+    }
+    tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip")
+    name := "TestCompressGzip"
+    fstests.Run(t, &fstests.Opt{
+        RemoteName: name + ":",
+        NilObject:  (*Object)(nil),
+        UnimplementableFsMethods: []string{
+            "OpenWriterAt",
+            "MergeDirs",
+            "DirCacheFlush",
+            "PutUnchecked",
+            "PutStream",
+            "UserInfo",
+            "Disconnect",
+        },
+        UnimplementableObjectMethods: []string{
+            "GetTier",
+            "SetTier",
+        },
+        ExtraConfig: []fstests.ExtraConfigItem{
+            {Name: name, Key: "type", Value: "compress"},
+            {Name: name, Key: "remote", Value: tempdir},
+            {Name: name, Key: "compression_mode", Value: "gzip"},
+        },
+    })
+}

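The compress.go diff itself is suppressed above, but the test configures compression_mode gzip, so the backend's data path is essentially a gzip round trip. A minimal standard-library sketch of that round trip (not the backend's actual implementation, which also tracks metadata such as the uncompressed size):

package main

import (
    "bytes"
    "compress/gzip"
    "fmt"
    "io/ioutil"
)

func main() {
    // Compress on the way to the wrapped remote ...
    var buf bytes.Buffer
    zw := gzip.NewWriter(&buf)
    _, _ = zw.Write([]byte("hello rclone"))
    _ = zw.Close()

    // ... and decompress on the way back.
    zr, err := gzip.NewReader(&buf)
    if err != nil {
        panic(err)
    }
    out, _ := ioutil.ReadAll(zr)
    fmt.Printf("%s\n", out) // hello rclone
}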
@@ -147,7 +147,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
 // If salt is "" we use a fixed salt just to make attackers lives
 // slightly harder than using no salt.
 //
-// Note that empty passsword makes all 0x00 keys which is used in the
+// Note that empty password makes all 0x00 keys which is used in the
 // tests.
 func (c *Cipher) Key(password, salt string) (err error) {
     const keySize = len(c.dataKey) + len(c.nameKey) + len(c.nameTweak)
@@ -633,11 +633,8 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
     }
     // possibly err != nil here, but we will process the
     // data and the next call to ReadFull will return 0, err
-    // Write nonce to start of block
-    copy(fh.buf, fh.nonce[:])
     // Encrypt the block using the nonce
-    block := fh.buf
-    secretbox.Seal(block[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
+    secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
     fh.bufIndex = 0
     fh.bufSize = blockHeaderSize + n
     fh.nonce.increment()
@@ -782,8 +779,7 @@ func (fh *decrypter) fillBuffer() (err error) {
         return ErrorEncryptedFileBadHeader
     }
     // Decrypt the block using the nonce
-    block := fh.buf
-    _, ok := secretbox.Open(block[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
+    _, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
     if !ok {
         if err != nil {
             return err // return pending error as it is likely more accurate

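Both hunks rely on the same property of nacl/secretbox: Seal and Open append to the slice they are given, so passing fh.buf[:0] reuses the buffer's backing array directly and the temporary block alias (and the redundant nonce copy) can go. A minimal sketch of an in-place seal/open round trip, with toy key and nonce values:

package main

import (
    "fmt"

    "golang.org/x/crypto/nacl/secretbox"
)

func main() {
    var key [32]byte   // toy key; real code derives this from the password
    var nonce [24]byte // toy nonce; real code increments it per block

    msg := []byte("block of plaintext")

    // Seal appends to buf[:0], reusing buf's backing array.
    buf := make([]byte, 0, len(msg)+secretbox.Overhead)
    box := secretbox.Seal(buf[:0], msg, &nonce, &key)

    // Open does the same on the read side.
    plain, ok := secretbox.Open(nil, box, &nonce, &key)
    fmt.Println(string(plain), ok) // block of plaintext true
}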
@@ -30,7 +30,7 @@ func init() {
     CommandHelp: commandHelp,
     Options: []fs.Option{{
         Name: "remote",
-        Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
+        Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
         Required: true,
     }, {
         Name: "filename_encryption",
@@ -76,7 +76,7 @@ NB If filename_encryption is "off" then this option will do nothing.`,
     }, {
         Name:    "server_side_across_configs",
         Default: false,
-        Help: `Allow server side operations (eg copy) to work across different crypt configs.
+        Help: `Allow server-side operations (e.g. copy) to work across different crypt configs.
 
 Normally this option is not what you want, but if you have two crypts
 pointing to the same backend you can use it.
@@ -144,7 +144,7 @@ func NewCipher(m configmap.Mapper) (*Cipher, error) {
 }
 
 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
+func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) {
     // Parse config into Options struct
     opt := new(Options)
     err := configstruct.Set(m, opt)
@@ -159,21 +159,21 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
     if strings.HasPrefix(remote, name+":") {
         return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
     }
-    // Make sure to remove trailing . reffering to the current dir
+    // Make sure to remove trailing . referring to the current dir
     if path.Base(rpath) == "." {
         rpath = strings.TrimSuffix(rpath, ".")
     }
     // Look for a file first
     var wrappedFs fs.Fs
     if rpath == "" {
-        wrappedFs, err = cache.Get(remote)
+        wrappedFs, err = cache.Get(ctx, remote)
     } else {
         remotePath := fspath.JoinRootPath(remote, cipher.EncryptFileName(rpath))
-        wrappedFs, err = cache.Get(remotePath)
+        wrappedFs, err = cache.Get(ctx, remotePath)
         // if that didn't produce a file, look for a directory
         if err != fs.ErrorIsFile {
             remotePath = fspath.JoinRootPath(remote, cipher.EncryptDirName(rpath))
-            wrappedFs, err = cache.Get(remotePath)
+            wrappedFs, err = cache.Get(ctx, remotePath)
         }
     }
     if err != fs.ErrorIsFile && err != nil {
@@ -199,7 +199,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
         SetTier:                 true,
         GetTier:                 true,
         ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
-    }).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
+    }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
 
     return f, err
 }
@@ -444,7 +444,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
     return do(ctx, f.cipher.EncryptDirName(dir))
 }
 
-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
 //
 // This is stored with the remote path given
 //
@@ -469,7 +469,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
     return f.newObject(oResult), nil
 }
 
-// Move src to this remote using server side move operations.
+// Move src to this remote using server-side move operations.
 //
 // This is stored with the remote path given
 //
@@ -495,7 +495,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 }
 
 // DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -917,6 +917,16 @@ func (f *Fs) Disconnect(ctx context.Context) error {
     return do(ctx)
 }
 
+// Shutdown the backend, closing any background tasks and any
+// cached connections.
+func (f *Fs) Shutdown(ctx context.Context) error {
+    do := f.Fs.Features().Shutdown
+    if do == nil {
+        return nil
+    }
+    return do(ctx)
+}
+
 // ObjectInfo describes a wrapped fs.ObjectInfo for being the source
 //
 // This encrypts the remote name and adjusts the size
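The crypt backend (like chunker above) is a wrapper, so Shutdown only forwards to the wrapped backend's optional Shutdown feature when one exists; the nil check keeps wrapping cheap over remotes that have nothing to tear down. A minimal sketch of that optional-feature pattern, with hypothetical Features/Wrapper types standing in for rclone's fs.Features and the wrapping Fs:

package main

import (
    "context"
    "fmt"
)

// Features is a hypothetical stand-in for rclone's optional-feature
// struct: a nil function pointer means "not supported".
type Features struct {
    Shutdown func(ctx context.Context) error
}

// Wrapper forwards Shutdown only if the wrapped backend provides it.
type Wrapper struct {
    wrapped *Features
}

func (w *Wrapper) Shutdown(ctx context.Context) error {
    do := w.wrapped.Shutdown
    if do == nil {
        return nil // nothing to tear down
    }
    return do(ctx)
}

func main() {
    w := &Wrapper{wrapped: &Features{}}
    fmt.Println(w.Shutdown(context.Background())) // <nil>
}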
@@ -1025,6 +1035,7 @@ var (
     _ fs.PublicLinker    = (*Fs)(nil)
     _ fs.UserInfoer      = (*Fs)(nil)
     _ fs.Disconnecter    = (*Fs)(nil)
+    _ fs.Shutdowner      = (*Fs)(nil)
     _ fs.ObjectInfo      = (*ObjectInfo)(nil)
     _ fs.Object          = (*Object)(nil)
     _ fs.ObjectUnWrapper = (*Object)(nil)

@@ -33,7 +33,7 @@ func (o testWrapper) UnWrap() fs.Object {
 // Create a temporary local fs to upload things from
 
 func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
-    localFs, err := fs.TemporaryLocalFs()
+    localFs, err := fs.TemporaryLocalFs(context.Background())
     require.NoError(t, err)
     cleanup = func() {
         require.NoError(t, localFs.Rmdir(context.Background(), ""))
@@ -87,7 +87,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
     }
 
     // wrap the object in a crypt for upload using the nonce we
-    // saved from the encryptor
+    // saved from the encrypter
     src := f.newObjectInfo(oi, nonce)
 
     // Test ObjectInfo methods

@@ -35,6 +35,7 @@ import (
     "github.com/rclone/rclone/fs/config/obscure"
     "github.com/rclone/rclone/fs/fserrors"
     "github.com/rclone/rclone/fs/fshttp"
+    "github.com/rclone/rclone/fs/fspath"
    "github.com/rclone/rclone/fs/hash"
     "github.com/rclone/rclone/fs/operations"
     "github.com/rclone/rclone/fs/walk"
@@ -72,6 +73,7 @@ const (
     partialFields    = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks"
     listRGrouping    = 50   // number of IDs to search at once when using ListR
     listRInputBuffer = 1000 // size of input buffer when using ListR
+    defaultXDGIcon   = "text-html"
 )
 
 // Globals
@@ -127,6 +129,12 @@ var (
     _mimeTypeCustomTransform = map[string]string{
         "application/vnd.google-apps.script+json": "application/json",
     }
+    _mimeTypeToXDGLinkIcons = map[string]string{
+        "application/vnd.google-apps.document":     "x-office-document",
+        "application/vnd.google-apps.drawing":      "x-office-drawing",
+        "application/vnd.google-apps.presentation": "x-office-presentation",
+        "application/vnd.google-apps.spreadsheet":  "x-office-spreadsheet",
+    }
     fetchFormatsOnce sync.Once           // make sure we fetch the export/import formats only once
     _exportFormats   map[string][]string // allowed export MIME type conversions
     _importFormats   map[string][]string // allowed import MIME type conversions
@@ -175,8 +183,7 @@ func init() {
     Description: "Google Drive",
     NewFs:       NewFs,
     CommandHelp: commandHelp,
-    Config: func(name string, m configmap.Mapper) {
-        ctx := context.TODO()
+    Config: func(ctx context.Context, name string, m configmap.Mapper) {
         // Parse config into Options struct
         opt := new(Options)
         err := configstruct.Set(m, opt)
@@ -193,7 +200,7 @@ func init() {
         }
 
         if opt.ServiceAccountFile == "" {
-            err = oauthutil.Config("drive", name, m, driveConfig, nil)
+            err = oauthutil.Config(ctx, "drive", name, m, driveConfig, nil)
             if err != nil {
                 log.Fatalf("Failed to configure token: %v", err)
             }
@@ -283,8 +290,8 @@ Instructs rclone to operate on your "Shared with me" folder (where
 Google Drive lets you access the files and folders others have shared
 with you).
 
-This works both with the "list" (lsd, lsl, etc) and the "copy"
-commands (copy, sync, etc), and with all other commands too.`,
+This works both with the "list" (lsd, lsl, etc.) and the "copy"
+commands (copy, sync, etc.), and with all other commands too.`,
         Advanced: true,
     }, {
         Name: "trashed_only",
@@ -434,9 +441,9 @@ need to use --ignore size also.`,
     }, {
         Name:    "server_side_across_configs",
         Default: false,
-        Help: `Allow server side operations (eg copy) to work across different drive configs.
+        Help: `Allow server-side operations (e.g. copy) to work across different drive configs.
 
-This can be useful if you wish to do a server side copy between two
+This can be useful if you wish to do a server-side copy between two
 different Google drives. Note that this isn't enabled by default
 because it isn't easy to tell if it will work between any two
 configurations.`,
@@ -470,6 +477,21 @@ Note that this detection is relying on error message strings which
 Google don't document so it may break in the future.
 
 See: https://github.com/rclone/rclone/issues/3857
+`,
+        Advanced: true,
+    }, {
+        Name:    "stop_on_download_limit",
+        Default: false,
+        Help: `Make download limit errors be fatal
+
+At the time of writing it is only possible to download 10TB of data from
+Google Drive a day (this is an undocumented limit). When this limit is
+reached Google Drive produces a slightly different error message. When
+this flag is set it causes these errors to be fatal. These will stop
+the in-progress sync.
+
+Note that this detection is relying on error message strings which
+Google don't document so it may break in the future.
 `,
         Advanced: true,
     }, {
@@ -539,6 +561,7 @@ type Options struct {
     ServerSideAcrossConfigs bool                 `config:"server_side_across_configs"`
     DisableHTTP2            bool                 `config:"disable_http2"`
     StopOnUploadLimit       bool                 `config:"stop_on_upload_limit"`
+    StopOnDownloadLimit     bool                 `config:"stop_on_download_limit"`
     SkipShortcuts           bool                 `config:"skip_shortcuts"`
     Enc                     encoder.MultiEncoder `config:"encoding"`
 }
@@ -548,6 +571,7 @@ type Fs struct {
     name     string             // name of this remote
     root     string             // the path we are working on
     opt      Options            // parsed options
+    ci       *fs.ConfigInfo     // global config
     features *fs.Features       // optional features
     svc      *drive.Service     // the connection to the drive server
     v2Svc    *drive_v2.Service  // used to create download links for the v2 api
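The new ci *fs.ConfigInfo field caches the global config looked up from the request context, part of this branch's wider move away from the fs.Config global. A minimal sketch of the carry-config-in-context pattern in plain Go (rclone's fs.GetConfig is more elaborate; the key type and fallback here are illustrative assumptions):

package main

import (
    "context"
    "fmt"
)

type ConfigInfo struct {
    Checkers int
}

type configKey struct{}

var defaultConfig = ConfigInfo{Checkers: 8}

// WithConfig attaches a config to the context.
func WithConfig(ctx context.Context, ci *ConfigInfo) context.Context {
    return context.WithValue(ctx, configKey{}, ci)
}

// GetConfig reads it back, falling back to the defaults.
func GetConfig(ctx context.Context) *ConfigInfo {
    if ci, ok := ctx.Value(configKey{}).(*ConfigInfo); ok {
        return ci
    }
    return &defaultConfig
}

func main() {
    ctx := WithConfig(context.Background(), &ConfigInfo{Checkers: 16})
    fmt.Println(GetConfig(ctx).Checkers) // 16
}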
@@ -548,6 +571,7 @@ type Fs struct {
|
|||||||
name string // name of this remote
|
name string // name of this remote
|
||||||
root string // the path we are working on
|
root string // the path we are working on
|
||||||
opt Options // parsed options
|
opt Options // parsed options
|
||||||
|
ci *fs.ConfigInfo // global config
|
||||||
features *fs.Features // optional features
|
features *fs.Features // optional features
|
||||||
svc *drive.Service // the connection to the drive server
|
svc *drive.Service // the connection to the drive server
|
||||||
v2Svc *drive_v2.Service // used to create download links for the v2 api
|
v2Svc *drive_v2.Service // used to create download links for the v2 api
|
||||||
@@ -638,6 +662,9 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
|
|||||||
return false, fserrors.FatalError(err)
|
return false, fserrors.FatalError(err)
|
||||||
}
|
}
|
||||||
return true, err
|
return true, err
|
||||||
|
} else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" {
|
||||||
|
fs.Errorf(f, "Received download limit error: %v", err)
|
||||||
|
return false, fserrors.FatalError(err)
|
||||||
} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
|
} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
|
||||||
fs.Errorf(f, "Received team drive file limit error: %v", err)
|
fs.Errorf(f, "Received team drive file limit error: %v", err)
|
||||||
return false, fserrors.FatalError(err)
|
return false, fserrors.FatalError(err)
|
||||||
@@ -669,7 +696,7 @@ func containsString(slice []string, s string) bool {
|
|||||||
|
|
||||||
// getFile returns drive.File for the ID passed and fields passed in
|
// getFile returns drive.File for the ID passed and fields passed in
|
||||||
func (f *Fs) getFile(ID string, fields googleapi.Field) (info *drive.File, err error) {
|
func (f *Fs) getFile(ID string, fields googleapi.Field) (info *drive.File, err error) {
|
||||||
err = f.pacer.CallNoRetry(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
info, err = f.svc.Files.Get(ID).
|
info, err = f.svc.Files.Get(ID).
|
||||||
Fields(fields).
|
Fields(fields).
|
||||||
SupportsAllDrives(true).
|
SupportsAllDrives(true).
|
||||||
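The new branch makes "downloadQuotaExceeded" fatal, the same shape as the existing upload-limit handling: the reason string comes from the structured error the Google API client returns. A minimal sketch of pulling a reason out of a *googleapi.Error (the reason value here is only illustrative):

package main

import (
    "fmt"

    "google.golang.org/api/googleapi"
)

// reasonOf extracts the first error reason, if any.
func reasonOf(err error) string {
    if gerr, ok := err.(*googleapi.Error); ok && len(gerr.Errors) > 0 {
        return gerr.Errors[0].Reason
    }
    return ""
}

func main() {
    err := &googleapi.Error{
        Code:   403,
        Errors: []googleapi.ErrorItem{{Reason: "downloadQuotaExceeded"}},
    }
    fmt.Println(reasonOf(err)) // downloadQuotaExceeded
}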
@@ -693,10 +720,10 @@ func (f *Fs) getRootID() (string, error) {
 // If the user fn ever returns true then it early exits with found = true
 //
 // Search params: https://developers.google.com/drive/search-parameters
-func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directoriesOnly, filesOnly, includeAll bool, fn listFn) (found bool, err error) {
+func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directoriesOnly, filesOnly, trashedOnly, includeAll bool, fn listFn) (found bool, err error) {
     var query []string
     if !includeAll {
-        q := "trashed=" + strconv.FormatBool(f.opt.TrashedOnly)
+        q := "trashed=" + strconv.FormatBool(trashedOnly)
         if f.opt.TrashedOnly {
             q = fmt.Sprintf("(mimeType='%s' or %s)", driveFolderType, q)
         }
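Passing trashedOnly explicitly lets callers override f.opt.TrashedOnly per call; the cleanupTeamDrive helper further down always lists trash, for example. A minimal sketch of how such boolean flags become a Drive search query, assuming the q syntax from the search-parameters page linked above:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// buildQuery composes a Drive files.list q expression.
func buildQuery(trashedOnly bool, title string) string {
    var query []string
    query = append(query, "trashed="+strconv.FormatBool(trashedOnly))
    if title != "" {
        query = append(query, fmt.Sprintf("name='%s'", title))
    }
    return strings.Join(query, " and ")
}

func main() {
    fmt.Println(buildQuery(true, "report.txt"))
    // trashed=true and name='report.txt'
}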
@@ -921,8 +948,10 @@ func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, er
 
 // Figure out if the user wants to use a team drive
 func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name string) error {
+    ci := fs.GetConfig(ctx)
+
     // Stop if we are running non-interactive config
-    if fs.Config.AutoConfirm {
+    if ci.AutoConfirm {
         return nil
     }
     if opt.TeamDriveID == "" {
@@ -933,7 +962,7 @@ func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name
     if !config.Confirm(false) {
         return nil
     }
-    f, err := newFs(name, "", m)
+    f, err := newFs(ctx, name, "", m)
     if err != nil {
         return errors.Wrap(err, "failed to make Fs to list teamdrives")
     }
@@ -960,8 +989,8 @@ func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name
 }
 
 // getClient makes an http client according to the options
-func getClient(opt *Options) *http.Client {
-    t := fshttp.NewTransportCustom(fs.Config, func(t *http.Transport) {
+func getClient(ctx context.Context, opt *Options) *http.Client {
+    t := fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
         if opt.DisableHTTP2 {
             t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
         }
@@ -971,7 +1000,7 @@ func getClient(opt *Options) *http.Client {
     }
 }
 
-func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) {
+func getServiceAccountClient(ctx context.Context, opt *Options, credentialsData []byte) (*http.Client, error) {
     scopes := driveScopes(opt.Scope)
     conf, err := google.JWTConfigFromJSON(credentialsData, scopes...)
     if err != nil {
@@ -980,11 +1009,11 @@ func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client
     if opt.Impersonate != "" {
         conf.Subject = opt.Impersonate
     }
-    ctxWithSpecialClient := oauthutil.Context(getClient(opt))
+    ctxWithSpecialClient := oauthutil.Context(ctx, getClient(ctx, opt))
     return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
 }
 
-func createOAuthClient(opt *Options, name string, m configmap.Mapper) (*http.Client, error) {
+func createOAuthClient(ctx context.Context, opt *Options, name string, m configmap.Mapper) (*http.Client, error) {
     var oAuthClient *http.Client
     var err error
 
@@ -997,12 +1026,12 @@ func createOAuthClient(opt *Options, name string, m configmap.Mapper) (*http.Cli
         opt.ServiceAccountCredentials = string(loadedCreds)
     }
     if opt.ServiceAccountCredentials != "" {
-        oAuthClient, err = getServiceAccountClient(opt, []byte(opt.ServiceAccountCredentials))
+        oAuthClient, err = getServiceAccountClient(ctx, opt, []byte(opt.ServiceAccountCredentials))
         if err != nil {
             return nil, errors.Wrap(err, "failed to create oauth client from service account")
         }
     } else {
-        oAuthClient, _, err = oauthutil.NewClientWithBaseClient(name, m, driveConfig, getClient(opt))
+        oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(ctx, opt))
         if err != nil {
             return nil, errors.Wrap(err, "failed to create oauth client")
         }
@@ -1045,7 +1074,7 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
 //
 // It constructs a valid Fs but doesn't attempt to figure out whether
 // it is a file or a directory.
-func newFs(name, path string, m configmap.Mapper) (*Fs, error) {
+func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, error) {
     // Parse config into Options struct
     opt := new(Options)
     err := configstruct.Set(m, opt)
@@ -1061,7 +1090,7 @@ func newFs(name, path string, m configmap.Mapper) (*Fs, error) {
         return nil, errors.Wrap(err, "drive: chunk size")
     }
 
-    oAuthClient, err := createOAuthClient(opt, name, m)
+    oAuthClient, err := createOAuthClient(ctx, opt, name, m)
     if err != nil {
         return nil, errors.Wrap(err, "drive: failed when making oauth client")
     }
@@ -1071,11 +1100,13 @@ func newFs(name, path string, m configmap.Mapper) (*Fs, error) {
         return nil, err
     }
 
+    ci := fs.GetConfig(ctx)
     f := &Fs{
         name:     name,
         root:     root,
         opt:      *opt,
-        pacer:    fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst))),
+        ci:       ci,
+        pacer:    fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst))),
         m:        m,
         grouping: listRGrouping,
         listRmu:  new(sync.Mutex),
@@ -1089,7 +1120,7 @@ func newFs(name, path string, m configmap.Mapper) (*Fs, error) {
         WriteMimeType:           true,
         CanHaveEmptyDirectories: true,
         ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
-    }).Fill(f)
+    }).Fill(ctx, f)
 
     // Create a new authorized Drive client.
     f.client = oAuthClient
@@ -1109,9 +1140,8 @@ func newFs(name, path string, m configmap.Mapper) (*Fs, error) {
 }
 
 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
-    ctx := context.Background()
-    f, err := newFs(name, path, m)
+func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, error) {
+    f, err := newFs(ctx, name, path, m)
     if err != nil {
         return nil, err
     }
@@ -1271,11 +1301,15 @@ func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMim
     if t == nil {
         return nil, errors.Errorf("unsupported link type %s", exportMimeType)
     }
+    xdgIcon := _mimeTypeToXDGLinkIcons[info.MimeType]
+    if xdgIcon == "" {
+        xdgIcon = defaultXDGIcon
+    }
     var buf bytes.Buffer
     err := t.Execute(&buf, struct {
-        URL, Title string
+        URL, Title, XDGIcon string
     }{
-        info.WebViewLink, info.Name,
+        info.WebViewLink, info.Name, xdgIcon,
     })
     if err != nil {
         return nil, errors.Wrap(err, "executing template failed")
@@ -1376,7 +1410,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
     // Find the leaf in pathID
     pathID = actualID(pathID)
-    found, err = f.list(ctx, []string{pathID}, leaf, true, false, false, func(item *drive.File) bool {
+    found, err = f.list(ctx, []string{pathID}, leaf, true, false, f.opt.TrashedOnly, false, func(item *drive.File) bool {
         if !f.opt.SkipGdocs {
             _, exportName, _, isDocument := f.findExportFormat(item)
             if exportName == leaf {
@@ -1569,7 +1603,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
     directoryID = actualID(directoryID)
 
     var iErr error
-    _, err = f.list(ctx, []string{directoryID}, "", false, false, false, func(item *drive.File) bool {
+    _, err = f.list(ctx, []string{directoryID}, "", false, false, f.opt.TrashedOnly, false, func(item *drive.File) bool {
         entry, err := f.itemToDirEntry(path.Join(dir, item.Name), item)
         if err != nil {
             iErr = err
@@ -1654,7 +1688,7 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
     listRSlices{dirs, paths}.Sort()
     var iErr error
     foundItems := false
-    _, err := f.list(ctx, dirs, "", false, false, false, func(item *drive.File) bool {
+    _, err := f.list(ctx, dirs, "", false, false, f.opt.TrashedOnly, false, func(item *drive.File) bool {
         // shared with me items have no parents when at the root
         if f.opt.SharedWithMe && len(item.Parents) == 0 && len(paths) == 1 && paths[0] == "" {
             item.Parents = dirs
@@ -1670,7 +1704,7 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
         if len(paths) == 1 {
             // don't check parents at root because
             // - shared with me items have no parents at the root
-            // - if using a root alias, eg "root" or "appDataFolder" the ID won't match
+            // - if using a root alias, e.g. "root" or "appDataFolder" the ID won't match
             i = 0
             // items at root can have more than one parent so we need to put
             // the item in just once.
@@ -1785,7 +1819,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
     mu := sync.Mutex{} // protects in and overflow
     wg := sync.WaitGroup{}
     in := make(chan listREntry, listRInputBuffer)
-    out := make(chan error, fs.Config.Checkers)
+    out := make(chan error, f.ci.Checkers)
     list := walk.NewListRHelper(callback)
     overflow := []listREntry{}
     listed := 0
@@ -1824,7 +1858,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
     wg.Add(1)
     in <- listREntry{directoryID, dir}
 
-    for i := 0; i < fs.Config.Checkers; i++ {
+    for i := 0; i < f.ci.Checkers; i++ {
         go f.listRRunner(ctx, &wg, in, out, cb, sendJob)
     }
     go func() {
@@ -1857,7 +1891,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
         mu.Unlock()
     }()
     // wait until all the workers have finished
-    for i := 0; i < fs.Config.Checkers; i++ {
+    for i := 0; i < f.ci.Checkers; i++ {
         e := <-out
         mu.Lock()
         // if one worker returns an error early, close the input so all other workers exit
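Swapping fs.Config.Checkers for f.ci.Checkers doesn't change the concurrency shape: ListR still runs a fixed pool of workers over a shared input channel and collects one error per worker from a buffered channel sized to the pool. A minimal sketch of that pool shape, with a hypothetical no-op standing in for the listing work:

package main

import (
    "fmt"
    "sync"
)

func main() {
    const checkers = 4 // stand-in for f.ci.Checkers
    in := make(chan string, 16)
    out := make(chan error, checkers) // one slot per worker
    var wg sync.WaitGroup

    worker := func() {
        var err error
        for job := range in {
            _ = job // the real listing work happens here
            wg.Done()
        }
        out <- err // report once the input channel closes
    }
    for i := 0; i < checkers; i++ {
        go worker()
    }

    wg.Add(1)
    in <- "root"
    wg.Wait() // all queued jobs done
    close(in) // let the workers exit

    // collect one result per worker
    for i := 0; i < checkers; i++ {
        if e := <-out; e != nil {
            fmt.Println("worker error:", e)
        }
    }
}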
@@ -2025,10 +2059,10 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-    exisitingObj, err := f.NewObject(ctx, src.Remote())
+    existingObj, err := f.NewObject(ctx, src.Remote())
     switch err {
     case nil:
-        return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
+        return existingObj, existingObj.Update(ctx, in, src, options...)
     case fs.ErrorObjectNotFound:
         // Not found so create it
         return f.PutUnchecked(ctx, in, src, options...)
@@ -2129,7 +2163,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
     for _, srcDir := range dirs[1:] {
         // list the objects
         infos := []*drive.File{}
-        _, err := f.list(ctx, []string{srcDir.ID()}, "", false, false, true, func(info *drive.File) bool {
+        _, err := f.list(ctx, []string{srcDir.ID()}, "", false, false, f.opt.TrashedOnly, true, func(info *drive.File) bool {
             infos = append(infos, info)
             return false
         })
@@ -2207,7 +2241,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
     }
     var trashedFiles = false
     if check {
-        found, err := f.list(ctx, []string{directoryID}, "", false, false, true, func(item *drive.File) bool {
+        found, err := f.list(ctx, []string{directoryID}, "", false, false, f.opt.TrashedOnly, true, func(item *drive.File) bool {
             if !item.Trashed {
                 fs.Debugf(dir, "Rmdir: contains file: %q", item.Name)
                 return true
@@ -2253,7 +2287,7 @@ func (f *Fs) Precision() time.Duration {
     return time.Millisecond
 }
 
-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
 //
 // This is stored with the remote path given
 //
@@ -2367,8 +2401,57 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
     return f.purgeCheck(ctx, dir, false)
 }
 
+type cleanupResult struct {
+    Errors int
+}
+
+func (r cleanupResult) Error() string {
+    return fmt.Sprintf("%d errors during cleanup - see log", r.Errors)
+}
+
+func (f *Fs) cleanupTeamDrive(ctx context.Context, dir string, directoryID string) (r cleanupResult, err error) {
+    _, err = f.list(ctx, []string{directoryID}, "", false, false, true, false, func(item *drive.File) bool {
+        remote := path.Join(dir, item.Name)
+        if item.ExplicitlyTrashed { // description is wrong - can also be set for folders - no need to recurse them
+            err := f.delete(ctx, item.Id, false)
+            if err != nil {
+                r.Errors++
+                fs.Errorf(remote, "%v", err)
+            }
+            return false
+        }
+
+        if item.MimeType == driveFolderType {
+            if !isShortcutID(item.Id) {
+                rNew, _ := f.cleanupTeamDrive(ctx, remote, item.Id)
+                r.Errors += rNew.Errors
+            }
+            return false
+        }
+        return false
+    })
+    if err != nil {
+        err = errors.Wrap(err, "failed to list directory")
+        r.Errors++
+        fs.Errorf(dir, "%v", err)
+    }
+    if r.Errors != 0 {
+        return r, r
+    }
+    return r, nil
+}
+
 // CleanUp empties the trash
 func (f *Fs) CleanUp(ctx context.Context) error {
+    if f.isTeamDrive {
+        directoryID, err := f.dirCache.FindDir(ctx, "", false)
+        if err != nil {
+            return err
+        }
+        directoryID = actualID(directoryID)
+        _, err = f.cleanupTeamDrive(ctx, "", directoryID)
+        return err
+    }
     err := f.pacer.Call(func() (bool, error) {
         err := f.svc.Files.EmptyTrash().Context(ctx).Do()
         return f.shouldRetry(err)
@@ -2377,6 +2460,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
     if err != nil {
         return err
     }
+    fs.Logf(f, "Note that emptying the trash happens in the background and can take some time.")
     return nil
 }
@@ -2420,7 +2504,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
     usage := &fs.Usage{
         Used:    fs.NewUsageValue(q.UsageInDrive),           // bytes in use
         Trashed: fs.NewUsageValue(q.UsageInDriveTrash),      // bytes in trash
-        Other:   fs.NewUsageValue(q.Usage - q.UsageInDrive), // other usage eg gmail in drive
+        Other:   fs.NewUsageValue(q.Usage - q.UsageInDrive), // other usage e.g. gmail in drive
     }
     if q.Limit > 0 {
         usage.Total = fs.NewUsageValue(q.Limit) // quota of bytes that can be used
@@ -2429,7 +2513,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
     return usage, nil
 }
 
-// Move src to this remote using server side move operations.
+// Move src to this remote using server-side move operations.
 //
 // This is stored with the remote path given
 //
@@ -2530,7 +2614,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 }
 
 // DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -2751,7 +2835,7 @@ func (f *Fs) changeChunkSize(chunkSizeString string) (err error) {
     return err
 }
 
-func (f *Fs) changeServiceAccountFile(file string) (err error) {
+func (f *Fs) changeServiceAccountFile(ctx context.Context, file string) (err error) {
     fs.Debugf(nil, "Changing Service Account File from %s to %s", f.opt.ServiceAccountFile, file)
     if file == f.opt.ServiceAccountFile {
         return nil
@@ -2773,7 +2857,7 @@ func (f *Fs) changeServiceAccountFile(file string) (err error) {
     }()
     f.opt.ServiceAccountFile = file
     f.opt.ServiceAccountCredentials = ""
-    oAuthClient, err := createOAuthClient(&f.opt, f.name, f.m)
+    oAuthClient, err := createOAuthClient(ctx, &f.opt, f.name, f.m)
     if err != nil {
         return errors.Wrap(err, "drive: failed when making oauth client")
     }
@@ -2850,7 +2934,7 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
     }
 
     var info *drive.File
-    err = dstFs.pacer.CallNoRetry(func() (bool, error) {
+    err = dstFs.pacer.Call(func() (bool, error) {
         info, err = dstFs.svc.Files.Create(createInfo).
             Fields(partialFields).
             SupportsAllDrives(true).
@@ -2903,7 +2987,7 @@ func (r unTrashResult) Error() string {
 func (f *Fs) unTrash(ctx context.Context, dir string, directoryID string, recurse bool) (r unTrashResult, err error) {
     directoryID = actualID(directoryID)
     fs.Debugf(dir, "finding trash to restore in directory %q", directoryID)
-    _, err = f.list(ctx, []string{directoryID}, "", false, false, true, func(item *drive.File) bool {
+    _, err = f.list(ctx, []string{directoryID}, "", false, false, f.opt.TrashedOnly, true, func(item *drive.File) bool {
         remote := path.Join(dir, item.Name)
         if item.ExplicitlyTrashed {
             fs.Infof(remote, "restoring %q", item.Id)
@@ -2959,6 +3043,41 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
     return f.unTrash(ctx, dir, directoryID, true)
 }
 
+// copy file with id to dest
+func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
+    info, err := f.getFile(id, f.fileFields)
+    if err != nil {
+        return errors.Wrap(err, "couldn't find id")
+    }
+    if info.MimeType == driveFolderType {
+        return errors.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
+    }
+    info.Name = f.opt.Enc.ToStandardName(info.Name)
+    o, err := f.newObjectWithInfo(info.Name, info)
+    if err != nil {
+        return err
+    }
+    destDir, destLeaf, err := fspath.Split(dest)
+    if err != nil {
+        return err
+    }
+    if destLeaf == "" {
+        destLeaf = info.Name
+    }
+    if destDir == "" {
+        destDir = "."
+    }
+    dstFs, err := cache.Get(ctx, destDir)
+    if err != nil {
+        return err
+    }
+    _, err = operations.Copy(ctx, dstFs, nil, destLeaf, o)
+    if err != nil {
+        return errors.Wrap(err, "copy failed")
+    }
+    return nil
+}
+
 var commandHelp = []fs.CommandHelp{{
     Name:  "get",
     Short: "Get command for fetching the drive config parameters",
@@ -3059,6 +3178,29 @@ Result:
     "Errors": 0
 }
 `,
+}, {
+    Name:  "copyid",
+    Short: "Copy files by ID",
+    Long: `This command copies files by ID
+
+Usage:
+
+    rclone backend copyid drive: ID path
+    rclone backend copyid drive: ID1 path1 ID2 path2
+
+It copies the drive file with ID given to the path (an rclone path which
+will be passed internally to rclone copyto). The ID and path pairs can be
+repeated.
+
+The path should end with a / to indicate copy the file as named to
+this directory. If it doesn't end with a / then the last path
+component will be used as the file name.
+
+If the destination is a drive backend then server-side copying will be
+attempted if possible.
+
+Use the -i flag to see what would be copied before copying.
+`,
 }}
 
 // Command the backend to run a named command
@@ -3086,7 +3228,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
     if serviceAccountFile, ok := opt["service_account_file"]; ok {
         serviceAccountMap := make(map[string]string)
         serviceAccountMap["previous"] = f.opt.ServiceAccountFile
-        if err = f.changeServiceAccountFile(serviceAccountFile); err != nil {
+        if err = f.changeServiceAccountFile(ctx, serviceAccountFile); err != nil {
             return out, err
         }
         f.m.Set("service_account_file", serviceAccountFile)
@@ -3112,7 +3254,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
     dstFs := f
     target, ok := opt["target"]
     if ok {
-        targetFs, err := cache.Get(target)
+        targetFs, err := cache.Get(ctx, target)
         if err != nil {
             return nil, errors.Wrap(err, "couldn't find target")
         }
@@ -3130,6 +3272,19 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
         dir = arg[0]
     }
     return f.unTrashDir(ctx, dir, true)
+case "copyid":
+    if len(arg)%2 != 0 {
+        return nil, errors.New("need an even number of arguments")
+    }
+    for len(arg) > 0 {
+        id, dest := arg[0], arg[1]
+        arg = arg[2:]
+        err = f.copyID(ctx, id, dest)
+        if err != nil {
+            return nil, errors.Wrapf(err, "failed copying %q to %q", id, dest)
+        }
+    }
+    return nil, nil
 default:
     return nil, fs.ErrorCommandNotFound
 }
@@ -3197,7 +3352,7 @@ func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
|
|||||||
}
|
}
|
||||||
directoryID = actualID(directoryID)
|
directoryID = actualID(directoryID)
|
||||||
|
|
||||||
found, err := f.list(ctx, []string{directoryID}, leaf, false, false, false, func(item *drive.File) bool {
|
found, err := f.list(ctx, []string{directoryID}, leaf, false, false, f.opt.TrashedOnly, false, func(item *drive.File) bool {
|
||||||
if !f.opt.SkipGdocs {
|
if !f.opt.SkipGdocs {
|
||||||
extension, exportName, exportMimeType, isDocument = f.findExportFormat(item)
|
extension, exportName, exportMimeType, isDocument = f.findExportFormat(item)
|
||||||
if exportName == leaf {
|
if exportName == leaf {
|
||||||
@@ -3272,11 +3427,10 @@ func (o *baseObject) httpResponse(ctx context.Context, url, method string, optio
 	if url == "" {
 		return nil, nil, errors.New("forbidden to download - check sharing permission")
 	}
-	req, err = http.NewRequest(method, url, nil)
+	req, err = http.NewRequestWithContext(ctx, method, url, nil)
 	if err != nil {
 		return req, nil, err
 	}
-	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
 	fs.OpenOptionAddHTTPHeaders(req.Header, options)
 	if o.bytes == 0 {
 		// Don't supply range requests for 0 length objects as they always fail
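The same two-step pattern is collapsed throughout this change set (it recurs in the upload code below): building a request and then attaching the context becomes a single call. A minimal sketch of the before and after, using only the net/http standard library:

package main

import (
	"context"
	"fmt"
	"net/http"
)

func main() {
	ctx := context.Background()

	// Old pattern (compatible with Go versions before 1.13):
	// build the request, then attach the context in a second step.
	reqOld, _ := http.NewRequest("GET", "https://example.com/", nil)
	reqOld = reqOld.WithContext(ctx)

	// New pattern, as in the patch: one call does both.
	reqNew, _ := http.NewRequestWithContext(ctx, "GET", "https://example.com/", nil)

	fmt.Println(reqOld.URL, reqNew.URL)
}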
@@ -3609,7 +3763,7 @@ URL={{ .URL }}{{"\r"}}
|
|||||||
Encoding=UTF-8
|
Encoding=UTF-8
|
||||||
Name={{ .Title }}
|
Name={{ .Title }}
|
||||||
URL={{ .URL }}
|
URL={{ .URL }}
|
||||||
Icon=text-html
|
Icon={{ .XDGIcon }}
|
||||||
Type=Link
|
Type=Link
|
||||||
`
|
`
|
||||||
htmlTemplate = `<html>
|
htmlTemplate = `<html>
|
||||||
|
|||||||
@@ -7,6 +7,8 @@ import (
 	"io"
 	"io/ioutil"
 	"mime"
+	"os"
+	"path"
 	"path/filepath"
 	"strings"
 	"testing"
@@ -194,7 +196,7 @@ func (f *Fs) InternalTestDocumentImport(t *testing.T) {
 	testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
 	require.NoError(t, err)

-	testFilesFs, err := fs.NewFs(testFilesPath)
+	testFilesFs, err := fs.NewFs(context.Background(), testFilesPath)
 	require.NoError(t, err)

 	_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
@@ -208,7 +210,7 @@ func (f *Fs) InternalTestDocumentUpdate(t *testing.T) {
 	testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
 	require.NoError(t, err)

-	testFilesFs, err := fs.NewFs(testFilesPath)
+	testFilesFs, err := fs.NewFs(context.Background(), testFilesPath)
 	require.NoError(t, err)

 	_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
@@ -272,14 +274,15 @@ func (f *Fs) InternalTestDocumentLink(t *testing.T) {
 	}
 }

+const (
+	// from fstest/fstests/fstests.go
+	existingDir    = "hello? sausage"
+	existingFile   = `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`
+	existingSubDir = "êé"
+)
+
 // TestIntegration/FsMkdir/FsPutFiles/Internal/Shortcuts
 func (f *Fs) InternalTestShortcuts(t *testing.T) {
-	const (
-		// from fstest/fstests/fstests.go
-		existingDir    = "hello? sausage"
-		existingFile   = `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`
-		existingSubDir = "êé"
-	)
 	ctx := context.Background()
 	srcObj, err := f.NewObject(ctx, existingFile)
 	require.NoError(t, err)
@@ -408,6 +411,55 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
 	require.NoError(t, f.Purge(ctx, "trashDir"))
 }

+// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyID
+func (f *Fs) InternalTestCopyID(t *testing.T) {
+	ctx := context.Background()
+	obj, err := f.NewObject(ctx, existingFile)
+	require.NoError(t, err)
+	o := obj.(*Object)
+
+	dir, err := ioutil.TempDir("", "rclone-drive-copyid-test")
+	require.NoError(t, err)
+	defer func() {
+		_ = os.RemoveAll(dir)
+	}()
+
+	checkFile := func(name string) {
+		filePath := filepath.Join(dir, name)
+		fi, err := os.Stat(filePath)
+		require.NoError(t, err)
+		assert.Equal(t, int64(100), fi.Size())
+		err = os.Remove(filePath)
+		require.NoError(t, err)
+	}
+
+	t.Run("BadID", func(t *testing.T) {
+		err = f.copyID(ctx, "ID-NOT-FOUND", dir+"/")
+		require.Error(t, err)
+		assert.Contains(t, err.Error(), "couldn't find id")
+	})
+
+	t.Run("Directory", func(t *testing.T) {
+		rootID, err := f.dirCache.RootID(ctx, false)
+		require.NoError(t, err)
+		err = f.copyID(ctx, rootID, dir+"/")
+		require.Error(t, err)
+		assert.Contains(t, err.Error(), "can't copy directory")
+	})
+
+	t.Run("WithoutDestName", func(t *testing.T) {
+		err = f.copyID(ctx, o.id, dir+"/")
+		require.NoError(t, err)
+		checkFile(path.Base(existingFile))
+	})
+
+	t.Run("WithDestName", func(t *testing.T) {
+		err = f.copyID(ctx, o.id, dir+"/potato.txt")
+		require.NoError(t, err)
+		checkFile("potato.txt")
+	})
+}
+
 func (f *Fs) InternalTest(t *testing.T) {
 	// These tests all depend on each other so run them as nested tests
 	t.Run("DocumentImport", func(t *testing.T) {
@@ -424,6 +476,7 @@ func (f *Fs) InternalTest(t *testing.T) {
 	})
 	t.Run("Shortcuts", f.InternalTestShortcuts)
 	t.Run("UnTrash", f.InternalTestUnTrash)
+	t.Run("CopyID", f.InternalTestCopyID)
 }

 var _ fstests.InternalTester = (*Fs)(nil)

@@ -77,11 +77,10 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
 		return false, err
 	}
 	var req *http.Request
-	req, err = http.NewRequest(method, urls, body)
+	req, err = http.NewRequestWithContext(ctx, method, urls, body)
 	if err != nil {
 		return false, err
 	}
-	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
 	googleapi.Expand(req.URL, map[string]string{
 		"fileId": fileID,
 	})
@@ -114,8 +113,7 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,

 // Make an http.Request for the range passed in
 func (rx *resumableUpload) makeRequest(ctx context.Context, start int64, body io.ReadSeeker, reqSize int64) *http.Request {
-	req, _ := http.NewRequest("POST", rx.URI, body)
-	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
+	req, _ := http.NewRequestWithContext(ctx, "POST", rx.URI, body)
 	req.ContentLength = reqSize
 	totalSize := "*"
 	if rx.ContentLength >= 0 {

@@ -30,6 +30,7 @@ import (
 	"regexp"
 	"strings"
 	"time"
+	"unicode/utf8"

 	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
 	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
@@ -86,6 +87,8 @@ const (
 	// by default.
 	defaultChunkSize = 48 * fs.MebiByte
 	maxChunkSize     = 150 * fs.MebiByte
+	// Max length of filename parts: https://help.dropbox.com/installs-integrations/sync-uploads/files-not-syncing
+	maxFileNameLength = 255
 )

 var (
@@ -107,6 +110,9 @@ var (

 	// DbHashType is the hash.Type for Dropbox
 	DbHashType hash.Type
+
+	// Errors
+	errNotSupportedInSharedMode = fserrors.NoRetryError(errors.New("not supported in shared files mode"))
 )

 // Register with Fs
@@ -116,11 +122,14 @@ func init() {
 		Name:        "dropbox",
 		Description: "Dropbox",
 		NewFs:       NewFs,
-		Config: func(name string, m configmap.Mapper) {
+		Config: func(ctx context.Context, name string, m configmap.Mapper) {
 			opt := oauthutil.Options{
 				NoOffline: true,
+				OAuth2Opts: []oauth2.AuthCodeOption{
+					oauth2.SetAuthURLParam("token_access_type", "offline"),
+				},
 			}
-			err := oauthutil.Config("dropbox", name, m, dropboxConfig, &opt)
+			err := oauthutil.Config(ctx, "dropbox", name, m, dropboxConfig, &opt)
 			if err != nil {
 				log.Fatalf("Failed to configure token: %v", err)
 			}
@@ -142,6 +151,31 @@ memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
 			Help:     "Impersonate this user when using a business account.",
 			Default:  "",
 			Advanced: true,
+		}, {
+			Name: "shared_files",
+			Help: `Instructs rclone to work on individual shared files.
+
+In this mode rclone's features are extremely limited - only list (ls, lsl, etc.)
+operations and read operations (e.g. downloading) are supported in this mode.
+All other operations will be disabled.`,
+			Default:  false,
+			Advanced: true,
+		}, {
+			Name: "shared_folders",
+			Help: `Instructs rclone to work on shared folders.
+
+When this flag is used with no path only the List operation is supported and
+all available shared folders will be listed. If you specify a path the first part
+will be interpreted as the name of the shared folder. Rclone will then try to mount
+this shared folder to the root namespace. On success rclone proceeds normally.
+The shared folder is now pretty much a normal folder and all normal operations
+are supported.
+
+Note that we don't unmount the shared folder afterwards so the
+--dropbox-shared-folders can be omitted after the first use of a particular
+shared folder.`,
+			Default:  false,
+			Advanced: true,
 		}, {
 			Name: config.ConfigEncoding,
 			Help: config.ConfigEncodingHelp,
@@ -161,9 +195,11 @@ memory. It can be set smaller if you are tight on memory.`, maxChunkSize),

 // Options defines the configuration for this backend
 type Options struct {
 	ChunkSize   fs.SizeSuffix `config:"chunk_size"`
 	Impersonate string        `config:"impersonate"`
-	Enc         encoder.MultiEncoder `config:"encoding"`
+	SharedFiles   bool                 `config:"shared_files"`
+	SharedFolders bool                 `config:"shared_folders"`
+	Enc           encoder.MultiEncoder `config:"encoding"`
 }

 // Fs represents a remote dropbox server
@@ -186,7 +222,9 @@ type Fs struct {
 //
 // Dropbox Objects always have full metadata
 type Object struct {
 	fs      *Fs // what this object is part of
+	id      string
+	url     string
 	remote  string    // The remote path
 	bytes   int64     // size of the object
 	modTime time.Time // time it was last modified
@@ -222,9 +260,11 @@ func shouldRetry(err error) (bool, error) {
 		return false, err
 	}
 	baseErrString := errors.Cause(err).Error()
-	// First check for Insufficient Space
+	// First check for specific errors
 	if strings.Contains(baseErrString, "insufficient_space") {
 		return false, fserrors.FatalError(err)
+	} else if strings.Contains(baseErrString, "malformed_path") {
+		return false, fserrors.NoRetryError(err)
 	}
 	// Then handle any official Retry-After header from Dropbox's SDK
 	switch e := err.(type) {
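The intent of the hunk above: an insufficient_space error should abort the whole transfer, while malformed_path should fail just the one operation without retries. A standalone sketch of the same classification, using rclone's real fserrors helpers (the classify function itself is illustrative, not the backend's code):

package main

import (
	"errors"
	"fmt"
	"strings"

	"github.com/rclone/rclone/fs/fserrors"
)

// classify mirrors the shouldRetry logic above: map known Dropbox error
// strings onto rclone's retry semantics.
func classify(err error) (retry bool, out error) {
	s := err.Error()
	switch {
	case strings.Contains(s, "insufficient_space"):
		return false, fserrors.FatalError(err) // stop the whole sync
	case strings.Contains(s, "malformed_path"):
		return false, fserrors.NoRetryError(err) // fail this op, don't retry
	}
	return true, err // let the pacer retry anything else
}

func main() {
	retry, err := classify(errors.New("path/malformed_path/ not found"))
	fmt.Println(retry, err) // false path/malformed_path/ not found
}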
@@ -262,7 +302,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
 }

 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
@@ -287,7 +327,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		}
 	}

-	oAuthClient, _, err := oauthutil.NewClient(name, m, dropboxConfig)
+	oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, dropboxConfig)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to configure dropbox")
 	}
@@ -295,7 +335,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	f := &Fs{
 		name: name,
 		opt:  *opt,
-		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
 	config := dropbox.Config{
 		LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
@@ -330,10 +370,62 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	f.users = users.New(config)
 	f.features = (&fs.Features{
 		CaseInsensitive:         true,
-		ReadMimeType:            true,
+		ReadMimeType:            false,
 		CanHaveEmptyDirectories: true,
-	}).Fill(f)
-	f.setRoot(root)
+	})
+
+	// do not fill features yet
+	if f.opt.SharedFiles {
+		f.setRoot(root)
+		if f.root == "" {
+			return f, nil
+		}
+		_, err := f.findSharedFile(f.root)
+		f.root = ""
+		if err == nil {
+			return f, fs.ErrorIsFile
+		}
+		return f, nil
+	}
+
+	if f.opt.SharedFolders {
+		f.setRoot(root)
+		if f.root == "" {
+			return f, nil // our root is empty so we probably want to list shared folders
+		}
+
+		dir := path.Dir(f.root)
+		if dir == "." {
+			dir = f.root
+		}
+
+		// root is not empty so we have to find the right shared folder if it exists
+		id, err := f.findSharedFolder(dir)
+		if err != nil {
+			// if we didn't find the specified shared folder we have to bail out here
+			return nil, err
+		}
+		// we found the specified shared folder so let's mount it
+		// this will add it to the user's normal root namespace and allows us
+		// to actually perform operations on it using the normal API endpoints
+		err = f.mountSharedFolder(id)
+		if err != nil {
+			switch e := err.(type) {
+			case sharing.MountFolderAPIError:
+				if e.EndpointError == nil || (e.EndpointError != nil && e.EndpointError.Tag != sharing.MountFolderErrorAlreadyMounted) {
+					return nil, err
+				}
+			default:
+				return nil, err
+			}
+			// if the mount failed we have to abort here
+		}
+		// if the mount succeeded it's now a normal folder in the user's root namespace
+		// we disable shared folder mode and proceed normally
+		f.opt.SharedFolders = false
+	}
+
+	f.features.Fill(ctx, f)

 	// If root starts with / then use the actual root
 	if strings.HasPrefix(root, "/") {
@@ -355,6 +447,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		}
 		fs.Debugf(f, "Using root namespace %q", f.ns)
 	}
+	f.setRoot(root)

 	// See if the root is actually an object
 	_, err = f.getFileMetadata(f.slashRoot)
@@ -465,9 +558,150 @@ func (f *Fs) newObjectWithInfo(remote string, info *files.FileMetadata) (fs.Obje
 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
 func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
+	if f.opt.SharedFiles {
+		return f.findSharedFile(remote)
+	}
 	return f.newObjectWithInfo(remote, nil)
 }
+
+// listSharedFolders lists all available shared folders, mounted and not mounted.
+// We'll need the id later so we have to return them in the original format.
+func (f *Fs) listSharedFolders() (entries fs.DirEntries, err error) {
+	started := false
+	var res *sharing.ListFoldersResult
+	for {
+		if !started {
+			arg := sharing.ListFoldersArgs{
+				Limit: 100,
+			}
+			err := f.pacer.Call(func() (bool, error) {
+				res, err = f.sharing.ListFolders(&arg)
+				return shouldRetry(err)
+			})
+			if err != nil {
+				return nil, err
+			}
+			started = true
+		} else {
+			arg := sharing.ListFoldersContinueArg{
+				Cursor: res.Cursor,
+			}
+			err := f.pacer.Call(func() (bool, error) {
+				res, err = f.sharing.ListFoldersContinue(&arg)
+				return shouldRetry(err)
+			})
+			if err != nil {
+				return nil, errors.Wrap(err, "list continue")
+			}
+		}
+		for _, entry := range res.Entries {
+			leaf := f.opt.Enc.ToStandardName(entry.Name)
+			d := fs.NewDir(leaf, time.Now()).SetID(entry.SharedFolderId)
+			entries = append(entries, d)
+			if err != nil {
+				return nil, err
+			}
+		}
+		if res.Cursor == "" {
+			break
+		}
+	}
+
+	return entries, nil
+}
+
+// findSharedFolder finds the id for a given shared folder name.
+// Somewhat annoyingly there is no endpoint to query a shared folder by its
+// name, so our only option is to iterate over all shared folders.
+func (f *Fs) findSharedFolder(name string) (id string, err error) {
+	entries, err := f.listSharedFolders()
+	if err != nil {
+		return "", err
+	}
+	for _, entry := range entries {
+		if entry.(*fs.Dir).Remote() == name {
+			return entry.(*fs.Dir).ID(), nil
+		}
+	}
+	return "", fs.ErrorDirNotFound
+}
+
+// mountSharedFolder mounts a shared folder to the root namespace
+func (f *Fs) mountSharedFolder(id string) error {
+	arg := sharing.MountFolderArg{
+		SharedFolderId: id,
+	}
+	err := f.pacer.Call(func() (bool, error) {
+		_, err := f.sharing.MountFolder(&arg)
+		return shouldRetry(err)
+	})
+	return err
+}
+
+// listReceivedFiles lists shared files the user has access to (note this means
+// individual files, not files contained in shared folders)
+func (f *Fs) listReceivedFiles() (entries fs.DirEntries, err error) {
+	started := false
+	var res *sharing.ListFilesResult
+	for {
+		if !started {
+			arg := sharing.ListFilesArg{
+				Limit: 100,
+			}
+			err := f.pacer.Call(func() (bool, error) {
+				res, err = f.sharing.ListReceivedFiles(&arg)
+				return shouldRetry(err)
+			})
+			if err != nil {
+				return nil, err
+			}
+			started = true
+		} else {
+			arg := sharing.ListFilesContinueArg{
+				Cursor: res.Cursor,
+			}
+			err := f.pacer.Call(func() (bool, error) {
+				res, err = f.sharing.ListReceivedFilesContinue(&arg)
+				return shouldRetry(err)
+			})
+			if err != nil {
+				return nil, errors.Wrap(err, "list continue")
+			}
+		}
+		for _, entry := range res.Entries {
+			fmt.Printf("%+v\n", entry)
+			entryPath := entry.Name
+			o := &Object{
+				fs:      f,
+				url:     entry.PreviewUrl,
+				remote:  entryPath,
+				modTime: entry.TimeInvited,
+			}
+			if err != nil {
+				return nil, err
+			}
+			entries = append(entries, o)
+		}
+		if res.Cursor == "" {
+			break
+		}
+	}
+	return entries, nil
+}
+
+func (f *Fs) findSharedFile(name string) (o *Object, err error) {
+	files, err := f.listReceivedFiles()
+	if err != nil {
+		return nil, err
+	}
+	for _, entry := range files {
+		if entry.(*Object).remote == name {
+			return entry.(*Object), nil
+		}
+	}
+	return nil, fs.ErrorObjectNotFound
+}
+
 // List the objects and directories in dir into entries. The
 // entries can be returned in any order but should be for a
 // complete directory.
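Both listing helpers above follow the same Dropbox SDK shape: one initial call, then continue-calls fed by the returned cursor until the cursor comes back empty. A generic standalone sketch of that loop (the page type and the first/next functions are stand-ins, not the SDK API):

package main

import "fmt"

// page is a stand-in for the SDK's result types: some entries plus a
// cursor that is empty once the listing is exhausted.
type page struct {
	entries []string
	cursor  string
}

// listAll drains a cursor-paginated listing: first() starts the listing,
// next(cursor) continues it, and an empty cursor ends the loop.
func listAll(first func() page, next func(string) page) (all []string) {
	p := first()
	for {
		all = append(all, p.entries...)
		if p.cursor == "" {
			return all
		}
		p = next(p.cursor)
	}
}

func main() {
	pages := []page{
		{entries: []string{"a", "b"}, cursor: "c1"},
		{entries: []string{"c"}, cursor: ""},
	}
	i := 0
	first := func() page { p := pages[i]; i++; return p }
	next := func(string) page { p := pages[i]; i++; return p }
	fmt.Println(listAll(first, next)) // [a b c]
}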
@@ -478,6 +712,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	if f.opt.SharedFiles {
+		return f.listReceivedFiles()
+	}
+	if f.opt.SharedFolders {
+		return f.listSharedFolders()
+	}
+
 	root := f.slashRoot
 	if dir != "" {
 		root += "/" + dir
@@ -541,7 +782,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		leaf := f.opt.Enc.ToStandardName(path.Base(entryPath))
 		remote := path.Join(dir, leaf)
 		if folderInfo != nil {
-			d := fs.NewDir(remote, time.Now())
+			d := fs.NewDir(remote, time.Now()).SetID(folderInfo.Id)
 			entries = append(entries, d)
 		} else if fileInfo != nil {
 			o, err := f.newObjectWithInfo(remote, fileInfo)
@@ -564,6 +805,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	if f.opt.SharedFiles || f.opt.SharedFolders {
+		return nil, errNotSupportedInSharedMode
+	}
 	// Temporary Object under construction
 	o := &Object{
 		fs: f,
@@ -579,6 +823,9 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt

 // Mkdir creates the container if it doesn't exist
 func (f *Fs) Mkdir(ctx context.Context, dir string) error {
+	if f.opt.SharedFiles || f.opt.SharedFolders {
+		return errNotSupportedInSharedMode
+	}
 	root := path.Join(f.slashRoot, dir)

 	// can't create or run metadata on root
@@ -598,6 +845,10 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 	arg2 := files.CreateFolderArg{
 		Path: f.opt.Enc.FromStandardPath(root),
 	}
+	// Don't attempt to create filenames that are too long
+	if cErr := checkPathLength(arg2.Path); cErr != nil {
+		return cErr
+	}
 	err = f.pacer.Call(func() (bool, error) {
 		_, err = f.srv.CreateFolderV2(&arg2)
 		return shouldRetry(err)
@@ -656,6 +907,9 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
 //
 // Returns an error if it isn't empty
 func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+	if f.opt.SharedFiles || f.opt.SharedFolders {
+		return errNotSupportedInSharedMode
+	}
 	return f.purgeCheck(ctx, dir, true)
 }

@@ -664,7 +918,7 @@ func (f *Fs) Precision() time.Duration {
 	return time.Second
 }

-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
 //
 // This is stored with the remote path given
 //
@@ -725,7 +979,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
 	return f.purgeCheck(ctx, dir, false)
 }

-// Move src to this remote using server side move operations.
+// Move src to this remote using server-side move operations.
 //
 // This is stored with the remote path given
 //
@@ -830,7 +1084,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 }

 // DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -927,8 +1181,16 @@ func (o *Object) Remote() string {
 	return o.remote
 }

+// ID returns the object id
+func (o *Object) ID() string {
+	return o.id
+}
+
 // Hash returns the dropbox special hash
 func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
+	if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
+		return "", errNotSupportedInSharedMode
+	}
 	if t != DbHashType {
 		return "", hash.ErrUnsupported
 	}
@@ -946,8 +1208,9 @@ func (o *Object) Size() int64 {

 // setMetadataFromEntry sets the fs data from a files.FileMetadata
 //
-// This isn't a complete set of metadata and has an inacurate date
+// This isn't a complete set of metadata and has an inaccurate date
 func (o *Object) setMetadataFromEntry(info *files.FileMetadata) error {
+	o.id = info.Id
 	o.bytes = int64(info.Size)
 	o.modTime = info.ClientModified
 	o.hash = info.ContentHash
@@ -1016,10 +1279,27 @@ func (o *Object) Storable() bool {

 // Open an object for read
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+	if o.fs.opt.SharedFiles {
+		if len(options) != 0 {
+			return nil, errors.New("OpenOptions not supported for shared files")
+		}
+		arg := sharing.GetSharedLinkMetadataArg{
+			Url: o.url,
+		}
+		err = o.fs.pacer.Call(func() (bool, error) {
+			_, in, err = o.fs.sharing.GetSharedLinkFile(&arg)
+			return shouldRetry(err)
+		})
+		if err != nil {
+			return nil, err
+		}
+		return
+	}
+
 	fs.FixRangeOption(options, o.bytes)
 	headers := fs.OpenOptionHeaders(options)
 	arg := files.DownloadArg{
-		Path:         o.fs.opt.Enc.FromStandardPath(o.remotePath()),
+		Path:         o.id,
 		ExtraHeaders: headers,
 	}
 	err = o.fs.pacer.Call(func() (bool, error) {
@@ -1147,12 +1427,40 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
 	return entry, nil
 }

+// checkPathLength checks all the parts of name to see they are below
+// maxFileNameLength runes.
+//
+// This checks the length as runes which isn't quite right as dropbox
+// seems to encode some symbols (eg ☺) as two "characters". This seems
+// like utf-16 except that ☺ doesn't need two characters in utf-16.
+//
+// Using runes instead of what dropbox is using will work for most
+// cases, and when it goes wrong we will upload something we should
+// have detected as too long which is the least damaging way to fail.
+func checkPathLength(name string) (err error) {
+	for next := ""; len(name) > 0; name = next {
+		if slash := strings.IndexRune(name, '/'); slash >= 0 {
+			name, next = name[:slash], name[slash+1:]
+		} else {
+			next = ""
+		}
+		length := utf8.RuneCountInString(name)
+		if length > maxFileNameLength {
+			return fserrors.NoRetryError(fs.ErrorFileNameTooLong)
+		}
+	}
+	return nil
+}
+
 // Update the already existing object
 //
 // Copy the reader into the object updating modTime and size
 //
 // The new object may have been created if an error is returned
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+	if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
+		return errNotSupportedInSharedMode
+	}
 	remote := o.remotePath()
 	if ignoredFiles.MatchString(remote) {
 		return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
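To see what the rune-per-path-segment rule above accepts, here is an equivalent standalone formulation using strings.Split; it mirrors checkPathLength's behaviour (and the table-driven test added further down) without rclone's error wrapping:

package main

import (
	"fmt"
	"strings"
	"unicode/utf8"
)

const maxFileNameLength = 255 // Dropbox's documented per-component limit

// tooLong reports whether any "/"-separated component of p exceeds the
// limit, counted in runes — the same approximation the patch above uses.
func tooLong(p string) bool {
	for _, part := range strings.Split(p, "/") {
		if utf8.RuneCountInString(part) > maxFileNameLength {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(tooLong("/ok/" + strings.Repeat("a", 255))) // false
	fmt.Println(tooLong("/ok/" + strings.Repeat("☺", 256))) // true
}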
@@ -1161,6 +1469,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	commitInfo.Mode.Tag = "overwrite"
 	// The Dropbox API only accepts timestamps in UTC with second precision.
 	commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second)
+	// Don't attempt to create filenames that are too long
+	if cErr := checkPathLength(commitInfo.Path); cErr != nil {
+		return cErr
+	}

 	size := src.Size()
 	var err error
@@ -1181,6 +1493,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

 // Remove an object
 func (o *Object) Remove(ctx context.Context) (err error) {
+	if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
+		return errNotSupportedInSharedMode
+	}
 	err = o.fs.pacer.Call(func() (bool, error) {
 		_, err = o.fs.srv.DeleteV2(&files.DeleteArg{
 			Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),
@@ -1201,4 +1516,5 @@ var (
 	_ fs.DirMover = (*Fs)(nil)
 	_ fs.Abouter  = (*Fs)(nil)
 	_ fs.Object   = (*Object)(nil)
+	_ fs.IDer     = (*Object)(nil)
 )

backend/dropbox/dropbox_internal_test.go (new file, 44 lines)
@@ -0,0 +1,44 @@
+package dropbox
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestInternalCheckPathLength(t *testing.T) {
+	rep := func(n int, r rune) (out string) {
+		rs := make([]rune, n)
+		for i := range rs {
+			rs[i] = r
+		}
+		return string(rs)
+	}
+	for _, test := range []struct {
+		in string
+		ok bool
+	}{
+		{in: "", ok: true},
+		{in: rep(maxFileNameLength, 'a'), ok: true},
+		{in: rep(maxFileNameLength+1, 'a'), ok: false},
+		{in: rep(maxFileNameLength, '£'), ok: true},
+		{in: rep(maxFileNameLength+1, '£'), ok: false},
+		{in: rep(maxFileNameLength, '☺'), ok: true},
+		{in: rep(maxFileNameLength+1, '☺'), ok: false},
+		{in: rep(maxFileNameLength, '你'), ok: true},
+		{in: rep(maxFileNameLength+1, '你'), ok: false},
+		{in: "/ok/ok", ok: true},
+		{in: "/ok/" + rep(maxFileNameLength, 'a') + "/ok", ok: true},
+		{in: "/ok/" + rep(maxFileNameLength+1, 'a') + "/ok", ok: false},
+		{in: "/ok/" + rep(maxFileNameLength, '£') + "/ok", ok: true},
+		{in: "/ok/" + rep(maxFileNameLength+1, '£') + "/ok", ok: false},
+		{in: "/ok/" + rep(maxFileNameLength, '☺') + "/ok", ok: true},
+		{in: "/ok/" + rep(maxFileNameLength+1, '☺') + "/ok", ok: false},
+		{in: "/ok/" + rep(maxFileNameLength, '你') + "/ok", ok: true},
+		{in: "/ok/" + rep(maxFileNameLength+1, '你') + "/ok", ok: false},
+	} {
+		err := checkPathLength(test.in)
+		assert.Equal(t, test.ok, err == nil, test.in)
+	}
+}

@@ -35,7 +35,7 @@ func init() {
 	fs.Register(&fs.RegInfo{
 		Name:        "fichier",
 		Description: "1Fichier",
-		Config: func(name string, config configmap.Mapper) {
+		Config: func(ctx context.Context, name string, config configmap.Mapper) {
 		},
 		NewFs: NewFs,
 		Options: []fs.Option{{
@@ -167,7 +167,7 @@ func (f *Fs) Features() *fs.Features {
 //
 // On Windows avoid single character remote names as they can be mixed
 // up with drive letters.
-func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
+func NewFs(ctx context.Context, name string, root string, config configmap.Mapper) (fs.Fs, error) {
 	opt := new(Options)
 	err := configstruct.Set(config, opt)
 	if err != nil {
@@ -186,16 +186,17 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
 		name: name,
 		root: root,
 		opt:  *opt,
-		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant), pacer.AttackConstant(attackConstant))),
+		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant), pacer.AttackConstant(attackConstant))),
 		baseClient: &http.Client{},
 	}

 	f.features = (&fs.Features{
 		DuplicateFiles:          true,
 		CanHaveEmptyDirectories: true,
-	}).Fill(f)
+		ReadMimeType:            true,
+	}).Fill(ctx, f)

-	client := fshttp.NewClient(fs.Config)
+	client := fshttp.NewClient(ctx)

 	f.rest = rest.NewClient(client).SetRoot(apiBaseURL)

@@ -203,8 +204,6 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {

 	f.dirCache = dircache.New(root, rootID, f)

-	ctx := context.Background()
-
 	// Find the current root
 	err = f.dirCache.FindRoot(ctx, false)
 	if err != nil {
@@ -227,7 +226,7 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
 		}
 		return nil, err
 	}
-	f.features.Fill(&tempF)
+	f.features.Fill(ctx, &tempF)
 	// XXX: update the old f here instead of returning tempF, since
 	// `features` were already filled with functions having *f as a receiver.
 	// See https://github.com/rclone/rclone/issues/2182
@@ -306,10 +305,10 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 // will return the object and the error, otherwise will return
 // nil and the error
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	exisitingObj, err := f.NewObject(ctx, src.Remote())
+	existingObj, err := f.NewObject(ctx, src.Remote())
 	switch err {
 	case nil:
-		return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
+		return existingObj, existingObj.Update(ctx, in, src, options...)
 	case fs.ErrorObjectNotFound:
 		// Not found so create it
 		return f.PutUnchecked(ctx, in, src, options...)

@@ -4,13 +4,11 @@ package fichier
 import (
 	"testing"

-	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fstest/fstests"
 )

 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
-	fs.Config.LogLevel = fs.LogLevelDebug
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: "TestFichier:",
 	})

backend/filefabric/api/types.go (new file, 391 lines)
@@ -0,0 +1,391 @@
+// Package api has type definitions for filefabric
+//
+// Converted from the API responses with help from https://mholt.github.io/json-to-go/
+package api
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strings"
+	"time"
+)
+
+const (
+	// TimeFormat for parameters (UTC)
+	timeFormatParameters = `2006-01-02 15:04:05`
+	// "2020-08-11 10:10:04" for JSON parsing
+	timeFormatJSON = `"` + timeFormatParameters + `"`
+)
+
+// Time represents date and time information for the
+// filefabric API
+type Time time.Time
+
+// MarshalJSON turns a Time into JSON (in UTC)
+func (t *Time) MarshalJSON() (out []byte, err error) {
+	timeString := (*time.Time)(t).UTC().Format(timeFormatJSON)
+	return []byte(timeString), nil
+}
+
+var zeroTime = []byte(`"0000-00-00 00:00:00"`)
+
+// UnmarshalJSON turns JSON into a Time (in UTC)
+func (t *Time) UnmarshalJSON(data []byte) error {
+	// Set a zero time.Time if we receive a zero time input
+	if bytes.Equal(data, zeroTime) {
+		*t = Time(time.Time{})
+		return nil
+	}
+	newT, err := time.Parse(timeFormatJSON, string(data))
+	if err != nil {
+		return err
+	}
+	*t = Time(newT)
+	return nil
+}
+
+// String turns a Time into a string in UTC suitable for the API
+// parameters
+func (t Time) String() string {
+	return time.Time(t).UTC().Format(timeFormatParameters)
+}
+
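The custom layout above exists because the filefabric API speaks `2006-01-02 15:04:05` rather than RFC 3339. A standalone sketch of the round trip, with the type and layout copied locally so it runs without the rclone tree:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Time and the layout mirror the filefabric api package above,
// reproduced here so this sketch is self-contained.
type Time time.Time

const timeFormatJSON = `"2006-01-02 15:04:05"`

func (t *Time) MarshalJSON() ([]byte, error) {
	return []byte((*time.Time)(t).UTC().Format(timeFormatJSON)), nil
}

func (t *Time) UnmarshalJSON(data []byte) error {
	newT, err := time.Parse(timeFormatJSON, string(data))
	if err != nil {
		return err
	}
	*t = Time(newT)
	return nil
}

func main() {
	var t Time
	if err := json.Unmarshal([]byte(`"2020-08-11 10:10:04"`), &t); err != nil {
		panic(err)
	}
	out, _ := json.Marshal(&t)
	fmt.Println(string(out)) // "2020-08-11 10:10:04"
}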
+// Status is the status returned in all status responses
+type Status struct {
+	Code    string `json:"status"`
+	Message string `json:"statusmessage"`
+	TaskID  string `json:"taskid"`
+	// Warning string `json:"warning"` // obsolete
+}
+
+// Error means Status satisfies the error interface
+func (e *Status) Error() string {
+	return fmt.Sprintf("%s (%s)", e.Message, e.Code)
+}
+
+// OK returns true if the status is all good
+func (e *Status) OK() bool {
+	return e.Code == "ok"
+}
+
+// GetCode returns the status code if any
+func (e *Status) GetCode() string {
+	return e.Code
+}
+
+// OKError defines an interface for items which can be OK or be an error
+type OKError interface {
+	error
+	OK() bool
+	GetCode() string
+}
+
+// Check Status satisfies the OKError interface
+var _ OKError = (*Status)(nil)
+
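Because every response type embeds Status, callers can test success uniformly and return the embedded struct itself as the error. A standalone sketch of that pattern, with trimmed local copies of the types so it runs on its own:

package main

import "fmt"

// Status mirrors the filefabric api.Status above (trimmed).
type Status struct {
	Code    string `json:"status"`
	Message string `json:"statusmessage"`
}

func (e *Status) Error() string { return fmt.Sprintf("%s (%s)", e.Message, e.Code) }
func (e *Status) OK() bool      { return e.Code == "ok" }

// EmptyResponse embeds Status, as the API types above do.
type EmptyResponse struct{ Status }

func main() {
	resp := EmptyResponse{Status{Code: "error", Message: "no such folder"}}
	if !resp.OK() {
		// the embedded Status doubles as the error value
		fmt.Println("request failed:", resp.Status.Error()) // no such folder (error)
	}
}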
+// EmptyResponse is a response which just returns the error condition
+type EmptyResponse struct {
+	Status
+}
+
+// GetTokenByAuthTokenResponse is the response to getTokenByAuthToken
+type GetTokenByAuthTokenResponse struct {
+	Status
+	Token              string `json:"token"`
+	UserID             string `json:"userid"`
+	AllowLoginRemember string `json:"allowloginremember"`
+	LastLogin          Time   `json:"lastlogin"`
+	AutoLoginCode      string `json:"autologincode"`
+}
+
+// ApplianceInfo is the response to getApplianceInfo
+type ApplianceInfo struct {
+	Status
+	Sitetitle            string `json:"sitetitle"`
+	OauthLoginSupport    string `json:"oauthloginsupport"`
+	IsAppliance          string `json:"isappliance"`
+	SoftwareVersion      string `json:"softwareversion"`
+	SoftwareVersionLabel string `json:"softwareversionlabel"`
+}
+
+// GetFolderContentsResponse is returned from getFolderContents
+type GetFolderContentsResponse struct {
+	Status
+	Total  int    `json:"total,string"`
+	Items  []Item `json:"filelist"`
+	Folder Item   `json:"folder"`
+	From   int    `json:"from,string"`
+	//Count int `json:"count"`
+	Pid           string `json:"pid"`
+	RefreshResult Status `json:"refreshresult"`
+	// Curfolder Item `json:"curfolder"` - sometimes returned as "ROOT"?
+	Parents           []Item            `json:"parents"`
+	CustomPermissions CustomPermissions `json:"custompermissions"`
+}
+
+// ItemType determines whether it is a file or a folder
+type ItemType uint8
+
+// Types of things in Item
+const (
+	ItemTypeFile   ItemType = 0
+	ItemTypeFolder ItemType = 1
+)
+
+// Item is a File or a Folder
+type Item struct {
+	ID  string `json:"fi_id"`
+	PID string `json:"fi_pid"`
+	// UID string `json:"fi_uid"`
+	Name string `json:"fi_name"`
+	// S3Name string `json:"fi_s3name"`
+	// Extension string `json:"fi_extension"`
+	// Description string `json:"fi_description"`
+	Type ItemType `json:"fi_type,string"`
+	// Created Time `json:"fi_created"`
+	Size        int64  `json:"fi_size,string"`
+	ContentType string `json:"fi_contenttype"`
+	// Tags string `json:"fi_tags"`
+	// MainCode string `json:"fi_maincode"`
+	// Public int `json:"fi_public,string"`
+	// Provider string `json:"fi_provider"`
+	// ProviderFolder string `json:"fi_providerfolder"` // folder
+	// Encrypted int `json:"fi_encrypted,string"`
+	// StructType string `json:"fi_structtype"`
+	// Bname string `json:"fi_bname"` // folder
+	// OrgID string `json:"fi_orgid"`
+	// Favorite int `json:"fi_favorite,string"`
+	// IspartOf string `json:"fi_ispartof"` // folder
+	Modified Time `json:"fi_modified"`
+	// LastAccessed Time `json:"fi_lastaccessed"`
+	// Hits int64 `json:"fi_hits,string"`
+	// IP string `json:"fi_ip"` // folder
+	// BigDescription string `json:"fi_bigdescription"`
+	LocalTime Time `json:"fi_localtime"`
+	// OrgfolderID string `json:"fi_orgfolderid"`
+	// StorageIP string `json:"fi_storageip"` // folder
+	// RemoteTime Time `json:"fi_remotetime"`
+	// ProviderOptions string `json:"fi_provideroptions"`
+	// Access string `json:"fi_access"`
+	// Hidden string `json:"fi_hidden"` // folder
+	// VersionOf string `json:"fi_versionof"`
+	Trash bool `json:"trash"`
+	// Isbucket string `json:"isbucket"` // filelist
+	SubFolders int64 `json:"subfolders"` // folder
+}
+
+// ItemFields is a | separated list of fields in Item
+var ItemFields = mustFields(Item{})
+
+// fields returns the JSON fields in use by opt as a | separated
+// string.
+func fields(opt interface{}) (pipeTags string, err error) {
+	var tags []string
+	def := reflect.ValueOf(opt)
+	defType := def.Type()
+	for i := 0; i < def.NumField(); i++ {
+		field := defType.Field(i)
+		tag, ok := field.Tag.Lookup("json")
+		if !ok {
+			continue
+		}
+		if comma := strings.IndexRune(tag, ','); comma >= 0 {
+			tag = tag[:comma]
+		}
+		if tag == "" {
+			continue
+		}
+		tags = append(tags, tag)
+	}
+	return strings.Join(tags, "|"), nil
+}
+
+// mustFields returns the JSON fields in use by opt as a | separated
+// string. It panics on failure.
+func mustFields(opt interface{}) string {
+	tags, err := fields(opt)
+	if err != nil {
+		panic(err)
+	}
+	return tags
+}
+
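A quick check of what the reflection helper produces for a struct like Item. The mini struct below is a trimmed stand-in, and fields is reproduced locally (with the error return dropped for brevity) so the sketch runs on its own:

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// fields is a local copy of the helper above: collect the json tag of
// each struct field, strip any ",string" style options, and join with "|".
func fields(opt interface{}) string {
	var tags []string
	def := reflect.ValueOf(opt)
	defType := def.Type()
	for i := 0; i < def.NumField(); i++ {
		tag, ok := defType.Field(i).Tag.Lookup("json")
		if !ok {
			continue
		}
		if comma := strings.IndexRune(tag, ','); comma >= 0 {
			tag = tag[:comma]
		}
		if tag != "" {
			tags = append(tags, tag)
		}
	}
	return strings.Join(tags, "|")
}

type mini struct {
	ID   string `json:"fi_id"`
	Name string `json:"fi_name"`
	Size int64  `json:"fi_size,string"`
}

func main() {
	fmt.Println(fields(mini{})) // fi_id|fi_name|fi_size
}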
+// CustomPermissions is returned as part of GetFolderContentsResponse
+type CustomPermissions struct {
+	Upload            string `json:"upload"`
+	CreateSubFolder   string `json:"createsubfolder"`
+	Rename            string `json:"rename"`
+	Delete            string `json:"delete"`
+	Move              string `json:"move"`
+	ManagePermissions string `json:"managepermissions"`
+	ListOnly          string `json:"listonly"`
+	VisibleInTrash    string `json:"visibleintrash"`
+}
+
+// DoCreateNewFolderResponse is the response from doCreateNewFolder
+type DoCreateNewFolderResponse struct {
+	Status
+	Item Item `json:"file"`
+}
+
+// DoInitUploadResponse is the response from doInitUpload
+type DoInitUploadResponse struct {
+	Status
+	ProviderID          string `json:"providerid"`
+	UploadCode          string `json:"uploadcode"`
+	FileType            string `json:"filetype"`
+	DirectUploadSupport string `json:"directuploadsupport"`
+	ResumeAllowed       string `json:"resumeallowed"`
+}
+
+// UploaderResponse is returned from /cgi-bin/uploader/uploader1.cgi
+//
+// Sometimes the response is returned as XML and sometimes as JSON
+type UploaderResponse struct {
+	FileSize int64  `xml:"filesize" json:"filesize,string"`
+	MD5      string `xml:"md5" json:"md5"`
+	Success  string `xml:"success" json:"success"`
+}
+
+// UploadStatus is returned from getUploadStatus
+type UploadStatus struct {
+	Status
+	UploadCode     string `json:"uploadcode"`
+	Metafile       string `json:"metafile"`
+	Percent        int    `json:"percent,string"`
+	Uploaded       int64  `json:"uploaded,string"`
+	Size           int64  `json:"size,string"`
+	Filename       string `json:"filename"`
+	Nofile         string `json:"nofile"`
+	Completed      string `json:"completed"`
+	Completsuccess string `json:"completsuccess"`
+	Completerror   string `json:"completerror"`
+}
+
+// DoCompleteUploadResponse is the response to doCompleteUpload
+type DoCompleteUploadResponse struct {
+	Status
+	UploadedSize int64  `json:"uploadedsize,string"`
+	StorageIP    string `json:"storageip"`
+	UploadedName string `json:"uploadedname"`
+	// Versioned []interface{} `json:"versioned"`
+	// VersionedID int `json:"versionedid"`
+	// Comment interface{} `json:"comment"`
+	File Item `json:"file"`
+	// UsSize string `json:"us_size"`
+	// PaSize string `json:"pa_size"`
+	// SpaceInfo SpaceInfo `json:"spaceinfo"`
+}
+
+// Providers is returned as part of UploadResponse
+type Providers struct {
+	Max     string `json:"max"`
+	Used    string `json:"used"`
+	ID      string `json:"id"`
+	Private string `json:"private"`
+	Limit   string `json:"limit"`
+	Percent int    `json:"percent"`
+}
+
+// Total is returned as part of UploadResponse
+type Total struct {
+	Max        string `json:"max"`
+	Used       string `json:"used"`
+	ID         string `json:"id"`
+	Priused    string `json:"priused"`
+	Primax     string `json:"primax"`
+	Limit      string `json:"limit"`
+	Percent    int    `json:"percent"`
+	Pripercent int    `json:"pripercent"`
+}
+
+// UploadResponse is returned as part of SpaceInfo
+type UploadResponse struct {
+	Providers []Providers `json:"providers"`
+	Total     Total       `json:"total"`
+}
+
+// SpaceInfo is returned as part of DoCompleteUploadResponse
+type SpaceInfo struct {
+	Response UploadResponse `json:"response"`
+	Status   string         `json:"status"`
+}
+
+// DeleteResponse is returned from doDeleteFile
+type DeleteResponse struct {
+	Status
+	Deleted        []string      `json:"deleted"`
+	Errors         []interface{} `json:"errors"`
+	ID             string        `json:"fi_id"`
+	BackgroundTask int           `json:"backgroundtask"`
+	UsSize         string        `json:"us_size"`
+	PaSize         string        `json:"pa_size"`
+	//SpaceInfo SpaceInfo `json:"spaceinfo"`
+}
+
+// FileResponse is returned from doRenameFile
+type FileResponse struct {
+	Status
+	Item   Item   `json:"file"`
+	Exists string `json:"exists"`
+}
+
+// MoveFilesResponse is returned from doMoveFiles
+type MoveFilesResponse struct {
+	Status
+	Filesleft         string   `json:"filesleft"`
+	Addedtobackground string   `json:"addedtobackground"`
+	Moved             string   `json:"moved"`
+	Item              Item     `json:"file"`
+	IDs               []string `json:"fi_ids"`
+	Length            int      `json:"length"`
+	DirID             string   `json:"dir_id"`
+	MovedObjects      []Item   `json:"movedobjects"`
+	// FolderTasks []interface{} `json:"foldertasks"`
+}
+
+// TasksResponse is the response to getUserBackgroundTasks
+type TasksResponse struct {
+	Status
+	Tasks []Task `json:"tasks"`
+	Total string `json:"total"`
+}
+
+// BtData is part of TasksResponse
+type BtData struct {
+	Callback string `json:"callback"`
+}
+
+// Task describes a task returned in TasksResponse
+type Task struct {
+	BtID         string `json:"bt_id"`
+	UsID         string `json:"us_id"`
+	BtType       string `json:"bt_type"`
+	BtData       BtData `json:"bt_data"`
+	BtStatustext string `json:"bt_statustext"`
+	BtStatusdata string `json:"bt_statusdata"`
+	BtMessage    string `json:"bt_message"`
+	BtProcent    string `json:"bt_procent"`
+	BtAdded      string `json:"bt_added"`
+	BtStatus     string `json:"bt_status"`
+	BtCompleted  string `json:"bt_completed"`
||||||
|
BtCompleted string `json:"bt_completed"`
|
||||||
|
BtTitle string `json:"bt_title"`
|
||||||
|
BtCredentials string `json:"bt_credentials"`
|
||||||
|
BtHidden string `json:"bt_hidden"`
|
||||||
|
BtAutoremove string `json:"bt_autoremove"`
|
||||||
|
BtDevsite string `json:"bt_devsite"`
|
||||||
|
BtPriority string `json:"bt_priority"`
|
||||||
|
BtReport string `json:"bt_report"`
|
||||||
|
BtSitemarker string `json:"bt_sitemarker"`
|
||||||
|
BtExecuteafter string `json:"bt_executeafter"`
|
||||||
|
BtCompletestatus string `json:"bt_completestatus"`
|
||||||
|
BtSubtype string `json:"bt_subtype"`
|
||||||
|
BtCanceled string `json:"bt_canceled"`
|
||||||
|
Callback string `json:"callback"`
|
||||||
|
CanBeCanceled bool `json:"canbecanceled"`
|
||||||
|
CanBeRestarted bool `json:"canberestarted"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
Status string `json:"status"`
|
||||||
|
Settings string `json:"settings"`
|
||||||
|
}
|
||||||
1349	backend/filefabric/filefabric.go (new file)
	File diff suppressed because it is too large.

17	backend/filefabric/filefabric_test.go (new file)
@@ -0,0 +1,17 @@
// Test filefabric filesystem interface
package filefabric_test

import (
	"testing"

	"github.com/rclone/rclone/backend/filefabric"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestFileFabric:",
		NilObject:  (*filefabric.Object)(nil),
	})
}
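Like the other rclone backends, the integration test above is normally driven by the fstests framework against a configured remote. Assuming a TestFileFabric: remote exists in your config, the usual invocation (rclone's standard -remote test flag) is:

go test -v -remote TestFileFabric:

run from the backend/filefabric directory.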
@@ -6,7 +6,6 @@ import (
 	"crypto/tls"
 	"io"
 	"net/textproto"
-	"os"
 	"path"
 	"runtime"
 	"strings"
@@ -16,16 +15,22 @@ import (
 	"github.com/jlaffaye/ftp"
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/config/obscure"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/lib/encoder"
+	"github.com/rclone/rclone/lib/env"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/readers"
 )
 
+var (
+	currentUser = env.CurrentUser()
+)
+
 // Register with Fs
 func init() {
 	fs.Register(&fs.RegInfo{
@@ -42,7 +47,7 @@ func init() {
 		}},
 	}, {
 		Name: "user",
-		Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
+		Help: "FTP username, leave blank for current username, " + currentUser,
 	}, {
 		Name: "port",
 		Help: "FTP port, leave blank to use default (21)",
@@ -53,16 +58,16 @@ func init() {
 		Required: true,
 	}, {
 		Name: "tls",
-		Help: `Use FTPS over TLS (Implicit)
-When using implicit FTP over TLS the client will connect using TLS
-right from the start, which in turn breaks the compatibility with
+		Help: `Use Implicit FTPS (FTP over TLS)
+When using implicit FTP over TLS the client connects using TLS
+right from the start which breaks compatibility with
 non-TLS-aware servers. This is usually served over port 990 rather
 than port 21. Cannot be used in combination with explicit FTP.`,
 		Default: false,
 	}, {
 		Name: "explicit_tls",
-		Help: `Use FTP over TLS (Explicit)
-When using explicit FTP over TLS the client explicitly request
+		Help: `Use Explicit FTPS (FTP over TLS)
+When using explicit FTP over TLS the client explicitly requests
 security from the server in order to upgrade a plain text connection
 to an encrypted one. Cannot be used in combination with implicit FTP.`,
 		Default: false,
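To make the implicit/explicit distinction in these help strings concrete, here is a minimal sketch outside rclone and outside the ftp library: implicit FTPS speaks TLS from the very first byte (typically port 990), while explicit FTPS dials in plain text (port 21) and upgrades after an AUTH TLS command. Server replies are elided; a real client must read and check them:

package main

import (
	"crypto/tls"
	"fmt"
	"net"
)

func main() {
	cfg := &tls.Config{ServerName: "ftp.example.com"}

	// Implicit FTPS: the TLS handshake starts immediately (port 990),
	// which is why non-TLS-aware servers cannot speak it.
	if c, err := tls.Dial("tcp", "ftp.example.com:990", cfg); err == nil {
		c.Close()
	}

	// Explicit FTPS: connect in plain text (port 21), ask to upgrade,
	// then wrap the same connection in TLS.
	c, err := net.Dial("tcp", "ftp.example.com:21")
	if err != nil {
		return
	}
	fmt.Fprintf(c, "AUTH TLS\r\n") // the server must reply 234 before the handshake
	tc := tls.Client(c, cfg)
	defer tc.Close()
	_ = tc.Handshake()
}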
@@ -81,6 +86,11 @@ to an encrypted one. Cannot be used in combination with implicit FTP.`,
 		Help:     "Disable using EPSV even if server advertises support",
 		Default:  false,
 		Advanced: true,
+	}, {
+		Name:     "disable_mlsd",
+		Help:     "Disable using MLSD even if server advertises support",
+		Default:  false,
+		Advanced: true,
 	}, {
 		Name: config.ConfigEncoding,
 		Help: config.ConfigEncodingHelp,
@@ -107,15 +117,17 @@ type Options struct {
 	Concurrency       int                  `config:"concurrency"`
 	SkipVerifyTLSCert bool                 `config:"no_check_certificate"`
 	DisableEPSV       bool                 `config:"disable_epsv"`
+	DisableMLSD       bool                 `config:"disable_mlsd"`
 	Enc               encoder.MultiEncoder `config:"encoding"`
 }
 
 // Fs represents a remote FTP server
 type Fs struct {
 	name     string       // name of this remote
 	root     string       // the path we are working on if any
 	opt      Options      // parsed options
-	features *fs.Features // optional features
+	ci       *fs.ConfigInfo // global config
+	features *fs.Features   // optional features
 	url      string
 	user     string
 	pass     string
@@ -200,9 +212,9 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
 }
 
 // Open a new connection to the FTP server.
-func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
+func (f *Fs) ftpConnection(ctx context.Context) (*ftp.ServerConn, error) {
 	fs.Debugf(f, "Connecting to FTP server")
-	ftpConfig := []ftp.DialOption{ftp.DialWithTimeout(fs.Config.ConnectTimeout)}
+	ftpConfig := []ftp.DialOption{ftp.DialWithTimeout(f.ci.ConnectTimeout)}
 	if f.opt.TLS && f.opt.ExplicitTLS {
 		fs.Errorf(f, "Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
 		return nil, errors.New("Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
@@ -222,8 +234,11 @@ func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
 	if f.opt.DisableEPSV {
 		ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
 	}
-	if fs.Config.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
-		ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: fs.Config.Dump&fs.DumpAuth != 0}))
+	if f.opt.DisableMLSD {
+		ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true))
+	}
+	if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
+		ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
 	}
 	c, err := ftp.Dial(f.dialAddr, ftpConfig...)
 	if err != nil {
@@ -240,10 +255,11 @@ func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
 }
 
 // Get an FTP connection from the pool, or open a new one
-func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
+func (f *Fs) getFtpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
 	if f.opt.Concurrency > 0 {
 		f.tokens.Get()
 	}
+	accounting.LimitTPS(ctx)
 	f.poolMu.Lock()
 	if len(f.pool) > 0 {
 		c = f.pool[0]
@@ -253,7 +269,7 @@ func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
 	if c != nil {
 		return c, nil
 	}
-	c, err = f.ftpConnection()
+	c, err = f.ftpConnection(ctx)
 	if err != nil && f.opt.Concurrency > 0 {
 		f.tokens.Put()
 	}
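The getFtpConnection changes above combine three ideas: a token pool bounding concurrency, a global transactions-per-second limit (accounting.LimitTPS), and reuse of pooled connections. A simplified sketch of the same shape, with a buffered channel as the token pool and time.Tick standing in for rclone's TPS limiter:

package main

import (
	"sync"
	"time"
)

type conn struct{} // stand-in for *ftp.ServerConn

type pool struct {
	mu     sync.Mutex
	idle   []*conn
	tokens chan struct{} // bounds concurrent connections
	tick   <-chan time.Time
}

func newPool(concurrency int, tps float64) *pool {
	p := &pool{tokens: make(chan struct{}, concurrency)}
	if tps > 0 {
		p.tick = time.Tick(time.Duration(float64(time.Second) / tps))
	}
	return p
}

func (p *pool) get() *conn {
	p.tokens <- struct{}{} // acquire a concurrency token (blocks at the limit)
	if p.tick != nil {
		<-p.tick // crude TPS limit: at most one acquisition per tick
	}
	p.mu.Lock()
	defer p.mu.Unlock()
	if n := len(p.idle); n > 0 {
		c := p.idle[n-1]
		p.idle = p.idle[:n-1]
		return c
	}
	return &conn{} // a real implementation would dial here
}

func (p *pool) put(c *conn) {
	p.mu.Lock()
	p.idle = append(p.idle, c)
	p.mu.Unlock()
	<-p.tokens // release the token
}

func main() {
	p := newPool(2, 10)
	c := p.get()
	p.put(c)
}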
@@ -296,8 +312,7 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
 }
 
 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
-	ctx := context.Background()
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
 	// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
 	// Parse config into Options struct
 	opt := new(Options)
@@ -311,7 +326,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
 	}
 	user := opt.User
 	if user == "" {
-		user = os.Getenv("USER")
+		user = currentUser
 	}
 	port := opt.Port
 	if port == "" {
@@ -324,10 +339,12 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
 		protocol = "ftps://"
 	}
 	u := protocol + path.Join(dialAddr+"/", root)
+	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name: name,
 		root: root,
 		opt:  *opt,
+		ci:   ci,
 		url:  u,
 		user: user,
 		pass: pass,
@@ -336,9 +353,9 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
 	}
 	f.features = (&fs.Features{
 		CanHaveEmptyDirectories: true,
-	}).Fill(f)
+	}).Fill(ctx, f)
 	// Make a connection and pool it to return errors early
-	c, err := f.getFtpConnection()
+	c, err := f.getFtpConnection(ctx)
 	if err != nil {
 		return nil, errors.Wrap(err, "NewFs")
 	}
@@ -409,7 +426,7 @@ func (f *Fs) dirFromStandardPath(dir string) string {
 }
 
 // findItem finds a directory entry for the name in its parent directory
-func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
+func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err error) {
 	// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
 	fullPath := path.Join(f.root, remote)
 	if fullPath == "" || fullPath == "." || fullPath == "/" {
@@ -423,7 +440,7 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
 	dir := path.Dir(fullPath)
 	base := path.Base(fullPath)
 
-	c, err := f.getFtpConnection()
+	c, err := f.getFtpConnection(ctx)
 	if err != nil {
 		return nil, errors.Wrap(err, "findItem")
 	}
@@ -445,7 +462,7 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
 // it returns the error fs.ErrorObjectNotFound.
 func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
 	// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
-	entry, err := f.findItem(remote)
+	entry, err := f.findItem(ctx, remote)
 	if err != nil {
 		return nil, err
 	}
@@ -467,8 +484,8 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
 }
 
 // dirExists checks the directory pointed to by remote exists or not
-func (f *Fs) dirExists(remote string) (exists bool, err error) {
-	entry, err := f.findItem(remote)
+func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) {
+	entry, err := f.findItem(ctx, remote)
 	if err != nil {
 		return false, errors.Wrap(err, "dirExists")
 	}
@@ -489,7 +506,7 @@ func (f *Fs) dirExists(remote string) (exists bool, err error) {
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 	// defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
-	c, err := f.getFtpConnection()
+	c, err := f.getFtpConnection(ctx)
 	if err != nil {
 		return nil, errors.Wrap(err, "list")
 	}
@@ -510,7 +527,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	}()
 
 	// Wait for List for up to Timeout seconds
-	timer := time.NewTimer(fs.Config.Timeout)
+	timer := time.NewTimer(f.ci.Timeout)
 	select {
 	case listErr = <-errchan:
 		timer.Stop()
@@ -527,7 +544,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	// doesn't exist, so check it really doesn't exist if no
 	// entries found.
 	if len(files) == 0 {
-		exists, err := f.dirExists(dir)
+		exists, err := f.dirExists(ctx, dir)
 		if err != nil {
 			return nil, errors.Wrap(err, "list")
 		}
@@ -580,7 +597,7 @@ func (f *Fs) Precision() time.Duration {
 // nil and the error
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	// fs.Debugf(f, "Trying to put file %s", src.Remote())
-	err := f.mkParentDir(src.Remote())
+	err := f.mkParentDir(ctx, src.Remote())
 	if err != nil {
 		return nil, errors.Wrap(err, "Put mkParentDir failed")
 	}
@@ -598,12 +615,12 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
 }
 
 // getInfo reads the FileInfo for a path
-func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
+func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err error) {
 	// defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err)
 	dir := path.Dir(remote)
 	base := path.Base(remote)
 
-	c, err := f.getFtpConnection()
+	c, err := f.getFtpConnection(ctx)
 	if err != nil {
 		return nil, errors.Wrap(err, "getInfo")
 	}
@@ -630,12 +647,12 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
 }
 
 // mkdir makes the directory and parents using unrooted paths
-func (f *Fs) mkdir(abspath string) error {
+func (f *Fs) mkdir(ctx context.Context, abspath string) error {
 	abspath = path.Clean(abspath)
 	if abspath == "." || abspath == "/" {
 		return nil
 	}
-	fi, err := f.getInfo(abspath)
+	fi, err := f.getInfo(ctx, abspath)
 	if err == nil {
 		if fi.IsDir {
 			return nil
@@ -645,11 +662,11 @@ func (f *Fs) mkdir(abspath string) error {
 		return errors.Wrapf(err, "mkdir %q failed", abspath)
 	}
 	parent := path.Dir(abspath)
-	err = f.mkdir(parent)
+	err = f.mkdir(ctx, parent)
 	if err != nil {
 		return err
 	}
-	c, connErr := f.getFtpConnection()
+	c, connErr := f.getFtpConnection(ctx)
 	if connErr != nil {
 		return errors.Wrap(connErr, "mkdir")
 	}
@@ -669,23 +686,23 @@ func (f *Fs) mkdir(abspath string) error {
 
 // mkParentDir makes the parent of remote if necessary and any
 // directories above that
-func (f *Fs) mkParentDir(remote string) error {
+func (f *Fs) mkParentDir(ctx context.Context, remote string) error {
 	parent := path.Dir(remote)
-	return f.mkdir(path.Join(f.root, parent))
+	return f.mkdir(ctx, path.Join(f.root, parent))
 }
 
 // Mkdir creates the directory if it doesn't exist
 func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
 	// defer fs.Trace(dir, "")("err=%v", &err)
 	root := path.Join(f.root, dir)
-	return f.mkdir(root)
+	return f.mkdir(ctx, root)
 }
 
 // Rmdir removes the directory (container, bucket) if empty
 //
 // Return an error if it doesn't exist or isn't empty
 func (f *Fs) Rmdir(ctx context.Context, dir string) error {
-	c, err := f.getFtpConnection()
+	c, err := f.getFtpConnection(ctx)
 	if err != nil {
 		return errors.Wrap(translateErrorFile(err), "Rmdir")
 	}
@@ -701,11 +718,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		fs.Debugf(src, "Can't move - not same remote type")
 		return nil, fs.ErrorCantMove
 	}
-	err := f.mkParentDir(remote)
+	err := f.mkParentDir(ctx, remote)
 	if err != nil {
 		return nil, errors.Wrap(err, "Move mkParentDir failed")
 	}
-	c, err := f.getFtpConnection()
+	c, err := f.getFtpConnection(ctx)
 	if err != nil {
 		return nil, errors.Wrap(err, "Move")
 	}
@@ -725,7 +742,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 }
 
 // DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -742,7 +759,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	dstPath := path.Join(f.root, dstRemote)
 
 	// Check if destination exists
-	fi, err := f.getInfo(dstPath)
+	fi, err := f.getInfo(ctx, dstPath)
 	if err == nil {
 		if fi.IsDir {
 			return fs.ErrorDirExists
@@ -753,13 +770,13 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	}
 
 	// Make sure the parent directory exists
-	err = f.mkdir(path.Dir(dstPath))
+	err = f.mkdir(ctx, path.Dir(dstPath))
 	if err != nil {
 		return errors.Wrap(err, "DirMove mkParentDir dst failed")
 	}
 
 	// Do the move
-	c, err := f.getFtpConnection()
+	c, err := f.getFtpConnection(ctx)
 	if err != nil {
 		return errors.Wrap(err, "DirMove")
 	}
@@ -891,7 +908,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
 			}
 		}
 	}
-	c, err := o.fs.getFtpConnection()
+	c, err := o.fs.getFtpConnection(ctx)
 	if err != nil {
 		return nil, errors.Wrap(err, "open")
 	}
@@ -926,7 +943,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			fs.Debugf(o, "Removed after failed upload: %v", err)
 		}
 	}
-	c, err := o.fs.getFtpConnection()
+	c, err := o.fs.getFtpConnection(ctx)
 	if err != nil {
 		return errors.Wrap(err, "Update")
 	}
@@ -938,7 +955,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return errors.Wrap(err, "update stor")
 	}
 	o.fs.putFtpConnection(&c, nil)
-	o.info, err = o.fs.getInfo(path)
+	o.info, err = o.fs.getInfo(ctx, path)
 	if err != nil {
 		return errors.Wrap(err, "update getinfo")
 	}
@@ -950,14 +967,14 @@ func (o *Object) Remove(ctx context.Context) (err error) {
 	// defer fs.Trace(o, "")("err=%v", &err)
 	path := path.Join(o.fs.root, o.remote)
 	// Check if it's a directory or a file
-	info, err := o.fs.getInfo(path)
+	info, err := o.fs.getInfo(ctx, path)
 	if err != nil {
 		return err
 	}
 	if info.IsDir {
 		err = o.fs.Rmdir(ctx, o.remote)
 	} else {
-		c, err := o.fs.getFtpConnection()
+		c, err := o.fs.getFtpConnection(ctx)
 		if err != nil {
 			return errors.Wrap(err, "Remove")
 		}
@@ -76,14 +76,14 @@ func init() {
 		Prefix:      "gcs",
 		Description: "Google Cloud Storage (this is not Google Drive)",
 		NewFs:       NewFs,
-		Config: func(name string, m configmap.Mapper) {
+		Config: func(ctx context.Context, name string, m configmap.Mapper) {
 			saFile, _ := m.Get("service_account_file")
 			saCreds, _ := m.Get("service_account_credentials")
 			anonymous, _ := m.Get("anonymous")
 			if saFile != "" || saCreds != "" || anonymous == "true" {
 				return
 			}
-			err := oauthutil.Config("google cloud storage", name, m, storageConfig, nil)
+			err := oauthutil.Config(ctx, "google cloud storage", name, m, storageConfig, nil)
 			if err != nil {
 				log.Fatalf("Failed to configure token: %v", err)
 			}
@@ -370,12 +370,12 @@ func (o *Object) split() (bucket, bucketPath string) {
 	return o.fs.split(o.remote)
 }
 
-func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
+func getServiceAccountClient(ctx context.Context, credentialsData []byte) (*http.Client, error) {
 	conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
 	if err != nil {
 		return nil, errors.Wrap(err, "error processing credentials")
 	}
-	ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
+	ctxWithSpecialClient := oauthutil.Context(ctx, fshttp.NewClient(ctx))
 	return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
 }
 
@@ -386,8 +386,7 @@ func (f *Fs) setRoot(root string) {
 }
 
 // NewFs constructs an Fs from the path, bucket:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-	ctx := context.TODO()
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	var oAuthClient *http.Client
 
 	// Parse config into Options struct
@@ -412,14 +411,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		opt.ServiceAccountCredentials = string(loadedCreds)
 	}
 	if opt.Anonymous {
-		oAuthClient = &http.Client{}
+		oAuthClient = fshttp.NewClient(ctx)
 	} else if opt.ServiceAccountCredentials != "" {
-		oAuthClient, err = getServiceAccountClient([]byte(opt.ServiceAccountCredentials))
+		oAuthClient, err = getServiceAccountClient(ctx, []byte(opt.ServiceAccountCredentials))
 		if err != nil {
 			return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
 		}
 	} else {
-		oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
+		oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
 		if err != nil {
 			ctx := context.Background()
 			oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
@@ -433,7 +432,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		name:  name,
 		root:  root,
 		opt:   *opt,
-		pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
+		pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
 		cache: bucket.NewCache(),
 	}
 	f.setRoot(root)
@@ -442,7 +441,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		WriteMimeType:     true,
 		BucketBased:       true,
 		BucketBasedRootOK: true,
-	}).Fill(f)
+	}).Fill(ctx, f)
 
 	// Create a new authorized Drive client.
 	f.client = oAuthClient
@@ -565,7 +564,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 			remote = path.Join(bucket, remote)
 		}
 		// is this a directory marker?
-		if isDirectory && object.Size == 0 {
+		if isDirectory {
 			continue // skip directory marker
 		}
 		err = fn(remote, object, false)
@@ -813,7 +812,7 @@ func (f *Fs) Precision() time.Duration {
 	return time.Nanosecond
 }
 
-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
 //
 // This is stored with the remote path given
 //
@@ -1029,11 +1028,10 @@ func (o *Object) Storable() bool {
 
 // Open an object for read
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
-	req, err := http.NewRequest("GET", o.url, nil)
+	req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil)
 	if err != nil {
 		return nil, err
 	}
-	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
 	fs.FixRangeOption(options, o.bytes)
 	fs.OpenOptionAddHTTPHeaders(req.Header, options)
 	var res *http.Response
@@ -1092,6 +1090,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			object.ContentLanguage = value
 		case "content-type":
 			object.ContentType = value
+		case "x-goog-storage-class":
+			object.StorageClass = value
 		default:
 			const googMetaPrefix = "x-goog-meta-"
 			if strings.HasPrefix(lowerKey, googMetaPrefix) {
@@ -78,7 +78,7 @@ func init() {
 		Prefix:      "gphotos",
 		Description: "Google Photos",
 		NewFs:       NewFs,
-		Config: func(name string, m configmap.Mapper) {
+		Config: func(ctx context.Context, name string, m configmap.Mapper) {
 			// Parse config into Options struct
 			opt := new(Options)
 			err := configstruct.Set(m, opt)
@@ -95,7 +95,7 @@ func init() {
 			}
 
 			// Do the oauth
-			err = oauthutil.Config("google photos", name, m, oauthConfig, nil)
+			err = oauthutil.Config(ctx, "google photos", name, m, oauthConfig, nil)
 			if err != nil {
 				golog.Fatalf("Failed to configure token: %v", err)
 			}
@@ -132,15 +132,33 @@ you want to read the media.`,
 			Default:  2000,
 			Help:     `Year limits the photos to be downloaded to those which are uploaded after the given year`,
 			Advanced: true,
+		}, {
+			Name:    "include_archived",
+			Default: false,
+			Help: `Also view and download archived media.
+
+By default rclone does not request archived media. Thus, when syncing,
+archived media is not visible in directory listings or transferred.
+
+Note that media in albums is always visible and synced, no matter
+their archive status.
+
+With this flag, archived media are always visible in directory
+listings and transferred.
+
+Without this flag, archived media will not be visible in directory
+listings and won't be transferred.`,
+			Advanced: true,
 		}}...),
 	})
 }
 
 // Options defines the configuration for this backend
 type Options struct {
 	ReadOnly  bool `config:"read_only"`
 	ReadSize  bool `config:"read_size"`
 	StartYear int  `config:"start_year"`
+	IncludeArchived bool `config:"include_archived"`
 }
 
 // Fs represents a remote storage server
@@ -206,6 +224,10 @@ func (f *Fs) startYear() int {
 	return f.opt.StartYear
 }
 
+func (f *Fs) includeArchived() bool {
+	return f.opt.IncludeArchived
+}
+
 // retryErrorCodes is a slice of error codes that we will retry
 var retryErrorCodes = []int{
 	429, // Too Many Requests.
@@ -246,7 +268,7 @@ func errorHandler(resp *http.Response) error {
 }
 
 // NewFs constructs an Fs from the path, bucket:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
@@ -254,8 +276,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		return nil, err
 	}
 
-	baseClient := fshttp.NewClient(fs.Config)
-	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
+	baseClient := fshttp.NewClient(ctx)
+	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to configure Box")
 	}
@@ -272,14 +294,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		unAuth:    rest.NewClient(baseClient),
 		srv:       rest.NewClient(oAuthClient).SetRoot(rootURL),
 		ts:        ts,
-		pacer:     fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
+		pacer:     fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
 		startTime: time.Now(),
 		albums:    map[bool]*albums{},
 		uploaded:  dirtree.New(),
 	}
 	f.features = (&fs.Features{
 		ReadMimeType: true,
-	}).Fill(f)
+	}).Fill(ctx, f)
 	f.srv.SetErrorHandler(errorHandler)
 
 	_, _, pattern := patterns.match(f.root, "", true)
@@ -288,7 +310,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		var leaf string
 		f.root, leaf = path.Split(f.root)
 		f.root = strings.TrimRight(f.root, "/")
-		_, err := f.NewObject(context.TODO(), leaf)
+		_, err := f.NewObject(ctx, leaf)
 		if err == nil {
 			return f, fs.ErrorIsFile
 		}
@@ -497,6 +519,12 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
 	}
 	filter.PageSize = listChunks
 	filter.PageToken = ""
+	if filter.AlbumID == "" { // album ID and filters cannot be set together, else error 400 INVALID_ARGUMENT
+		if filter.Filters == nil {
+			filter.Filters = &api.Filters{}
+		}
+		filter.Filters.IncludeArchivedMedia = &f.opt.IncludeArchived
+	}
 	lastID := ""
 	for {
 		var result api.MediaItems
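The filter hunk above reflects the Google Photos mediaItems.search contract: a request may carry an albumId or filters, but not both, and archived items are only returned when includeArchivedMedia is set inside filters. A sketch of how the request body comes out, using illustrative stand-ins for rclone's api types:

package main

import (
	"encoding/json"
	"fmt"
)

// Minimal mirrors of the search request shapes — illustrative only,
// not rclone's api package.
type Filters struct {
	IncludeArchivedMedia *bool `json:"includeArchivedMedia,omitempty"`
}

type SearchFilter struct {
	AlbumID  string   `json:"albumId,omitempty"`
	PageSize int      `json:"pageSize,omitempty"`
	Filters  *Filters `json:"filters,omitempty"`
}

func main() {
	include := true
	req := SearchFilter{PageSize: 100}
	// As in the hunk above: only attach filters when no album ID is set,
	// because the API rejects requests carrying both (400 INVALID_ARGUMENT).
	if req.AlbumID == "" {
		req.Filters = &Filters{IncludeArchivedMedia: &include}
	}
	b, _ := json.Marshal(req)
	fmt.Println(string(b)) // {"pageSize":100,"filters":{"includeArchivedMedia":true}}
}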
@@ -35,14 +35,14 @@ func TestIntegration(t *testing.T) {
 	if *fstest.RemoteName == "" {
 		*fstest.RemoteName = "TestGooglePhotos:"
 	}
-	f, err := fs.NewFs(*fstest.RemoteName)
+	f, err := fs.NewFs(ctx, *fstest.RemoteName)
 	if err == fs.ErrorNotFoundInConfigFile {
 		t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err))
 	}
 	require.NoError(t, err)
 
 	// Create local Fs pointing at testfiles
-	localFs, err := fs.NewFs("testfiles")
+	localFs, err := fs.NewFs(ctx, "testfiles")
 	require.NoError(t, err)
 
 	t.Run("CreateAlbum", func(t *testing.T) {
@@ -115,7 +115,7 @@ func TestIntegration(t *testing.T) {
 		assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
 	})
 
-	// Check it is there in the date/month/year heirachy
+	// Check it is there in the date/month/year hierarchy
 	// 2013-07-13 is the creation date of the folder
 	checkPresent := func(t *testing.T, objPath string) {
 		entries, err := f.List(ctx, objPath)
@@ -155,7 +155,7 @@ func TestIntegration(t *testing.T) {
 	})
 
 	t.Run("NewFsIsFile", func(t *testing.T) {
-		fNew, err := fs.NewFs(*fstest.RemoteName + remote)
+		fNew, err := fs.NewFs(ctx, *fstest.RemoteName+remote)
 		assert.Equal(t, fs.ErrorIsFile, err)
 		leaf := path.Base(remote)
 		o, err := fNew.NewObject(ctx, leaf)
@@ -24,6 +24,7 @@ type lister interface {
 	listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
 	dirTime() time.Time
 	startYear() int
+	includeArchived() bool
 }
 
 // dirPattern describes a single directory pattern
@@ -64,6 +64,11 @@ func (f *testLister) startYear() int {
 	return 2000
 }
 
+// mock includeArchived for testing
+func (f *testLister) includeArchived() bool {
+	return false
+}
+
 func TestPatternMatch(t *testing.T) {
 	for testNumber, test := range []struct {
 		// input
320
backend/hdfs/fs.go
Normal file
320
backend/hdfs/fs.go
Normal file
@@ -0,0 +1,320 @@
|
|||||||
|
// +build !plan9
|
||||||
|
|
||||||
|
package hdfs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"os/user"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/colinmarc/hdfs/v2"
|
||||||
|
krb "github.com/jcmturner/gokrb5/v8/client"
|
||||||
|
"github.com/jcmturner/gokrb5/v8/config"
|
||||||
|
"github.com/jcmturner/gokrb5/v8/credentials"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
|
"github.com/rclone/rclone/fs/hash"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Fs represents a HDFS server
|
||||||
|
type Fs struct {
|
||||||
|
name string
|
||||||
|
root string
|
||||||
|
features *fs.Features // optional features
|
||||||
|
opt Options // options for this backend
|
||||||
|
ci *fs.ConfigInfo // global config
|
||||||
|
client *hdfs.Client
|
||||||
|
}
|
||||||
|
|
||||||
|
// copy-paste from https://github.com/colinmarc/hdfs/blob/master/cmd/hdfs/kerberos.go
|
||||||
|
func getKerberosClient() (*krb.Client, error) {
|
||||||
|
configPath := os.Getenv("KRB5_CONFIG")
|
||||||
|
if configPath == "" {
|
||||||
|
configPath = "/etc/krb5.conf"
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg, err := config.Load(configPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine the ccache location from the environment, falling back to the
|
||||||
|
// default location.
|
||||||
|
ccachePath := os.Getenv("KRB5CCNAME")
|
||||||
|
if strings.Contains(ccachePath, ":") {
|
||||||
|
if strings.HasPrefix(ccachePath, "FILE:") {
|
||||||
|
ccachePath = strings.SplitN(ccachePath, ":", 2)[1]
|
||||||
|
} else {
|
||||||
|
return nil, fmt.Errorf("unusable ccache: %s", ccachePath)
|
||||||
|
}
|
||||||
|
} else if ccachePath == "" {
|
||||||
|
u, err := user.Current()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ccachePath = fmt.Sprintf("/tmp/krb5cc_%s", u.Uid)
|
||||||
|
}
|
||||||
|
|
||||||
|
ccache, err := credentials.LoadCCache(ccachePath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
client, err := krb.NewFromCCache(ccache, cfg)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return client, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFs constructs an Fs from the path
|
||||||
|
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
|
opt := new(Options)
|
||||||
|
err := configstruct.Set(m, opt)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
options := hdfs.ClientOptions{
|
||||||
|
Addresses: []string{opt.Namenode},
|
||||||
|
UseDatanodeHostname: false,
|
||||||
|
}
|
||||||
|
|
||||||
|
if opt.ServicePrincipalName != "" {
|
||||||
|
options.KerberosClient, err = getKerberosClient()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Problem with kerberos authentication: %s", err)
|
||||||
|
}
|
||||||
|
options.KerberosServicePrincipleName = opt.ServicePrincipalName
|
||||||
|
|
||||||
|
if opt.DataTransferProtection != "" {
|
||||||
|
options.DataTransferProtection = opt.DataTransferProtection
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
options.User = opt.Username
|
||||||
|
}
|
||||||
|
|
||||||
|
client, err := hdfs.NewClient(options)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
f := &Fs{
|
||||||
|
name: name,
|
||||||
|
root: root,
|
||||||
|
opt: *opt,
|
||||||
|
ci: fs.GetConfig(ctx),
|
||||||
|
client: client,
|
||||||
|
}
|
||||||
|
|
||||||
|
f.features = (&fs.Features{
|
||||||
|
CanHaveEmptyDirectories: true,
|
||||||
|
}).Fill(ctx, f)
|
||||||
|
|
||||||
|
info, err := f.client.Stat(f.realpath(""))
|
||||||
|
if err == nil && !info.IsDir() {
|
||||||
|
f.root = path.Dir(f.root)
|
||||||
|
return f, fs.ErrorIsFile
|
||||||
|
}
|
||||||
|
|
||||||
|
return f, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name of this fs
|
||||||
|
func (f *Fs) Name() string {
|
||||||
|
return f.name
|
||||||
|
}
|
||||||
|
|
||||||
|
// Root of the remote (as passed into NewFs)
|
||||||
|
func (f *Fs) Root() string {
|
||||||
|
return f.root
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns a description of the FS
|
||||||
|
func (f *Fs) String() string {
|
||||||
|
return fmt.Sprintf("hdfs://%s", f.opt.Namenode)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Features returns the optional features of this Fs
|
||||||
|
func (f *Fs) Features() *fs.Features {
|
||||||
|
return f.features
|
||||||
|
}
|
||||||
|
|
||||||
|
// Precision return the precision of this Fs
|
||||||
|
func (f *Fs) Precision() time.Duration {
|
||||||
|
return time.Second
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hashes are not supported
|
||||||
|
func (f *Fs) Hashes() hash.Set {
|
||||||
|
return hash.Set(hash.None)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewObject finds file at remote or return fs.ErrorObjectNotFound
|
||||||
|
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||||
|
realpath := f.realpath(remote)
|
||||||
|
fs.Debugf(f, "new [%s]", realpath)
|
||||||
|
|
||||||
|
info, err := f.ensureFile(realpath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
	return &Object{
		fs:      f,
		remote:  remote,
		size:    info.Size(),
		modTime: info.ModTime(),
	}, nil
}

// List the objects and directories in dir into entries.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	realpath := f.realpath(dir)
	fs.Debugf(f, "list [%s]", realpath)

	err = f.ensureDirectory(realpath)
	if err != nil {
		return nil, err
	}

	list, err := f.client.ReadDir(realpath)
	if err != nil {
		return nil, err
	}
	for _, x := range list {
		stdName := f.opt.Enc.ToStandardName(x.Name())
		remote := path.Join(dir, stdName)
		if x.IsDir() {
			entries = append(entries, fs.NewDir(remote, x.ModTime()))
		} else {
			entries = append(entries, &Object{
				fs:      f,
				remote:  remote,
				size:    x.Size(),
				modTime: x.ModTime()})
		}
	}
	return entries, nil
}

// Put the object
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: src.Remote(),
	}
	err := o.Update(ctx, in, src, options...)
	return o, err
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}

// Mkdir makes a directory
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	fs.Debugf(f, "mkdir [%s]", f.realpath(dir))
	return f.client.MkdirAll(f.realpath(dir), 0755)
}

// Rmdir deletes the directory
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	realpath := f.realpath(dir)
	fs.Debugf(f, "rmdir [%s]", realpath)

	err := f.ensureDirectory(realpath)
	if err != nil {
		return err
	}

	// do not remove the directory if it is not empty
	list, err := f.client.ReadDir(realpath)
	if err != nil {
		return err
	}
	if len(list) > 0 {
		return fs.ErrorDirectoryNotEmpty
	}

	return f.client.Remove(realpath)
}

// Purge deletes all the files in the directory
func (f *Fs) Purge(ctx context.Context, dir string) error {
	realpath := f.realpath(dir)
	fs.Debugf(f, "purge [%s]", realpath)

	err := f.ensureDirectory(realpath)
	if err != nil {
		return err
	}

	return f.client.RemoveAll(realpath)
}

// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	info, err := f.client.StatFs()
	if err != nil {
		return nil, err
	}
	return &fs.Usage{
		Total: fs.NewUsageValue(int64(info.Capacity)),
		Used:  fs.NewUsageValue(int64(info.Used)),
		Free:  fs.NewUsageValue(int64(info.Remaining)),
	}, nil
}

func (f *Fs) ensureDirectory(realpath string) error {
	info, err := f.client.Stat(realpath)

	if e, ok := err.(*os.PathError); ok && e.Err == os.ErrNotExist {
		return fs.ErrorDirNotFound
	}
	if err != nil {
		return err
	}
	if !info.IsDir() {
		return fs.ErrorDirNotFound
	}

	return nil
}

func (f *Fs) ensureFile(realpath string) (os.FileInfo, error) {
	info, err := f.client.Stat(realpath)

	if e, ok := err.(*os.PathError); ok && e.Err == os.ErrNotExist {
		return nil, fs.ErrorObjectNotFound
	}
	if err != nil {
		return nil, err
	}
	if info.IsDir() {
		return nil, fs.ErrorObjectNotFound
	}

	return info, nil
}

func (f *Fs) realpath(dir string) string {
	return f.opt.Enc.FromStandardPath(xPath(f.Root(), dir))
}

// Check the interfaces are satisfied
var (
	_ fs.Fs          = (*Fs)(nil)
	_ fs.Purger      = (*Fs)(nil)
	_ fs.PutStreamer = (*Fs)(nil)
	_ fs.Abouter     = (*Fs)(nil)
)
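Note: the Rmdir/Purge pair above follows rclone's usual contract: Rmdir refuses to delete a directory that still has entries, while Purge removes the whole tree via the client's RemoveAll. A minimal sketch of how a caller might combine them (the directory name is hypothetical):

	// Sketch only: "olddata" is a hypothetical directory on the remote.
	if err := f.Rmdir(ctx, "olddata"); err == fs.ErrorDirectoryNotEmpty {
		// Purge deletes the directory and everything under it.
		err = f.Purge(ctx, "olddata")
	}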
86 backend/hdfs/hdfs.go Normal file
@@ -0,0 +1,86 @@
// +build !plan9

package hdfs

import (
	"path"
	"strings"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/lib/encoder"
)

func init() {
	fsi := &fs.RegInfo{
		Name:        "hdfs",
		Description: "Hadoop distributed file system",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name:     "namenode",
			Help:     "Hadoop name node and port",
			Required: true,
			Examples: []fs.OptionExample{{
				Value: "namenode:8020",
				Help:  "Connect to host namenode at port 8020",
			}},
		}, {
			Name:     "username",
			Help:     "Hadoop user name",
			Required: false,
			Examples: []fs.OptionExample{{
				Value: "root",
				Help:  "Connect to hdfs as root",
			}},
		}, {
			Name: "service_principal_name",
			Help: `Kerberos service principal name for the namenode

Enables KERBEROS authentication. Specifies the Service Principal Name
(<SERVICE>/<FQDN>) for the namenode.`,
			Required: false,
			Examples: []fs.OptionExample{{
				Value: "hdfs/namenode.hadoop.docker",
				Help:  "Namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.",
			}},
			Advanced: true,
		}, {
			Name: "data_transfer_protection",
			Help: `Kerberos data transfer protection: authentication|integrity|privacy

Specifies whether or not authentication, data signature integrity
checks, and wire encryption are required when communicating with the
datanodes. Possible values are 'authentication', 'integrity' and
'privacy'. Used only with KERBEROS enabled.`,
			Required: false,
			Examples: []fs.OptionExample{{
				Value: "privacy",
				Help:  "Ensure authentication, integrity and encryption enabled.",
			}},
			Advanced: true,
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			Default:  (encoder.Display | encoder.EncodeInvalidUtf8 | encoder.EncodeColon),
		}},
	}
	fs.Register(fsi)
}

// Options for this backend
type Options struct {
	Namenode               string               `config:"namenode"`
	Username               string               `config:"username"`
	ServicePrincipalName   string               `config:"service_principal_name"`
	DataTransferProtection string               `config:"data_transfer_protection"`
	Enc                    encoder.MultiEncoder `config:"encoding"`
}

// xPath makes a correct file path with a leading '/'
func xPath(root string, tail string) string {
	if !strings.HasPrefix(root, "/") {
		root = "/" + root
	}
	return path.Join(root, tail)
}
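Note: xPath only normalises the root before joining, so relative and absolute roots behave the same. A few illustrative values (a sketch, not part of the file):

	xPath("", "dir/file.txt") // "/dir/file.txt"
	xPath("data", "file.txt") // "/data/file.txt"
	xPath("/data/", "a/b")    // "/data/a/b" (path.Join cleans the extra slash)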
20 backend/hdfs/hdfs_test.go Normal file
@@ -0,0 +1,20 @@
// Test HDFS filesystem interface

// +build !plan9

package hdfs_test

import (
	"testing"

	"github.com/rclone/rclone/backend/hdfs"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestHdfs:",
		NilObject:  (*hdfs.Object)(nil),
	})
}
6 backend/hdfs/hdfs_unsupported.go Normal file
@@ -0,0 +1,6 @@
// Build for hdfs for unsupported platforms to stop go complaining
// about "no buildable Go source files"

// +build plan9

package hdfs
177 backend/hdfs/object.go Normal file
@@ -0,0 +1,177 @@
// +build !plan9

package hdfs

import (
	"context"
	"io"
	"path"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/readers"
)

// Object describes an HDFS file
type Object struct {
	fs      *Fs
	remote  string
	size    int64
	modTime time.Time
}

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.size
}

// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.modTime
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	realpath := o.fs.realpath(o.Remote())
	err := o.fs.client.Chtimes(realpath, modTime, modTime)
	if err != nil {
		return err
	}
	o.modTime = modTime
	return nil
}

// Storable returns whether this object is storable
func (o *Object) Storable() bool {
	return true
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.Remote()
}

// Hash is not supported
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
	return "", hash.ErrUnsupported
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	realpath := o.realpath()
	fs.Debugf(o.fs, "open [%s]", realpath)
	f, err := o.fs.client.Open(realpath)
	if err != nil {
		return nil, err
	}

	var offset, limit int64 = 0, -1
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
			offset = x.Offset
		case *fs.RangeOption:
			offset, limit = x.Decode(o.Size())
		}
	}

	_, err = f.Seek(offset, io.SeekStart)
	if err != nil {
		return nil, err
	}

	if limit != -1 {
		in = readers.NewLimitedReadCloser(f, limit)
	} else {
		in = f
	}

	return in, err
}

// Update object
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	realpath := o.fs.realpath(src.Remote())
	dirname := path.Dir(realpath)
	fs.Debugf(o.fs, "update [%s]", realpath)

	err := o.fs.client.MkdirAll(dirname, 0755)
	if err != nil {
		return err
	}

	info, err := o.fs.client.Stat(realpath)
	if err == nil {
		err = o.fs.client.Remove(realpath)
		if err != nil {
			return err
		}
	}

	out, err := o.fs.client.Create(realpath)
	if err != nil {
		return err
	}

	cleanup := func() {
		rerr := o.fs.client.Remove(realpath)
		if rerr != nil {
			fs.Errorf(o.fs, "failed to remove [%v]: %v", realpath, rerr)
		}
	}

	_, err = io.Copy(out, in)
	if err != nil {
		cleanup()
		return err
	}

	err = out.Close()
	if err != nil {
		cleanup()
		return err
	}

	info, err = o.fs.client.Stat(realpath)
	if err != nil {
		return err
	}

	err = o.SetModTime(ctx, src.ModTime(ctx))
	if err != nil {
		return err
	}
	o.size = info.Size()

	return nil
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	realpath := o.fs.realpath(o.remote)
	fs.Debugf(o.fs, "remove [%s]", realpath)
	return o.fs.client.Remove(realpath)
}

func (o *Object) realpath() string {
	return o.fs.opt.Enc.FromStandardPath(xPath(o.Fs().Root(), o.remote))
}

// Check the interfaces are satisfied
var (
	_ fs.Object = (*Object)(nil)
)
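Note: in Open above, a SeekOption only sets the offset, while a RangeOption can set both offset and limit, which is why the limited reader is needed. A hedged usage sketch (the 100-199 byte window is illustrative):

	// Read bytes 100-199 of the object: Decode yields offset=100, limit=100,
	// so the returned reader gets wrapped in a LimitedReadCloser.
	rc, err := o.Open(ctx, &fs.RangeOption{Start: 100, End: 199})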
@@ -58,7 +58,7 @@ The input format is comma separated list of key,value pairs. Standard
 
 For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
 
-You can set multiple headers, eg '"Cookie","name=value","Authorization","xxx"'.
+You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
 `,
 			Default:  fs.CommaSepList{},
 			Advanced: true,
@@ -115,8 +115,9 @@ type Options struct {
 type Fs struct {
 	name        string
 	root        string
 	features    *fs.Features // optional features
 	opt         Options      // options for this backend
+	ci          *fs.ConfigInfo // global config
 	endpoint    *url.URL
 	endpointURL string // endpoint as a string
 	httpClient  *http.Client
@@ -145,8 +146,7 @@ func statusError(res *http.Response, err error) error {
 
 // NewFs creates a new Fs object from the name and root. It connects to
 // the host specified in the config file.
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-	ctx := context.TODO()
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
@@ -172,7 +172,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		return nil, err
 	}
 
-	client := fshttp.NewClient(fs.Config)
+	client := fshttp.NewClient(ctx)
 
 	var isFile = false
 	if !strings.HasSuffix(u.String(), "/") {
@@ -183,9 +183,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		return http.ErrUseLastResponse
 	}
 	// check to see if points to a file
-	req, err := http.NewRequest("HEAD", u.String(), nil)
+	req, err := http.NewRequestWithContext(ctx, "HEAD", u.String(), nil)
 	if err == nil {
-		req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
 		addHeaders(req, opt)
 		res, err := noRedir.Do(req)
 		err = statusError(res, err)
@@ -210,17 +209,19 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		return nil, err
 	}
 
+	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name:        name,
 		root:        root,
 		opt:         *opt,
+		ci:          ci,
 		httpClient:  client,
 		endpoint:    u,
 		endpointURL: u.String(),
 	}
 	f.features = (&fs.Features{
 		CanHaveEmptyDirectories: true,
-	}).Fill(f)
+	}).Fill(ctx, f)
 	if isFile {
 		return f, fs.ErrorIsFile
 	}
@@ -389,11 +390,10 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
 		return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
 	}
 	// Do the request
-	req, err := http.NewRequest("GET", URL, nil)
+	req, err := http.NewRequestWithContext(ctx, "GET", URL, nil)
 	if err != nil {
 		return nil, errors.Wrap(err, "readDir failed")
 	}
-	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
 	f.addHeaders(req)
 	res, err := f.httpClient.Do(req)
 	if err == nil {
@@ -440,14 +440,15 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	var (
 		entriesMu sync.Mutex // to protect entries
 		wg        sync.WaitGroup
-		in        = make(chan string, fs.Config.Checkers)
+		checkers  = f.ci.Checkers
+		in        = make(chan string, checkers)
 	)
 	add := func(entry fs.DirEntry) {
 		entriesMu.Lock()
 		entries = append(entries, entry)
 		entriesMu.Unlock()
 	}
-	for i := 0; i < fs.Config.Checkers; i++ {
+	for i := 0; i < checkers; i++ {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
@@ -544,11 +545,10 @@ func (o *Object) stat(ctx context.Context) error {
 		return nil
 	}
 	url := o.url()
-	req, err := http.NewRequest("HEAD", url, nil)
+	req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
 	if err != nil {
 		return errors.Wrap(err, "stat failed")
 	}
-	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
 	o.fs.addHeaders(req)
 	res, err := o.fs.httpClient.Do(req)
 	if err == nil && res.StatusCode == http.StatusNotFound {
@@ -585,7 +585,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	return errorReadOnly
 }
 
-// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc)
+// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
 func (o *Object) Storable() bool {
 	return true
 }
@@ -593,11 +593,10 @@ func (o *Object) Storable() bool {
 // Open a remote http file object for reading. Seek is supported
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	url := o.url()
-	req, err := http.NewRequest("GET", url, nil)
+	req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
 	if err != nil {
 		return nil, errors.Wrap(err, "Open failed")
 	}
-	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
 
 	// Add optional headers
 	for k, v := range fs.OpenOptionHeaders(options) {
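Note: the http backend changes above all follow one pattern: since rclone can now rely on go1.13+, the standard library's http.NewRequestWithContext replaces the two-step NewRequest plus req.WithContext. The equivalence, as a sketch:

	// Before (go1.12 compatible):
	req, err := http.NewRequest("GET", url, nil)
	if err == nil {
		req = req.WithContext(ctx)
	}
	// After: one call builds the same request, honouring ctx cancellation.
	req, err = http.NewRequestWithContext(ctx, "GET", url, nil)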
@@ -47,7 +47,7 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
 	ts := httptest.NewServer(handler)
 
 	// Configure the remote
-	config.LoadConfig()
+	config.LoadConfig(context.Background())
 	// fs.Config.LogLevel = fs.LogLevelDebug
 	// fs.Config.DumpHeaders = true
 	// fs.Config.DumpBodies = true
@@ -69,7 +69,7 @@ func prepare(t *testing.T) (fs.Fs, func()) {
 	m, tidy := prepareServer(t)
 
 	// Instantiate it
-	f, err := NewFs(remoteName, "", m)
+	f, err := NewFs(context.Background(), remoteName, "", m)
 	require.NoError(t, err)
 
 	return f, tidy
@@ -214,7 +214,7 @@ func TestIsAFileRoot(t *testing.T) {
 	m, tidy := prepareServer(t)
 	defer tidy()
 
-	f, err := NewFs(remoteName, "one%.txt", m)
+	f, err := NewFs(context.Background(), remoteName, "one%.txt", m)
 	assert.Equal(t, err, fs.ErrorIsFile)
 
 	testListRoot(t, f, false)
@@ -224,7 +224,7 @@ func TestIsAFileSubDir(t *testing.T) {
 	m, tidy := prepareServer(t)
 	defer tidy()
 
-	f, err := NewFs(remoteName, "three/underthree.txt", m)
+	f, err := NewFs(context.Background(), remoteName, "three/underthree.txt", m)
 	assert.Equal(t, err, fs.ErrorIsFile)
 
 	entries, err := f.List(context.Background(), "")
@@ -5,7 +5,7 @@ import (
 	"net/http"
 	"time"
 
-	"github.com/ncw/swift"
+	"github.com/ncw/swift/v2"
 	"github.com/rclone/rclone/fs"
 )
 
@@ -24,7 +24,7 @@ func newAuth(f *Fs) *auth {
 // Request constructs an http.Request for authentication
 //
 // returns nil for not needed
-func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
+func (a *auth) Request(ctx context.Context, c *swift.Connection) (r *http.Request, err error) {
 	const retries = 10
 	for try := 1; try <= retries; try++ {
 		err = a.f.getCredentials(context.TODO())
@@ -38,7 +38,7 @@ func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
 }
 
 // Response parses the result of an http request
-func (a *auth) Response(resp *http.Response) error {
+func (a *auth) Response(ctx context.Context, resp *http.Response) error {
 	return nil
 }
 
@@ -4,7 +4,7 @@ package hubic
 
 // This uses the normal swift mechanism to update the credentials and
 // ignores the expires field returned by the Hubic API. This may need
-// to be revisted after some actual experience.
+// to be revisited after some actual experience.
 
 import (
 	"context"
@@ -16,7 +16,7 @@ import (
 	"strings"
 	"time"
 
-	swiftLib "github.com/ncw/swift"
+	swiftLib "github.com/ncw/swift/v2"
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/swift"
 	"github.com/rclone/rclone/fs"
@@ -56,8 +56,8 @@ func init() {
 		Name:        "hubic",
 		Description: "Hubic",
 		NewFs:       NewFs,
-		Config: func(name string, m configmap.Mapper) {
-			err := oauthutil.Config("hubic", name, m, oauthConfig, nil)
+		Config: func(ctx context.Context, name string, m configmap.Mapper) {
+			err := oauthutil.Config(ctx, "hubic", name, m, oauthConfig, nil)
 			if err != nil {
 				log.Fatalf("Failed to configure token: %v", err)
 			}
@@ -71,7 +71,7 @@ func init() {
 type credentials struct {
 	Token    string `json:"token"`    // OpenStack token
 	Endpoint string `json:"endpoint"` // OpenStack endpoint
-	Expires  string `json:"expires"`  // Expires date - eg "2015-11-09T14:24:56+01:00"
+	Expires  string `json:"expires"`  // Expires date - e.g. "2015-11-09T14:24:56+01:00"
 }
 
 // Fs represents a remote hubic
@@ -110,11 +110,10 @@ func (f *Fs) String() string {
 //
 // The credentials are read into the Fs
 func (f *Fs) getCredentials(ctx context.Context) (err error) {
-	req, err := http.NewRequest("GET", "https://api.hubic.com/1.0/account/credentials", nil)
+	req, err := http.NewRequestWithContext(ctx, "GET", "https://api.hubic.com/1.0/account/credentials", nil)
 	if err != nil {
 		return err
 	}
-	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
 	resp, err := f.client.Do(req)
 	if err != nil {
 		return err
@@ -146,8 +145,8 @@ func (f *Fs) getCredentials(ctx context.Context) (err error) {
 }
 
 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-	client, _, err := oauthutil.NewClient(name, m, oauthConfig)
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
+	client, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to configure Hubic")
 	}
@@ -157,13 +156,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	}
 
 	// Make the swift Connection
+	ci := fs.GetConfig(ctx)
 	c := &swiftLib.Connection{
 		Auth:           newAuth(f),
-		ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
-		Timeout:        10 * fs.Config.Timeout,        // Use the timeouts in the transport
-		Transport:      fshttp.NewTransport(fs.Config),
+		ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
+		Timeout:        10 * ci.Timeout,        // Use the timeouts in the transport
+		Transport:      fshttp.NewTransport(ctx),
 	}
-	err = c.Authenticate()
+	err = c.Authenticate(ctx)
 	if err != nil {
 		return nil, errors.Wrap(err, "error authenticating swift connection")
 	}
@@ -176,7 +176,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	}
 
 	// Make inner swift Fs from the connection
-	swiftFs, err := swift.NewFsWithConnection(opt, name, root, c, true)
+	swiftFs, err := swift.NewFsWithConnection(ctx, opt, name, root, c, true)
 	if err != nil && err != fs.ErrorIsFile {
 		return nil, err
 	}
@@ -153,9 +153,9 @@ type CustomerInfo struct {
 	AccountType      string      `json:"account_type"`
 	SubscriptionType string      `json:"subscription_type"`
 	Usage            int64       `json:"usage"`
-	Qouta            int64       `json:"quota"`
+	Quota            int64       `json:"quota"`
 	BusinessUsage    int64       `json:"business_usage"`
-	BusinessQouta    int64       `json:"business_quota"`
+	BusinessQuota    int64       `json:"business_quota"`
 	WriteLocked      bool        `json:"write_locked"`
 	ReadLocked       bool        `json:"read_locked"`
 	LockedCause      interface{} `json:"locked_cause"`
@@ -386,7 +386,7 @@ type Error struct {
 	Cause      string `xml:"cause"`
 }
 
-// Error returns a string for the error and statistifes the error interface
+// Error returns a string for the error and satisfies the error interface
 func (e *Error) Error() string {
 	out := fmt.Sprintf("error %d", e.StatusCode)
 	if e.Message != "" {
@@ -63,6 +63,10 @@ const (
 	v1ClientID              = "nibfk8biu12ju7hpqomr8b1e40"
 	v1EncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
 	v1configVersion         = 0
+
+	teliaCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
+	teliaCloudAuthURL  = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
+	teliaCloudClientID = "desktop"
 )
 
 var (
@@ -83,9 +87,7 @@ func init() {
 		Name:        "jottacloud",
 		Description: "Jottacloud",
 		NewFs:       NewFs,
-		Config: func(name string, m configmap.Mapper) {
-			ctx := context.TODO()
-
+		Config: func(ctx context.Context, name string, m configmap.Mapper) {
 			refresh := false
 			if version, ok := m.Get("configVersion"); ok {
 				ver, err := strconv.Atoi(version)
@@ -107,11 +109,18 @@ func init() {
 				}
 			}
 
-			fmt.Printf("Use legacy authentification?.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n")
-			if config.Confirm(false) {
-				v1config(ctx, name, m)
-			} else {
+			fmt.Printf("Choose authentication type:\n" +
+				"1: Standard authentication - use this if you're a normal Jottacloud user.\n" +
+				"2: Legacy authentication - this is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n" +
+				"3: Telia Cloud authentication - use this if you are using Telia Cloud.\n")
+
+			switch config.ChooseNumber("Your choice", 1, 3) {
+			case 1:
 				v2config(ctx, name, m)
+			case 2:
+				v1config(ctx, name, m)
+			case 3:
+				teliaCloudConfig(ctx, name, m)
 			}
 		},
 		Options: []fs.Option{{
@@ -230,9 +239,49 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
 	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }
 
-// v1config configure a jottacloud backend using legacy authentification
+func teliaCloudConfig(ctx context.Context, name string, m configmap.Mapper) {
+	teliaCloudOauthConfig := &oauth2.Config{
+		Endpoint: oauth2.Endpoint{
+			AuthURL:  teliaCloudAuthURL,
+			TokenURL: teliaCloudTokenURL,
+		},
+		ClientID:    teliaCloudClientID,
+		Scopes:      []string{"openid", "jotta-default", "offline_access"},
+		RedirectURL: oauthutil.RedirectLocalhostURL,
+	}
+
+	err := oauthutil.Config(ctx, "jottacloud", name, m, teliaCloudOauthConfig, nil)
+	if err != nil {
+		log.Fatalf("Failed to configure token: %v", err)
+		return
+	}
+
+	fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
+	if config.Confirm(false) {
+		oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, teliaCloudOauthConfig)
+		if err != nil {
+			log.Fatalf("Failed to load oAuthClient: %s", err)
+		}
+
+		srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
+		apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
+
+		device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
+		if err != nil {
+			log.Fatalf("Failed to setup mountpoint: %s", err)
+		}
+		m.Set(configDevice, device)
+		m.Set(configMountpoint, mountpoint)
+	}
+
+	m.Set("configVersion", strconv.Itoa(configVersion))
+	m.Set(configClientID, teliaCloudClientID)
+	m.Set(configTokenURL, teliaCloudTokenURL)
+}
+
+// v1config configure a jottacloud backend using legacy authentication
 func v1config(ctx context.Context, name string, m configmap.Mapper) {
-	srv := rest.NewClient(fshttp.NewClient(fs.Config))
+	srv := rest.NewClient(fshttp.NewClient(ctx))
 
 	fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
 	if config.Confirm(false) {
@@ -275,7 +324,7 @@ func v1config(ctx context.Context, name string, m configmap.Mapper) {
 
 	fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
 	if config.Confirm(false) {
-		oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
+		oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
 		if err != nil {
 			log.Fatalf("Failed to load oAuthClient: %s", err)
 		}
@@ -323,7 +372,7 @@ func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegis
 	return deviceRegistration, err
 }
 
-// doAuthV1 runs the actual token request for V1 authentification
+// doAuthV1 runs the actual token request for V1 authentication
 func doAuthV1(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
 	// prepare out token request with username and password
 	values := url.Values{}
@@ -365,9 +414,9 @@ func doAuthV1(ctx context.Context, srv *rest.Client, username, password string)
 	return token, err
 }
 
-// v2config configure a jottacloud backend using the modern JottaCli token based authentification
+// v2config configure a jottacloud backend using the modern JottaCli token based authentication
 func v2config(ctx context.Context, name string, m configmap.Mapper) {
-	srv := rest.NewClient(fshttp.NewClient(fs.Config))
+	srv := rest.NewClient(fshttp.NewClient(ctx))
 
 	fmt.Printf("Generate a personal login token here: https://www.jottacloud.com/web/secure\n")
 	fmt.Printf("Login Token> ")
@@ -387,7 +436,7 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {
 
 	fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
 	if config.Confirm(false) {
-		oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
+		oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
 		if err != nil {
 			log.Fatalf("Failed to load oAuthClient: %s", err)
 		}
@@ -405,7 +454,7 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {
 	m.Set("configVersion", strconv.Itoa(configVersion))
 }
 
-// doAuthV2 runs the actual token request for V2 authentification
+// doAuthV2 runs the actual token request for V2 authentication
 func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m configmap.Mapper) (token oauth2.Token, err error) {
 	loginTokenBytes, err := base64.RawURLEncoding.DecodeString(loginTokenBase64)
 	if err != nil {
@@ -641,8 +690,7 @@ func grantTypeFilter(req *http.Request) {
 }
 
 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-	ctx := context.TODO()
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
@@ -664,7 +712,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		return nil, errors.New("Outdated config - please reconfigure this backend")
 	}
 
-	baseClient := fshttp.NewClient(fs.Config)
+	baseClient := fshttp.NewClient(ctx)
 
 	if ver == configVersion {
 		oauthConfig.ClientID = "jottacli"
@@ -700,7 +748,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	}
 
 	// Create OAuth Client
-	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
+	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
 	if err != nil {
 		return nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
 	}
@@ -714,14 +762,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		opt:    *opt,
 		srv:    rest.NewClient(oAuthClient).SetRoot(rootURL),
 		apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
-		pacer:  fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		pacer:  fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
 	f.features = (&fs.Features{
 		CaseInsensitive:         true,
 		CanHaveEmptyDirectories: true,
 		ReadMimeType:            true,
-		WriteMimeType:           true,
-	}).Fill(f)
+		WriteMimeType:           false,
+	}).Fill(ctx, f)
 	f.srv.SetErrorHandler(errorHandler)
 	if opt.TrashedOnly { // we cannot support showing Trashed Files when using ListR right now
 		f.features.ListR = nil
@@ -1100,7 +1148,7 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
 	return info, nil
 }
 
-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
 //
 // This is stored with the remote path given
 //
@@ -1130,7 +1178,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	//return f.newObjectWithInfo(remote, &result)
 }
 
-// Move src to this remote using server side move operations.
+// Move src to this remote using server-side move operations.
 //
 // This is stored with the remote path given
 //
@@ -1161,7 +1209,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 }
 
 // DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -1517,7 +1565,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return err
 	}
 
-	// If the file state is INCOMPLETE and CORRPUT, try to upload a then
+	// If the file state is INCOMPLETE and CORRUPT, try to upload a then
 	if response.State != "COMPLETED" {
 		// how much do we still have to upload?
 		remainingBytes := size - response.ResumePos
@@ -256,7 +256,7 @@ func (f *Fs) fullPath(part string) string {
 }
 
 // NewFs constructs a new filesystem given a root path and configuration options
-func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
 	opt := new(Options)
 	err = configstruct.Set(m, opt)
 	if err != nil {
@@ -267,7 +267,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
 		return nil, err
 	}
 	httpClient := httpclient.New()
-	httpClient.Client = fshttp.NewClient(fs.Config)
+	httpClient.Client = fshttp.NewClient(ctx)
 	client := koofrclient.NewKoofrClientWithHTTPClient(opt.Endpoint, httpClient)
 	basicAuth := fmt.Sprintf("Basic %s",
 		base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))
@@ -287,7 +287,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
 		DuplicateFiles:          false,
 		BucketBased:             false,
 		CanHaveEmptyDirectories: true,
-	}).Fill(f)
+	}).Fill(ctx, f)
 	for _, m := range mounts {
 		if opt.MountID != "" {
 			if m.Id == opt.MountID {
@@ -70,6 +70,20 @@ points, as you explicitly acknowledge that they should be skipped.`,
 			Default:  false,
 			NoPrefix: true,
 			Advanced: true,
+		}, {
+			Name: "zero_size_links",
+			Help: `Assume the Stat size of links is zero (and read them instead)
+
+On some virtual filesystems (such as LucidLink), reading a link size via a Stat call always returns 0.
+However, on unix it reads as the length of the text in the link. This may cause errors like this when
+syncing:
+
+    Failed to copy: corrupted on transfer: sizes differ 0 vs 13
+
+Setting this flag causes rclone to read the link and use that as the size of the link
+instead of 0 which in most cases fixes the problem.`,
+			Default:  false,
+			Advanced: true,
 		}, {
 			Name: "no_unicode_normalization",
 			Help: `Don't apply unicode normalization to paths and filenames (Deprecated)
@@ -87,13 +101,13 @@ Normally rclone checks the size and modification time of files as they
 are being uploaded and aborts with a message which starts "can't copy
 - source file is being updated" if the file changes during upload.
 
-However on some file systems this modification time check may fail (eg
+However on some file systems this modification time check may fail (e.g.
 [Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this
 check can be disabled with this flag.
 
 If this flag is set, rclone will use its best efforts to transfer a
 file which is being updated. If the file is only having things
-appended to it (eg a log) then rclone will transfer the log file with
+appended to it (e.g. a log) then rclone will transfer the log file with
 the size it had the first time rclone saw it.
 
 If the file is being modified throughout (not just appended to) then
@@ -170,6 +184,7 @@ type Options struct {
 	FollowSymlinks    bool `config:"copy_links"`
 	TranslateSymlinks bool `config:"links"`
 	SkipSymlinks      bool `config:"skip_links"`
+	ZeroSizeLinks     bool `config:"zero_size_links"`
 	NoUTFNorm         bool `config:"no_unicode_normalization"`
 	NoCheckUpdated    bool `config:"no_check_updated"`
 	NoUNC             bool `config:"nounc"`
@@ -217,7 +232,7 @@ type Object struct {
 var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
 
 // NewFs constructs an Fs from the path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
@@ -245,7 +260,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		CanHaveEmptyDirectories: true,
 		IsLocal:                 true,
 		SlowHash:                true,
-	}).Fill(f)
+	}).Fill(ctx, f)
 	if opt.FollowSymlinks {
 		f.lstat = os.Stat
 	}
@@ -456,8 +471,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
 			localPath := filepath.Join(fsDirPath, name)
 			fi, err = os.Stat(localPath)
-			if os.IsNotExist(err) {
-				// Skip bad symlinks
+			if os.IsNotExist(err) || isCircularSymlinkError(err) {
+				// Skip bad symlinks and circular symlinks
 				err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
 				fs.Errorf(newRemote, "Listing error: %v", err)
 				err = accounting.Stats(ctx).Error(err)
@@ -637,7 +652,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 	return os.RemoveAll(dir)
 }
 
-// Move src to this remote using server side move operations.
+// Move src to this remote using server-side move operations.
 //
 // This is stored with the remote path given
 //
@@ -701,7 +716,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 }
 
 // DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -1232,7 +1247,8 @@ func (o *Object) setMetadata(info os.FileInfo) {
 	o.mode = info.Mode()
 	o.fs.objectMetaMu.Unlock()
 	// On Windows links read as 0 size so set the correct size here
-	if runtime.GOOS == "windows" && o.translatedLink {
+	// Optionally, users can turn this feature on with the zero_size_links flag
+	if (runtime.GOOS == "windows" || o.fs.opt.ZeroSizeLinks) && o.translatedLink {
 		linkdst, err := os.Readlink(o.path)
 		if err != nil {
 			fs.Errorf(o, "Failed to read link size: %v", err)
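Note: the zero_size_links option works because, for a translated symlink, the content rclone actually transfers is the link target text itself, so the correct size is simply the length of that text. A sketch of the fallback (the path is hypothetical):

	// When Stat reports 0 for a link, size the object from the target string.
	linkdst, err := os.Readlink("/path/to/symlink")
	if err == nil {
		size := int64(len(linkdst)) // matches what will actually be transferred
		_ = size
	}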
@@ -163,6 +163,6 @@ func TestSymlinkError(t *testing.T) {
 		"links":      "true",
 		"copy_links": "true",
 	}
-	_, err := NewFs("local", "/", m)
+	_, err := NewFs(context.Background(), "local", "/", m)
 	assert.Equal(t, errLinksAndCopyLinks, err)
 }
22 backend/local/symlink.go Normal file
@@ -0,0 +1,22 @@
// +build !windows,!plan9,!js

package local

import (
	"os"
	"syscall"
)

// isCircularSymlinkError checks if the current error code is because of a circular symlink
func isCircularSymlinkError(err error) bool {
	if err != nil {
		if newerr, ok := err.(*os.PathError); ok {
			if errcode, ok := newerr.Err.(syscall.Errno); ok {
				if errcode == syscall.ELOOP {
					return true
				}
			}
		}
	}
	return false
}
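Note: on Unix, following a symlink cycle makes Stat fail with ELOOP wrapped in an *os.PathError, which is exactly what the helper above unpacks. A quick way to see it in action (a sketch; the file names are hypothetical):

	// "a" -> "b" -> "a" forms a cycle; os.Stat follows links and hits ELOOP.
	_ = os.Symlink("a", "b")
	_ = os.Symlink("b", "a")
	_, err := os.Stat("a")
	fmt.Println(isCircularSymlinkError(err)) // true on Unix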
17 backend/local/symlink_other.go Normal file
@@ -0,0 +1,17 @@
// +build windows plan9 js

package local

import (
	"strings"
)

// isCircularSymlinkError checks if the current error code is because of a circular symlink
func isCircularSymlinkError(err error) bool {
	if err != nil {
		if strings.Contains(err.Error(), "The name of the file cannot be resolved by the system") {
			return true
		}
	}
	return false
}
@@ -102,6 +102,7 @@ func init() {
|
|||||||
This feature is called "speedup" or "put by hash". It is especially efficient
|
This feature is called "speedup" or "put by hash". It is especially efficient
|
||||||
in case of generally available files like popular books, video or audio clips,
|
in case of generally available files like popular books, video or audio clips,
|
||||||
because files are searched by hash in all accounts of all mailru users.
|
because files are searched by hash in all accounts of all mailru users.
|
||||||
|
It is meaningless and ineffective if source file is unique or encrypted.
|
||||||
Please note that rclone may need local memory and disk space to calculate
|
Please note that rclone may need local memory and disk space to calculate
|
||||||
content hash in advance and decide whether full upload is required.
|
content hash in advance and decide whether full upload is required.
|
||||||
Also, if rclone does not know file size in advance (e.g. in case of
|
Also, if rclone does not know file size in advance (e.g. in case of
|
||||||
@@ -192,7 +193,7 @@ This option must not be used by an ordinary user. It is intended only to
|
|||||||
facilitate remote troubleshooting of backend issues. Strict meaning of
|
facilitate remote troubleshooting of backend issues. Strict meaning of
|
||||||
flags is not documented and not guaranteed to persist between releases.
|
flags is not documented and not guaranteed to persist between releases.
|
||||||
Quirks will be removed when the backend grows stable.
|
Quirks will be removed when the backend grows stable.
|
||||||
Supported quirks: atomicmkdir binlist gzip insecure retry400`,
|
Supported quirks: atomicmkdir binlist unknowndirs`,
|
||||||
}, {
|
}, {
|
||||||
Name: config.ConfigEncoding,
|
Name: config.ConfigEncoding,
|
||||||
Help: config.ConfigEncodingHelp,
|
Help: config.ConfigEncodingHelp,
|
||||||
@@ -238,9 +239,6 @@ func shouldRetry(res *http.Response, err error, f *Fs, opts *rest.Opts) (bool, e
 		reAuthErr := f.reAuthorize(opts, err)
 		return reAuthErr == nil, err // return an original error
 	}
-	if res != nil && res.StatusCode == 400 && f.quirks.retry400 {
-		return true, err
-	}
 	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(res, retryErrorCodes), err
 }
 
@@ -275,8 +273,9 @@ type Fs struct {
 	name         string
 	root         string         // root path
 	opt          Options        // parsed options
+	ci           *fs.ConfigInfo // global config
 	speedupGlobs []string       // list of file name patterns eligible for speedup
-	speedupAny   bool           // true if all file names are aligible for speedup
+	speedupAny   bool           // true if all file names are eligible for speedup
 	features     *fs.Features   // optional features
 	srv          *rest.Client   // REST API client
 	cli          *http.Client   // underlying HTTP client (for authorize)
@@ -296,9 +295,8 @@ type Fs struct {
 }
 
 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// fs.Debugf(nil, ">>> NewFs %q %q", name, root)
-	ctx := context.Background() // Note: NewFs does not pass context!
 
 	// Parse config into Options struct
 	opt := new(Options)
@@ -315,10 +313,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// However the f.root string should not have leading or trailing slashes
 	root = strings.Trim(root, "/")
 
+	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name: name,
 		root: root,
 		opt:  *opt,
+		ci:   ci,
 		m:    m,
 	}
 
@@ -327,7 +327,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	}
 	f.quirks.parseQuirks(opt.Quirks)
 
-	f.pacer = fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleepPacer), pacer.MaxSleep(maxSleepPacer), pacer.DecayConstant(decayConstPacer)))
+	f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleepPacer), pacer.MaxSleep(maxSleepPacer), pacer.DecayConstant(decayConstPacer)))
 
 	f.features = (&fs.Features{
 		CaseInsensitive: true,
@@ -335,27 +335,21 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		// Can copy/move across mailru configs (almost, thus true here), but
 		// only when they share common account (this is checked in Copy/Move).
 		ServerSideAcrossConfigs: true,
-	}).Fill(f)
+	}).Fill(ctx, f)
 
 	// Override few config settings and create a client
-	clientConfig := *fs.Config
+	newCtx, clientConfig := fs.AddConfig(ctx)
 	if opt.UserAgent != "" {
 		clientConfig.UserAgent = opt.UserAgent
 	}
-	clientConfig.NoGzip = !f.quirks.gzip // Send not "Accept-Encoding: gzip" like official client
-	f.cli = fshttp.NewClient(&clientConfig)
+	clientConfig.NoGzip = true // Mimic official client, skip sending "Accept-Encoding: gzip"
+	f.cli = fshttp.NewClient(newCtx)
 
 	f.srv = rest.NewClient(f.cli)
 	f.srv.SetRoot(api.APIServerURL)
 	f.srv.SetHeader("Accept", "*/*") // Send "Accept: */*" with every request like official client
 	f.srv.SetErrorHandler(errorHandler)
 
-	if f.quirks.insecure {
-		transport := f.cli.Transport.(*fshttp.Transport).Transport
-		transport.TLSClientConfig.InsecureSkipVerify = true
-		transport.ProxyConnectHeader = http.Header{"User-Agent": {clientConfig.UserAgent}}
-	}
-
 	if err = f.authorize(ctx, false); err != nil {
 		return nil, err
 	}
@@ -388,30 +382,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 // Internal maintenance flags (to be removed when the backend matures).
 // Primarily intended to facilitate remote support and troubleshooting.
 type quirks struct {
-	gzip        bool
-	insecure    bool
 	binlist     bool
 	atomicmkdir bool
-	retry400    bool
+	unknowndirs bool
 }
 
 func (q *quirks) parseQuirks(option string) {
 	for _, flag := range strings.Split(option, ",") {
 		switch strings.ToLower(strings.TrimSpace(flag)) {
-		case "gzip":
-			// This backend mimics the official client which never sends the
-			// "Accept-Encoding: gzip" header. However, enabling compression
-			// might be good for performance.
-			// Use this quirk to investigate the performance impact.
-			// Remove this quirk if performance does not improve.
-			q.gzip = true
-		case "insecure":
-			// The mailru disk-o protocol is not documented. To compare HTTP
-			// stream against the official client one can use Telerik Fiddler,
-			// which introduces a self-signed certificate. This quirk forces
-			// the Go http layer to accept it.
-			// Remove this quirk when the backend reaches maturity.
-			q.insecure = true
 		case "binlist":
 			// The official client sometimes uses a so called "bin" protocol,
 			// implemented in the listBin file system method below. This method
@@ -424,18 +402,14 @@ func (q *quirks) parseQuirks(option string) {
 		case "atomicmkdir":
 			// At the moment rclone requires Mkdir to return success if the
 			// directory already exists. However, such programs as borgbackup
-			// or restic use mkdir as a locking primitive and depend on its
-			// atomicity. This quirk is a workaround. It can be removed
-			// when the above issue is investigated.
+			// use mkdir as a locking primitive and depend on its atomicity.
+			// Remove this quirk when the above issue is investigated.
 			q.atomicmkdir = true
-		case "retry400":
-			// This quirk will help in troubleshooting a very rare "Error 400"
-			// issue. It can be removed if the problem does not show up
-			// for a year or so. See the below issue:
-			// https://github.com/ivandeex/rclone/issues/14
-			q.retry400 = true
+		case "unknowndirs":
+			// Accepts unknown resource types as folders.
+			q.unknowndirs = true
 		default:
-			// Just ignore all unknown flags
+			// Ignore unknown flags
 		}
 	}
 }
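To see how the surviving quirks parse, here is a minimal standalone sketch that reproduces the loop above with the pared-down flag set; the struct and output are illustrative only.

package main

import (
	"fmt"
	"strings"
)

// quirks mirrors the reduced struct in the diff above.
type quirks struct {
	binlist     bool
	atomicmkdir bool
	unknowndirs bool
}

func (q *quirks) parseQuirks(option string) {
	for _, flag := range strings.Split(option, ",") {
		switch strings.ToLower(strings.TrimSpace(flag)) {
		case "binlist":
			q.binlist = true
		case "atomicmkdir":
			q.atomicmkdir = true
		case "unknowndirs":
			q.unknowndirs = true
		default:
			// Ignore unknown flags, matching the backend's behaviour
		}
	}
}

func main() {
	var q quirks
	q.parseQuirks("atomicmkdir, UnknownDirs, retry400")
	fmt.Printf("%+v\n", q) // the removed retry400 flag is now silently ignored
}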
@@ -449,7 +423,7 @@ func (f *Fs) authorize(ctx context.Context, force bool) (err error) {
 
 	if err != nil || !tokenIsValid(t) {
 		fs.Infof(f, "Valid token not found, authorizing.")
-		ctx := oauthutil.Context(f.cli)
+		ctx := oauthutil.Context(ctx, f.cli)
 		t, err = oauthConfig.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password)
 	}
 	if err == nil && !tokenIsValid(t) {
@@ -472,7 +446,7 @@ func (f *Fs) authorize(ctx context.Context, force bool) (err error) {
 	// crashing with panic `comparing uncomparable type map[string]interface{}`
 	// As a workaround, mimic oauth2.NewClient() wrapping token source in
 	// oauth2.ReuseTokenSource
-	_, ts, err := oauthutil.NewClientWithBaseClient(f.name, f.m, oauthConfig, f.cli)
+	_, ts, err := oauthutil.NewClientWithBaseClient(ctx, f.name, f.m, oauthConfig, f.cli)
 	if err == nil {
 		f.source = oauth2.ReuseTokenSource(nil, ts)
 	}
@@ -551,7 +525,7 @@ func (f *Fs) relPath(absPath string) (string, error) {
 	return "", fmt.Errorf("path %q should be under %q", absPath, f.root)
 }
 
-// metaServer ...
+// metaServer returns URL of current meta server
 func (f *Fs) metaServer(ctx context.Context) (string, error) {
 	f.metaMu.Lock()
 	defer f.metaMu.Unlock()
@@ -656,33 +630,56 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
 	if err != nil {
 		return nil, -1, err
 	}
 
 	mTime := int64(item.Mtime)
 	if mTime < 0 {
 		fs.Debugf(f, "Fixing invalid timestamp %d on mailru file %q", mTime, remote)
 		mTime = 0
 	}
-	switch item.Kind {
-	case "folder":
-		dir := fs.NewDir(remote, time.Unix(mTime, 0)).SetSize(item.Size)
-		dirSize := item.Count.Files + item.Count.Folders
-		return dir, dirSize, nil
-	case "file":
-		binHash, err := mrhash.DecodeString(item.Hash)
-		if err != nil {
-			return nil, -1, err
-		}
-		file := &Object{
-			fs:          f,
-			remote:      remote,
-			hasMetaData: true,
-			size:        item.Size,
-			mrHash:      binHash,
-			modTime:     time.Unix(mTime, 0),
-		}
-		return file, -1, nil
-	default:
-		return nil, -1, fmt.Errorf("Unknown resource type %q", item.Kind)
+	modTime := time.Unix(mTime, 0)
+
+	isDir, err := f.isDir(item.Kind, remote)
+	if err != nil {
+		return nil, -1, err
 	}
+	if isDir {
+		dir := fs.NewDir(remote, modTime).SetSize(item.Size)
+		return dir, item.Count.Files + item.Count.Folders, nil
+	}
+
+	binHash, err := mrhash.DecodeString(item.Hash)
+	if err != nil {
+		return nil, -1, err
+	}
+	file := &Object{
+		fs:          f,
+		remote:      remote,
+		hasMetaData: true,
+		size:        item.Size,
+		mrHash:      binHash,
+		modTime:     modTime,
+	}
+	return file, -1, nil
+}
+
+// isDir returns true for directories, false for files
+func (f *Fs) isDir(kind, path string) (bool, error) {
+	switch kind {
+	case "":
+		return false, errors.New("empty resource type")
+	case "file":
+		return false, nil
+	case "folder":
+		// fall thru
+	case "camera-upload", "mounted", "shared":
+		fs.Debugf(f, "[%s]: folder has type %q", path, kind)
+	default:
+		if !f.quirks.unknowndirs {
+			return false, fmt.Errorf("unknown resource type %q", kind)
+		}
+		fs.Errorf(f, "[%s]: folder has unknown type %q", path, kind)
+	}
+	return true, nil
 }
 
 // List the objects and directories in dir into entries.
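The new isDir helper centralises the kind classification that was previously buried in the switch. Below is a standalone sketch of the same decision table; classifyKind and the sample kinds are illustrative names, not the backend's API, and the quirk is passed in explicitly instead of being read from the Fs.

package main

import (
	"errors"
	"fmt"
)

// classifyKind reproduces the switch in isDir above.
func classifyKind(kind string, unknowndirs bool) (bool, error) {
	switch kind {
	case "":
		return false, errors.New("empty resource type")
	case "file":
		return false, nil
	case "folder", "camera-upload", "mounted", "shared":
		return true, nil
	default:
		if !unknowndirs {
			return false, fmt.Errorf("unknown resource type %q", kind)
		}
		return true, nil // tolerated when the unknowndirs quirk is set
	}
}

func main() {
	for _, kind := range []string{"file", "folder", "mounted", "weblink"} {
		isDir, err := classifyKind(kind, false)
		fmt.Println(kind, isDir, err) // "weblink" errors unless the quirk is on
	}
}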
@@ -698,7 +695,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		entries, err = f.listM1(ctx, f.absPath(dir), 0, maxInt32)
 	}
 
-	if err == nil && fs.Config.LogLevel >= fs.LogLevelDebug {
+	if err == nil && f.ci.LogLevel >= fs.LogLevelDebug {
 		names := []string{}
 		for _, entry := range entries {
 			names = append(names, entry.Remote())
@@ -750,7 +747,11 @@ func (f *Fs) listM1(ctx context.Context, dirPath string, offset int, limit int)
 		return nil, err
 	}
 
-	if info.Body.Kind != "folder" {
+	isDir, err := f.isDir(info.Body.Kind, dirPath)
+	if err != nil {
+		return nil, err
+	}
+	if !isDir {
 		return nil, fs.ErrorIsFile
 	}
 
@@ -958,7 +959,7 @@ func (t *treeState) NextRecord() (fs.DirEntry, error) {
 		return nil, r.Error()
 	}
 
-	if fs.Config.LogLevel >= fs.LogLevelDebug {
+	if t.f.ci.LogLevel >= fs.LogLevelDebug {
 		ctime, _ := modTime.MarshalJSON()
 		fs.Debugf(t.f, "binDir %d.%d %q %q (%d) %s", t.level, itemType, t.currDir, name, size, ctime)
 	}
@@ -1228,7 +1229,7 @@ func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) error {
 	}
 }
 
-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
 // This is stored with the remote path given.
 // It returns the destination Object and a possible error.
 // Will only be called if src.Fs().Name() == f.Name()
@@ -1323,7 +1324,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	return dstObj, err
 }
 
-// Move src to this remote using server side move operations.
+// Move src to this remote using server-side move operations.
 // This is stored with the remote path given.
 // It returns the destination Object and a possible error.
 // Will only be called if src.Fs().Name() == f.Name()
@@ -1410,7 +1411,7 @@ func (f *Fs) moveItemBin(ctx context.Context, srcPath, dstPath, opName string) e
 }
 
 // DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
 // Will only be called if src.Fs().Name() == f.Name()
 // If it isn't possible then return fs.ErrorCantDirMove
 // If destination exists then return fs.ErrorDirExists
@@ -1603,23 +1604,28 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	}
 
 	var (
 		fileBuf  []byte
 		fileHash []byte
 		newHash  []byte
-		trySpeedup bool
+		slowHash bool
+		localSrc bool
 	)
+	if srcObj := fs.UnWrapObjectInfo(src); srcObj != nil {
+		srcFeatures := srcObj.Fs().Features()
+		slowHash = srcFeatures.SlowHash
+		localSrc = srcFeatures.IsLocal
+	}
 
-	// Don't disturb the source if file fits in hash.
-	// Skip an extra speedup request if file fits in hash.
-	if size > mrhash.Size {
-		// Request hash from source.
+	// Try speedup if it's globally enabled but skip extra post
+	// request if file is small and fits in the metadata request
+	trySpeedup := o.fs.opt.SpeedupEnable && size > mrhash.Size
+
+	// Try to get the hash if it's instant
+	if trySpeedup && !slowHash {
 		if srcHash, err := src.Hash(ctx, MrHashType); err == nil && srcHash != "" {
 			fileHash, _ = mrhash.DecodeString(srcHash)
 		}
-
-		// Try speedup if it's globally enabled and source hash is available.
-		trySpeedup = o.fs.opt.SpeedupEnable
-		if trySpeedup && fileHash != nil {
+		if fileHash != nil {
 			if o.putByHash(ctx, fileHash, src, "source") {
 				return nil
 			}
@@ -1628,13 +1634,22 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	}
 
 	// Need to calculate hash, check whether file is still eligible for speedup
-	if trySpeedup {
-		trySpeedup = o.fs.eligibleForSpeedup(o.Remote(), size, options...)
+	trySpeedup = trySpeedup && o.fs.eligibleForSpeedup(o.Remote(), size, options...)
+
+	// Attempt to put by hash if file is local and eligible
+	if trySpeedup && localSrc {
+		if srcHash, err := src.Hash(ctx, MrHashType); err == nil && srcHash != "" {
+			fileHash, _ = mrhash.DecodeString(srcHash)
+		}
+		if fileHash != nil && o.putByHash(ctx, fileHash, src, "localfs") {
+			return nil
+		}
+		// If local file hashing has failed, it's pointless to try anymore
+		trySpeedup = false
 	}
 
 	// Attempt to put by calculating hash in memory
 	if trySpeedup && size <= int64(o.fs.opt.SpeedupMaxMem) {
-		//fs.Debugf(o, "attempt to put by hash from memory")
 		fileBuf, err = ioutil.ReadAll(in)
 		if err != nil {
 			return err
@@ -1649,7 +1664,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 
 	// Attempt to put by hash using a spool file
 	if trySpeedup {
-		tmpFs, err := fs.TemporaryLocalFs()
+		tmpFs, err := fs.TemporaryLocalFs(ctx)
 		if err != nil {
 			fs.Infof(tmpFs, "Failed to create spool FS: %v", err)
 		} else {
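The reworked Update flow now tries progressively more expensive ways to obtain the content hash before falling back to a plain upload. The sketch below condenses that ladder into a single decision function; all names are illustrative, and the real code falls through to the next strategy when a cheaper one fails rather than committing up front.

package main

import "fmt"

// chooseUploadPath condenses the speedup flow above, under the stated
// simplification that each branch is final.
func chooseUploadPath(speedupEnable, slowHash, localSrc bool, size, hashSize, maxMem int64) string {
	trySpeedup := speedupEnable && size > hashSize
	switch {
	case !trySpeedup:
		return "plain upload"
	case !slowHash:
		return "ask source for its hash, then put by hash"
	case localSrc:
		return "hash the local file, then put by hash"
	case size <= maxMem:
		return "hash in memory, then upload"
	default:
		return "hash via spool file, then upload"
	}
}

func main() {
	fmt.Println(chooseUploadPath(true, true, false, 1<<20, 40, 32<<20))
	// hash in memory, then upload
}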
@@ -1764,6 +1779,7 @@ func (f *Fs) parseSpeedupPatterns(patternString string) (err error) {
 	return nil
 }
 
+// putByHash is a thin wrapper around addFileMetaData
 func (o *Object) putByHash(ctx context.Context, mrHash []byte, info fs.ObjectInfo, method string) bool {
 	oNew := new(Object)
 	*oNew = *o
@@ -2190,6 +2206,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		// Discard the beginning of the data
 		_, err = io.CopyN(ioutil.Discard, wrapStream, start)
 		if err != nil {
+			closeBody(res)
 			return nil, err
 		}
 	}
@@ -2247,7 +2264,7 @@ func (e *endHandler) handle(err error) error {
 	return io.EOF
 }
 
-// serverPool backs server dispacher
+// serverPool backs server dispatcher
 type serverPool struct {
 	pool pendingServerMap
 	mu   sync.Mutex
@@ -2362,7 +2379,7 @@ func (p *serverPool) addServer(url string, now time.Time) {
 	expiry := now.Add(p.expirySec * time.Second)
 
 	expiryStr := []byte("-")
-	if fs.Config.LogLevel >= fs.LogLevelInfo {
+	if p.fs.ci.LogLevel >= fs.LogLevelInfo {
 		expiryStr, _ = expiry.MarshalJSON()
 	}
 
backend/mega/mega.go

@@ -11,7 +11,7 @@ Improvements:
 * Uploads could be done in parallel
 * Downloads would be more efficient done in one go
 * Uploads would be more efficient with bigger chunks
-* Looks like mega can support server side copy, but it isn't implemented in go-mega
+* Looks like mega can support server-side copy, but it isn't implemented in go-mega
 * Upload can set modtime... - set as int64_t - can set ctime and mtime?
 */
 
@@ -180,7 +180,7 @@ func (f *Fs) readMetaDataForPath(remote string) (info *mega.Node, err error) {
 }
 
 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
@@ -194,6 +194,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 			return nil, errors.Wrap(err, "couldn't decrypt password")
 		}
 	}
+	ci := fs.GetConfig(ctx)
 
 	// cache *mega.Mega on username so we can re-use and share
 	// them between remotes. They are expensive to make as they
@@ -204,8 +205,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	defer megaCacheMu.Unlock()
 	srv := megaCache[opt.User]
 	if srv == nil {
-		srv = mega.New().SetClient(fshttp.NewClient(fs.Config))
-		srv.SetRetries(fs.Config.LowLevelRetries) // let mega do the low level retries
+		srv = mega.New().SetClient(fshttp.NewClient(ctx))
+		srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
		srv.SetLogger(func(format string, v ...interface{}) {
 			fs.Infof("*go-mega*", format, v...)
 		})
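The per-username cache above exists because a *mega.Mega login is expensive, so two remotes sharing one account share one client. A generic sketch of the same pattern, with *http.Client standing in for the real client type:

package main

import (
	"fmt"
	"net/http"
	"sync"
)

// A per-user client cache in the style of megaCache above.
var (
	cacheMu sync.Mutex
	cache   = map[string]*http.Client{}
)

func clientFor(user string) *http.Client {
	cacheMu.Lock()
	defer cacheMu.Unlock()
	if c, ok := cache[user]; ok {
		return c
	}
	c := &http.Client{} // stand-in for the expensive construction
	cache[user] = c
	return c
}

func main() {
	a := clientFor("alice")
	b := clientFor("alice")
	fmt.Println(a == b) // true: two remotes with one login share a client
}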
@@ -228,12 +229,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		root:  root,
 		opt:   *opt,
 		srv:   srv,
-		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
 	f.features = (&fs.Features{
 		DuplicateFiles:          true,
 		CanHaveEmptyDirectories: true,
-	}).Fill(f)
+	}).Fill(ctx, f)
 
 	// Find the root node and check if it is a file or not
 	_, err = f.findRoot(false)
@@ -699,7 +700,7 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
 			dstDirNode, err = dstFs.mkdir(absRoot, dstParent)
 		}
 		if err != nil {
-			return errors.Wrap(err, "server side move failed to make dst parent dir")
+			return errors.Wrap(err, "server-side move failed to make dst parent dir")
 		}
 
 	if srcRemote != "" {
@@ -712,7 +713,7 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
 			srcDirNode, err = f.findDir(absRoot, srcParent)
 		}
 		if err != nil {
-			return errors.Wrap(err, "server side move failed to lookup src parent dir")
+			return errors.Wrap(err, "server-side move failed to lookup src parent dir")
 		}
 
 		// move the object into its new directory if required
@@ -723,7 +724,7 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
 			return shouldRetry(err)
 		})
 		if err != nil {
-			return errors.Wrap(err, "server side move failed")
+			return errors.Wrap(err, "server-side move failed")
 		}
 	}
 
@@ -737,7 +738,7 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
 			return shouldRetry(err)
 		})
 		if err != nil {
-			return errors.Wrap(err, "server side rename failed")
+			return errors.Wrap(err, "server-side rename failed")
 		}
 	}
 
@@ -746,7 +747,7 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
 	return nil
 }
 
-// Move src to this remote using server side move operations.
+// Move src to this remote using server-side move operations.
 //
 // This is stored with the remote path given
 //
@@ -781,7 +782,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 }
 
 // DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
backend/memory/memory.go

@@ -221,8 +221,8 @@ func (f *Fs) setRoot(root string) {
 	f.rootBucket, f.rootDirectory = bucket.Split(f.root)
 }
 
-// NewFs contstructs an Fs from the path, bucket:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+// NewFs constructs an Fs from the path, bucket:path
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
@@ -241,7 +241,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		WriteMimeType:     true,
 		BucketBased:       true,
 		BucketBasedRootOK: true,
-	}).Fill(f)
+	}).Fill(ctx, f)
 	if f.rootBucket != "" && f.rootDirectory != "" {
 		od := buckets.getObjectData(f.rootBucket, f.rootDirectory)
 		if od != nil {
@@ -462,7 +462,7 @@ func (f *Fs) Precision() time.Duration {
 	return time.Nanosecond
 }
 
-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
 //
 // This is stored with the remote path given
 //
@@ -592,7 +592,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		data:     data,
 		hash:     "",
 		modTime:  src.ModTime(ctx),
-		mimeType: fs.MimeType(ctx, o),
+		mimeType: fs.MimeType(ctx, src),
 	}
 	buckets.updateObjectData(bucket, bucketPath, o.od)
 	return nil
backend/onedrive/api/types.go

@@ -253,8 +253,10 @@ type MoveItemRequest struct {
 //CreateShareLinkRequest is the request to create a sharing link
 //Always Type:view and Scope:anonymous for public sharing
 type CreateShareLinkRequest struct {
-	Type  string `json:"type"`            //Link type in View, Edit or Embed
-	Scope string `json:"scope,omitempty"` //Optional. Scope in anonymousi, organization
+	Type     string     `json:"type"`                         // Link type in View, Edit or Embed
+	Scope    string     `json:"scope,omitempty"`              // Scope in anonymous, organization
+	Password string     `json:"password,omitempty"`           // The password of the sharing link that is set by the creator. Optional and OneDrive Personal only.
+	Expiry   *time.Time `json:"expirationDateTime,omitempty"` // A String with format of yyyy-MM-ddTHH:mm:ssZ of DateTime indicates the expiration time of the permission.
 }
 
 //CreateShareLinkResponse is the response from CreateShareLinkRequest
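With the new Password and Expiry fields, a share-link request can carry a password and an expiration time. A self-contained sketch of the JSON this struct produces (the struct is copied from the diff above; the sample values are made up):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// CreateShareLinkRequest mirrors the struct in the api package above.
type CreateShareLinkRequest struct {
	Type     string     `json:"type"`
	Scope    string     `json:"scope,omitempty"`
	Password string     `json:"password,omitempty"`
	Expiry   *time.Time `json:"expirationDateTime,omitempty"`
}

func main() {
	expiry := time.Date(2021, 6, 1, 0, 0, 0, 0, time.UTC)
	req := CreateShareLinkRequest{Type: "view", Scope: "anonymous", Password: "s3cret", Expiry: &expiry}
	body, _ := json.Marshal(req)
	fmt.Println(string(body))
	// {"type":"view","scope":"anonymous","password":"s3cret","expirationDateTime":"2021-06-01T00:00:00Z"}
}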
@@ -281,6 +283,7 @@ type CreateShareLinkResponse struct {
 type AsyncOperationStatus struct {
 	PercentageComplete float64 `json:"percentageComplete"` // A float value between 0 and 100 that indicates the percentage complete.
 	Status             string  `json:"status"`             // A string value that maps to an enumeration of possible values about the status of the job. "notStarted | inProgress | completed | updating | failed | deletePending | deleteFailed | waiting"
+	ErrorCode          string  `json:"errorCode"`          // Not officially documented :(
 }
 
 // GetID returns a normalized ID of the item
backend/onedrive/onedrive.go

@@ -11,7 +11,9 @@ import (
 	"io"
 	"log"
 	"net/http"
+	"net/url"
 	"path"
+	"regexp"
 	"strconv"
 	"strings"
 	"sync"
@@ -45,7 +47,6 @@ const (
 	minSleep      = 10 * time.Millisecond
 	maxSleep      = 2 * time.Second
 	decayConstant = 2 // bigger for slower decay, exponential
-	graphURL      = "https://graph.microsoft.com/v1.0"
 	configDriveID     = "drive_id"
 	configDriveType   = "drive_type"
 	driveTypePersonal = "personal"
@@ -53,22 +54,40 @@ const (
 	driveTypeSharepoint = "documentLibrary"
 	defaultChunkSize    = 10 * fs.MebiByte
 	chunkSizeMultiple   = 320 * fs.KibiByte
+
+	regionGlobal = "global"
+	regionUS     = "us"
+	regionDE     = "de"
+	regionCN     = "cn"
 )
 
 // Globals
 var (
+	authPath  = "/common/oauth2/v2.0/authorize"
+	tokenPath = "/common/oauth2/v2.0/token"
+
 	// Description of how to auth for this app for a business account
 	oauthConfig = &oauth2.Config{
-		Endpoint: oauth2.Endpoint{
-			AuthURL:  "https://login.microsoftonline.com/common/oauth2/v2.0/authorize",
-			TokenURL: "https://login.microsoftonline.com/common/oauth2/v2.0/token",
-		},
 		Scopes:       []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access", "Sites.Read.All"},
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectLocalhostURL,
 	}
 
+	graphAPIEndpoint = map[string]string{
+		"global": "https://graph.microsoft.com",
+		"us":     "https://graph.microsoft.us",
+		"de":     "https://graph.microsoft.de",
+		"cn":     "https://microsoftgraph.chinacloudapi.cn",
+	}
+
+	authEndpoint = map[string]string{
+		"global": "https://login.microsoftonline.com",
+		"us":     "https://login.microsoftonline.us",
+		"de":     "https://login.microsoftonline.de",
+		"cn":     "https://login.chinacloudapi.cn",
+	}
+
 	// QuickXorHashType is the hash.Type for OneDrive
 	QuickXorHashType hash.Type
 )
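These two lookup tables replace the hard-wired global URLs: given a region key, the Graph base URL and the OAuth endpoints are assembled by concatenation. A runnable sketch of that resolution, using the same table values (reduced to two regions for brevity):

package main

import "fmt"

func main() {
	graphAPIEndpoint := map[string]string{
		"global": "https://graph.microsoft.com",
		"us":     "https://graph.microsoft.us",
	}
	authEndpoint := map[string]string{
		"global": "https://login.microsoftonline.com",
		"us":     "https://login.microsoftonline.us",
	}
	const authPath = "/common/oauth2/v2.0/authorize"

	region := "us"
	fmt.Println(graphAPIEndpoint[region] + "/v1.0") // Graph API base URL
	fmt.Println(authEndpoint[region] + authPath)    // OAuth authorize URL
}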
@@ -80,16 +99,22 @@ func init() {
 		Name:        "onedrive",
 		Description: "Microsoft OneDrive",
 		NewFs:       NewFs,
-		Config: func(name string, m configmap.Mapper) {
-			ctx := context.TODO()
-			err := oauthutil.Config("onedrive", name, m, oauthConfig, nil)
+		Config: func(ctx context.Context, name string, m configmap.Mapper) {
+			region, _ := m.Get("region")
+			graphURL := graphAPIEndpoint[region] + "/v1.0"
+			oauthConfig.Endpoint = oauth2.Endpoint{
+				AuthURL:  authEndpoint[region] + authPath,
+				TokenURL: authEndpoint[region] + tokenPath,
+			}
+			ci := fs.GetConfig(ctx)
+			err := oauthutil.Config(ctx, "onedrive", name, m, oauthConfig, nil)
 			if err != nil {
 				log.Fatalf("Failed to configure token: %v", err)
 				return
 			}
 
 			// Stop if we are running non-interactive config
-			if fs.Config.AutoConfirm {
+			if ci.AutoConfirm {
 				return
 			}
 
@@ -111,7 +136,7 @@ func init() {
 				Sites []siteResource `json:"value"`
 			}
 
-			oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
+			oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
 			if err != nil {
 				log.Fatalf("Failed to configure OneDrive: %v", err)
 			}
@@ -120,9 +145,18 @@ func init() {
 			var opts rest.Opts
 			var finalDriveID string
 			var siteID string
+			var relativePath string
 			switch config.Choose("Your choice",
-				[]string{"onedrive", "sharepoint", "driveid", "siteid", "search"},
-				[]string{"OneDrive Personal or Business", "Root Sharepoint site", "Type in driveID", "Type in SiteID", "Search a Sharepoint site"},
+				[]string{"onedrive", "sharepoint", "url", "search", "driveid", "siteid", "path"},
+				[]string{
+					"OneDrive Personal or Business",
+					"Root Sharepoint site",
+					"Sharepoint site name or URL (e.g. mysite or https://contoso.sharepoint.com/sites/mysite)",
+					"Search for a Sharepoint site",
+					"Type in driveID (advanced)",
+					"Type in SiteID (advanced)",
+					"Sharepoint server-relative path (advanced, e.g. /teams/hr)",
+				},
 				false) {
 
 			case "onedrive":
@@ -143,6 +177,20 @@ func init() {
 			case "siteid":
 				fmt.Printf("Paste your Site ID here> ")
 				siteID = config.ReadLine()
+			case "url":
+				fmt.Println("Example: \"https://contoso.sharepoint.com/sites/mysite\" or \"mysite\"")
+				fmt.Printf("Paste your Site URL here> ")
+				siteURL := config.ReadLine()
+				re := regexp.MustCompile(`https://.*\.sharepoint.com/sites/(.*)`)
+				match := re.FindStringSubmatch(siteURL)
+				if len(match) == 2 {
+					relativePath = "/sites/" + match[1]
+				} else {
+					relativePath = "/sites/" + siteURL
+				}
+			case "path":
+				fmt.Printf("Enter server-relative URL here> ")
+				relativePath = config.ReadLine()
 			case "search":
 				fmt.Printf("What to search for> ")
 				searchTerm := config.ReadLine()
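The "url" case accepts either a full SharePoint site URL or a bare site name and normalises both to a server-relative path. The regexp behaviour can be verified in isolation; the inputs below are illustrative:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as the "url" case above: a full site URL yields the
	// captured site name; a bare name is used as-is.
	re := regexp.MustCompile(`https://.*\.sharepoint.com/sites/(.*)`)
	for _, in := range []string{"https://contoso.sharepoint.com/sites/mysite", "mysite"} {
		if m := re.FindStringSubmatch(in); len(m) == 2 {
			fmt.Println("/sites/" + m[1])
		} else {
			fmt.Println("/sites/" + in)
		}
	}
	// both inputs resolve to /sites/mysite
}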
@@ -169,6 +217,21 @@ func init() {
 				}
 			}
 
+			// if we use server-relative URL for finding the drive
+			if relativePath != "" {
+				opts = rest.Opts{
+					Method:  "GET",
+					RootURL: graphURL,
+					Path:    "/sites/root:" + relativePath,
+				}
+				site := siteResource{}
+				_, err := srv.CallJSON(ctx, &opts, nil, &site)
+				if err != nil {
+					log.Fatalf("Failed to query available site by relative path: %v", err)
+				}
+				siteID = site.SiteID
+			}
+
 			// if we have a siteID we need to ask for the drives
 			if siteID != "" {
 				opts = rest.Opts{
@@ -233,7 +296,7 @@ func init() {
 
 			fmt.Printf("Found drive '%s' of type '%s', URL: %s\nIs that okay?\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL)
 			// This does not work, YET :)
-			if !config.ConfirmWithConfig(m, "config_drive_ok", true) {
+			if !config.ConfirmWithConfig(ctx, m, "config_drive_ok", true) {
 				log.Fatalf("Cancelled by user")
 			}
 
@@ -242,6 +305,25 @@ func init() {
 			config.SaveConfig()
 		},
 		Options: append(oauthutil.SharedOptions, []fs.Option{{
+			Name:    "region",
+			Help:    "Choose national cloud region for OneDrive.",
+			Default: "global",
+			Examples: []fs.OptionExample{
+				{
+					Value: regionGlobal,
+					Help:  "Microsoft Cloud Global",
+				}, {
+					Value: regionUS,
+					Help:  "Microsoft Cloud for US Government",
+				}, {
+					Value: regionDE,
+					Help:  "Microsoft Cloud Germany",
+				}, {
+					Value: regionCN,
+					Help:  "Azure and Office 365 operated by 21Vianet in China",
+				},
+			},
+		}, {
 			Name: "chunk_size",
 			Help: `Chunk size to upload files with - must be multiple of 320k (327,680 bytes).
 
@@ -274,12 +356,11 @@ listing, set this option.`,
 		}, {
 			Name:    "server_side_across_configs",
 			Default: false,
-			Help: `Allow server side operations (eg copy) to work across different onedrive configs.
+			Help: `Allow server-side operations (e.g. copy) to work across different onedrive configs.
 
-This can be useful if you wish to do a server side copy between two
-different Onedrives. Note that this isn't enabled by default
-because it isn't easy to tell if it will work between any two
-configurations.`,
+This will only work if you are copying between two OneDrive *Personal* drives AND
+the files to copy are already shared between them. In other cases, rclone will
+fall back to normal copy (which will be slightly slower).`,
 			Advanced: true,
 		}, {
 			Name: "no_versions",
@@ -296,6 +377,41 @@ modification time and removes all but the last version.
 
 **NB** Onedrive personal can't currently delete versions so don't use
 this flag there.
+`,
+			Advanced: true,
+		}, {
+			Name:     "link_scope",
+			Default:  "anonymous",
+			Help:     `Set the scope of the links created by the link command.`,
+			Advanced: true,
+			Examples: []fs.OptionExample{{
+				Value: "anonymous",
+				Help:  "Anyone with the link has access, without needing to sign in. This may include people outside of your organization. Anonymous link support may be disabled by an administrator.",
+			}, {
+				Value: "organization",
+				Help:  "Anyone signed into your organization (tenant) can use the link to get access. Only available in OneDrive for Business and SharePoint.",
+			}},
+		}, {
+			Name:     "link_type",
+			Default:  "view",
+			Help:     `Set the type of the links created by the link command.`,
+			Advanced: true,
+			Examples: []fs.OptionExample{{
+				Value: "view",
+				Help:  "Creates a read-only link to the item.",
+			}, {
+				Value: "edit",
+				Help:  "Creates a read-write link to the item.",
+			}, {
+				Value: "embed",
+				Help:  "Creates an embeddable link to the item.",
+			}},
+		}, {
+			Name:    "link_password",
+			Default: "",
+			Help: `Set the password for links created by the link command.
+
+At the time of writing this only works with OneDrive personal paid accounts.
 `,
 			Advanced: true,
 		}, {
@@ -311,8 +427,6 @@ this flag there.
 	// |    (vertical line)  -> '｜' // FULLWIDTH VERTICAL LINE
 	// ?    (question mark)  -> '？' // FULLWIDTH QUESTION MARK
 	// *    (asterisk)       -> '＊' // FULLWIDTH ASTERISK
-	// #    (number sign)    -> '＃' // FULLWIDTH NUMBER SIGN
-	// %    (percent sign)   -> '％' // FULLWIDTH PERCENT SIGN
 	//
 	// Folder names cannot begin with a tilde ('~')
 	// List of replaced characters:
@@ -337,7 +451,6 @@ this flag there.
 	// https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/addressing-driveitems?view=odsp-graph-online#path-encoding
 	Default: (encoder.Display |
 		encoder.EncodeBackSlash |
-		encoder.EncodeHashPercent |
 		encoder.EncodeLeftSpace |
 		encoder.EncodeLeftTilde |
 		encoder.EncodeRightPeriod |
@@ -350,12 +463,16 @@ this flag there.
 
 // Options defines the configuration for this backend
 type Options struct {
+	Region                  string               `config:"region"`
 	ChunkSize               fs.SizeSuffix        `config:"chunk_size"`
 	DriveID                 string               `config:"drive_id"`
 	DriveType               string               `config:"drive_type"`
 	ExposeOneNoteFiles      bool                 `config:"expose_onenote_files"`
 	ServerSideAcrossConfigs bool                 `config:"server_side_across_configs"`
 	NoVersions              bool                 `config:"no_versions"`
+	LinkScope               string               `config:"link_scope"`
+	LinkType                string               `config:"link_type"`
+	LinkPassword            string               `config:"link_password"`
 	Enc                     encoder.MultiEncoder `config:"encoding"`
 }
 
@@ -364,6 +481,7 @@ type Fs struct {
 	name     string             // name of this remote
 	root     string             // the path we are working on
 	opt      Options            // parsed options
+	ci       *fs.ConfigInfo     // global config
 	features *fs.Features       // optional features
 	srv      *rest.Client       // the connection to the one drive server
 	dirCache *dircache.DirCache // Map of directory path to directory id
@@ -427,6 +545,9 @@ var retryErrorCodes = []int{
 	509, // Bandwidth Limit Exceeded
 }
 
+var gatewayTimeoutError sync.Once
+var errAsyncJobAccessDenied = errors.New("async job failed - access denied")
+
 // shouldRetry returns a boolean as to whether this resp and err
 // deserve to be retried. It returns the err as a convenience
 func shouldRetry(resp *http.Response, err error) (bool, error) {
@@ -451,6 +572,10 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
 				fs.Debugf(nil, "Too many requests. Trying again in %d seconds.", retryAfter)
 			}
 		}
+	case 504: // Gateway timeout
+		gatewayTimeoutError.Do(func() {
+			fs.Errorf(nil, "%v: upload chunks may be taking too long - try reducing --onedrive-chunk-size or decreasing --transfers", err)
+		})
 	case 507: // Insufficient Storage
 		return false, fserrors.FatalError(err)
 	}
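The 504 branch uses sync.Once so the chunk-size advice is logged at most once per process rather than on every retried chunk. A tiny sketch of the same once-only warning pattern (the function name and message are illustrative):

package main

import (
	"fmt"
	"sync"
)

var gatewayWarning sync.Once

// handle504 logs the advice at most once, like gatewayTimeoutError.Do above.
func handle504() {
	gatewayWarning.Do(func() {
		fmt.Println("upload chunks may be taking too long - try reducing the chunk size")
	})
}

func main() {
	handle504()
	handle504() // silent: the warning already fired
}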
@@ -468,10 +593,8 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
 //
 // If `relPath` == '', do not append the slash (See #3664)
 func (f *Fs) readMetaDataForPathRelativeToID(ctx context.Context, normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
-	if relPath != "" {
-		relPath = "/" + withTrailingColon(rest.URLPathEscape(f.opt.Enc.FromStandardPath(relPath)))
-	}
-	opts := newOptsCall(normalizedID, "GET", ":"+relPath)
+	opts, _ := f.newOptsCallWithIDPath(normalizedID, relPath, true, "GET", "")
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
 		return shouldRetry(resp, err)
@@ -486,17 +609,8 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
 
 	if f.driveType != driveTypePersonal || firstSlashIndex == -1 {
 		var opts rest.Opts
-		if len(path) == 0 {
-			opts = rest.Opts{
-				Method: "GET",
-				Path:   "/root",
-			}
-		} else {
-			opts = rest.Opts{
-				Method: "GET",
-				Path:   "/root:/" + rest.URLPathEscape(f.opt.Enc.FromStandardPath(path)),
-			}
-		}
+		opts = f.newOptsCallWithPath(ctx, path, "GET", "")
+		opts.Path = strings.TrimSuffix(opts.Path, ":")
 		err = f.pacer.Call(func() (bool, error) {
 			resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
 			return shouldRetry(resp, err)
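Both call sites now delegate to helpers that build Graph paths, where an item is addressed either by ID or by an escaped path under "/root:". As an assumption-laden sketch of the escaping involved (escapePath is a stand-in; the real code uses rest.URLPathEscape and the backend's encoder, which may differ in detail):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// escapePath escapes each path segment but keeps the "/" separators.
func escapePath(p string) string {
	segs := strings.Split(p, "/")
	for i, s := range segs {
		segs[i] = url.PathEscape(s)
	}
	return strings.Join(segs, "/")
}

func main() {
	// Path-based addressing appends ":/<escaped path>" to the root.
	fmt.Println("/root:/" + escapePath("some dir/file#1.txt"))
	// /root:/some%20dir/file%231.txt
}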
@@ -590,8 +704,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewFs constructs an Fs from the path, container:path
|
// NewFs constructs an Fs from the path, container:path
|
||||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
ctx := context.Background()
|
|
||||||
// Parse config into Options struct
|
// Parse config into Options struct
|
||||||
opt := new(Options)
|
opt := new(Options)
|
||||||
err := configstruct.Set(m, opt)
|
err := configstruct.Set(m, opt)
|
||||||
@@ -608,27 +721,35 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
return nil, errors.New("unable to get drive_id and drive_type - if you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
|
return nil, errors.New("unable to get drive_id and drive_type - if you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
rootURL := graphAPIEndpoint[opt.Region] + "/v1.0" + "/drives/" + opt.DriveID
|
||||||
|
oauthConfig.Endpoint = oauth2.Endpoint{
|
||||||
|
AuthURL: authEndpoint[opt.Region] + authPath,
|
||||||
|
TokenURL: authEndpoint[opt.Region] + tokenPath,
|
||||||
|
}
|
||||||
|
|
||||||
root = parsePath(root)
|
root = parsePath(root)
|
||||||
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
|
oAuthClient, ts, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "failed to configure OneDrive")
|
return nil, errors.Wrap(err, "failed to configure OneDrive")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ci := fs.GetConfig(ctx)
|
||||||
f := &Fs{
|
f := &Fs{
|
||||||
name: name,
|
name: name,
|
||||||
root: root,
|
root: root,
|
||||||
opt: *opt,
|
opt: *opt,
|
||||||
|
ci: ci,
|
||||||
driveID: opt.DriveID,
|
driveID: opt.DriveID,
|
||||||
driveType: opt.DriveType,
|
driveType: opt.DriveType,
|
||||||
srv: rest.NewClient(oAuthClient).SetRoot(graphURL + "/drives/" + opt.DriveID),
|
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
||||||
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||||
}
|
}
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
CaseInsensitive: true,
|
CaseInsensitive: true,
|
||||||
ReadMimeType: true,
|
ReadMimeType: true,
|
||||||
CanHaveEmptyDirectories: true,
|
CanHaveEmptyDirectories: true,
|
||||||
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
|
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
|
||||||
}).Fill(f)
|
}).Fill(ctx, f)
|
||||||
f.srv.SetErrorHandler(errorHandler)
|
f.srv.SetErrorHandler(errorHandler)
|
||||||
|
|
||||||
// Renew the token in the background
|
// Renew the token in the background
|
||||||
@@ -741,7 +862,7 @@ func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, e
 	// fs.Debugf(f, "CreateDir(%q, %q)\n", dirID, leaf)
 	var resp *http.Response
 	var info *api.Item
-	opts := newOptsCall(dirID, "POST", "/children")
+	opts := f.newOptsCall(dirID, "POST", "/children")
 	mkdir := api.CreateItemRequest{
 		Name:             f.opt.Enc.FromStandardName(leaf),
 		ConflictBehavior: "fail",

@@ -773,7 +894,7 @@ type listAllFn func(*api.Item) bool
 func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
 	// Top parameter asks for bigger pages of data
 	// https://dev.onedrive.com/odata/optional-query-parameters.htm
-	opts := newOptsCall(dirID, "GET", "/children?$top=1000")
+	opts := f.newOptsCall(dirID, "GET", "/children?$top=1000")
 OUTER:
 	for {
 		var result api.ListChildrenResponse

@@ -912,7 +1033,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 
 // deleteObject removes an object by ID
 func (f *Fs) deleteObject(ctx context.Context, id string) error {
-	opts := newOptsCall(id, "DELETE", "")
+	opts := f.newOptsCall(id, "DELETE", "")
 	opts.NoResponse = true
 
 	return f.pacer.Call(func() (bool, error) {

@@ -967,7 +1088,7 @@ func (f *Fs) Precision() time.Duration {
 
 // waitForJob waits for the job with status in url to complete
 func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
-	deadline := time.Now().Add(fs.Config.Timeout)
+	deadline := time.Now().Add(f.ci.Timeout)
 	for time.Now().Before(deadline) {
 		var resp *http.Response
 		var err error

@@ -992,10 +1113,12 @@ func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
 
 		switch status.Status {
 		case "failed":
-		case "deleteFailed":
-			{
-				return errors.Errorf("%s: async operation returned %q", o.remote, status.Status)
-			}
+			if strings.HasPrefix(status.ErrorCode, "AccessDenied_") {
+				return errAsyncJobAccessDenied
+			}
+			fallthrough
+		case "deleteFailed":
+			return errors.Errorf("%s: async operation returned %q", o.remote, status.Status)
 		case "completed":
 			err = o.readMetaData(ctx)
 			return errors.Wrapf(err, "async operation completed but readMetaData failed")
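
errAsyncJobAccessDenied is a package-level sentinel, so callers can recognise this failure mode by identity instead of parsing an error message. A minimal sketch, assuming a declaration along these lines (the exact message is an assumption):

    // assumed declaration; the real one sits with the other package vars
    var errAsyncJobAccessDenied = errors.New("async job failed - access denied")

    // Copy (later in this change set) then downgrades it to a fallback:
    err = f.waitForJob(ctx, location, dstObj)
    if err == errAsyncJobAccessDenied {
        return nil, fs.ErrorCantCopy // let rclone fall back to a manual copy
    }
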
@@ -1003,10 +1126,10 @@ func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
 
 		time.Sleep(1 * time.Second)
 	}
-	return errors.Errorf("async operation didn't complete after %v", fs.Config.Timeout)
+	return errors.Errorf("async operation didn't complete after %v", f.ci.Timeout)
 }
 
-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
 //
 // This is stored with the remote path given
 //

@@ -1021,6 +1144,17 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		fs.Debugf(src, "Can't copy - not same remote type")
 		return nil, fs.ErrorCantCopy
 	}
+	if f.driveType != srcObj.fs.driveType {
+		fs.Debugf(src, "Can't server-side copy - drive types differ")
+		return nil, fs.ErrorCantCopy
+	}
+
+	// For OneDrive Business, this is only supported within the same drive
+	if f.driveType != driveTypePersonal && srcObj.fs.driveID != f.driveID {
+		fs.Debugf(src, "Can't server-side copy - cross-drive but not OneDrive Personal")
+		return nil, fs.ErrorCantCopy
+	}
+
 	err := srcObj.readMetaData(ctx)
 	if err != nil {
 		return nil, err

@@ -1042,11 +1176,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}
 
 	// Copy the object
-	opts := newOptsCall(srcObj.id, "POST", "/copy")
+	// The query param is a workaround for OneDrive Business for #4590
+	opts := f.newOptsCall(srcObj.id, "POST", "/copy?@microsoft.graph.conflictBehavior=replace")
 	opts.ExtraHeaders = map[string]string{"Prefer": "respond-async"}
 	opts.NoResponse = true
 
-	id, dstDriveID, _ := parseNormalizedID(directoryID)
+	id, dstDriveID, _ := f.parseNormalizedID(directoryID)
 
 	replacedLeaf := f.opt.Enc.FromStandardName(leaf)
 	copyReq := api.CopyItemRequest{

@@ -1073,6 +1208,10 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 
 	// Wait for job to finish
 	err = f.waitForJob(ctx, location, dstObj)
+	if err == errAsyncJobAccessDenied {
+		fs.Debugf(src, "Server-side copy failed - file not shared between drives")
+		return nil, fs.ErrorCantCopy
+	}
 	if err != nil {
 		return nil, err
 	}

@@ -1097,7 +1236,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 	return f.purgeCheck(ctx, dir, false)
 }
 
-// Move src to this remote using server side move operations.
+// Move src to this remote using server-side move operations.
 //
 // This is stored with the remote path given
 //

@@ -1119,8 +1258,8 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		return nil, err
 	}
 
-	id, dstDriveID, _ := parseNormalizedID(directoryID)
-	_, srcObjDriveID, _ := parseNormalizedID(srcObj.id)
+	id, dstDriveID, _ := f.parseNormalizedID(directoryID)
+	_, srcObjDriveID, _ := f.parseNormalizedID(srcObj.id)
 
 	if f.canonicalDriveID(dstDriveID) != srcObj.fs.canonicalDriveID(srcObjDriveID) {
 		// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0

@@ -1130,7 +1269,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}
 
 	// Move the object
-	opts := newOptsCall(srcObj.id, "PATCH", "")
+	opts := f.newOptsCall(srcObj.id, "PATCH", "")
 
 	move := api.MoveItemRequest{
 		Name: f.opt.Enc.FromStandardName(leaf),

@@ -1162,7 +1301,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 }
 
 // DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -1181,8 +1320,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 		return err
 	}
 
-	parsedDstDirID, dstDriveID, _ := parseNormalizedID(dstDirectoryID)
-	_, srcDriveID, _ := parseNormalizedID(srcID)
+	parsedDstDirID, dstDriveID, _ := f.parseNormalizedID(dstDirectoryID)
+	_, srcDriveID, _ := f.parseNormalizedID(srcID)
 
 	if f.canonicalDriveID(dstDriveID) != srcFs.canonicalDriveID(srcDriveID) {
 		// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0

@@ -1198,7 +1337,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	}
 
 	// Do the move
-	opts := newOptsCall(srcID, "PATCH", "")
+	opts := f.newOptsCall(srcID, "PATCH", "")
 	move := api.MoveItemRequest{
 		Name: f.opt.Enc.FromStandardName(dstLeaf),
 		ParentReference: &api.ItemReference{

@@ -1274,11 +1413,17 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 	if err != nil {
 		return "", err
 	}
-	opts := newOptsCall(info.GetID(), "POST", "/createLink")
+	opts := f.newOptsCall(info.GetID(), "POST", "/createLink")
 
 	share := api.CreateShareLinkRequest{
-		Type:  "view",
-		Scope: "anonymous",
+		Type:     f.opt.LinkType,
+		Scope:    f.opt.LinkScope,
+		Password: f.opt.LinkPassword,
+	}
+
+	if expire < fs.Duration(time.Hour*24*365*100) {
+		expiry := time.Now().Add(time.Duration(expire))
+		share.Expiry = &expiry
 	}
 
 	var resp *http.Response
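
On the 100-year threshold in the PublicLink hunk above: rclone's unset expiry sentinel is a duration so large it never passes this comparison, so share.Expiry is only set for a real, finite expiry. A minimal sketch (treating fs.DurationOff's value, roughly 292 years from time.Duration(math.MaxInt64), as an assumption):

    const century = fs.Duration(time.Hour * 24 * 365 * 100)

    fmt.Println(fs.Duration(24*time.Hour) < century) // true  -> share.Expiry gets set
    fmt.Println(fs.DurationOff < century)            // false -> link never expires
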
@@ -1296,7 +1441,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 
 // CleanUp deletes all the hidden files.
 func (f *Fs) CleanUp(ctx context.Context) error {
-	token := make(chan struct{}, fs.Config.Checkers)
+	token := make(chan struct{}, f.ci.Checkers)
 	var wg sync.WaitGroup
 	err := walk.Walk(ctx, f, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
 		err = entries.ForObjectError(func(obj fs.Object) error {

@@ -1326,7 +1471,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 
 // Finds and removes any old versions for o
 func (o *Object) deleteVersions(ctx context.Context) error {
-	opts := newOptsCall(o.id, "GET", "/versions")
+	opts := o.fs.newOptsCall(o.id, "GET", "/versions")
 	var versions api.VersionsResponse
 	err := o.fs.pacer.Call(func() (bool, error) {
 		resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &versions)

@@ -1353,7 +1498,7 @@ func (o *Object) deleteVersion(ctx context.Context, ID string) error {
 		return nil
 	}
 	fs.Infof(o, "removing version %q", ID)
-	opts := newOptsCall(o.id, "DELETE", "/versions/"+ID)
+	opts := o.fs.newOptsCall(o.id, "DELETE", "/versions/"+ID)
 	opts.NoResponse = true
 	return o.fs.pacer.Call(func() (bool, error) {
 		resp, err := o.fs.srv.Call(ctx, &opts)

@@ -1498,21 +1643,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
 
 // setModTime sets the modification time of the local fs object
 func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item, error) {
-	var opts rest.Opts
-	leaf, directoryID, _ := o.fs.dirCache.FindPath(ctx, o.remote, false)
-	trueDirID, drive, rootURL := parseNormalizedID(directoryID)
-	if drive != "" {
-		opts = rest.Opts{
-			Method:  "PATCH",
-			RootURL: rootURL,
-			Path:    "/" + drive + "/items/" + trueDirID + ":/" + withTrailingColon(rest.URLPathEscape(o.fs.opt.Enc.FromStandardName(leaf))),
-		}
-	} else {
-		opts = rest.Opts{
-			Method: "PATCH",
-			Path:   "/root:/" + withTrailingColon(rest.URLPathEscape(o.srvPath())),
-		}
-	}
+	opts := o.fs.newOptsCallWithPath(ctx, o.remote, "PATCH", "")
 	update := api.SetFileSystemInfo{
 		FileSystemInfo: api.FileSystemInfoFacet{
 			CreatedDateTime: api.Timestamp(modTime),
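
setModTime now routes through newOptsCallWithPath (defined later in this diff). The withTrailingColon helper both the old and new code rely on only appends the path-addressing colon for a non-empty leaf. A minimal sketch consistent with how it is used here (the committed definition lives elsewhere in the file):

    // assumed shape of the existing helper, shown for readability:
    // "/items/{id}:/name:" addresses a child by path, while an empty
    // leaf must collapse to "/items/{id}" with no colon at all.
    func withTrailingColon(path string) string {
        if path == "" {
            return ""
        }
        return path + ":"
    }
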
@@ -1559,7 +1690,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 
 	fs.FixRangeOption(options, o.size)
 	var resp *http.Response
-	opts := newOptsCall(o.id, "GET", "/content")
+	opts := o.fs.newOptsCall(o.id, "GET", "/content")
 	opts.Options = options
 
 	err = o.fs.pacer.Call(func() (bool, error) {

@@ -1579,22 +1710,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 
 // createUploadSession creates an upload session for the object
 func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (response *api.CreateUploadResponse, err error) {
-	leaf, directoryID, _ := o.fs.dirCache.FindPath(ctx, o.remote, false)
-	id, drive, rootURL := parseNormalizedID(directoryID)
-	var opts rest.Opts
-	if drive != "" {
-		opts = rest.Opts{
-			Method:  "POST",
-			RootURL: rootURL,
-			Path: fmt.Sprintf("/%s/items/%s:/%s:/createUploadSession",
-				drive, id, rest.URLPathEscape(o.fs.opt.Enc.FromStandardName(leaf))),
-		}
-	} else {
-		opts = rest.Opts{
-			Method: "POST",
-			Path:   "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/createUploadSession",
-		}
-	}
+	opts := o.fs.newOptsCallWithPath(ctx, o.remote, "POST", "/createUploadSession")
 	createRequest := api.CreateUploadRequest{}
 	createRequest.Item.FileSystemInfo.CreatedDateTime = api.Timestamp(modTime)
 	createRequest.Item.FileSystemInfo.LastModifiedDateTime = api.Timestamp(modTime)

@@ -1767,27 +1883,10 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
 
 	fs.Debugf(o, "Starting singlepart upload")
 	var resp *http.Response
-	var opts rest.Opts
-	leaf, directoryID, _ := o.fs.dirCache.FindPath(ctx, o.remote, false)
-	trueDirID, drive, rootURL := parseNormalizedID(directoryID)
-	if drive != "" {
-		opts = rest.Opts{
-			Method:        "PUT",
-			RootURL:       rootURL,
-			Path:          "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(o.fs.opt.Enc.FromStandardName(leaf)) + ":/content",
-			ContentLength: &size,
-			Body:          in,
-			Options:       options,
-		}
-	} else {
-		opts = rest.Opts{
-			Method:        "PUT",
-			Path:          "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/content",
-			ContentLength: &size,
-			Body:          in,
-			Options:       options,
-		}
-	}
+	opts := o.fs.newOptsCallWithPath(ctx, o.remote, "PUT", "/content")
+	opts.ContentLength = &size
+	opts.Body = in
+	opts.Options = options
 
 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &info)

@@ -1863,8 +1962,42 @@ func (o *Object) ID() string {
 	return o.id
 }
 
-func newOptsCall(normalizedID string, method string, route string) (opts rest.Opts) {
-	id, drive, rootURL := parseNormalizedID(normalizedID)
+/*
+ * URL Build routine area start
+ * 1. In this area, region-related URL rewrites are applied. As the API is a blackbox,
+ *    we cannot thoroughly test this part. Please be extremely careful while changing it.
+ * 2. If possible, please don't introduce region-related code elsewhere; patch these helper functions instead.
+ * 3. To avoid region-related issues, please don't manually build rest.Opts from scratch.
+ *    Instead, use these helper functions, and customize the URL afterwards if needed.
+ *
+ * currently, the 21ViaNet API differs in the following places:
+ * - https://{Endpoint}/drives/{driveID}/items/{leaf}:/{route}
+ *   - this API doesn't work (gives invalid request)
+ *   - can be replaced with the following APIs:
+ *     - https://{Endpoint}/drives/{driveID}/items/children('{leaf}')/{route}
+ *       - however, this API does NOT support multi-level leaf like a/b/c
+ *     - https://{Endpoint}/drives/{driveID}/items/children('@a1')/{route}?@a1=URLEncode("'{leaf}'")
+ *       - this API does support multi-level leaf like a/b/c
+ * - https://{Endpoint}/drives/{driveID}/root/children('@a1')/{route}?@a1=URLEncode({path})
+ *   - same as above
+ */
+
+// parseNormalizedID parses a normalized ID (may be in the form `driveID#itemID` or just `itemID`)
+// and returns itemID, driveID, rootURL.
+// Such a normalized ID can come from (*Item).GetID()
+func (f *Fs) parseNormalizedID(ID string) (string, string, string) {
+	rootURL := graphAPIEndpoint[f.opt.Region] + "/v1.0/drives"
+	if strings.Index(ID, "#") >= 0 {
+		s := strings.Split(ID, "#")
+		return s[1], s[0], rootURL
+	}
+	return ID, "", ""
+}
+
+// newOptsCall builds the rest.Opts structure with *a normalizedID (driveID#fileID, or simply fileID)*
+// using url template https://{Endpoint}/drives/{driveID}/items/{itemID}/{route}
+func (f *Fs) newOptsCall(normalizedID string, method string, route string) (opts rest.Opts) {
+	id, drive, rootURL := f.parseNormalizedID(normalizedID)
 
 	if drive != "" {
 		return rest.Opts{

@@ -1879,17 +2012,91 @@ func newOptsCall(normalizedID string, method string, route string) (opts rest.Op
 		}
 	}
 }
 
-// parseNormalizedID parses a normalized ID (may be in the form `driveID#itemID` or just `itemID`)
-// and returns itemID, driveID, rootURL.
-// Such a normalized ID can come from (*Item).GetID()
-func parseNormalizedID(ID string) (string, string, string) {
-	if strings.Index(ID, "#") >= 0 {
-		s := strings.Split(ID, "#")
-		return s[1], s[0], graphURL + "/drives"
-	}
-	return ID, "", ""
+func escapeSingleQuote(str string) string {
+	return strings.ReplaceAll(str, "'", "''")
 }
+
+// newOptsCallWithIDPath builds the rest.Opts structure with *a normalizedID (driveID#fileID, or simply fileID) and leaf*
+// using url template https://{Endpoint}/drives/{driveID}/items/{leaf}:/{route} (for international OneDrive)
+// or https://{Endpoint}/drives/{driveID}/items/children('{leaf}')/{route}
+// and https://{Endpoint}/drives/{driveID}/items/children('@a1')/{route}?@a1=URLEncode("'{leaf}'") (for 21ViaNet)
+// if isPath is false, this function will only work when the leaf is "" or a child name (i.e. it doesn't accept multi-level leaf)
+// if isPath is true, multi-level leaf like a/b/c can be passed
+func (f *Fs) newOptsCallWithIDPath(normalizedID string, leaf string, isPath bool, method string, route string) (opts rest.Opts, ok bool) {
+	encoder := f.opt.Enc.FromStandardName
+	if isPath {
+		encoder = f.opt.Enc.FromStandardPath
+	}
+	trueDirID, drive, rootURL := f.parseNormalizedID(normalizedID)
+	if drive == "" {
+		trueDirID = normalizedID
+	}
+	entity := "/items/" + trueDirID + ":/" + withTrailingColon(rest.URLPathEscape(encoder(leaf))) + route
+	if f.opt.Region == regionCN {
+		if isPath {
+			entity = "/items/" + trueDirID + "/children('@a1')" + route + "?@a1=" + url.QueryEscape("'"+encoder(escapeSingleQuote(leaf))+"'")
+		} else {
+			entity = "/items/" + trueDirID + "/children('" + rest.URLPathEscape(encoder(escapeSingleQuote(leaf))) + "')" + route
+		}
+	}
+	if drive == "" {
+		ok = false
+		opts = rest.Opts{
+			Method: method,
+			Path:   entity,
+		}
+		return
+	}
+	ok = true
+	opts = rest.Opts{
+		Method:  method,
+		RootURL: rootURL,
+		Path:    "/" + drive + entity,
+	}
+	return
+}
+
+// newOptsCallWithRootPath builds the rest.Opts structure with an *absolute path starting from root*
+// using url template https://{Endpoint}/drives/{driveID}/root:/{path}:/{route}
+// or https://{Endpoint}/drives/{driveID}/root/children('@a1')/{route}?@a1=URLEncode({path})
+func (f *Fs) newOptsCallWithRootPath(path string, method string, route string) (opts rest.Opts) {
+	path = strings.TrimSuffix(path, "/")
+	newURL := "/root:/" + withTrailingColon(rest.URLPathEscape(f.opt.Enc.FromStandardPath(path))) + route
+	if f.opt.Region == regionCN {
+		newURL = "/root/children('@a1')" + route + "?@a1=" + url.QueryEscape("'"+escapeSingleQuote(f.opt.Enc.FromStandardPath(path))+"'")
+	}
+	return rest.Opts{
+		Method: method,
+		Path:   newURL,
+	}
+}
+
+// newOptsCallWithPath builds the rest.Opts intelligently.
+// It will first try to resolve the path using dircache, which enables support for "Shared with me" files.
+// If present in the cache, it uses the ID + Path variant, else it falls back to the RootPath variant
+func (f *Fs) newOptsCallWithPath(ctx context.Context, path string, method string, route string) (opts rest.Opts) {
+	if path == "" {
+		url := "/root" + route
+		return rest.Opts{
+			Method: method,
+			Path:   url,
+		}
+	}
+
+	// find dircache
+	leaf, directoryID, _ := f.dirCache.FindPath(ctx, path, false)
+	// try to use the IDPath variant first
+	if opts, ok := f.newOptsCallWithIDPath(directoryID, leaf, false, method, route); ok {
+		return opts
+	}
+	// fall back to the RootPath variant
+	return f.newOptsCallWithRootPath(path, method, route)
+}
+
+/*
+ * URL Build routine area end
+ */
+
 // Returns the canonical form of the driveID
 func (f *Fs) canonicalDriveID(driveID string) (canonicalDriveID string) {
 	if driveID == "" {
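
To make the helper contracts above concrete, a short test-style sketch (in-package, assuming a testing import; the values are illustrative):

    func TestEscapeSingleQuote(t *testing.T) {
        // OData string literals escape ' by doubling it, which is what
        // the 21ViaNet children('...') URL form above requires:
        if got := escapeSingleQuote("Bob's files"); got != "Bob''s files" {
            t.Errorf("got %q", got)
        }
    }

And, by inspection of parseNormalizedID above:

    // "b!drive123#item456" -> itemID "item456", driveID "b!drive123", rootURL "{endpoint}/v1.0/drives"
    // "item456"            -> itemID "item456", driveID "",           rootURL ""
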
@@ -5,6 +5,7 @@ import (
 	"testing"
 
 	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fstest"
 	"github.com/rclone/rclone/fstest/fstests"
 )

@@ -19,6 +20,20 @@ func TestIntegration(t *testing.T) {
 	})
 }
 
+// TestIntegrationCn runs integration tests against the remote
+func TestIntegrationCn(t *testing.T) {
+	if *fstest.RemoteName != "" {
+		t.Skip("skipping as -remote is set")
+	}
+	fstests.Run(t, &fstests.Opt{
+		RemoteName: "TestOneDriveCn:",
+		NilObject:  (*Object)(nil),
+		ChunkedUpload: fstests.ChunkedUploadConfig{
+			CeilChunkSize: fstests.NextMultipleOf(chunkSizeMultiple),
+		},
+	})
+}
+
 func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
 	return f.setUploadChunkSize(cs)
 }
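
The ChunkedUpload config above rounds candidate chunk sizes up to chunkSizeMultiple, since Graph only accepts upload fragments in fixed increments (320 KiB per the OneDrive docs; the constant's value here is stated as an assumption). The rounding is plain ceiling arithmetic:

    // a minimal sketch of what fstests.NextMultipleOf(chunkSizeMultiple) enforces
    const chunkSizeMultiple = 320 * 1024 // assumption: Graph's fragment granularity

    size := 500 * 1024 // a candidate chunk size from the test harness
    rounded := (size + chunkSizeMultiple - 1) / chunkSizeMultiple * chunkSizeMultiple
    fmt.Println(rounded) // 655360, i.e. 640 KiB
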
@@ -164,8 +164,7 @@ func (f *Fs) DirCacheFlush() {
 }
 
 // NewFs constructs an Fs from the path, bucket:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-	ctx := context.Background()
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)

@@ -188,8 +187,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		name: name,
 		root: root,
 		opt:  *opt,
-		srv:  rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
-		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		srv:  rest.NewClient(fshttp.NewClient(ctx)).SetErrorHandler(errorHandler),
+		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
 
 	f.dirCache = dircache.New(root, "0", f)

@@ -217,7 +216,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	f.features = (&fs.Features{
 		CaseInsensitive:         true,
 		CanHaveEmptyDirectories: true,
-	}).Fill(f)
+	}).Fill(ctx, f)
 
 	// Find the current root
 	err = f.dirCache.FindRoot(ctx, false)

@@ -338,7 +337,7 @@ func (f *Fs) Precision() time.Duration {
 	return time.Second
 }
 
-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
 //
 // This is stored with the remote path given
 //

@@ -402,7 +401,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	return dstObj, nil
 }
 
-// Move src to this remote using server side move operations.
+// Move src to this remote using server-side move operations.
 //
 // This is stored with the remote path given
 //

@@ -460,7 +459,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 }
 
 // DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -722,7 +721,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
 	for _, folder := range folderList.Folders {
 		// fs.Debugf(nil, "Folder: %s (%s)", folder.Name, folder.FolderID)
 
-		if leaf == folder.Name {
+		if strings.EqualFold(leaf, folder.Name) {
 			// found
 			return folder.FolderID, true, nil
 		}
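
The FindLeaf change switches an exact comparison to strings.EqualFold because this remote treats names case-insensitively; without it, the directory cache could miss an existing folder that differs only in case. A small self-contained illustration:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // EqualFold matches under Unicode simple case folding without
        // allocating lowercased copies of either string:
        fmt.Println(strings.EqualFold("Backups", "BACKUPS")) // true
        fmt.Println(strings.EqualFold("Résumé", "rÉsumÉ"))   // true
        fmt.Println("Backups" == "BACKUPS")                  // false
    }
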
@@ -96,7 +96,7 @@ func (i *Item) ModTime() (t time.Time) {
 	return t
 }
 
-// ItemResult is returned from the /listfolder, /createfolder, /deletefolder, /deletefile etc methods
+// ItemResult is returned from the /listfolder, /createfolder, /deletefolder, /deletefile, etc. methods
 type ItemResult struct {
 	Error
 	Metadata Item `json:"metadata"`

@@ -72,7 +72,7 @@ func init() {
 		Name:        "pcloud",
 		Description: "Pcloud",
 		NewFs:       NewFs,
-		Config: func(name string, m configmap.Mapper) {
+		Config: func(ctx context.Context, name string, m configmap.Mapper) {
 			optc := new(Options)
 			err := configstruct.Set(m, optc)
 			if err != nil {

@@ -98,7 +98,7 @@ func init() {
 				CheckAuth:    checkAuth,
 				StateBlankOK: true, // pCloud seems to drop the state parameter now - see #4210
 			}
-			err = oauthutil.Config("pcloud", name, m, oauthConfig, &opt)
+			err = oauthutil.Config(ctx, "pcloud", name, m, oauthConfig, &opt)
 			if err != nil {
 				log.Fatalf("Failed to configure token: %v", err)
 			}

@@ -219,7 +219,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
 	// Check if it is an api.Error
 	if apiErr, ok := err.(*api.Error); ok {
 		// See https://docs.pcloud.com/errors/ for error treatment
-		// Errors are classified as 1xxx, 2xxx etc
+		// Errors are classified as 1xxx, 2xxx, etc.
 		switch apiErr.Result / 1000 {
 		case 4: // 4xxx: rate limiting
 			doRetry = true
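
The shouldRetry switch classifies pCloud error codes by their thousands digit, so 4000-4999 all land in the rate-limiting case. The integer division is the whole trick:

    // worked examples of the bucketing used in the switch above:
    for _, code := range []int{1004, 2005, 4001, 4999} {
        fmt.Printf("%d -> class %d\n", code, code/1000)
    }
    // 1004 -> class 1, 2005 -> class 2, 4001 -> class 4, 4999 -> class 4
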
@@ -280,8 +280,7 @@ func errorHandler(resp *http.Response) error {
 }
 
 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-	ctx := context.Background()
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)

@@ -289,7 +288,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		return nil, err
 	}
 	root = parsePath(root)
-	oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
+	oAuthClient, ts, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to configure Pcloud")
 	}

@@ -300,12 +299,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		root: root,
 		opt:  *opt,
 		srv:  rest.NewClient(oAuthClient).SetRoot("https://" + opt.Hostname),
-		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
 	f.features = (&fs.Features{
 		CaseInsensitive:         false,
 		CanHaveEmptyDirectories: true,
-	}).Fill(f)
+	}).Fill(ctx, f)
 	f.srv.SetErrorHandler(errorHandler)
 
 	// Renew the token in the background

@@ -622,7 +621,7 @@ func (f *Fs) Precision() time.Duration {
 	return time.Second
 }
 
-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
 //
 // This is stored with the remote path given
 //

@@ -705,7 +704,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 	})
 }
 
-// Move src to this remote using server side move operations.
+// Move src to this remote using server-side move operations.
 //
 // This is stored with the remote path given
 //

@@ -755,7 +754,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 }
 
 // DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -1118,7 +1117,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		Method:        "PUT",
 		Path:          "/uploadfile",
 		Body:          in,
-		ContentType:   fs.MimeType(ctx, o),
+		ContentType:   fs.MimeType(ctx, src),
 		ContentLength: &size,
 		Parameters:    url.Values{},
 		TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding

@@ -1132,7 +1131,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 
 	// Special treatment for a 0 length upload. This doesn't work
 	// with PUT even with Content-Length set (by setting
-	// opts.Body=0), so upload it as a multpart form POST with
+	// opts.Body=0), so upload it as a multipart form POST with
 	// Content-Length set.
 	if size == 0 {
 		formReader, contentType, overhead, err := rest.MultipartUpload(in, opts.Parameters, "content", leaf)
@@ -78,8 +78,8 @@ func init() {
 		Name:        "premiumizeme",
 		Description: "premiumize.me",
 		NewFs:       NewFs,
-		Config: func(name string, m configmap.Mapper) {
-			err := oauthutil.Config("premiumizeme", name, m, oauthConfig, nil)
+		Config: func(ctx context.Context, name string, m configmap.Mapper) {
+			err := oauthutil.Config(ctx, "premiumizeme", name, m, oauthConfig, nil)
 			if err != nil {
 				log.Fatalf("Failed to configure token: %v", err)
 			}

@@ -234,8 +234,7 @@ func (f *Fs) baseParams() url.Values {
 }
 
 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-	ctx := context.Background()
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)

@@ -248,12 +247,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	var client *http.Client
 	var ts *oauthutil.TokenSource
 	if opt.APIKey == "" {
-		client, ts, err = oauthutil.NewClient(name, m, oauthConfig)
+		client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to configure premiumize.me")
 		}
 	} else {
-		client = fshttp.NewClient(fs.Config)
+		client = fshttp.NewClient(ctx)
 	}
 
 	f := &Fs{

@@ -261,13 +260,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		root: root,
 		opt:  *opt,
 		srv:  rest.NewClient(client).SetRoot(rootURL),
-		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
 	f.features = (&fs.Features{
 		CaseInsensitive:         true,
 		CanHaveEmptyDirectories: true,
 		ReadMimeType:            true,
-	}).Fill(f)
+	}).Fill(ctx, f)
 	f.srv.SetErrorHandler(errorHandler)
 
 	// Renew the token in the background

@@ -303,7 +302,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		}
 		return nil, err
 	}
-	f.features.Fill(&tempF)
+	f.features.Fill(ctx, &tempF)
 	// XXX: update the old f here instead of returning tempF, since
 	// `features` were already filled with functions having *f as a receiver.
 	// See https://github.com/rclone/rclone/issues/2182

@@ -346,7 +345,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
 	// Find the leaf in pathID
 	found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
-		if item.Name == leaf {
+		if strings.EqualFold(item.Name, leaf) {
 			pathIDOut = item.ID
 			return true
 		}

@@ -682,7 +681,7 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
 	return nil
 }
 
-// Move src to this remote using server side move operations.
+// Move src to this remote using server-side move operations.
 //
 // This is stored with the remote path given
 //

@@ -718,7 +717,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 }
 
 // DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -68,7 +68,7 @@ func parsePath(path string) (root string) {
 }
 
 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (f fs.Fs, err error) {
 	// defer log.Trace(name, "root=%v", root)("f=%+v, err=%v", &f, &err)
 	// Parse config into Options struct
 	opt := new(Options)

@@ -77,8 +77,8 @@ func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
 		return nil, err
 	}
 	root = parsePath(root)
-	httpClient := fshttp.NewClient(fs.Config)
-	oAuthClient, _, err := oauthutil.NewClientWithBaseClient(name, m, putioConfig, httpClient)
+	httpClient := fshttp.NewClient(ctx)
+	oAuthClient, _, err := oauthutil.NewClientWithBaseClient(ctx, name, m, putioConfig, httpClient)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to configure putio")
 	}

@@ -86,7 +86,7 @@ func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
 		name: name,
 		root: root,
 		opt:  *opt,
-		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 		client: putio.NewClient(oAuthClient),
 		httpClient: httpClient,
 		oAuthClient: oAuthClient,

@@ -95,9 +95,8 @@ func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
 		DuplicateFiles:          true,
 		ReadMimeType:            true,
 		CanHaveEmptyDirectories: true,
-	}).Fill(p)
+	}).Fill(ctx, p)
 	p.dirCache = dircache.New(root, "0", p)
-	ctx := context.Background()
 	// Find the current root
 	err = p.dirCache.FindRoot(ctx, false)
 	if err != nil {

@@ -236,10 +235,10 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
 	// defer log.Trace(f, "src=%+v", src)("o=%+v, err=%v", &o, &err)
-	exisitingObj, err := f.NewObject(ctx, src.Remote())
+	existingObj, err := f.NewObject(ctx, src.Remote())
 	switch err {
 	case nil:
-		return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
+		return existingObj, existingObj.Update(ctx, in, src, options...)
 	case fs.ErrorObjectNotFound:
 		// Not found so create it
 		return f.PutUnchecked(ctx, in, src, options...)

@@ -283,11 +282,10 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
 func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID string, modTime time.Time, options []fs.OpenOption) (location string, err error) {
 	// defer log.Trace(f, "name=%v, size=%v, parentID=%v, modTime=%v", name, size, parentID, modTime.String())("location=%v, err=%v", location, &err)
 	err = f.pacer.Call(func() (bool, error) {
-		req, err := http.NewRequest("POST", "https://upload.put.io/files/", nil)
+		req, err := http.NewRequestWithContext(ctx, "POST", "https://upload.put.io/files/", nil)
 		if err != nil {
 			return false, err
 		}
-		req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
 		req.Header.Set("tus-resumable", "1.0.0")
 		req.Header.Set("upload-length", strconv.FormatInt(size, 10))
 		b64name := base64.StdEncoding.EncodeToString([]byte(f.opt.Enc.FromStandardName(name)))

@@ -429,21 +427,19 @@ func (f *Fs) transferChunk(ctx context.Context, location string, start int64, ch
 }
 
 func (f *Fs) makeUploadHeadRequest(ctx context.Context, location string) (*http.Request, error) {
-	req, err := http.NewRequest("HEAD", location, nil)
+	req, err := http.NewRequestWithContext(ctx, "HEAD", location, nil)
 	if err != nil {
 		return nil, err
 	}
-	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
 	req.Header.Set("tus-resumable", "1.0.0")
 	return req, nil
 }
 
 func (f *Fs) makeUploadPatchRequest(ctx context.Context, location string, in io.Reader, offset, length int64) (*http.Request, error) {
-	req, err := http.NewRequest("PATCH", location, in)
+	req, err := http.NewRequestWithContext(ctx, "PATCH", location, in)
 	if err != nil {
 		return nil, err
 	}
-	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
 	req.Header.Set("tus-resumable", "1.0.0")
 	req.Header.Set("upload-offset", strconv.FormatInt(offset, 10))
 	req.Header.Set("content-length", strconv.FormatInt(length, 10))
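
The request constructors above all apply the same Go 1.13 cleanup: the two-step NewRequest plus WithContext becomes a single call, which also avoids the shallow copy WithContext makes. A minimal sketch of the resulting pattern (location is a placeholder URL):

    func head(ctx context.Context, location string) (*http.Request, error) {
        // before: req, err := http.NewRequest(http.MethodHead, location, nil)
        //         ...
        //         req = req.WithContext(ctx)
        // after: the context is bound at construction time
        req, err := http.NewRequestWithContext(ctx, http.MethodHead, location, nil)
        if err != nil {
            return nil, err
        }
        req.Header.Set("tus-resumable", "1.0.0")
        return req, nil
    }
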
@@ -525,7 +521,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
 	return f.purgeCheck(ctx, dir, false)
 }
 
-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
 //
 // This is stored with the remote path given
 //

@@ -564,7 +560,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Objec
 	return f.NewObject(ctx, remote)
 }
 
-// Move src to this remote using server side move operations.
+// Move src to this remote using server-side move operations.
 //
 // This is stored with the remote path given
 //

@@ -604,7 +600,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (o fs.Objec
 }
 
 // DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -115,7 +115,7 @@ func (o *Object) MimeType(ctx context.Context) string {
 
 // setMetadataFromEntry sets the fs data from a putio.File
 //
-// This isn't a complete set of metadata and has an inacurate date
+// This isn't a complete set of metadata and has an inaccurate date
 func (o *Object) setMetadataFromEntry(info putio.File) error {
 	o.file = &info
 	o.modtime = info.UpdatedAt.Time

@@ -229,11 +229,10 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	var resp *http.Response
 	headers := fs.OpenOptionHeaders(options)
 	err = o.fs.pacer.Call(func() (bool, error) {
-		req, err := http.NewRequest(http.MethodGet, storageURL, nil)
+		req, err := http.NewRequestWithContext(ctx, http.MethodGet, storageURL, nil)
 		if err != nil {
 			return shouldRetry(err)
 		}
-		req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
 		req.Header.Set("User-Agent", o.fs.client.UserAgent)
 
 		// merge headers with extra headers

@@ -1,6 +1,7 @@
 package putio
 
 import (
+	"context"
 	"log"
 	"regexp"
 	"time"

@@ -59,11 +60,11 @@ func init() {
 		Name:        "putio",
 		Description: "Put.io",
 		NewFs:       NewFs,
-		Config: func(name string, m configmap.Mapper) {
+		Config: func(ctx context.Context, name string, m configmap.Mapper) {
 			opt := oauthutil.Options{
 				NoOffline: true,
 			}
-			err := oauthutil.Config("putio", name, m, putioConfig, &opt)
+			err := oauthutil.Config(ctx, "putio", name, m, putioConfig, &opt)
 			if err != nil {
 				log.Fatalf("Failed to configure token: %v", err)
 			}
|
|||||||
@@ -93,7 +93,7 @@ as multipart uploads using this chunk size.
|
|||||||
Note that "--qingstor-upload-concurrency" chunks of this size are buffered
|
Note that "--qingstor-upload-concurrency" chunks of this size are buffered
|
||||||
in memory per transfer.
|
in memory per transfer.
|
||||||
|
|
||||||
If you are transferring large files over high speed links and you have
|
If you are transferring large files over high-speed links and you have
|
||||||
enough memory, then increasing this will speed up the transfers.`,
|
enough memory, then increasing this will speed up the transfers.`,
|
||||||
Default: minChunkSize,
|
Default: minChunkSize,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
@@ -104,10 +104,10 @@ enough memory, then increasing this will speed up the transfers.`,
|
|||||||
This is the number of chunks of the same file that are uploaded
|
This is the number of chunks of the same file that are uploaded
|
||||||
concurrently.
|
concurrently.
|
||||||
|
|
||||||
NB if you set this to > 1 then the checksums of multpart uploads
|
NB if you set this to > 1 then the checksums of multipart uploads
|
||||||
become corrupted (the uploads themselves are not corrupted though).
|
become corrupted (the uploads themselves are not corrupted though).
|
||||||
|
|
||||||
If you are uploading small numbers of large file over high speed link
|
If you are uploading small numbers of large files over high-speed links
|
||||||
and these uploads do not fully utilize your bandwidth, then increasing
|
and these uploads do not fully utilize your bandwidth, then increasing
|
||||||
this may help to speed up the transfers.`,
|
this may help to speed up the transfers.`,
|
||||||
Default: 1,
|
Default: 1,
|
||||||
@@ -207,7 +207,7 @@ func (o *Object) split() (bucket, bucketPath string) {
|
|||||||
func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
|
func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
|
||||||
/*
|
/*
|
||||||
Pattern to match an endpoint,
|
Pattern to match an endpoint,
|
||||||
eg: "http(s)://qingstor.com:443" --> "http(s)", "qingstor.com", 443
|
e.g.: "http(s)://qingstor.com:443" --> "http(s)", "qingstor.com", 443
|
||||||
"http(s)//qingstor.com" --> "http(s)", "qingstor.com", ""
|
"http(s)//qingstor.com" --> "http(s)", "qingstor.com", ""
|
||||||
"qingstor.com" --> "", "qingstor.com", ""
|
"qingstor.com" --> "", "qingstor.com", ""
|
||||||
*/
|
*/
|
||||||
@@ -228,7 +228,7 @@ func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
 }
 
 // qsConnection makes a connection to qingstor
-func qsServiceConnection(opt *Options) (*qs.Service, error) {
+func qsServiceConnection(ctx context.Context, opt *Options) (*qs.Service, error) {
 	accessKeyID := opt.AccessKeyID
 	secretAccessKey := opt.SecretAccessKey
 
@@ -277,7 +277,7 @@ func qsServiceConnection(opt *Options) (*qs.Service, error) {
 	cf.Host = host
 	cf.Port = port
 	// unsupported in v3.1: cf.ConnectionRetries = opt.ConnectionRetries
-	cf.Connection = fshttp.NewClient(fs.Config)
+	cf.Connection = fshttp.NewClient(ctx)
 
 	return qs.Init(cf)
 }
@@ -319,7 +319,7 @@ func (f *Fs) setRoot(root string) {
 }
 
 // NewFs constructs an Fs from the path, bucket:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
@@ -334,7 +334,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if err != nil {
 		return nil, errors.Wrap(err, "qingstor: upload cutoff")
 	}
-	svc, err := qsServiceConnection(opt)
+	svc, err := qsServiceConnection(ctx, opt)
 	if err != nil {
 		return nil, err
 	}
@@ -357,7 +357,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		BucketBased:       true,
 		BucketBasedRootOK: true,
 		SlowModTime:       true,
-	}).Fill(f)
+	}).Fill(ctx, f)
 
 	if f.rootBucket != "" && f.rootDirectory != "" {
 		// Check to see if the object exists
@@ -428,7 +428,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 	return fsObj, fsObj.Update(ctx, in, src, options...)
 }
 
-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
 //
 // This is stored with the remote path given
 //
@@ -872,11 +872,12 @@ func (f *Fs) cleanUpBucket(ctx context.Context, bucket string) (err error) {
 	if err != nil {
 		return err
 	}
-	maxLimit := int(listLimitSize)
+	// maxLimit := int(listLimitSize)
 	var marker *string
 	for {
 		req := qs.ListMultipartUploadsInput{
-			Limit:     &maxLimit,
+			// The default is 200 but this errors if more than 200 is put in so leave at the default
+			// Limit:     &maxLimit,
 			KeyMarker: marker,
 		}
 		var resp *qs.ListMultipartUploadsOutput
@@ -927,7 +928,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
 	}
 	for _, entry := range entries {
 		cleanErr := f.cleanUpBucket(ctx, f.opt.Enc.FromStandardName(entry.Remote()))
-		if err != nil {
+		if cleanErr != nil {
 			fs.Errorf(f, "Failed to cleanup bucket: %q", cleanErr)
 			err = cleanErr
 		}
327	backend/s3/s3.go
@@ -5,6 +5,7 @@ import (
 	"bytes"
 	"context"
 	"crypto/md5"
+	"crypto/tls"
 	"encoding/base64"
 	"encoding/hex"
 	"encoding/xml"
@@ -32,7 +33,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws/request"
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/s3"
-	"github.com/ncw/swift"
+	"github.com/ncw/swift/v2"
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
@@ -58,7 +59,7 @@ import (
 func init() {
 	fs.Register(&fs.RegInfo{
 		Name:        "s3",
-		Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)",
+		Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, and Tencent COS",
 		NewFs:       NewFs,
 		CommandHelp: commandHelp,
 		Options: []fs.Option{{
@@ -130,7 +131,7 @@ func init() {
 			Provider: "AWS",
 			Examples: []fs.OptionExample{{
 				Value: "us-east-1",
-				Help:  "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia or Pacific Northwest.\nLeave location constraint empty.",
+				Help:  "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia, or Pacific Northwest.\nLeave location constraint empty.",
 			}, {
 				Value: "us-east-2",
 				Help:  "US East (Ohio) Region\nNeeds location constraint us-east-2.",
@@ -224,7 +225,7 @@ func init() {
 				Help:  "Use this if unsure. Will use v4 signatures and an empty region.",
 			}, {
 				Value: "other-v2-signature",
-				Help:  "Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.",
+				Help:  "Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH.",
 			}},
 		}, {
 			Name: "endpoint",
@@ -611,7 +612,7 @@ func init() {
 			Provider: "AWS",
 			Examples: []fs.OptionExample{{
 				Value: "",
-				Help:  "Empty for US Region, Northern Virginia or Pacific Northwest.",
+				Help:  "Empty for US Region, Northern Virginia, or Pacific Northwest.",
 			}, {
 				Value: "us-east-2",
 				Help:  "US East (Ohio) Region.",
@@ -798,7 +799,7 @@ This ACL is used for creating objects and if bucket_acl isn't set, for creating
 
 For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
 
-Note that this ACL is applied when server side copying objects as S3
+Note that this ACL is applied when server-side copying objects as S3
 doesn't copy the ACL from the source but rather writes a fresh one.`,
 			Examples: []fs.OptionExample{{
 				Value: "default",
@@ -867,6 +868,12 @@ isn't set then "acl" is used instead.`,
 				Value: "authenticated-read",
 				Help:  "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
 			}},
+		}, {
+			Name:     "requester_pays",
+			Help:     "Enables requester pays option when interacting with S3 bucket.",
+			Provider: "AWS",
+			Default:  false,
+			Advanced: true,
 		}, {
 			Name: "server_side_encryption",
 			Help: "The server-side encryption algorithm used when storing this object in S3.",
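The new requester_pays option maps onto the RequestPayer field the AWS SDK for Go exposes on most request types; a minimal sketch of what setting it looks like (bucket and key here are hypothetical):

    req := &s3.GetObjectInput{
    	Bucket:       aws.String("example-bucket"),
    	Key:          aws.String("path/to/object"),
    	RequestPayer: aws.String(s3.RequestPayerRequester),
    }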
@@ -914,8 +921,11 @@ isn't set then "acl" is used instead.`,
 				Help:  "None",
 			}},
 		}, {
 			Name: "sse_customer_key_md5",
-			Help: "If using SSE-C you must provide the secret encryption key MD5 checksum.",
+			Help: `If using SSE-C you may provide the secret encryption key MD5 checksum (optional).
+
+If you leave it blank, this is calculated automatically from the sse_customer_key provided.
+`,
 			Provider: "AWS,Ceph,Minio",
 			Advanced: true,
 			Examples: []fs.OptionExample{{
@@ -1015,14 +1025,14 @@ The minimum is 0 and the maximum is 5GB.`,
 			Help: `Chunk size to use for uploading.
 
 When uploading files larger than upload_cutoff or files with unknown
-size (eg from "rclone rcat" or uploaded with "rclone mount" or google
+size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google
 photos or google docs) they will be uploaded as multipart uploads
 using this chunk size.
 
 Note that "--s3-upload-concurrency" chunks of this size are buffered
 in memory per transfer.
 
-If you are transferring large files over high speed links and you have
+If you are transferring large files over high-speed links and you have
 enough memory, then increasing this will speed up the transfers.
 
 Rclone will automatically increase the chunk size when uploading a
@@ -1031,7 +1041,7 @@ large file of known size to stay below the 10,000 chunks limit.
 Files of unknown size are uploaded with the configured
 chunk_size. Since the default chunk size is 5MB and there can be at
 most 10,000 chunks, this means that by default the maximum size of
-file you can stream upload is 48GB. If you wish to stream upload
+a file you can stream upload is 48GB. If you wish to stream upload
 larger files then you will need to increase chunk_size.`,
 			Default:  minChunkSize,
 			Advanced: true,
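A quick sanity check on the 48GB figure in that help text: 5 MiB per chunk × 10,000 chunks = 50,000 MiB ≈ 48.8 GiB, which the help rounds down to 48GB; raising chunk_size scales the maximum streamable size proportionally.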
@@ -1054,7 +1064,7 @@ large file of a known size to stay below this number of chunks limit.
 			Name: "copy_cutoff",
 			Help: `Cutoff for switching to multipart copy
 
-Any files larger than this that need to be server side copied will be
+Any files larger than this that need to be server-side copied will be
 copied in chunks of this size.
 
 The minimum is 0 and the maximum is 5GB.`,
@@ -1106,7 +1116,7 @@ If empty it will default to the environment variable "AWS_PROFILE" or
 This is the number of chunks of the same file that are uploaded
 concurrently.
 
-If you are uploading small numbers of large file over high speed link
+If you are uploading small numbers of large files over high-speed links
 and these uploads do not fully utilize your bandwidth, then increasing
 this may help to speed up the transfers.`,
 			Default: 4,
@@ -1120,7 +1130,7 @@ if false then rclone will use virtual path style. See [the AWS S3
 docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
 for more info.
 
-Some providers (eg AWS, Aliyun OSS, Netease COS or Tencent COS) require this set to
+Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to
 false - rclone will do this automatically based on the provider
 setting.`,
 			Default: true,
@@ -1132,7 +1142,7 @@ setting.`,
 If this is false (the default) then rclone will use v4 authentication.
 If it is set then rclone will use v2 authentication.
 
-Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.`,
+Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH.`,
 			Default:  false,
 			Advanced: true,
 		}, {
@@ -1167,10 +1177,47 @@ In Ceph, this can be increased with the "rgw list buckets max chunk" option.
 			Advanced: true,
 		}, {
 			Name: "no_check_bucket",
-			Help: `If set don't attempt to check the bucket exists or create it
+			Help: `If set, don't attempt to check the bucket exists or create it
 
 This can be useful when trying to minimise the number of transactions
 rclone does if you know the bucket exists already.
 
+It can also be needed if the user you are using does not have bucket
+creation permissions. Before v1.52.0 this would have passed silently
+due to a bug.
+`,
+			Default:  false,
+			Advanced: true,
+		}, {
+			Name: "no_head",
+			Help: `If set, don't HEAD uploaded objects to check integrity
+
+This can be useful when trying to minimise the number of transactions
+rclone does.
+
+Setting it means that if rclone receives a 200 OK message after
+uploading an object with PUT then it will assume that it got uploaded
+properly.
+
+In particular it will assume:
+
+- the metadata, including modtime, storage class and content type was as uploaded
+- the size was as uploaded
+
+It reads the following items from the response for a single part PUT:
+
+- the MD5SUM
+- The uploaded date
+
+For multipart uploads these items aren't read.
+
+If an source object of unknown length is uploaded then rclone **will** do a
+HEAD request.
+
+Setting this flag increases the chance for undetected upload failures,
+in particular an incorrect size, so it isn't recommended for normal
+operation. In practice the chance of an undetected upload failure is
+very small even with this flag.
 `,
 			Default:  false,
 			Advanced: true,
@@ -1203,13 +1250,26 @@ This option controls how often unused buffers will be removed from the pool.`,
 			Default:  memoryPoolUseMmap,
 			Advanced: true,
 			Help:     `Whether to use mmap buffers in internal memory pool.`,
+		}, {
+			Name:     "disable_http2",
+			Default:  false,
+			Advanced: true,
+			Help: `Disable usage of http2 for S3 backends
+
+There is currently an unsolved issue with the s3 (specifically minio) backend
+and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be
+disabled here. When the issue is solved this flag will be removed.
+
+See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631
+
+`,
 		},
 	}})
 }
 
 // Constants
 const (
-	metaMtime   = "Mtime"     // the meta key to store mtime in - eg X-Amz-Meta-Mtime
+	metaMtime   = "Mtime"     // the meta key to store mtime in - e.g. X-Amz-Meta-Mtime
 	metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
 	// The maximum size of object we can COPY - this should be 5GiB but is < 5GB for b2 compatibility
 	// See https://forum.rclone.org/t/copying-files-within-a-b2-bucket/16680/76
@@ -1236,6 +1296,7 @@ type Options struct {
 	LocationConstraint   string `config:"location_constraint"`
 	ACL                  string `config:"acl"`
 	BucketACL            string `config:"bucket_acl"`
+	RequesterPays        bool   `config:"requester_pays"`
 	ServerSideEncryption string `config:"server_side_encryption"`
 	SSEKMSKeyID          string `config:"sse_kms_key_id"`
 	SSECustomerAlgorithm string `config:"sse_customer_algorithm"`
@@ -1257,9 +1318,11 @@ type Options struct {
 	LeavePartsOnError   bool                 `config:"leave_parts_on_error"`
 	ListChunk           int64                `config:"list_chunk"`
 	NoCheckBucket       bool                 `config:"no_check_bucket"`
+	NoHead              bool                 `config:"no_head"`
 	Enc                 encoder.MultiEncoder `config:"encoding"`
 	MemoryPoolFlushTime fs.Duration          `config:"memory_pool_flush_time"`
 	MemoryPoolUseMmap   bool                 `config:"memory_pool_use_mmap"`
+	DisableHTTP2        bool                 `config:"disable_http2"`
 }
 
 // Fs represents a remote s3 server
@@ -1267,6 +1330,8 @@ type Fs struct {
 	name     string           // the name of the remote
 	root     string           // root of the bucket - ignore all objects above this
 	opt      Options          // parsed options
+	ci       *fs.ConfigInfo   // global config
+	ctx      context.Context  // global context for reading config
 	features *fs.Features     // optional features
 	c        *s3.S3           // the connection to the s3 server
 	ses      *session.Session // the s3 session
@@ -1276,6 +1341,7 @@ type Fs struct {
 	pacer *fs.Pacer    // To pace the API calls
 	srv   *http.Client // a plain http client
 	pool  *pool.Pool   // memory pool
+	etagIsNotMD5 bool  // if set ETags are not MD5s
 }
 
 // Object describes a s3 object
@@ -1286,12 +1352,12 @@ type Object struct {
 	// that in you need to call readMetaData
 	fs           *Fs                // what this object is part of
 	remote       string             // The remote path
-	etag         string             // md5sum of the object
+	md5          string             // md5sum of the object
 	bytes        int64              // size of the object
 	lastModified time.Time          // Last modified
 	meta         map[string]*string // The object metadata if known - may be nil
 	mimeType     string             // MimeType of object - may be ""
-	storageClass string             // eg GLACIER
+	storageClass string             // e.g. GLACIER
 }
 
 // ------------------------------------------------------------
@@ -1325,6 +1391,7 @@ func (f *Fs) Features() *fs.Features {
 // retryErrorCodes is a slice of error codes that we will retry
 // See: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
 var retryErrorCodes = []int{
+	429, // Too Many Requests
 	500, // Internal Server Error - "We encountered an internal error. Please try again."
 	503, // Service Unavailable/Slow Down - "Reduce your request rate"
 }
@@ -1381,8 +1448,21 @@ func (o *Object) split() (bucket, bucketPath string) {
 	return o.fs.split(o.remote)
 }
 
+// getClient makes an http client according to the options
+func getClient(ctx context.Context, opt *Options) *http.Client {
+	// TODO: Do we need cookies too?
+	t := fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
+		if opt.DisableHTTP2 {
+			t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
+		}
+	})
+	return &http.Client{
+		Transport: t,
+	}
+}
+
 // s3Connection makes a connection to s3
-func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
+func s3Connection(ctx context.Context, opt *Options) (*s3.S3, *session.Session, error) {
 	// Make the auth
 	v := credentials.Value{
 		AccessKeyID: opt.AccessKeyID,
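getClient leans on a standard net/http behaviour: installing a non-nil but empty TLSNextProto map stops the transport from negotiating HTTP/2 via ALPN. The same trick in isolation, as a sketch:

    package main

    import (
    	"crypto/tls"
    	"net/http"
    )

    func main() {
    	// An empty (but non-nil) TLSNextProto map disables HTTP/2 negotiation.
    	tr := &http.Transport{
    		TLSNextProto: map[string]func(string, *tls.Conn) http.RoundTripper{},
    	}
    	client := &http.Client{Transport: tr}
    	_ = client
    }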
@@ -1391,6 +1471,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
 	}
 
 	lowTimeoutClient := &http.Client{Timeout: 1 * time.Second} // low timeout to ec2 metadata service
+
 	def := defaults.Get()
 	def.Config.HTTPClient = lowTimeoutClient
 
@@ -1459,7 +1540,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
 	awsConfig := aws.NewConfig().
 		WithMaxRetries(0). // Rely on rclone's retry logic
 		WithCredentials(cred).
-		WithHTTPClient(fshttp.NewClient(fs.Config)).
+		WithHTTPClient(getClient(ctx, opt)).
 		WithS3ForcePathStyle(opt.ForcePathStyle).
 		WithS3UseAccelerate(opt.UseAccelerateEndpoint).
 		WithS3UsEast1RegionalEndpoint(endpoints.RegionalS3UsEast1Endpoint)
@@ -1540,7 +1621,7 @@ func (f *Fs) setRoot(root string) {
 }
 
 // NewFs constructs an Fs from the path, bucket:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
@@ -1561,27 +1642,44 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if opt.BucketACL == "" {
 		opt.BucketACL = opt.ACL
 	}
-	c, ses, err := s3Connection(opt)
+	if opt.SSECustomerKey != "" && opt.SSECustomerKeyMD5 == "" {
+		// calculate CustomerKeyMD5 if not supplied
+		md5sumBinary := md5.Sum([]byte(opt.SSECustomerKey))
+		opt.SSECustomerKeyMD5 = base64.StdEncoding.EncodeToString(md5sumBinary[:])
+	}
+	c, ses, err := s3Connection(ctx, opt)
 	if err != nil {
 		return nil, err
 	}
 
+	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name:  name,
 		opt:   *opt,
+		ci:    ci,
+		ctx:   ctx,
 		c:     c,
 		ses:   ses,
-		pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
+		pacer: fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep))),
 		cache: bucket.NewCache(),
-		srv:   fshttp.NewClient(fs.Config),
+		srv:   getClient(ctx, opt),
 		pool: pool.New(
 			time.Duration(opt.MemoryPoolFlushTime),
 			int(opt.ChunkSize),
-			opt.UploadConcurrency*fs.Config.Transfers,
+			opt.UploadConcurrency*ci.Transfers,
 			opt.MemoryPoolUseMmap,
 		),
 	}
+	if opt.ServerSideEncryption == "aws:kms" || opt.SSECustomerAlgorithm != "" {
+		// From: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html
+		//
+		// Objects encrypted by SSE-S3 or plaintext have ETags that are an MD5
+		// digest of their data.
+		//
+		// Objects encrypted by SSE-C or SSE-KMS have ETags that are not an
+		// MD5 digest of their object data.
+		f.etagIsNotMD5 = true
+	}
 	f.setRoot(root)
 	f.features = (&fs.Features{
 		ReadMimeType: true,
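That auto-calculation is the form S3 expects in the x-amz-server-side-encryption-customer-key-MD5 header: MD5 the raw key bytes, then base64-encode the 16-byte digest. As a standalone sketch (the key below is a made-up placeholder):

    package main

    import (
    	"crypto/md5"
    	"encoding/base64"
    	"fmt"
    )

    func main() {
    	key := "0123456789abcdef0123456789abcdef" // hypothetical 32-byte SSE-C key
    	sum := md5.Sum([]byte(key))
    	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
    }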
@@ -1591,27 +1689,23 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		SetTier:     true,
 		GetTier:     true,
 		SlowModTime: true,
-	}).Fill(f)
+	}).Fill(ctx, f)
 	if f.rootBucket != "" && f.rootDirectory != "" {
-		// Check to see if the object exists
-		encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
-		req := s3.HeadObjectInput{
-			Bucket: &f.rootBucket,
-			Key:    &encodedDirectory,
-		}
-		err = f.pacer.Call(func() (bool, error) {
-			_, err = f.c.HeadObject(&req)
-			return f.shouldRetry(err)
-		})
-		if err == nil {
-			newRoot := path.Dir(f.root)
-			if newRoot == "." {
-				newRoot = ""
-			}
-			f.setRoot(newRoot)
-			// return an error with an fs which points to the parent
-			return f, fs.ErrorIsFile
-		}
+		// Check to see if the (bucket,directory) is actually an existing file
+		oldRoot := f.root
+		newRoot, leaf := path.Split(oldRoot)
+		f.setRoot(newRoot)
+		_, err := f.NewObject(ctx, leaf)
+		if err != nil {
+			if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
+				// File doesn't exist or is a directory so return old f
+				f.setRoot(oldRoot)
+				return f, nil
+			}
+			return nil, err
+		}
+		// return an error with an fs which points to the parent
+		return f, fs.ErrorIsFile
 	}
 	// f.listMultipartUploads()
 	return f, nil
@@ -1633,7 +1727,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *s3.Obje
 	} else {
 		o.lastModified = *info.LastModified
 	}
-	o.etag = aws.StringValue(info.ETag)
+	o.setMD5FromEtag(aws.StringValue(info.ETag))
 	o.bytes = aws.Int64Value(info.Size)
 	o.storageClass = aws.StringValue(info.StorageClass)
 } else {
@@ -1685,7 +1779,7 @@ func (f *Fs) updateRegionForBucket(bucket string) error {
 	// Make a new session with the new region
 	oldRegion := f.opt.Region
 	f.opt.Region = region
-	c, ses, err := s3Connection(&f.opt)
+	c, ses, err := s3Connection(f.ctx, &f.opt)
 	if err != nil {
 		return errors.Wrap(err, "creating new session failed")
 	}
@@ -1745,6 +1839,9 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 	if urlEncodeListings {
 		req.EncodingType = aws.String(s3.EncodingTypeUrl)
 	}
+	if f.opt.RequesterPays {
+		req.RequestPayer = aws.String(s3.RequestPayerRequester)
+	}
 	var resp *s3.ListObjectsOutput
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
@@ -2110,7 +2207,7 @@ func pathEscape(s string) string {
 	return strings.Replace(rest.URLPathEscape(s), "+", "%2B", -1)
 }
 
-// copy does a server side copy
+// copy does a server-side copy
 //
 // It adds the boiler plate to the req passed in and calls the s3
 // method
@@ -2120,9 +2217,24 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
 	req.Key = &dstPath
 	source := pathEscape(path.Join(srcBucket, srcPath))
 	req.CopySource = &source
+	if f.opt.RequesterPays {
+		req.RequestPayer = aws.String(s3.RequestPayerRequester)
+	}
 	if f.opt.ServerSideEncryption != "" {
 		req.ServerSideEncryption = &f.opt.ServerSideEncryption
 	}
+	if f.opt.SSECustomerAlgorithm != "" {
+		req.SSECustomerAlgorithm = &f.opt.SSECustomerAlgorithm
+		req.CopySourceSSECustomerAlgorithm = &f.opt.SSECustomerAlgorithm
+	}
+	if f.opt.SSECustomerKey != "" {
+		req.SSECustomerKey = &f.opt.SSECustomerKey
+		req.CopySourceSSECustomerKey = &f.opt.SSECustomerKey
+	}
+	if f.opt.SSECustomerKeyMD5 != "" {
+		req.SSECustomerKeyMD5 = &f.opt.SSECustomerKeyMD5
+		req.CopySourceSSECustomerKeyMD5 = &f.opt.SSECustomerKeyMD5
+	}
 	if f.opt.SSEKMSKeyID != "" {
 		req.SSEKMSKeyId = &f.opt.SSEKMSKeyID
 	}
@@ -2244,7 +2356,7 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
 	})
 }
 
-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
 //
 // This is stored with the remote path given
 //
@@ -2288,7 +2400,7 @@ func (f *Fs) getMemoryPool(size int64) *pool.Pool {
 	return pool.New(
 		time.Duration(f.opt.MemoryPoolFlushTime),
 		int(size),
-		f.opt.UploadConcurrency*fs.Config.Transfers,
+		f.opt.UploadConcurrency*f.ci.Transfers,
 		f.opt.MemoryPoolUseMmap,
 	)
 }
@@ -2335,7 +2447,7 @@ All the objects shown will be marked for restore, then
     rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard
 
 It returns a list of status dictionaries with Remote and Status
-keys. The Status will be OK if it was successfull or an error message
+keys. The Status will be OK if it was successful or an error message
 if not.
 
     [
@@ -2500,7 +2612,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
 // listMultipartUploads lists all outstanding multipart uploads for (bucket, key)
 //
 // Note that rather lazily we treat key as a prefix so it matches
-// directories and objects. This could suprise the user if they ask
+// directories and objects. This could surprise the user if they ask
 // for "dir" and it returns "dirKey"
 func (f *Fs) listMultipartUploads(ctx context.Context, bucket, key string) (uploads []*s3.MultipartUpload, err error) {
 	var (
@@ -2634,30 +2746,38 @@ func (o *Object) Remote() string {
 
 var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
 
+// Set the MD5 from the etag
+func (o *Object) setMD5FromEtag(etag string) {
+	if o.fs.etagIsNotMD5 {
+		o.md5 = ""
+		return
+	}
+	if etag == "" {
+		o.md5 = ""
+		return
+	}
+	hash := strings.Trim(strings.ToLower(etag), `"`)
+	// Check the etag is a valid md5sum
+	if !matchMd5.MatchString(hash) {
+		o.md5 = ""
+		return
+	}
+	o.md5 = hash
+}
+
 // Hash returns the Md5sum of an object returning a lowercase hex string
 func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 	if t != hash.MD5 {
 		return "", hash.ErrUnsupported
 	}
-	hash := strings.Trim(strings.ToLower(o.etag), `"`)
-	// Check the etag is a valid md5sum
-	if !matchMd5.MatchString(hash) {
+	// If we haven't got an MD5, then check the metadata
+	if o.md5 == "" {
 		err := o.readMetaData(ctx)
 		if err != nil {
 			return "", err
 		}
-		if md5sum, ok := o.meta[metaMD5Hash]; ok {
-			md5sumBytes, err := base64.StdEncoding.DecodeString(*md5sum)
-			if err != nil {
-				return "", err
-			}
-			hash = hex.EncodeToString(md5sumBytes)
-		} else {
-			hash = ""
-		}
 	}
-	return hash, nil
+	return o.md5, nil
 }
 
 // Size returns the size of an object in bytes
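The gatekeeping here is worth seeing on its own: plain and SSE-S3 objects have ETags that are MD5s, while multipart, SSE-C, and SSE-KMS objects don't. A sketch of the validation (illustrative names, mirroring but not copying the code above):

    // etagToMD5 returns the ETag as a lowercase hex MD5, or "" if it
    // cannot be one, e.g. multipart ETags look like "<md5>-<parts>".
    var md5RE = regexp.MustCompile(`^[0-9a-f]{32}$`)

    func etagToMD5(etag string) string {
    	h := strings.Trim(strings.ToLower(etag), `"`)
    	if !md5RE.MatchString(h) {
    		return ""
    	}
    	return h
    }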
@@ -2671,6 +2791,18 @@ func (o *Object) headObject(ctx context.Context) (resp *s3.HeadObjectOutput, err
 		Bucket: &bucket,
 		Key:    &bucketPath,
 	}
+	if o.fs.opt.RequesterPays {
+		req.RequestPayer = aws.String(s3.RequestPayerRequester)
+	}
+	if o.fs.opt.SSECustomerAlgorithm != "" {
+		req.SSECustomerAlgorithm = &o.fs.opt.SSECustomerAlgorithm
+	}
+	if o.fs.opt.SSECustomerKey != "" {
+		req.SSECustomerKey = &o.fs.opt.SSECustomerKey
+	}
+	if o.fs.opt.SSECustomerKeyMD5 != "" {
+		req.SSECustomerKeyMD5 = &o.fs.opt.SSECustomerKeyMD5
+	}
 	err = o.fs.pacer.Call(func() (bool, error) {
 		var err error
 		resp, err = o.fs.c.HeadObjectWithContext(ctx, &req)
@@ -2705,12 +2837,23 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 	if resp.ContentLength != nil {
 		size = *resp.ContentLength
 	}
-	o.etag = aws.StringValue(resp.ETag)
+	o.setMD5FromEtag(aws.StringValue(resp.ETag))
 	o.bytes = size
 	o.meta = resp.Metadata
 	if o.meta == nil {
 		o.meta = map[string]*string{}
 	}
+	// Read MD5 from metadata if present
+	if md5sumBase64, ok := o.meta[metaMD5Hash]; ok {
+		md5sumBytes, err := base64.StdEncoding.DecodeString(*md5sumBase64)
+		if err != nil {
+			fs.Debugf(o, "Failed to read md5sum from metadata %q: %v", *md5sumBase64, err)
+		} else if len(md5sumBytes) != 16 {
+			fs.Debugf(o, "Failed to read md5sum from metadata %q: wrong length", *md5sumBase64)
+		} else {
+			o.md5 = hex.EncodeToString(md5sumBytes)
+		}
+	}
 	o.storageClass = aws.StringValue(resp.StorageClass)
 	if resp.LastModified == nil {
 		fs.Logf(o, "Failed to read last modified from HEAD: %v", err)
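The metadata stores the MD5 base64-encoded (16 raw bytes), while rclone reports it as 32 lowercase hex digits; the decode-and-re-encode round trip looks like this in isolation:

    package main

    import (
    	"crypto/md5"
    	"encoding/base64"
    	"encoding/hex"
    	"fmt"
    )

    func main() {
    	md5Base64 := "1B2M2Y8AsgTpgAmY7PhCfg==" // MD5 of the empty string, base64
    	b, err := base64.StdEncoding.DecodeString(md5Base64)
    	if err == nil && len(b) == md5.Size {
    		fmt.Println(hex.EncodeToString(b)) // d41d8cd98f00b204e9800998ecf8427e
    	}
    }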
@@ -2727,7 +2870,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
 func (o *Object) ModTime(ctx context.Context) time.Time {
-	if fs.Config.UseServerModTime {
+	if o.fs.ci.UseServerModTime {
 		return o.lastModified
 	}
 	err := o.readMetaData(ctx)
@@ -2769,6 +2912,9 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 		Metadata:          o.meta,
 		MetadataDirective: aws.String(s3.MetadataDirectiveReplace), // replace metadata with that passed in
 	}
+	if o.fs.opt.RequesterPays {
+		req.RequestPayer = aws.String(s3.RequestPayerRequester)
+	}
 	return o.fs.copy(ctx, &req, bucket, bucketPath, bucket, bucketPath, o)
 }
 
@@ -2784,6 +2930,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		Bucket: &bucket,
 		Key:    &bucketPath,
 	}
+	if o.fs.opt.RequesterPays {
+		req.RequestPayer = aws.String(s3.RequestPayerRequester)
+	}
 	if o.fs.opt.SSECustomerAlgorithm != "" {
 		req.SSECustomerAlgorithm = &o.fs.opt.SSECustomerAlgorithm
 	}
@@ -3033,8 +3182,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	}
 
 	// read the md5sum if available
-	// - for non multpart
+	// - for non multipart
 	//    - so we can add a ContentMD5
+	//    - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
 	// - for multipart provided checksums aren't disabled
 	//    - so we can add the md5sum in the metadata as metaMD5Hash
 	var md5sum string
@@ -3044,7 +3194,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		hashBytes, err := hex.DecodeString(hash)
 		if err == nil {
 			md5sum = base64.StdEncoding.EncodeToString(hashBytes)
-			if multipart {
+			if (multipart || o.fs.etagIsNotMD5) && !o.fs.opt.DisableChecksum {
+				// Set the md5sum as metadata on the object if
+				// - a multipart upload
+				// - the Etag is not an MD5, eg when using SSE/SSE-C
+				// provided checksums aren't disabled
 				metadata[metaMD5Hash] = &md5sum
 			}
 		}
@@ -3063,6 +3217,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	if md5sum != "" {
 		req.ContentMD5 = &md5sum
 	}
+	if o.fs.opt.RequesterPays {
+		req.RequestPayer = aws.String(s3.RequestPayerRequester)
+	}
 	if o.fs.opt.ServerSideEncryption != "" {
 		req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
 	}
@@ -3111,6 +3268,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		}
 	}
 
+	var resp *http.Response // response from PUT
 	if multipart {
 		err = o.uploadMultipart(ctx, &req, size, in)
 		if err != nil {
@@ -3140,18 +3298,18 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		}
 
 		// create the vanilla http request
-		httpReq, err := http.NewRequest("PUT", url, in)
+		httpReq, err := http.NewRequestWithContext(ctx, "PUT", url, in)
 		if err != nil {
 			return errors.Wrap(err, "s3 upload: new request")
 		}
-		httpReq = httpReq.WithContext(ctx) // go1.13 can use NewRequestWithContext
 
 		// set the headers we signed and the length
 		httpReq.Header = headers
 		httpReq.ContentLength = size
 
 		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-			resp, err := o.fs.srv.Do(httpReq)
+			var err error
+			resp, err = o.fs.srv.Do(httpReq)
 			if err != nil {
 				return o.fs.shouldRetry(err)
 			}
@@ -3170,6 +3328,26 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		}
 	}
 
+	// User requested we don't HEAD the object after uploading it
+	// so make up the object as best we can assuming it got
+	// uploaded properly. If size < 0 then we need to do the HEAD.
+	if o.fs.opt.NoHead && size >= 0 {
+		o.md5 = md5sum
+		o.bytes = size
+		o.lastModified = time.Now()
+		o.meta = req.Metadata
+		o.mimeType = aws.StringValue(req.ContentType)
+		o.storageClass = aws.StringValue(req.StorageClass)
+		// If we have done a single part PUT request then we can read these
+		if resp != nil {
+			if date, err := http.ParseTime(resp.Header.Get("Date")); err == nil {
+				o.lastModified = date
+			}
+			o.setMD5FromEtag(resp.Header.Get("Etag"))
+		}
+		return nil
+	}
+
 	// Read the metadata from the newly created object
 	o.meta = nil // wipe old metadata
 	err = o.readMetaData(ctx)
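With no_head set, everything rclone records about the fresh object has to come from the PUT response itself. A hypothetical helper, not part of the diff, showing the Date-header fallback used above:

    // uploadedAt pulls the server timestamp from a PUT response,
    // falling back to local time when the Date header is missing or bad.
    func uploadedAt(resp *http.Response) time.Time {
    	if date, err := http.ParseTime(resp.Header.Get("Date")); err == nil {
    		return date
    	}
    	return time.Now()
    }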
@@ -3183,6 +3361,9 @@ func (o *Object) Remove(ctx context.Context) error {
 		Bucket: &bucket,
 		Key:    &bucketPath,
 	}
+	if o.fs.opt.RequesterPays {
+		req.RequestPayer = aws.String(s3.RequestPayerRequester)
+	}
 	err := o.fs.pacer.Call(func() (bool, error) {
 		_, err := o.fs.c.DeleteObjectWithContext(ctx, &req)
 		return o.fs.shouldRetry(err)
@@ -53,6 +53,7 @@ func sign(AccessKey, SecretKey string, req *http.Request) {
 	var md5 string
 	var contentType string
 	var headersToSign []string
+	tmpHeadersToSign := make(map[string][]string)
 	for k, v := range req.Header {
 		k = strings.ToLower(k)
 		switch k {
@@ -62,15 +63,24 @@ func sign(AccessKey, SecretKey string, req *http.Request) {
 			contentType = v[0]
 		default:
 			if strings.HasPrefix(k, "x-amz-") {
-				vall := strings.Join(v, ",")
-				headersToSign = append(headersToSign, k+":"+vall)
+				tmpHeadersToSign[k] = v
 			}
 		}
 	}
+	var keys []string
+	for k := range tmpHeadersToSign {
+		keys = append(keys, k)
+	}
+	// https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html
+	sort.Strings(keys)
+
+	for _, key := range keys {
+		vall := strings.Join(tmpHeadersToSign[key], ",")
+		headersToSign = append(headersToSign, key+":"+vall)
+	}
 	// Make headers of interest into canonical string
 	var joinedHeadersToSign string
 	if len(headersToSign) > 0 {
-		sort.StringSlice(headersToSign).Sort()
 		joinedHeadersToSign = strings.Join(headersToSign, "\n") + "\n"
 	}
 
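The v2 signing fix replaces sorting of already-joined "key:value" strings with a sort on the lowercased header keys themselves, as the AWS REST authentication document describes. A standalone sketch of the canonicalization under that scheme (the function name is made up):

    // canonicalAmzHeaders builds the sorted x-amz-* block of a v2
    // string-to-sign: lowercase keys, values joined by commas, sorted by key.
    func canonicalAmzHeaders(h http.Header) string {
    	byKey := map[string][]string{}
    	var keys []string
    	for k, v := range h {
    		k = strings.ToLower(k)
    		if strings.HasPrefix(k, "x-amz-") {
    			byKey[k] = v
    			keys = append(keys, k)
    		}
    	}
    	sort.Strings(keys)
    	var out []string
    	for _, k := range keys {
    		out = append(out, k+":"+strings.Join(byKey[k], ","))
    	}
    	if len(out) == 0 {
    		return ""
    	}
    	return strings.Join(out, "\n") + "\n"
    }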
@@ -1,6 +1,7 @@
 package seafile
 
 import (
+	"context"
 	"fmt"
 	"net/url"
 	"sync"
@@ -27,7 +28,7 @@ func init() {
 }
 
 // getPacer returns the unique pacer for that remote URL
-func getPacer(remote string) *fs.Pacer {
+func getPacer(ctx context.Context, remote string) *fs.Pacer {
 	pacerMutex.Lock()
 	defer pacerMutex.Unlock()
 
@@ -37,6 +38,7 @@ func getPacer(remote string) *fs.Pacer {
 	}
 
 	pacers[remote] = fs.NewPacer(
+		ctx,
 		pacer.NewDefault(
 			pacer.MinSleep(minSleep),
 			pacer.MaxSleep(maxSleep),
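The pacer cache keyed by remote URL means concurrent seafile remotes pointing at the same server share one rate limiter. A condensed sketch of the pattern (names mirror the diff, but the body is illustrative and the pacer options are omitted):

    var (
    	pacerMutex sync.Mutex
    	pacers     = map[string]*fs.Pacer{}
    )

    // getPacer returns one shared pacer per remote URL, creating it with
    // the caller's context on first use.
    func getPacer(ctx context.Context, remote string) *fs.Pacer {
    	pacerMutex.Lock()
    	defer pacerMutex.Unlock()
    	if p, ok := pacers[remote]; ok {
    		return p
    	}
    	pacers[remote] = fs.NewPacer(ctx, pacer.NewDefault())
    	return pacers[remote]
    }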
@@ -147,7 +147,7 @@ type Fs struct {
|
|||||||
// ------------------------------------------------------------
|
// ------------------------------------------------------------
|
||||||
|
|
||||||
// NewFs constructs an Fs from the path, container:path
|
// NewFs constructs an Fs from the path, container:path
|
||||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
// Parse config into Options struct
|
// Parse config into Options struct
|
||||||
opt := new(Options)
|
opt := new(Options)
|
||||||
err := configstruct.Set(m, opt)
|
err := configstruct.Set(m, opt)
|
||||||
@@ -197,15 +197,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
opt: *opt,
|
opt: *opt,
|
||||||
endpoint: u,
|
endpoint: u,
|
||||||
endpointURL: u.String(),
|
endpointURL: u.String(),
|
||||||
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(u.String()),
|
srv: rest.NewClient(fshttp.NewClient(ctx)).SetRoot(u.String()),
|
||||||
pacer: getPacer(opt.URL),
|
pacer: getPacer(ctx, opt.URL),
|
||||||
}
|
}
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
CanHaveEmptyDirectories: true,
|
CanHaveEmptyDirectories: true,
|
||||||
BucketBased: opt.LibraryName == "",
|
BucketBased: opt.LibraryName == "",
|
||||||
}).Fill(f)
|
}).Fill(ctx, f)
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
serverInfo, err := f.getServerInfo(ctx)
|
serverInfo, err := f.getServerInfo(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -297,7 +296,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Config callback for 2FA
|
// Config callback for 2FA
|
||||||
func Config(name string, m configmap.Mapper) {
|
func Config(ctx context.Context, name string, m configmap.Mapper) {
|
||||||
|
ci := fs.GetConfig(ctx)
|
||||||
serverURL, ok := m.Get(configURL)
|
serverURL, ok := m.Get(configURL)
|
||||||
if !ok || serverURL == "" {
|
if !ok || serverURL == "" {
|
||||||
// If there's no server URL, it means we're trying an operation at the backend level, like a "rclone authorize seafile"
|
// If there's no server URL, it means we're trying an operation at the backend level, like a "rclone authorize seafile"
|
||||||
@@ -306,7 +306,7 @@ func Config(name string, m configmap.Mapper) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Stop if we are running non-interactive config
|
// Stop if we are running non-interactive config
|
||||||
if fs.Config.AutoConfirm {
|
if ci.AutoConfirm {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -343,7 +343,7 @@ func Config(name string, m configmap.Mapper) {
|
|||||||
if !strings.HasPrefix(url, "/") {
|
if !strings.HasPrefix(url, "/") {
|
||||||
url += "/"
|
url += "/"
|
||||||
}
|
}
|
||||||
srv := rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(url)
|
srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(url)
|
||||||
|
|
||||||
// We loop asking for a 2FA code
|
// We loop asking for a 2FA code
|
||||||
for {
|
for {
|
||||||
@@ -663,7 +663,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) e
|
|||||||
|
|
||||||
// ==================== Optional Interface fs.Copier ====================
|
// ==================== Optional Interface fs.Copier ====================
|
||||||
|
|
||||||
// Copy src to this remote using server side copy operations.
|
// Copy src to this remote using server-side copy operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
@@ -714,7 +714,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
|
|
||||||
// ==================== Optional Interface fs.Mover ====================
|
// ==================== Optional Interface fs.Mover ====================
|
||||||
|
|
||||||
// Move src to this remote using server side move operations.
|
// Move src to this remote using server-side move operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
@@ -804,7 +804,7 @@ func (f *Fs) adjustDestination(ctx context.Context, libraryID, srcFilename, dstP
|
|||||||
// ==================== Optional Interface fs.DirMover ====================
|
// ==================== Optional Interface fs.DirMover ====================
|
||||||
|
|
||||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||||
// using server side move operations.
|
// using server-side move operations.
|
||||||
//
|
//
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
|
|||||||
@@ -11,7 +11,6 @@ import (
 "io"
 "io/ioutil"
 "os"
-"os/user"
 "path"
 "regexp"
 "strconv"
@@ -22,6 +21,7 @@ import (
 "github.com/pkg/errors"
 "github.com/pkg/sftp"
 "github.com/rclone/rclone/fs"
+"github.com/rclone/rclone/fs/accounting"
 "github.com/rclone/rclone/fs/config"
 "github.com/rclone/rclone/fs/config/configmap"
 "github.com/rclone/rclone/fs/config/configstruct"
@@ -33,6 +33,7 @@ import (
 "github.com/rclone/rclone/lib/readers"
 sshagent "github.com/xanzy/ssh-agent"
 "golang.org/x/crypto/ssh"
+"golang.org/x/crypto/ssh/knownhosts"
 )

 const (
@@ -43,7 +44,7 @@ const (
 )

 var (
-currentUser = readCurrentUser()
+currentUser = env.CurrentUser()
 )

 func init() {
@@ -82,6 +83,21 @@ func init() {
 Only PEM encrypted key files (old OpenSSH format) are supported. Encrypted keys
 in the new OpenSSH format can't be used.`,
 IsPassword: true,
+}, {
+Name:       "pubkey_file",
+Help: `Optional path to public key file.
+
+Set this if you have a signed certificate you want to use for authentication.` + env.ShellExpandHelp,
+}, {
+Name: "known_hosts_file",
+Help: `Optional path to known_hosts file.
+
+Set this value to enable server host key validation.` + env.ShellExpandHelp,
+Advanced: true,
+Examples: []fs.OptionExample{{
+Value: "~/.ssh/known_hosts",
+Help: "Use OpenSSH's known_hosts file",
+}},
 }, {
 Name: "key_use_agent",
 Help: `When set forces the usage of the ssh-agent.
@@ -176,6 +192,20 @@ Home directory can be found in a shared folder called "home"

 The subsystem option is ignored when server_command is defined.`,
 Advanced: true,
+}, {
+Name: "use_fstat",
+Default: false,
+Help: `If set use fstat instead of stat
+
+Some servers limit the amount of open files and calling Stat after opening
+the file will throw an error from the server. Setting this flag will call
+Fstat instead of Stat which is called on an already open file handle.
+
+It has been found that this helps with IBM Sterling SFTP servers which have
+"extractability" level set to 1 which means only 1 file can be opened at
+any given time.
+`,
+Advanced: true,
 }},
 }
 fs.Register(fsi)
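The new use_fstat option described above is handed to the pkg/sftp client via sftp.UseFstat, as a later hunk in this file shows. A standalone sketch of that client option, with the connection itself left as a placeholder:

package main

import (
	"log"

	"github.com/pkg/sftp"
	"golang.org/x/crypto/ssh"
)

func main() {
	var sshConn *ssh.Client // assume an established SSH connection here
	// UseFstat makes the client call Fstat on the already open handle
	// rather than Stat by path, which helps servers that cap open files.
	client, err := sftp.NewClient(sshConn, sftp.UseFstat(true))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}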
@@ -190,6 +220,8 @@ type Options struct {
 KeyPem string `config:"key_pem"`
 KeyFile string `config:"key_file"`
 KeyFilePass string `config:"key_file_pass"`
+PubKeyFile string `config:"pubkey_file"`
+KnownHostsFile string `config:"known_hosts_file"`
 KeyUseAgent bool `config:"key_use_agent"`
 UseInsecureCipher bool `config:"use_insecure_cipher"`
 DisableHashCheck bool `config:"disable_hashcheck"`
@@ -201,6 +233,7 @@ type Options struct {
 SkipLinks bool `config:"skip_links"`
 Subsystem string `config:"subsystem"`
 ServerCommand string `config:"server_command"`
+UseFstat bool `config:"use_fstat"`
 }

 // Fs stores the interface to the remote SFTP files
@@ -209,6 +242,7 @@ type Fs struct {
 root string
 absRoot string
 opt Options // parsed options
+ci *fs.ConfigInfo // global config
 m configmap.Mapper // config
 features *fs.Features // optional features
 config *ssh.ClientConfig
@@ -218,6 +252,7 @@ type Fs struct {
 poolMu sync.Mutex
 pool []*conn
 pacer *fs.Pacer // pacer for operations
+savedpswd string
 }

 // Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
@@ -231,25 +266,11 @@ type Object struct {
 sha1sum *string // Cached SHA1 checksum
 }

-// readCurrentUser finds the current user name or "" if not found
-func readCurrentUser() (userName string) {
-usr, err := user.Current()
-if err == nil {
-return usr.Username
-}
-// Fall back to reading $USER then $LOGNAME
-userName = os.Getenv("USER")
-if userName != "" {
-return userName
-}
-return os.Getenv("LOGNAME")
-}
-
 // dial starts a client connection to the given SSH server. It is a
 // convenience function that connects to the given network address,
 // initiates the SSH handshake, and then sets up a Client.
-func (f *Fs) dial(network, addr string, sshConfig *ssh.ClientConfig) (*ssh.Client, error) {
-dialer := fshttp.NewDialer(fs.Config)
+func (f *Fs) dial(ctx context.Context, network, addr string, sshConfig *ssh.ClientConfig) (*ssh.Client, error) {
+dialer := fshttp.NewDialer(ctx)
 conn, err := dialer.Dial(network, addr)
 if err != nil {
 return nil, err
@@ -295,12 +316,12 @@ func (c *conn) closed() error {
 }

 // Open a new connection to the SFTP server.
-func (f *Fs) sftpConnection() (c *conn, err error) {
+func (f *Fs) sftpConnection(ctx context.Context) (c *conn, err error) {
 // Rate limit rate of new connections
 c = &conn{
 err: make(chan error, 1),
 }
-c.sshClient, err = f.dial("tcp", f.opt.Host+":"+f.opt.Port, f.config)
+c.sshClient, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port, f.config)
 if err != nil {
 return nil, errors.Wrap(err, "couldn't connect SSH")
 }
@@ -338,12 +359,15 @@ func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.C
 return nil, err
 }
 }
+opts = opts[:len(opts):len(opts)] // make sure we don't overwrite the callers opts
+opts = append(opts, sftp.UseFstat(f.opt.UseFstat))

 return sftp.NewClientPipe(pr, pw, opts...)
 }

 // Get an SFTP connection from the pool, or open a new one
-func (f *Fs) getSftpConnection() (c *conn, err error) {
+func (f *Fs) getSftpConnection(ctx context.Context) (c *conn, err error) {
+accounting.LimitTPS(ctx)
 f.poolMu.Lock()
 for len(f.pool) > 0 {
 c = f.pool[0]
@@ -360,7 +384,7 @@ func (f *Fs) getSftpConnection() (c *conn, err error) {
 return c, nil
 }
 err = f.pacer.Call(func() (bool, error) {
-c, err = f.sftpConnection()
+c, err = f.sftpConnection(ctx)
 if err != nil {
 return true, err
 }
@@ -407,10 +431,32 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
 f.poolMu.Unlock()
 }

+// Drain the pool of any connections
+func (f *Fs) drainPool(ctx context.Context) (err error) {
+f.poolMu.Lock()
+defer f.poolMu.Unlock()
+for i, c := range f.pool {
+if cErr := c.closed(); cErr == nil {
+cErr = c.close()
+if cErr != nil {
+err = cErr
+}
+}
+f.pool[i] = nil
+}
+f.pool = nil
+return err
+}
+
 // NewFs creates a new Fs object from the name and root. It connects to
 // the host specified in the config file.
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-ctx := context.Background()
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
+// This will hold the Fs object. We need to create it here
+// so we can refer to it in the SSH callback, but it's populated
+// in NewFsWithConnection
+f := &Fs{
+ci: fs.GetConfig(ctx),
+}
 // Parse config into Options struct
 opt := new(Options)
 err := configstruct.Set(m, opt)
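The drainPool function added above closes every pooled connection under the pool mutex and remembers the last error; the Shutdown method added later in this diff calls it. The same pattern in miniature, with io.Closer standing in for the backend's conn type:

package main

import (
	"io"
	"sync"
)

type pool struct {
	mu    sync.Mutex
	conns []io.Closer
}

// drain closes and discards all pooled connections, returning the
// last close error, as drainPool does above.
func (p *pool) drain() (err error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	for i, c := range p.conns {
		if cerr := c.Close(); cerr != nil {
			err = cerr
		}
		p.conns[i] = nil // drop the reference so it can be collected
	}
	p.conns = nil
	return err
}

func main() { _ = (&pool{}).drain() }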
@@ -423,12 +469,21 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 if opt.Port == "" {
 opt.Port = "22"
 }

 sshConfig := &ssh.ClientConfig{
 User: opt.User,
 Auth: []ssh.AuthMethod{},
 HostKeyCallback: ssh.InsecureIgnoreHostKey(),
-Timeout: fs.Config.ConnectTimeout,
-ClientVersion: "SSH-2.0-" + fs.Config.UserAgent,
+Timeout: f.ci.ConnectTimeout,
+ClientVersion: "SSH-2.0-" + f.ci.UserAgent,
+}
+
+if opt.KnownHostsFile != "" {
+hostcallback, err := knownhosts.New(opt.KnownHostsFile)
+if err != nil {
+return nil, errors.Wrap(err, "couldn't parse known_hosts_file")
+}
+sshConfig.HostKeyCallback = hostcallback
 }

 if opt.UseInsecureCipher {
@@ -438,6 +493,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 }

 keyFile := env.ShellExpand(opt.KeyFile)
+pubkeyFile := env.ShellExpand(opt.PubKeyFile)
 //keyPem := env.ShellExpand(opt.KeyPem)
 // Add ssh agent-auth if no password or file or key PEM specified
 if (opt.Pass == "" && keyFile == "" && !opt.AskPassword && opt.KeyPem == "") || opt.KeyUseAgent {
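The known_hosts_file option wired in above replaces ssh.InsecureIgnoreHostKey with a callback built by golang.org/x/crypto/ssh/knownhosts, enabling real server host key validation. A minimal sketch; the path and user are placeholders:

package main

import (
	"log"

	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/knownhosts"
)

func main() {
	// knownhosts.New parses an OpenSSH known_hosts file and returns a
	// HostKeyCallback that rejects unknown or changed host keys.
	cb, err := knownhosts.New("/home/user/.ssh/known_hosts")
	if err != nil {
		log.Fatalf("couldn't parse known_hosts: %v", err)
	}
	cfg := &ssh.ClientConfig{
		User:            "user",
		HostKeyCallback: cb, // the dial now fails on a key mismatch
	}
	_ = cfg
}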
@@ -507,7 +563,38 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 if err != nil {
 return nil, errors.Wrap(err, "failed to parse private key file")
 }
-sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signer))
+
+// If a public key has been specified then use that
+if pubkeyFile != "" {
+certfile, err := ioutil.ReadFile(pubkeyFile)
+if err != nil {
+return nil, errors.Wrap(err, "unable to read cert file")
+}
+
+pk, _, _, _, err := ssh.ParseAuthorizedKey(certfile)
+if err != nil {
+return nil, errors.Wrap(err, "unable to parse cert file")
+}
+
+// And the signer for this, which includes the private key signer
+// This is what we'll pass to the ssh client.
+// Normally the ssh client will use the public key built
+// into the private key, but we need to tell it to use the user
+// specified public key cert. This signer is specific to the
+// cert and will include the private key signer. Now ssh
+// knows everything it needs.
+cert, ok := pk.(*ssh.Certificate)
+if !ok {
+return nil, errors.New("public key file is not a certificate file: " + pubkeyFile)
+}
+pubsigner, err := ssh.NewCertSigner(cert, signer)
+if err != nil {
+return nil, errors.Wrap(err, "error generating cert signer")
+}
+sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(pubsigner))
+} else {
+sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signer))
+}
 }

 // Auth from password if specified
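The pubkey_file support above authenticates with an OpenSSH certificate by wrapping the private key signer in a certificate signer. The same flow condensed into one hypothetical helper, assuming keyBytes and certBytes were read from the private key and its -cert.pub file:

package main

import (
	"fmt"

	"golang.org/x/crypto/ssh"
)

func certAuth(keyBytes, certBytes []byte) (ssh.AuthMethod, error) {
	signer, err := ssh.ParsePrivateKey(keyBytes)
	if err != nil {
		return nil, err
	}
	pk, _, _, _, err := ssh.ParseAuthorizedKey(certBytes)
	if err != nil {
		return nil, err
	}
	cert, ok := pk.(*ssh.Certificate)
	if !ok {
		return nil, fmt.Errorf("public key file is not a certificate")
	}
	// The cert signer presents the certificate but signs with the
	// private key, which is what the server needs to see.
	certSigner, err := ssh.NewCertSigner(cert, signer)
	if err != nil {
		return nil, err
	}
	return ssh.PublicKeys(certSigner), nil
}

func main() {}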
@@ -516,39 +603,77 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 if err != nil {
 return nil, err
 }
-sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
+sshConfig.Auth = append(sshConfig.Auth,
+ssh.Password(clearpass),
+ssh.KeyboardInteractive(func(user, instruction string, questions []string, echos []bool) ([]string, error) {
+return f.keyboardInteractiveReponse(user, instruction, questions, echos, clearpass)
+}),
+)
 }

-// Ask for password if none was defined and we're allowed to
+// Config for password if none was defined and we're allowed to
+// We don't ask now; we ask if the ssh connection succeeds
 if opt.Pass == "" && opt.AskPassword {
-_, _ = fmt.Fprint(os.Stderr, "Enter SFTP password: ")
-clearpass := config.ReadPassword()
-sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
+sshConfig.Auth = append(sshConfig.Auth,
+ssh.PasswordCallback(f.getPass),
+ssh.KeyboardInteractive(func(user, instruction string, questions []string, echos []bool) ([]string, error) {
+pass, _ := f.getPass()
+return f.keyboardInteractiveReponse(user, instruction, questions, echos, pass)
+}),
+)
 }

-return NewFsWithConnection(ctx, name, root, m, opt, sshConfig)
+return NewFsWithConnection(ctx, f, name, root, m, opt, sshConfig)
+}
+
+// Do the keyboard interactive challenge
+//
+// Just send the password back for all questions
+func (f *Fs) keyboardInteractiveReponse(user, instruction string, questions []string, echos []bool, pass string) ([]string, error) {
+fs.Debugf(f, "keyboard interactive auth requested")
+answers := make([]string, len(questions))
+for i := range answers {
+answers[i] = pass
+}
+return answers, nil
+}
+
+// If we're in password mode and ssh connection succeeds then this
+// callback is called. First time around we ask the user, and then
+// save it so on reconnection we give back the previous string.
+// This removes the ability to let the user correct a mistaken entry,
+// but means that reconnects are transparent.
+// We'll re-use config.Pass for this, 'cos we know it's not been
+// specified.
+func (f *Fs) getPass() (string, error) {
+for f.savedpswd == "" {
+_, _ = fmt.Fprint(os.Stderr, "Enter SFTP password: ")
+f.savedpswd = config.ReadPassword()
+}
+return f.savedpswd, nil
 }

 // NewFsWithConnection creates a new Fs object from the name and root and an ssh.ClientConfig. It connects to
 // the host specified in the ssh.ClientConfig
-func NewFsWithConnection(ctx context.Context, name string, root string, m configmap.Mapper, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) {
-f := &Fs{
-name: name,
-root: root,
-absRoot: root,
-opt: *opt,
-m: m,
-config: sshConfig,
-url: "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root,
-mkdirLock: newStringLock(),
-pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
-}
+func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m configmap.Mapper, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) {
+// Populate the Filesystem Object
+f.name = name
+f.root = root
+f.absRoot = root
+f.opt = *opt
+f.m = m
+f.config = sshConfig
+f.url = "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root
+f.mkdirLock = newStringLock()
+f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
+f.savedpswd = ""

 f.features = (&fs.Features{
 CanHaveEmptyDirectories: true,
 SlowHash: true,
-}).Fill(f)
+}).Fill(ctx, f)
 // Make a connection and pool it to return errors early
-c, err := f.getSftpConnection()
+c, err := f.getSftpConnection(ctx)
 if err != nil {
 return nil, errors.Wrap(err, "NewFs")
 }
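The rewritten password handling above registers both ssh.PasswordCallback and ssh.KeyboardInteractive, since some servers offer only one of the two methods; the keyboard-interactive handler simply answers every prompt with the same password. The shape of it in isolation, with a stub password source:

package main

import "golang.org/x/crypto/ssh"

func main() {
	getPass := func() (string, error) { return "secret", nil } // stub

	auths := []ssh.AuthMethod{
		ssh.PasswordCallback(getPass),
		ssh.KeyboardInteractive(func(user, instruction string, questions []string, echos []bool) ([]string, error) {
			pass, _ := getPass()
			answers := make([]string, len(questions))
			for i := range answers {
				answers[i] = pass // same secret for every prompt
			}
			return answers, nil
		}),
	}
	_ = auths
}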
@@ -616,7 +741,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 fs: f,
 remote: remote,
 }
-err := o.stat()
+err := o.stat(ctx)
 if err != nil {
 return nil, err
 }
@@ -625,11 +750,11 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {

 // dirExists returns true,nil if the directory exists, false, nil if
 // it doesn't or false, err
-func (f *Fs) dirExists(dir string) (bool, error) {
+func (f *Fs) dirExists(ctx context.Context, dir string) (bool, error) {
 if dir == "" {
 dir = "."
 }
-c, err := f.getSftpConnection()
+c, err := f.getSftpConnection(ctx)
 if err != nil {
 return false, errors.Wrap(err, "dirExists")
 }
@@ -658,7 +783,7 @@ func (f *Fs) dirExists(dir string) (bool, error) {
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 root := path.Join(f.absRoot, dir)
-ok, err := f.dirExists(root)
+ok, err := f.dirExists(ctx, root)
 if err != nil {
 return nil, errors.Wrap(err, "List failed")
 }
@@ -669,7 +794,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 if sftpDir == "" {
 sftpDir = "."
 }
-c, err := f.getSftpConnection()
+c, err := f.getSftpConnection(ctx)
 if err != nil {
 return nil, errors.Wrap(err, "List")
 }
@@ -688,7 +813,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 continue
 }
 oldInfo := info
-info, err = f.stat(remote)
+info, err = f.stat(ctx, remote)
 if err != nil {
 if !os.IsNotExist(err) {
 fs.Errorf(remote, "stat of non-regular file failed: %v", err)
@@ -713,7 +838,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e

 // Put data from <in> into a new remote sftp file object described by <src.Remote()> and <src.ModTime(ctx)>
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-err := f.mkParentDir(src.Remote())
+err := f.mkParentDir(ctx, src.Remote())
 if err != nil {
 return nil, errors.Wrap(err, "Put mkParentDir failed")
 }
@@ -736,19 +861,19 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt

 // mkParentDir makes the parent of remote if necessary and any
 // directories above that
-func (f *Fs) mkParentDir(remote string) error {
+func (f *Fs) mkParentDir(ctx context.Context, remote string) error {
 parent := path.Dir(remote)
-return f.mkdir(path.Join(f.absRoot, parent))
+return f.mkdir(ctx, path.Join(f.absRoot, parent))
 }

 // mkdir makes the directory and parents using native paths
-func (f *Fs) mkdir(dirPath string) error {
+func (f *Fs) mkdir(ctx context.Context, dirPath string) error {
 f.mkdirLock.Lock(dirPath)
 defer f.mkdirLock.Unlock(dirPath)
 if dirPath == "." || dirPath == "/" {
 return nil
 }
-ok, err := f.dirExists(dirPath)
+ok, err := f.dirExists(ctx, dirPath)
 if err != nil {
 return errors.Wrap(err, "mkdir dirExists failed")
 }
@@ -756,11 +881,11 @@ func (f *Fs) mkdir(dirPath string) error {
 return nil
 }
 parent := path.Dir(dirPath)
-err = f.mkdir(parent)
+err = f.mkdir(ctx, parent)
 if err != nil {
 return err
 }
-c, err := f.getSftpConnection()
+c, err := f.getSftpConnection(ctx)
 if err != nil {
 return errors.Wrap(err, "mkdir")
 }
@@ -775,7 +900,7 @@ func (f *Fs) mkdir(dirPath string) error {
 // Mkdir makes the root directory of the Fs object
 func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 root := path.Join(f.absRoot, dir)
-return f.mkdir(root)
+return f.mkdir(ctx, root)
 }

 // Rmdir removes the root directory of the Fs object
@@ -791,7 +916,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 }
 // Remove the directory
 root := path.Join(f.absRoot, dir)
-c, err := f.getSftpConnection()
+c, err := f.getSftpConnection(ctx)
 if err != nil {
 return errors.Wrap(err, "Rmdir")
 }
@@ -807,11 +932,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 fs.Debugf(src, "Can't move - not same remote type")
 return nil, fs.ErrorCantMove
 }
-err := f.mkParentDir(remote)
+err := f.mkParentDir(ctx, remote)
 if err != nil {
 return nil, errors.Wrap(err, "Move mkParentDir failed")
 }
-c, err := f.getSftpConnection()
+c, err := f.getSftpConnection(ctx)
 if err != nil {
 return nil, errors.Wrap(err, "Move")
 }
@@ -831,7 +956,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 }

 // DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -848,7 +973,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 dstPath := path.Join(f.absRoot, dstRemote)

 // Check if destination exists
-ok, err := f.dirExists(dstPath)
+ok, err := f.dirExists(ctx, dstPath)
 if err != nil {
 return errors.Wrap(err, "DirMove dirExists dst failed")
 }
@@ -857,13 +982,13 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 }

 // Make sure the parent directory exists
-err = f.mkdir(path.Dir(dstPath))
+err = f.mkdir(ctx, path.Dir(dstPath))
 if err != nil {
 return errors.Wrap(err, "DirMove mkParentDir dst failed")
 }

 // Do the move
-c, err := f.getSftpConnection()
+c, err := f.getSftpConnection(ctx)
 if err != nil {
 return errors.Wrap(err, "DirMove")
 }
@@ -879,8 +1004,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 }

 // run runds cmd on the remote end returning standard output
-func (f *Fs) run(cmd string) ([]byte, error) {
-c, err := f.getSftpConnection()
+func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
+c, err := f.getSftpConnection(ctx)
 if err != nil {
 return nil, errors.Wrap(err, "run: get SFTP connection")
 }
@@ -888,7 +1013,7 @@ func (f *Fs) run(cmd string) ([]byte, error) {

 session, err := c.sshClient.NewSession()
 if err != nil {
-return nil, errors.Wrap(err, "run: get SFTP sessiion")
+return nil, errors.Wrap(err, "run: get SFTP session")
 }
 defer func() {
 _ = session.Close()
@@ -908,6 +1033,7 @@ func (f *Fs) run(cmd string) ([]byte, error) {

 // Hashes returns the supported hash types of the filesystem
 func (f *Fs) Hashes() hash.Set {
+ctx := context.TODO()
 if f.opt.DisableHashCheck {
 return hash.Set(hash.None)
 }
@@ -926,7 +1052,7 @@ func (f *Fs) Hashes() hash.Set {
 }
 *changed = true
 for _, command := range commands {
-output, err := f.run(command)
+output, err := f.run(ctx, command)
 if err != nil {
 continue
 }
@@ -971,7 +1097,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 if len(escapedPath) == 0 {
 escapedPath = "/"
 }
-stdout, err := f.run("df -k " + escapedPath)
+stdout, err := f.run(ctx, "df -k "+escapedPath)
 if err != nil {
 return nil, errors.Wrap(err, "your remote may not support About")
 }
@@ -990,6 +1116,12 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 return usage, nil
 }

+// Shutdown the backend, closing any background tasks and any
+// cached connections.
+func (f *Fs) Shutdown(ctx context.Context) error {
+return f.drainPool(ctx)
+}
+
 // Fs is the filesystem this remote sftp file object is located within
 func (o *Object) Fs() fs.Info {
 return o.fs
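Shutdown above implements rclone's optional fs.Shutdowner interface, and the var block in the last hunk of this file asserts the implementation at compile time. The pattern in isolation, with a toy backend type standing in for the real Fs:

package main

import "context"

// Shutdowner mirrors the optional fs.Shutdowner interface.
type Shutdowner interface {
	Shutdown(ctx context.Context) error
}

type toyFs struct{ pool []func() error } // hypothetical backend

func (f *toyFs) Shutdown(ctx context.Context) error {
	for _, closeFn := range f.pool {
		_ = closeFn() // release cached resources
	}
	f.pool = nil
	return nil
}

// Compile-time check: the build fails if the method is missing.
var _ Shutdowner = (*toyFs)(nil)

func main() {}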
@@ -1034,7 +1166,7 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
 return "", hash.ErrUnsupported
 }

-c, err := o.fs.getSftpConnection()
+c, err := o.fs.getSftpConnection(ctx)
 if err != nil {
 return "", errors.Wrap(err, "Hash get SFTP connection")
 }
@@ -1142,8 +1274,8 @@ func (o *Object) setMetadata(info os.FileInfo) {
 }

 // statRemote stats the file or directory at the remote given
-func (f *Fs) stat(remote string) (info os.FileInfo, err error) {
-c, err := f.getSftpConnection()
+func (f *Fs) stat(ctx context.Context, remote string) (info os.FileInfo, err error) {
+c, err := f.getSftpConnection(ctx)
 if err != nil {
 return nil, errors.Wrap(err, "stat")
 }
@@ -1154,8 +1286,8 @@ func (f *Fs) stat(remote string) (info os.FileInfo, err error) {
 }

 // stat updates the info in the Object
-func (o *Object) stat() error {
-info, err := o.fs.stat(o.remote)
+func (o *Object) stat(ctx context.Context) error {
+info, err := o.fs.stat(ctx, o.remote)
 if err != nil {
 if os.IsNotExist(err) {
 return fs.ErrorObjectNotFound
@@ -1174,7 +1306,7 @@ func (o *Object) stat() error {
 // it also updates the info field
 func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 if o.fs.opt.SetModTime {
-c, err := o.fs.getSftpConnection()
+c, err := o.fs.getSftpConnection(ctx)
 if err != nil {
 return errors.Wrap(err, "SetModTime")
 }
@@ -1184,14 +1316,14 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 return errors.Wrap(err, "SetModTime failed")
 }
 }
-err := o.stat()
+err := o.stat(ctx)
 if err != nil {
 return errors.Wrap(err, "SetModTime stat failed")
 }
 return nil
 }

-// Storable returns whether the remote sftp file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc)
+// Storable returns whether the remote sftp file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
 func (o *Object) Storable() bool {
 return o.mode.IsRegular()
 }
@@ -1257,7 +1389,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 }
 }
 }
-c, err := o.fs.getSftpConnection()
+c, err := o.fs.getSftpConnection(ctx)
 if err != nil {
 return nil, errors.Wrap(err, "Open")
 }
@@ -1281,7 +1413,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 // Clear the hash cache since we are about to update the object
 o.md5sum = nil
 o.sha1sum = nil
-c, err := o.fs.getSftpConnection()
+c, err := o.fs.getSftpConnection(ctx)
 if err != nil {
 return errors.Wrap(err, "Update")
 }
@@ -1292,7 +1424,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 }
 // remove the file if upload failed
 remove := func() {
-c, removeErr := o.fs.getSftpConnection()
+c, removeErr := o.fs.getSftpConnection(ctx)
 if removeErr != nil {
 fs.Debugf(src, "Failed to open new SSH connection for delete: %v", removeErr)
 return
@@ -1324,7 +1456,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

 // Remove a remote sftp file object
 func (o *Object) Remove(ctx context.Context) error {
-c, err := o.fs.getSftpConnection()
+c, err := o.fs.getSftpConnection(ctx)
 if err != nil {
 return errors.Wrap(err, "Remove")
 }
@@ -1340,5 +1472,6 @@ var (
 _ fs.Mover = &Fs{}
 _ fs.DirMover = &Fs{}
 _ fs.Abouter = &Fs{}
+_ fs.Shutdowner = &Fs{}
 _ fs.Object = &Object{}
 )
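One detail worth a note from the sftp hunks above: the `ctx := context.TODO()` added to Hashes marks a call site where no context is available, because the Hashes method signature takes no context yet; context.TODO() is the stdlib's convention for exactly that gap. A hedged illustration with hypothetical functions:

package main

import "context"

func hashes() { // stands in for a context-less interface method
	ctx := context.TODO() // placeholder until the interface grows a ctx
	_ = work(ctx)
}

func work(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		return nil
	}
}

func main() { hashes() }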
@@ -95,7 +95,7 @@ type UploadSpecification struct {
 ChunkURI string `json:"ChunkUri"` // Specifies the URI the client must send the file data to
 FinishURI string `json:"FinishUri"` // If provided, specifies the final call the client must perform to finish the upload process
 ProgressData string `json:"ProgressData"` // Allows the client to check progress of standard uploads
-IsResume bool `json:"IsResume"` // Specifies a Resumable upload is supproted.
+IsResume bool `json:"IsResume"` // Specifies a Resumable upload is supported.
 ResumeIndex int64 `json:"ResumeIndex"` // Specifies the initial index for resuming, if IsResume is true.
 ResumeOffset int64 `json:"ResumeOffset"` // Specifies the initial file offset by bytes, if IsResume is true
 ResumeFileHash string `json:"ResumeFileHash"` // Specifies the MD5 hash of the first ResumeOffset bytes of the partial file found at the server

@@ -136,7 +136,7 @@ func init() {
 Name: "sharefile",
 Description: "Citrix Sharefile",
 NewFs: NewFs,
-Config: func(name string, m configmap.Mapper) {
+Config: func(ctx context.Context, name string, m configmap.Mapper) {
 oauthConfig := newOauthConfig("")
 checkAuth := func(oauthConfig *oauth2.Config, auth *oauthutil.AuthResult) error {
 if auth == nil || auth.Form == nil {
@@ -155,7 +155,7 @@ func init() {
 opt := oauthutil.Options{
 CheckAuth: checkAuth,
 }
-err := oauthutil.Config("sharefile", name, m, oauthConfig, &opt)
+err := oauthutil.Config(ctx, "sharefile", name, m, oauthConfig, &opt)
 if err != nil {
 log.Fatalf("Failed to configure token: %v", err)
 }
@@ -237,6 +237,7 @@ type Fs struct {
 name string // name of this remote
 root string // the path we are working on
 opt Options // parsed options
+ci *fs.ConfigInfo // global config
 features *fs.Features // optional features
 srv *rest.Client // the connection to the server
 dirCache *dircache.DirCache // Map of directory path to directory id
@@ -410,8 +411,7 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
 }

 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-ctx := context.Background()
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 // Parse config into Options struct
 opt := new(Options)
 err := configstruct.Set(m, opt)
@@ -437,23 +437,25 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 oauthConfig := newOauthConfig(opt.Endpoint + tokenPath)
 var client *http.Client
 var ts *oauthutil.TokenSource
-client, ts, err = oauthutil.NewClient(name, m, oauthConfig)
+client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
 if err != nil {
 return nil, errors.Wrap(err, "failed to configure sharefile")
 }

+ci := fs.GetConfig(ctx)
 f := &Fs{
 name: name,
 root: root,
 opt: *opt,
+ci: ci,
 srv: rest.NewClient(client).SetRoot(opt.Endpoint + apiPath),
-pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 }
 f.features = (&fs.Features{
 CaseInsensitive: true,
 CanHaveEmptyDirectories: true,
 ReadMimeType: false,
-}).Fill(f)
+}).Fill(ctx, f)
 f.srv.SetErrorHandler(errorHandler)
 f.fillBufferTokens()

@@ -518,7 +520,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 }
 return nil, err
 }
-f.features.Fill(&tempF)
+f.features.Fill(ctx, &tempF)
 // XXX: update the old f here instead of returning tempF, since
 // `features` were already filled with functions having *f as a receiver.
 // See https://github.com/rclone/rclone/issues/2182
@@ -532,8 +534,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {

 // Fill up (or reset) the buffer tokens
 func (f *Fs) fillBufferTokens() {
-f.bufferTokens = make(chan []byte, fs.Config.Transfers)
-for i := 0; i < fs.Config.Transfers; i++ {
+f.bufferTokens = make(chan []byte, f.ci.Transfers)
+for i := 0; i < f.ci.Transfers; i++ {
 f.bufferTokens <- nil
 }
 }
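fillBufferTokens above sizes a channel by the configured number of transfers and pre-fills it; the channel then acts as a semaphore handing out reusable buffers. The same idea standalone, with 4 standing in for ci.Transfers:

package main

func main() {
	const n = 4 // stands in for ci.Transfers
	tokens := make(chan []byte, n)
	for i := 0; i < n; i++ {
		tokens <- nil // nil means "allocate on first use"
	}

	getBuf := func() []byte {
		buf := <-tokens // blocks while all n buffers are in use
		if buf == nil {
			buf = make([]byte, 1<<20)
		}
		return buf
	}
	putBuf := func(buf []byte) { tokens <- buf } // hand back for reuse

	b := getBuf()
	putBuf(b)
}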
@@ -964,7 +966,7 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
 return item, nil
 }

-// Move src to this remote using server side move operations.
+// Move src to this remote using server-side move operations.
 //
 // This is stored with the remote path given
 //
@@ -1006,7 +1008,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 }

 // DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -1034,7 +1036,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 return nil
 }

-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
 //
 // This is stored with the remote path given
 //
@@ -1090,7 +1092,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
 } else if err != nil {
 return nil, errors.Wrap(err, "copy: failed to examine destination dir")
 } else {
-// otherwise need to copy via a temporary directlry
+// otherwise need to copy via a temporary directory
 }
 }

@@ -1339,7 +1341,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 Overwrite: true,
 CreatedDate: modTime,
 ModifiedDate: modTime,
-Tool: fs.Config.UserAgent,
+Tool: o.fs.ci.UserAgent,
 }

 if isLargeFile {
@@ -1349,7 +1351,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 } else {
 // otherwise use threaded which is more efficient
 req.Method = "threaded"
-req.ThreadCount = &fs.Config.Transfers
+req.ThreadCount = &o.fs.ci.Transfers
 req.Filesize = &size
 }
 }

@@ -32,7 +32,7 @@ type largeUpload struct {
 wrap accounting.WrapFn // account parts being transferred
 size int64 // total size
 parts int64 // calculated number of parts, if known
-info *api.UploadSpecification // where to post chunks etc
+info *api.UploadSpecification // where to post chunks, etc.
 threads int // number of threads to use in upload
 streamed bool // set if using streamed upload
 }
@@ -58,7 +58,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 return nil, errors.Errorf("can't use method %q with newLargeUpload", info.Method)
 }

-threads := fs.Config.Transfers
+threads := f.ci.Transfers
 if threads > info.MaxNumberOfThreads {
 threads = info.MaxNumberOfThreads
 }

@@ -76,7 +76,7 @@ func init() {
 Name: "sugarsync",
 Description: "Sugarsync",
 NewFs: NewFs,
-Config: func(name string, m configmap.Mapper) {
+Config: func(ctx context.Context, name string, m configmap.Mapper) {
 opt := new(Options)
 err := configstruct.Set(m, opt)
 if err != nil {
@@ -85,7 +85,7 @@ func init() {

 if opt.RefreshToken != "" {
 fmt.Printf("Already have a token - refresh?\n")
-if !config.ConfirmWithConfig(m, "config_refresh_token", true) {
+if !config.ConfirmWithConfig(ctx, m, "config_refresh_token", true) {
 return
 }
 }
@@ -106,7 +106,7 @@ func init() {
 Method: "POST",
 Path: "/app-authorization",
 }
-srv := rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(rootURL) // FIXME
+srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(rootURL) // FIXME

 // FIXME
 //err = f.pacer.Call(func() (bool, error) {
@@ -264,7 +264,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Fi
 }

 found, err := f.listAll(ctx, directoryID, func(item *api.File) bool {
-if item.Name == leaf {
+if strings.EqualFold(item.Name, leaf) {
 info = item
 return true
 }
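Swapping `item.Name == leaf` for strings.EqualFold above makes the leaf lookup case-insensitive, matching Sugarsync's case-insensitive namespace, and does so without allocating lowercased copies of either string:

package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, name := range []string{"Photos", "photos", "PHOTOS"} {
		// EqualFold compares under Unicode case folding, rune by rune,
		// with no temporary strings.
		fmt.Println(name, strings.EqualFold(name, "photos")) // all true
	}
}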
@@ -350,7 +350,7 @@ func (f *Fs) getAuth(req *http.Request) (err error) {
 // if have auth, check it is in date
 if f.opt.Authorization == "" || f.opt.User == "" || f.authExpiry.IsZero() || time.Until(f.authExpiry) < expiryLeeway {
 // Get the auth token
-f.srv.SetSigner(nil) // temporariliy remove the signer so we don't infinitely recurse
+f.srv.SetSigner(nil) // temporarily remove the signer so we don't infinitely recurse
 err = f.getAuthToken(ctx)
 f.srv.SetSigner(f.getAuth) // replace signer
 if err != nil {
@@ -395,9 +395,7 @@ func parseExpiry(expiryString string) time.Time {
 }

 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-ctx := context.Background()
-
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 opt := new(Options)
 err := configstruct.Set(m, opt)
 if err != nil {
@@ -405,20 +403,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 }

 root = parsePath(root)
-client := fshttp.NewClient(fs.Config)
+client := fshttp.NewClient(ctx)
 f := &Fs{
 name: name,
 root: root,
 opt: *opt,
 srv: rest.NewClient(client).SetRoot(rootURL),
-pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 m: m,
 authExpiry: parseExpiry(opt.AuthorizationExpiry),
 }
 f.features = (&fs.Features{
 CaseInsensitive: true,
 CanHaveEmptyDirectories: true,
-}).Fill(f)
+}).Fill(ctx, f)
 f.srv.SetSigner(f.getAuth) // use signing hook to get the auth
 f.srv.SetErrorHandler(errorHandler)

@@ -533,7 +531,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
 //fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
 // Find the leaf in pathID
 found, err = f.listAll(ctx, pathID, nil, func(item *api.Collection) bool {
-if item.Name == leaf {
+if strings.EqualFold(item.Name, leaf) {
 pathIDOut = item.Ref
 return true
 }
@@ -576,7 +574,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 }
 newID = resp.Header.Get("Location")
 if newID == "" {
-// look up ID if not returned (eg for syncFolder)
+// look up ID if not returned (e.g. for syncFolder)
 var found bool
 newID, found, err = f.FindLeaf(ctx, pathID, leaf)
 if err != nil {
@@ -837,7 +835,7 @@ func (f *Fs) Precision() time.Duration {
 return fs.ModTimeNotSupported
 }

-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
 //
 // This is stored with the remote path given
 //
@@ -923,7 +921,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 return f.purgeCheck(ctx, dir, false)
 }

-// moveFile moves a file server side
+// moveFile moves a file server-side
 func (f *Fs) moveFile(ctx context.Context, id, leaf, directoryID string) (info *api.File, err error) {
 opts := rest.Opts{
 Method: "PUT",
@@ -951,7 +949,7 @@ func (f *Fs) moveFile(ctx context.Context, id, leaf, directoryID string) (info *
 return info, nil
 }

-// moveDir moves a folder server side
+// moveDir moves a folder server-side
 func (f *Fs) moveDir(ctx context.Context, id, leaf, directoryID string) (err error) {
 // Move the object
 opts := rest.Opts{
@@ -970,7 +968,7 @@ func (f *Fs) moveDir(ctx context.Context, id, leaf, directoryID string) (err err
 })
 }

-// Move src to this remote using server side move operations.
+// Move src to this remote using server-side move operations.
 //
 // This is stored with the remote path given
 //
@@ -1006,7 +1004,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 }

 // DirMove moves src, srcRemote to this remote at dstRemote
-// using server side move operations.
+// using server-side move operations.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -1,10 +1,11 @@
 package swift

 import (
+"context"
 "net/http"
 "time"

-"github.com/ncw/swift"
+"github.com/ncw/swift/v2"
 )

 // auth is an authenticator for swift. It overrides the StorageUrl
@@ -28,19 +29,19 @@ func newAuth(parentAuth swift.Authenticator, storageURL string, authToken string
 }

 // Request creates an http.Request for the auth - return nil if not needed
-func (a *auth) Request(c *swift.Connection) (*http.Request, error) {
+func (a *auth) Request(ctx context.Context, c *swift.Connection) (*http.Request, error) {
 if a.parentAuth == nil {
 return nil, nil
 }
-return a.parentAuth.Request(c)
+return a.parentAuth.Request(ctx, c)
 }

 // Response parses the http.Response
-func (a *auth) Response(resp *http.Response) error {
+func (a *auth) Response(ctx context.Context, resp *http.Response) error {
 if a.parentAuth == nil {
 return nil
 }
-return a.parentAuth.Response(resp)
+return a.parentAuth.Response(ctx, resp)
 }

 // The public storage URL - set Internal to true to read
|
|||||||
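The hunks above are the mechanical part of the ncw/swift v2 migration: every Authenticator method gains a context.Context, and a delegating authenticator simply forwards it. A minimal standalone sketch of that delegation pattern (the Authenticator interface below is a simplified stand-in, not the library's full definition):

package main

import (
    "context"
    "fmt"
    "net/http"
)

// Authenticator is a simplified stand-in mirroring the context-aware
// method shapes of the v2 swift.Authenticator shown above.
type Authenticator interface {
    Request(ctx context.Context) (*http.Request, error)
    Response(ctx context.Context, resp *http.Response) error
}

// overrideAuth delegates to a parent authenticator when one is set and
// forwards the context unchanged, like the auth type in this diff.
type overrideAuth struct {
    parent Authenticator
}

func (a *overrideAuth) Request(ctx context.Context) (*http.Request, error) {
    if a.parent == nil {
        return nil, nil // nil request: no auth round trip needed
    }
    return a.parent.Request(ctx)
}

func (a *overrideAuth) Response(ctx context.Context, resp *http.Response) error {
    if a.parent == nil {
        return nil
    }
    return a.parent.Response(ctx, resp)
}

func main() {
    a := &overrideAuth{} // no parent: auth is skipped entirely
    req, err := a.Request(context.Background())
    fmt.Println(req, err) // <nil> <nil>
}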
@@ -13,7 +13,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/ncw/swift"
+	"github.com/ncw/swift/v2"
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
@@ -24,6 +24,7 @@ import (
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/operations"
 	"github.com/rclone/rclone/fs/walk"
+	"github.com/rclone/rclone/lib/atexit"
 	"github.com/rclone/rclone/lib/bucket"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/pacer"
@@ -51,7 +52,7 @@ default for this is 5GB which is its maximum value.`,
 			Name: "no_chunk",
 			Help: `Don't chunk files during streaming upload.
 
-When doing streaming uploads (eg using rcat or mount) setting this
+When doing streaming uploads (e.g. using rcat or mount) setting this
 flag will cause the swift backend to not upload chunked files.
 
 This will limit the maximum upload size to 5GB. However non chunked
@@ -167,6 +168,11 @@ func init() {
 				Help:  "Admin",
 				Value: "admin",
 			}},
+		}, {
+			Name:     "leave_parts_on_error",
+			Help:     `If true avoid calling abort upload on a failure. It should be set to true for resuming uploads across different sessions.`,
+			Default:  false,
+			Advanced: true,
 		}, {
 			Name: "storage_policy",
 			Help: `The storage policy to use when creating a new container
@@ -208,6 +214,7 @@ type Options struct {
 	ApplicationCredentialID     string        `config:"application_credential_id"`
 	ApplicationCredentialName   string        `config:"application_credential_name"`
 	ApplicationCredentialSecret string        `config:"application_credential_secret"`
+	LeavePartsOnError           bool          `config:"leave_parts_on_error"`
 	StoragePolicy               string        `config:"storage_policy"`
 	EndpointType                string        `config:"endpoint_type"`
 	ChunkSize                   fs.SizeSuffix `config:"chunk_size"`
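The new option wires together in the two places shown above: the registered option's Name must match the struct tag so that configstruct.Set (called from NewFs later in this diff) can copy the configured value into Options.LeavePartsOnError. A toy, hand-rolled illustration of that tag-driven mapping (rclone's real configstruct handles many more types, defaults and error wrapping):

package main

import (
    "fmt"
    "reflect"
    "strconv"
)

// Options mirrors just the field added in the hunk above.
type Options struct {
    LeavePartsOnError bool `config:"leave_parts_on_error"`
}

// set copies values from m into fields whose `config` tag matches the
// key, a toy version of what rclone's configstruct.Set does.
func set(m map[string]string, opt interface{}) error {
    v := reflect.ValueOf(opt).Elem()
    t := v.Type()
    for i := 0; i < t.NumField(); i++ {
        key := t.Field(i).Tag.Get("config")
        raw, ok := m[key]
        if !ok {
            continue
        }
        b, err := strconv.ParseBool(raw)
        if err != nil {
            return err
        }
        v.Field(i).SetBool(b)
    }
    return nil
}

func main() {
    opt := new(Options)
    if err := set(map[string]string{"leave_parts_on_error": "true"}, opt); err != nil {
        panic(err)
    }
    fmt.Println(opt.LeavePartsOnError) // true
}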
@@ -221,6 +228,7 @@ type Fs struct {
 	root             string            // the path we are working on if any
 	features         *fs.Features      // optional features
 	opt              Options           // options for this backend
+	ci               *fs.ConfigInfo    // global config
 	c                *swift.Connection // the connection to the swift server
 	rootContainer    string            // container part of root (if any)
 	rootDirectory    string            // directory part of root (if any)
@@ -272,7 +280,7 @@ func (f *Fs) Features() *fs.Features {
 
 // retryErrorCodes is a slice of error codes that we will retry
 var retryErrorCodes = []int{
-	401, // Unauthorized (eg "Token has expired")
+	401, // Unauthorized (e.g. "Token has expired")
 	408, // Request Timeout
 	409, // Conflict - various states that could be resolved on a retry
 	429, // Rate exceeded.
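The retryErrorCodes list feeds shouldRetry: a response whose status is in the list is reported to the pacer as retryable, so the call is backed off and repeated. A minimal sketch of that membership check, using only the codes visible in this hunk:

package main

import "fmt"

// retryable reports whether a status code is in the retry list; only
// the codes visible in the hunk above are included here.
func retryable(status int) bool {
    for _, code := range []int{401, 408, 409, 429} {
        if code == status {
            return true
        }
    }
    return false
}

func main() {
    fmt.Println(retryable(429)) // true: rate limited, back off and retry
    fmt.Println(retryable(404)) // false: not found is a permanent error
}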
@@ -340,7 +348,8 @@ func (o *Object) split() (container, containerPath string) {
 }
 
 // swiftConnection makes a connection to swift
-func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
+func swiftConnection(ctx context.Context, opt *Options, name string) (*swift.Connection, error) {
+	ci := fs.GetConfig(ctx)
 	c := &swift.Connection{
 		// Keep these in the same order as the Config for ease of checking
 		UserName: opt.User,
@@ -359,9 +368,9 @@ func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
 		ApplicationCredentialName:   opt.ApplicationCredentialName,
 		ApplicationCredentialSecret: opt.ApplicationCredentialSecret,
 		EndpointType:                swift.EndpointType(opt.EndpointType),
-		ConnectTimeout:              10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
-		Timeout:                     10 * fs.Config.Timeout,        // Use the timeouts in the transport
-		Transport:                   fshttp.NewTransport(fs.Config),
+		ConnectTimeout:              10 * ci.ConnectTimeout, // Use the timeouts in the transport
+		Timeout:                     10 * ci.Timeout,        // Use the timeouts in the transport
+		Transport:                   fshttp.NewTransport(ctx),
 	}
 	if opt.EnvAuth {
 		err := c.ApplyEnvironment()
@@ -382,7 +391,7 @@ func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
 	if c.AuthUrl == "" {
 		return nil, errors.New("auth not found")
 	}
-	err := c.Authenticate() // fills in c.StorageUrl and c.AuthToken
+	err := c.Authenticate(ctx) // fills in c.StorageUrl and c.AuthToken
 	if err != nil {
 		return nil, err
 	}
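The swiftConnection hunk shows the wider refactor running through this diff: reads of the global fs.Config become fs.GetConfig(ctx), so configuration travels with the request context. A standalone sketch of that context-carried-config pattern (ConfigInfo here is a cut-down stand-in for rclone's fs.ConfigInfo, and the default values are illustrative):

package main

import (
    "context"
    "fmt"
    "time"
)

// ConfigInfo is a cut-down stand-in for rclone's fs.ConfigInfo; only
// fields this diff actually reads are included.
type ConfigInfo struct {
    ConnectTimeout time.Duration
    Timeout        time.Duration
    Transfers      int
}

type configKey struct{}

var defaultConfig = ConfigInfo{
    ConnectTimeout: 60 * time.Second,
    Timeout:        5 * time.Minute,
    Transfers:      4,
}

// getConfig mimics the shape of fs.GetConfig: read the config attached
// to the context, falling back to the global default when none is set.
func getConfig(ctx context.Context) *ConfigInfo {
    if ci, ok := ctx.Value(configKey{}).(*ConfigInfo); ok {
        return ci
    }
    return &defaultConfig
}

// withConfig attaches a config to a context, as callers higher up the
// stack (a command, the rc server) would.
func withConfig(ctx context.Context, ci *ConfigInfo) context.Context {
    return context.WithValue(ctx, configKey{}, ci)
}

func main() {
    ctx := withConfig(context.Background(), &ConfigInfo{Transfers: 16})
    fmt.Println(getConfig(ctx).Transfers)                  // 16, from the context
    fmt.Println(getConfig(context.Background()).Transfers) // 4, the default
}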
@@ -432,13 +441,15 @@ func (f *Fs) setRoot(root string) {
 //
 // if noCheckContainer is set then the Fs won't check the container
 // exists before creating it.
-func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, noCheckContainer bool) (fs.Fs, error) {
+func NewFsWithConnection(ctx context.Context, opt *Options, name, root string, c *swift.Connection, noCheckContainer bool) (fs.Fs, error) {
+	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name:             name,
 		opt:              *opt,
+		ci:               ci,
 		c:                c,
 		noCheckContainer: noCheckContainer,
-		pacer:            fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
+		pacer:            fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep))),
 		cache:            bucket.NewCache(),
 	}
 	f.setRoot(root)
@@ -448,7 +459,7 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
 		BucketBased:       true,
 		BucketBasedRootOK: true,
 		SlowModTime:       true,
-	}).Fill(f)
+	}).Fill(ctx, f)
 	if f.rootContainer != "" && f.rootDirectory != "" {
 		// Check to see if the object exists - ignoring directory markers
 		var info swift.Object
@@ -456,7 +467,7 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
 		encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
 		err = f.pacer.Call(func() (bool, error) {
 			var rxHeaders swift.Headers
-			info, rxHeaders, err = f.c.Object(f.rootContainer, encodedDirectory)
+			info, rxHeaders, err = f.c.Object(ctx, f.rootContainer, encodedDirectory)
 			return shouldRetryHeaders(rxHeaders, err)
 		})
 		if err == nil && info.ContentType != directoryMarkerContentType {
@@ -473,7 +484,7 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
 }
 
 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
@@ -485,17 +496,17 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		return nil, errors.Wrap(err, "swift: chunk size")
 	}
 
-	c, err := swiftConnection(opt, name)
+	c, err := swiftConnection(ctx, opt, name)
 	if err != nil {
 		return nil, err
 	}
-	return NewFsWithConnection(opt, name, root, c, false)
+	return NewFsWithConnection(ctx, opt, name, root, c, false)
 }
 
 // Return an Object from a path
 //
 // If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, error) {
+func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *swift.Object) (fs.Object, error) {
 	o := &Object{
 		fs:     f,
 		remote: remote,
@@ -505,7 +516,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, er
 	// making sure we read the full metadata for all 0 byte files.
 	// We don't read the metadata for directory marker objects.
 	if info != nil && info.Bytes == 0 && info.ContentType != "application/directory" {
-		err := o.readMetaData() // reads info and headers, returning an error
+		err := o.readMetaData(ctx) // reads info and headers, returning an error
 		if err == fs.ErrorObjectNotFound {
 			// We have a dangling large object here so just return the original metadata
 			fs.Errorf(o, "dangling large object with no contents")
@@ -522,7 +533,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, er
 			return nil, err
 		}
 	} else {
-		err := o.readMetaData() // reads info and headers, returning an error
+		err := o.readMetaData(ctx) // reads info and headers, returning an error
 		if err != nil {
 			return nil, err
 		}
@@ -533,7 +544,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, er
 // NewObject finds the Object at remote. If it can't be found it
 // returns the error fs.ErrorObjectNotFound.
 func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
-	return f.newObjectWithInfo(remote, nil)
+	return f.newObjectWithInfo(ctx, remote, nil)
 }
 
 // listFn is called from list and listContainerRoot to handle an object.
@@ -545,7 +556,7 @@ type listFn func(remote string, object *swift.Object, isDirectory bool) error
 // container to the start.
 //
 // Set recurse to read sub directories
-func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer bool, recurse bool, includeDirMarkers bool, fn listFn) error {
+func (f *Fs) listContainerRoot(ctx context.Context, container, directory, prefix string, addContainer bool, recurse bool, includeDirMarkers bool, fn listFn) error {
 	if prefix != "" && !strings.HasSuffix(prefix, "/") {
 		prefix += "/"
 	}
@@ -560,11 +571,11 @@ func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer
 	if !recurse {
 		opts.Delimiter = '/'
 	}
-	return f.c.ObjectsWalk(container, &opts, func(opts *swift.ObjectsOpts) (interface{}, error) {
+	return f.c.ObjectsWalk(ctx, container, &opts, func(ctx context.Context, opts *swift.ObjectsOpts) (interface{}, error) {
 		var objects []swift.Object
 		var err error
 		err = f.pacer.Call(func() (bool, error) {
-			objects, err = f.c.Objects(container, opts)
+			objects, err = f.c.Objects(ctx, container, opts)
 			return shouldRetry(err)
 		})
 		if err == nil {
@@ -602,8 +613,8 @@ func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer
 type addEntryFn func(fs.DirEntry) error
 
 // list the objects into the function supplied
-func (f *Fs) list(container, directory, prefix string, addContainer bool, recurse bool, includeDirMarkers bool, fn addEntryFn) error {
-	err := f.listContainerRoot(container, directory, prefix, addContainer, recurse, includeDirMarkers, func(remote string, object *swift.Object, isDirectory bool) (err error) {
+func (f *Fs) list(ctx context.Context, container, directory, prefix string, addContainer bool, recurse bool, includeDirMarkers bool, fn addEntryFn) error {
+	err := f.listContainerRoot(ctx, container, directory, prefix, addContainer, recurse, includeDirMarkers, func(remote string, object *swift.Object, isDirectory bool) (err error) {
 		if isDirectory {
 			remote = strings.TrimRight(remote, "/")
 			d := fs.NewDir(remote, time.Time{}).SetSize(object.Bytes)
@@ -611,7 +622,7 @@ func (f *Fs) list(container, directory, prefix string, addContainer bool, recurs
 		} else {
 			// newObjectWithInfo does a full metadata read on 0 size objects which might be dynamic large objects
 			var o fs.Object
-			o, err = f.newObjectWithInfo(remote, object)
+			o, err = f.newObjectWithInfo(ctx, remote, object)
 			if err != nil {
 				return err
 			}
@@ -628,12 +639,12 @@ func (f *Fs) list(container, directory, prefix string, addContainer bool, recurs
 }
 
 // listDir lists a single directory
-func (f *Fs) listDir(container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
+func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
 	if container == "" {
 		return nil, fs.ErrorListBucketRequired
 	}
 	// List the objects
-	err = f.list(container, directory, prefix, addContainer, false, false, func(entry fs.DirEntry) error {
+	err = f.list(ctx, container, directory, prefix, addContainer, false, false, func(entry fs.DirEntry) error {
 		entries = append(entries, entry)
 		return nil
 	})
@@ -649,7 +660,7 @@ func (f *Fs) listDir(container, directory, prefix string, addContainer bool) (en
 func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err error) {
 	var containers []swift.Container
 	err = f.pacer.Call(func() (bool, error) {
-		containers, err = f.c.ContainersAll(nil)
+		containers, err = f.c.ContainersAll(ctx, nil)
 		return shouldRetry(err)
 	})
 	if err != nil {
@@ -680,7 +691,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		}
 		return f.listContainers(ctx)
 	}
-	return f.listDir(container, directory, f.rootDirectory, f.rootContainer == "")
+	return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
 }
 
 // ListR lists the objects and directories of the Fs starting
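ObjectsWalk drives the listing callback above: it repeatedly invokes the callback with the listing options advanced past the last result until a page comes back empty. A simplified marker-driven version of that loop, independent of the swift types (there the marker travels inside *swift.ObjectsOpts):

package main

import "fmt"

// page is one page of listing results plus the marker for the next page.
type page struct {
    items []string
    next  string
}

// walk keeps fetching pages until the marker runs out, feeding each
// page to fn, the same loop shape ObjectsWalk runs around the callback.
func walk(fetch func(marker string) page, fn func(items []string)) {
    marker := ""
    for {
        p := fetch(marker)
        fn(p.items)
        if p.next == "" {
            return
        }
        marker = p.next
    }
}

func main() {
    pages := map[string]page{
        "":  {items: []string{"a", "b"}, next: "b"},
        "b": {items: []string{"c"}},
    }
    walk(func(marker string) page { return pages[marker] },
        func(items []string) { fmt.Println("got", items) })
}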
@@ -703,7 +714,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 	container, directory := f.split(dir)
 	list := walk.NewListRHelper(callback)
 	listR := func(container, directory, prefix string, addContainer bool) error {
-		return f.list(container, directory, prefix, addContainer, true, false, func(entry fs.DirEntry) error {
+		return f.list(ctx, container, directory, prefix, addContainer, true, false, func(entry fs.DirEntry) error {
 			return list.Add(entry)
 		})
 	}
@@ -741,7 +752,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	var containers []swift.Container
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
-		containers, err = f.c.ContainersAll(nil)
+		containers, err = f.c.ContainersAll(ctx, nil)
 		return shouldRetry(err)
 	})
 	if err != nil {
@@ -793,7 +804,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
 		if !f.noCheckContainer {
 			err = f.pacer.Call(func() (bool, error) {
 				var rxHeaders swift.Headers
-				_, rxHeaders, err = f.c.Container(container)
+				_, rxHeaders, err = f.c.Container(ctx, container)
 				return shouldRetryHeaders(rxHeaders, err)
 			})
 		}
@@ -803,7 +814,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
 				headers["X-Storage-Policy"] = f.opt.StoragePolicy
 			}
 			err = f.pacer.Call(func() (bool, error) {
-				err = f.c.ContainerCreate(container, headers)
+				err = f.c.ContainerCreate(ctx, container, headers)
 				return shouldRetry(err)
 			})
 			if err == nil {
@@ -824,7 +835,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	}
 	err := f.cache.Remove(container, func() error {
 		err := f.pacer.Call(func() (bool, error) {
			err := f.c.ContainerDelete(ctx, container)
 			return shouldRetry(err)
 		})
 		if err == nil {
@@ -849,12 +860,12 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 		return fs.ErrorListBucketRequired
 	}
 	// Delete all the files including the directory markers
-	toBeDeleted := make(chan fs.Object, fs.Config.Transfers)
+	toBeDeleted := make(chan fs.Object, f.ci.Transfers)
 	delErr := make(chan error, 1)
 	go func() {
 		delErr <- operations.DeleteFiles(ctx, toBeDeleted)
 	}()
-	err := f.list(container, directory, f.rootDirectory, false, true, true, func(entry fs.DirEntry) error {
+	err := f.list(ctx, container, directory, f.rootDirectory, false, true, true, func(entry fs.DirEntry) error {
 		if o, ok := entry.(*Object); ok {
 			toBeDeleted <- o
 		}
@@ -871,7 +882,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 	return f.Rmdir(ctx, dir)
 }
 
-// Copy src to this remote using server side copy operations.
+// Copy src to this remote using server-side copy operations.
 //
 // This is stored with the remote path given
 //
@@ -894,7 +905,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	srcContainer, srcPath := srcObj.split()
 	err = f.pacer.Call(func() (bool, error) {
 		var rxHeaders swift.Headers
-		rxHeaders, err = f.c.ObjectCopy(srcContainer, srcPath, dstContainer, dstPath, nil)
+		rxHeaders, err = f.c.ObjectCopy(ctx, srcContainer, srcPath, dstContainer, dstPath, nil)
 		return shouldRetryHeaders(rxHeaders, err)
 	})
 	if err != nil {
@@ -933,11 +944,11 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 	if t != hash.MD5 {
 		return "", hash.ErrUnsupported
 	}
-	isDynamicLargeObject, err := o.isDynamicLargeObject()
+	isDynamicLargeObject, err := o.isDynamicLargeObject(ctx)
 	if err != nil {
 		return "", err
 	}
-	isStaticLargeObject, err := o.isStaticLargeObject()
+	isStaticLargeObject, err := o.isStaticLargeObject(ctx)
 	if err != nil {
 		return "", err
 	}
@@ -950,8 +961,8 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 
 // hasHeader checks for the header passed in returning false if the
 // object isn't found.
-func (o *Object) hasHeader(header string) (bool, error) {
-	err := o.readMetaData()
+func (o *Object) hasHeader(ctx context.Context, header string) (bool, error) {
+	err := o.readMetaData(ctx)
 	if err != nil {
 		if err == fs.ErrorObjectNotFound {
 			return false, nil
@@ -963,17 +974,29 @@ func (o *Object) hasHeader(header string) (bool, error) {
 }
 
 // isDynamicLargeObject checks for X-Object-Manifest header
-func (o *Object) isDynamicLargeObject() (bool, error) {
-	return o.hasHeader("X-Object-Manifest")
+func (o *Object) isDynamicLargeObject(ctx context.Context) (bool, error) {
+	return o.hasHeader(ctx, "X-Object-Manifest")
 }
 
 // isStaticLargeObjectFile checks for the X-Static-Large-Object header
-func (o *Object) isStaticLargeObject() (bool, error) {
-	return o.hasHeader("X-Static-Large-Object")
+func (o *Object) isStaticLargeObject(ctx context.Context) (bool, error) {
+	return o.hasHeader(ctx, "X-Static-Large-Object")
 }
 
-func (o *Object) isInContainerVersioning(container string) (bool, error) {
-	_, headers, err := o.fs.c.Container(container)
+func (o *Object) isLargeObject(ctx context.Context) (result bool, err error) {
+	result, err = o.hasHeader(ctx, "X-Static-Large-Object")
+	if result {
+		return
+	}
+	result, err = o.hasHeader(ctx, "X-Object-Manifest")
+	if result {
+		return
+	}
+	return false, nil
+}
+
+func (o *Object) isInContainerVersioning(ctx context.Context, container string) (bool, error) {
+	_, headers, err := o.fs.c.Container(ctx, container)
 	if err != nil {
 		return false, err
 	}
@@ -1009,7 +1032,7 @@ func (o *Object) decodeMetaData(info *swift.Object) (err error) {
 // it also sets the info
 //
 // it returns fs.ErrorObjectNotFound if the object isn't found
-func (o *Object) readMetaData() (err error) {
+func (o *Object) readMetaData(ctx context.Context) (err error) {
 	if o.headers != nil {
 		return nil
 	}
@@ -1017,7 +1040,7 @@ func (o *Object) readMetaData() (err error) {
 	var h swift.Headers
 	container, containerPath := o.split()
 	err = o.fs.pacer.Call(func() (bool, error) {
-		info, h, err = o.fs.c.Object(container, containerPath)
+		info, h, err = o.fs.c.Object(ctx, container, containerPath)
 		return shouldRetryHeaders(h, err)
 	})
 	if err != nil {
@@ -1040,10 +1063,10 @@ func (o *Object) readMetaData() (err error) {
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
 func (o *Object) ModTime(ctx context.Context) time.Time {
-	if fs.Config.UseServerModTime {
+	if o.fs.ci.UseServerModTime {
 		return o.lastModified
 	}
-	err := o.readMetaData()
+	err := o.readMetaData(ctx)
 	if err != nil {
 		fs.Debugf(o, "Failed to read metadata: %s", err)
 		return o.lastModified
@@ -1058,7 +1081,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
 
 // SetModTime sets the modification time of the local fs object
 func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
-	err := o.readMetaData()
+	err := o.readMetaData(ctx)
 	if err != nil {
 		return err
 	}
@@ -1076,7 +1099,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	}
 	container, containerPath := o.split()
 	return o.fs.pacer.Call(func() (bool, error) {
-		err = o.fs.c.ObjectUpdate(container, containerPath, newHeaders)
+		err = o.fs.c.ObjectUpdate(ctx, container, containerPath, newHeaders)
 		return shouldRetry(err)
 	})
 }
@@ -1097,7 +1120,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	container, containerPath := o.split()
 	err = o.fs.pacer.Call(func() (bool, error) {
 		var rxHeaders swift.Headers
-		in, rxHeaders, err = o.fs.c.ObjectOpen(container, containerPath, !isRanging, headers)
+		in, rxHeaders, err = o.fs.c.ObjectOpen(ctx, container, containerPath, !isRanging, headers)
 		return shouldRetryHeaders(rxHeaders, err)
 	})
 	return
@@ -1111,50 +1134,41 @@ func min(x, y int64) int64 {
 	return y
 }
 
-// removeSegments removes any old segments from o
-//
-// if except is passed in then segments with that prefix won't be deleted
-func (o *Object) removeSegments(except string) error {
-	segmentsContainer, _, err := o.getSegmentsDlo()
-	if err != nil {
-		return err
-	}
-	except = path.Join(o.remote, except)
-	// fs.Debugf(o, "segmentsContainer %q prefix %q", segmentsContainer, prefix)
-	err = o.fs.listContainerRoot(segmentsContainer, o.remote, "", false, true, true, func(remote string, object *swift.Object, isDirectory bool) error {
-		if isDirectory {
-			return nil
-		}
-		if except != "" && strings.HasPrefix(remote, except) {
-			// fs.Debugf(o, "Ignoring current segment file %q in container %q", remote, segmentsContainer)
-			return nil
-		}
-		fs.Debugf(o, "Removing segment file %q in container %q", remote, segmentsContainer)
-		var err error
-		return o.fs.pacer.Call(func() (bool, error) {
-			err = o.fs.c.ObjectDelete(segmentsContainer, remote)
-			return shouldRetry(err)
-		})
-	})
-	if err != nil {
-		return err
-	}
-	// remove the segments container if empty, ignore errors
-	err = o.fs.pacer.Call(func() (bool, error) {
-		err = o.fs.c.ContainerDelete(segmentsContainer)
-		if err == swift.ContainerNotFound || err == swift.ContainerNotEmpty {
-			return false, err
-		}
-		return shouldRetry(err)
-	})
-	if err == nil {
-		fs.Debugf(o, "Removed empty container %q", segmentsContainer)
-	}
-	return nil
-}
+func (o *Object) getSegmentsLargeObject(ctx context.Context) (map[string][]string, error) {
+	container, objectName := o.split()
+	segmentContainer, segmentObjects, err := o.fs.c.LargeObjectGetSegments(ctx, container, objectName)
+	if err != nil {
+		fs.Debugf(o, "Failed to get list segments of object: %v", err)
+		return nil, err
+	}
+	var containerSegments = make(map[string][]string)
+	for _, segment := range segmentObjects {
+		if _, ok := containerSegments[segmentContainer]; !ok {
+			containerSegments[segmentContainer] = make([]string, 0, len(segmentObjects))
+		}
+		segments, _ := containerSegments[segmentContainer]
+		segments = append(segments, segment.Name)
+		containerSegments[segmentContainer] = segments
+	}
+	return containerSegments, nil
+}
+
+func (o *Object) removeSegmentsLargeObject(ctx context.Context, containerSegments map[string][]string) error {
+	if containerSegments == nil || len(containerSegments) <= 0 {
+		return nil
+	}
+	for container, segments := range containerSegments {
+		_, err := o.fs.c.BulkDelete(ctx, container, segments)
+		if err != nil {
+			fs.Debugf(o, "Failed to delete bulk segments %v", err)
+			return err
+		}
+	}
+	return nil
+}
 
-func (o *Object) getSegmentsDlo() (segmentsContainer string, prefix string, err error) {
-	if err = o.readMetaData(); err != nil {
+func (o *Object) getSegmentsDlo(ctx context.Context) (segmentsContainer string, prefix string, err error) {
+	if err = o.readMetaData(ctx); err != nil {
 		return
 	}
 	dirManifest := o.headers["X-Object-Manifest"]
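The rewrite above swaps per-segment ObjectDelete calls for LargeObjectGetSegments plus one BulkDelete per container, so removal costs one round trip per container rather than one per segment. A small sketch of that group-then-bulk-delete shape (bulkDelete is a hypothetical stand-in for the swift v2 call, and the segment names are made up):

package main

import "fmt"

// bulkDelete stands in for the swift v2 bulk-delete call used above;
// here it only reports what would be removed.
func bulkDelete(container string, names []string) error {
    fmt.Printf("bulk deleting %d segments from %q\n", len(names), container)
    return nil
}

// removeSegments deletes captured segments one container at a time,
// mirroring the shape of removeSegmentsLargeObject in the hunk above.
func removeSegments(containerSegments map[string][]string) error {
    for container, segments := range containerSegments {
        if err := bulkDelete(container, segments); err != nil {
            return err
        }
    }
    return nil
}

func main() {
    // Segments grouped by container, as getSegmentsLargeObject builds them.
    err := removeSegments(map[string][]string{
        "mycontainer_segments": {"file/1607990400.000/2048/0", "file/1607990400.000/2048/1"},
    })
    fmt.Println("err:", err)
}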
@@ -1178,7 +1192,7 @@ func urlEncode(str string) string {
 	var buf bytes.Buffer
 	for i := 0; i < len(str); i++ {
 		c := str[i]
-		if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '/' || c == '.' {
+		if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '/' || c == '.' || c == '_' || c == '-' {
 			_ = buf.WriteByte(c)
 		} else {
 			_, _ = buf.WriteString(fmt.Sprintf("%%%02X", c))
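The one-character widening above adds '_' and '-' to the bytes urlEncode leaves untouched, so common segment names are no longer percent-encoded. A runnable copy of the updated logic:

package main

import (
    "fmt"
    "strings"
)

// urlEncode keeps alphanumerics and the safe set '/', '.', '_', '-',
// and percent-encodes everything else, as in the hunk above.
func urlEncode(str string) string {
    var buf strings.Builder
    for i := 0; i < len(str); i++ {
        c := str[i]
        if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
            c == '/' || c == '.' || c == '_' || c == '-' {
            buf.WriteByte(c)
        } else {
            fmt.Fprintf(&buf, "%%%02X", c)
        }
    }
    return buf.String()
}

func main() {
    fmt.Println(urlEncode("dir/my_file-1.txt")) // unchanged: every byte is now safe
    fmt.Println(urlEncode("a b"))               // "a%20b"
}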
@@ -1189,14 +1203,14 @@ func urlEncode(str string) string {
 
 // updateChunks updates the existing object using chunks to a separate
 // container. It returns a string which prefixes current segments.
-func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64, contentType string) (string, error) {
+func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.Headers, size int64, contentType string) (string, error) {
 	container, containerPath := o.split()
 	segmentsContainer := container + "_segments"
 	// Create the segmentsContainer if it doesn't exist
 	var err error
 	err = o.fs.pacer.Call(func() (bool, error) {
 		var rxHeaders swift.Headers
-		_, rxHeaders, err = o.fs.c.Container(segmentsContainer)
+		_, rxHeaders, err = o.fs.c.Container(ctx, segmentsContainer)
 		return shouldRetryHeaders(rxHeaders, err)
 	})
 	if err == swift.ContainerNotFound {
@@ -1205,7 +1219,7 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
 			headers["X-Storage-Policy"] = o.fs.opt.StoragePolicy
 		}
 		err = o.fs.pacer.Call(func() (bool, error) {
-			err = o.fs.c.ContainerCreate(segmentsContainer, headers)
+			err = o.fs.c.ContainerCreate(ctx, segmentsContainer, headers)
 			return shouldRetry(err)
 		})
 	}
@@ -1218,10 +1232,20 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
 	uniquePrefix := fmt.Sprintf("%s/%d", swift.TimeToFloatString(time.Now()), size)
 	segmentsPath := path.Join(containerPath, uniquePrefix)
 	in := bufio.NewReader(in0)
-	segmentInfos := make([]string, 0, ((size / int64(o.fs.opt.ChunkSize)) + 1))
+	segmentInfos := make([]string, 0, (size/int64(o.fs.opt.ChunkSize))+1)
+	defer atexit.OnError(&err, func() {
+		if o.fs.opt.LeavePartsOnError {
+			return
+		}
+		fs.Debugf(o, "Delete segments when err raise %v", err)
+		if segmentInfos == nil || len(segmentInfos) == 0 {
+			return
+		}
+		deleteChunks(ctx, o, segmentsContainer, segmentInfos)
+	})()
 	for {
 		// can we read at least one byte?
-		if _, err := in.Peek(1); err != nil {
+		if _, err = in.Peek(1); err != nil {
 			if left > 0 {
 				return "", err // read less than expected
 			}
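The new defer ties segment cleanup to the error path: atexit.OnError returns a function which, when deferred, runs the callback only if err is non-nil on exit, and leave_parts_on_error short-circuits it so interrupted uploads can be resumed. A simplified standalone version of that error-only-cleanup idiom (onError below is a stand-in, not rclone's lib/atexit, which also fires the callback if the process is interrupted):

package main

import (
    "errors"
    "fmt"
)

// onError returns a function to defer, which runs fn only when *perr
// is non-nil on the way out of the enclosing function.
func onError(perr *error, fn func()) func() {
    return func() {
        if *perr != nil {
            fn()
        }
    }
}

func upload(fail bool) (err error) {
    segments := []string{"seg/0", "seg/1"}
    defer onError(&err, func() {
        fmt.Printf("cleaning up %d segments after error: %v\n", len(segments), err)
    })()
    if fail {
        return errors.New("network broke mid-upload")
    }
    return nil
}

func main() {
    _ = upload(true)  // cleanup runs
    _ = upload(false) // cleanup skipped
}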
@@ -1239,15 +1263,13 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
 		fs.Debugf(o, "Uploading segment file %q into %q", segmentPath, segmentsContainer)
 		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
 			var rxHeaders swift.Headers
-			rxHeaders, err = o.fs.c.ObjectPut(segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
+			rxHeaders, err = o.fs.c.ObjectPut(ctx, segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
 			if err == nil {
 				segmentInfos = append(segmentInfos, segmentPath)
 			}
 			return shouldRetryHeaders(rxHeaders, err)
 		})
 		if err != nil {
-			deleteChunks(o, segmentsContainer, segmentInfos)
-			segmentInfos = nil
 			return "", err
 		}
 		i++
@@ -1258,24 +1280,26 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
 	emptyReader := bytes.NewReader(nil)
 	err = o.fs.pacer.Call(func() (bool, error) {
 		var rxHeaders swift.Headers
-		rxHeaders, err = o.fs.c.ObjectPut(container, containerPath, emptyReader, true, "", contentType, headers)
+		rxHeaders, err = o.fs.c.ObjectPut(ctx, container, containerPath, emptyReader, true, "", contentType, headers)
 		return shouldRetryHeaders(rxHeaders, err)
 	})
-	if err != nil {
-		deleteChunks(o, segmentsContainer, segmentInfos)
+	if err == nil {
+		//reset data
 		segmentInfos = nil
 	}
 	return uniquePrefix + "/", err
 }
 
-func deleteChunks(o *Object, segmentsContainer string, segmentInfos []string) {
-	if segmentInfos != nil && len(segmentInfos) > 0 {
-		for _, v := range segmentInfos {
-			fs.Debugf(o, "Delete segment file %q on %q", v, segmentsContainer)
-			e := o.fs.c.ObjectDelete(segmentsContainer, v)
-			if e != nil {
-				fs.Errorf(o, "Error occurred in delete segment file %q on %q, error: %q", v, segmentsContainer, e)
-			}
+func deleteChunks(ctx context.Context, o *Object, segmentsContainer string, segmentInfos []string) {
+	if segmentInfos == nil || len(segmentInfos) == 0 {
+		return
+	}
+	for _, v := range segmentInfos {
+		fs.Debugf(o, "Delete segment file %q on %q", v, segmentsContainer)
+		e := o.fs.c.ObjectDelete(ctx, segmentsContainer, v)
+		if e != nil {
+			fs.Errorf(o, "Error occurred in delete segment file %q on %q, error: %q", v, segmentsContainer, e)
 		}
 	}
 }
@@ -1296,20 +1320,26 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	modTime := src.ModTime(ctx)
 
 	// Note whether this is a dynamic large object before starting
-	isDynamicLargeObject, err := o.isDynamicLargeObject()
+	isLargeObject, err := o.isLargeObject(ctx)
 	if err != nil {
 		return err
 	}
 
+	// capture segments before upload
+	var segmentsContainer map[string][]string
+	if isLargeObject {
+		segmentsContainer, _ = o.getSegmentsLargeObject(ctx)
+	}
+
 	// Set the mtime
 	m := swift.Metadata{}
 	m.SetModTime(modTime)
 	contentType := fs.MimeType(ctx, src)
 	headers := m.ObjectHeaders()
 	fs.OpenOptionAddHeaders(options, headers)
-	uniquePrefix := ""
 	if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) {
-		uniquePrefix, err = o.updateChunks(in, headers, size, contentType)
+		_, err = o.updateChunks(ctx, in, headers, size, contentType)
 		if err != nil {
 			return err
 		}
@@ -1325,7 +1355,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	}
 	var rxHeaders swift.Headers
 	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-		rxHeaders, err = o.fs.c.ObjectPut(container, containerPath, in, true, "", contentType, headers)
+		rxHeaders, err = o.fs.c.ObjectPut(ctx, container, containerPath, in, true, "", contentType, headers)
 		return shouldRetryHeaders(rxHeaders, err)
 	})
 	if err != nil {
@@ -1343,47 +1373,59 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			o.size = int64(inCount.BytesRead())
 		}
 	}
-	// If file was a dynamic large object then remove old/all segments
-	if isDynamicLargeObject {
-		err = o.removeSegments(uniquePrefix)
+	isInContainerVersioning, _ := o.isInContainerVersioning(ctx, container)
+	// If the file was a large object and the container does not have versioning enabled then remove old/all segments
+	if isLargeObject && len(segmentsContainer) > 0 && !isInContainerVersioning {
+		err := o.removeSegmentsLargeObject(ctx, segmentsContainer)
 		if err != nil {
 			fs.Logf(o, "Failed to remove old segments - carrying on with upload: %v", err)
 		}
 	}
 
 	// Read the metadata from the newly created object if necessary
-	return o.readMetaData()
+	return o.readMetaData(ctx)
 }
 
 // Remove an object
 func (o *Object) Remove(ctx context.Context) (err error) {
 	container, containerPath := o.split()
 
+	// check whether the object is a large object
+	isLargeObject, err := o.isLargeObject(ctx)
+	if err != nil {
+		return err
+	}
+	// check whether the container has versioning enabled, which preserves segments on delete
+	isInContainerVersioning := false
+	if isLargeObject {
+		isInContainerVersioning, err = o.isInContainerVersioning(ctx, container)
+		if err != nil {
+			return err
+		}
+	}
+	// capture the segments if this object is a large object
+	var containerSegments map[string][]string
+	if isLargeObject {
+		containerSegments, err = o.getSegmentsLargeObject(ctx)
+		if err != nil {
+			return err
+		}
+	}
 	// Remove file/manifest first
 	err = o.fs.pacer.Call(func() (bool, error) {
-		err = o.fs.c.ObjectDelete(container, containerPath)
+		err = o.fs.c.ObjectDelete(ctx, container, containerPath)
 		return shouldRetry(err)
 	})
 	if err != nil {
 		return err
 	}
-	isDynamicLargeObject, err := o.isDynamicLargeObject()
-	if err != nil {
-		return err
-	}
-	// ...then segments if required
-	if isDynamicLargeObject {
-		isInContainerVersioning, err := o.isInContainerVersioning(container)
-		if err != nil {
-			return err
-		}
-		if !isInContainerVersioning {
-			err = o.removeSegments("")
-			if err != nil {
-				return err
-			}
-		}
+
+	if !isLargeObject || isInContainerVersioning {
+		return nil
+	}
+
+	if isLargeObject {
+		return o.removeSegmentsLargeObject(ctx, containerSegments)
 	}
 	return nil
 }
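Update and Remove now share one ordering for large objects: capture the current segment list, write or delete the manifest, then bulk-remove the captured segments only when the container is not versioned. A schematic of that decision order, with every backend call replaced by a print and the two booleans standing in for isLargeObject and isInContainerVersioning:

package main

import "fmt"

// removeObject sketches the decision order the rewritten Remove follows.
func removeObject(isLarge, versioned bool) {
    var segments map[string][]string
    if isLarge {
        // getSegmentsLargeObject: capture before the manifest disappears
        segments = map[string][]string{"c_segments": {"s1", "s2"}}
    }
    fmt.Println("delete the file/manifest itself") // ObjectDelete
    if !isLarge || versioned {
        // a versioned container keeps old segments for the retained version
        return
    }
    fmt.Println("bulk delete segments in", len(segments), "container(s)") // removeSegmentsLargeObject
}

func main() {
    removeObject(true, false)  // large, unversioned: segments removed
    removeObject(true, true)   // large, versioned: segments kept
    removeObject(false, false) // small object: nothing extra to do
}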
@@ -4,7 +4,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/ncw/swift"
+	"github.com/ncw/swift/v2"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/stretchr/testify/assert"
 )
@@ -4,15 +4,19 @@ package swift
 import (
 	"bytes"
 	"context"
+	"errors"
 	"io"
+	"io/ioutil"
 	"testing"
 
+	"github.com/ncw/swift/v2"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/object"
 	"github.com/rclone/rclone/fstest"
 	"github.com/rclone/rclone/fstest/fstests"
 	"github.com/rclone/rclone/lib/random"
+	"github.com/rclone/rclone/lib/readers"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -74,6 +78,80 @@ func (f *Fs) testNoChunk(t *testing.T) {
 // Additional tests that aren't in the framework
 func (f *Fs) InternalTest(t *testing.T) {
 	t.Run("NoChunk", f.testNoChunk)
+	t.Run("WithChunk", f.testWithChunk)
+	t.Run("WithChunkFail", f.testWithChunkFail)
+}
+
+func (f *Fs) testWithChunk(t *testing.T) {
+	preConfChunkSize := f.opt.ChunkSize
+	preConfChunk := f.opt.NoChunk
+	f.opt.NoChunk = false
+	f.opt.ChunkSize = 1024 * fs.Byte
+	defer func() {
+		//restore old config after test
+		f.opt.ChunkSize = preConfChunkSize
+		f.opt.NoChunk = preConfChunk
+	}()
+
+	file := fstest.Item{
+		ModTime: fstest.Time("2020-12-31T04:05:06.499999999Z"),
+		Path:    "piped data chunk.txt",
+		Size:    -1, // use unknown size during upload
+	}
+	const contentSize = 2048
+	contents := random.String(contentSize)
+	buf := bytes.NewBufferString(contents)
+	uploadHash := hash.NewMultiHasher()
+	in := io.TeeReader(buf, uploadHash)
+
+	file.Size = -1
+	obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
+	ctx := context.TODO()
+	obj, err := f.Features().PutStream(ctx, in, obji)
+	require.NoError(t, err)
+	require.NotEmpty(t, obj)
+}
+
+func (f *Fs) testWithChunkFail(t *testing.T) {
+	preConfChunkSize := f.opt.ChunkSize
+	preConfChunk := f.opt.NoChunk
+	f.opt.NoChunk = false
+	f.opt.ChunkSize = 1024 * fs.Byte
+	segmentContainer := f.root + "_segments"
+	defer func() {
+		//restore config
+		f.opt.ChunkSize = preConfChunkSize
+		f.opt.NoChunk = preConfChunk
+	}()
+	path := "piped data chunk with error.txt"
+	file := fstest.Item{
+		ModTime: fstest.Time("2021-01-04T03:46:00.499999999Z"),
+		Path:    path,
+		Size:    -1, // use unknown size during upload
+	}
+	const contentSize = 4096
+	const errPosition = 3072
+	contents := random.String(contentSize)
+	buf := bytes.NewBufferString(contents[:errPosition])
+	errMessage := "potato"
+	er := &readers.ErrorReader{Err: errors.New(errMessage)}
+	in := ioutil.NopCloser(io.MultiReader(buf, er))
+
+	file.Size = contentSize
+	obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
+	ctx := context.TODO()
+	_, err := f.Features().PutStream(ctx, in, obji)
+	// error is potato
+	require.NotNil(t, err)
+	require.Equal(t, errMessage, err.Error())
+	_, _, err = f.c.Object(ctx, f.rootContainer, path)
+	assert.Equal(t, swift.ObjectNotFound, err)
+	prefix := path
+	objs, err := f.c.Objects(ctx, segmentContainer, &swift.ObjectsOpts{
+		Prefix: prefix,
+	})
+	require.NoError(t, err)
+	require.Empty(t, objs)
+}
 
 var _ fstests.InternalTester = (*Fs)(nil)
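testWithChunkFail uses readers.ErrorReader to make the input stream fail part-way through the upload, which is exactly what exercises the new cleanup path. The same trick in self-contained form (errorReader mirrors the shape of rclone's helper):

package main

import (
    "errors"
    "fmt"
    "io"
    "strings"
)

// errorReader fails every Read with the configured error, like
// rclone's readers.ErrorReader.
type errorReader struct{ err error }

func (e errorReader) Read(p []byte) (int, error) { return 0, e.err }

func main() {
    // The first five bytes succeed, then the stream fails, simulating
    // a connection dropping part-way through a chunked upload.
    in := io.MultiReader(strings.NewReader("hello"), errorReader{errors.New("potato")})
    buf := make([]byte, 3)
    for {
        n, err := in.Read(buf)
        fmt.Printf("read %q err=%v\n", buf[:n], err)
        if err != nil {
            break
        }
    }
}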
@@ -1,4 +1,4 @@
-// +build go1.13,!plan9
+// +build !plan9
 
 // Package tardigrade provides an interface to Tardigrade decentralized object storage.
 package tardigrade
@@ -42,7 +42,7 @@ func init() {
 		Name:        "tardigrade",
 		Description: "Tardigrade Decentralized Cloud Storage",
 		NewFs:       NewFs,
-		Config: func(name string, configMapper configmap.Mapper) {
+		Config: func(ctx context.Context, name string, configMapper configmap.Mapper) {
 			provider, _ := configMapper.Get(fs.ConfigProvider)
 
 			config.FileDeleteKey(name, fs.ConfigProvider)
@@ -67,12 +67,12 @@ func init() {
 					log.Fatalf("Couldn't create access grant: %v", err)
 				}
 
-				serialziedAccess, err := access.Serialize()
+				serializedAccess, err := access.Serialize()
 				if err != nil {
 					log.Fatalf("Couldn't serialize access grant: %v", err)
 				}
 				configMapper.Set("satellite_address", satellite)
-				configMapper.Set("access_grant", serialziedAccess)
+				configMapper.Set("access_grant", serializedAccess)
 			} else if provider == existingProvider {
 				config.FileDeleteKey(name, "satellite_address")
 				config.FileDeleteKey(name, "api_key")
@@ -165,9 +165,7 @@ var (
 )
 
 // NewFs creates a filesystem backed by Tardigrade.
-func NewFs(name, root string, m configmap.Mapper) (_ fs.Fs, err error) {
-	ctx := context.Background()
-
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs, err error) {
 	// Setup filesystem and connection to Tardigrade
 	root = norm.NFC.String(root)
 	root = strings.Trim(root, "/")
@@ -219,7 +217,7 @@ func NewFs(name, root string, m configmap.Mapper) (_ fs.Fs, err error) {
 	f.features = (&fs.Features{
 		BucketBased:       true,
 		BucketBasedRootOK: true,
-	}).Fill(f)
+	}).Fill(ctx, f)
 
 	project, err := f.connect(ctx)
 	if err != nil {
@@ -1,4 +1,4 @@
-// +build go1.13,!plan9
+// +build !plan9
 
 package tardigrade
 
@@ -1,4 +1,4 @@
-// +build go1.13,!plan9
+// +build !plan9
 
 // Test Tardigrade filesystem interface
 package tardigrade_test
@@ -1,3 +1,3 @@
-// +build !go1.13 plan9
+// +build plan9
 
 package tardigrade
@@ -61,7 +61,7 @@ func (p *EpAll) Action(ctx context.Context, upstreams []*upstream.Fs, path strin
 	return p.epall(ctx, upstreams, path)
 }
 
-// ActionEntries is ACTION category policy but receivng a set of candidate entries
+// ActionEntries is ACTION category policy but receiving a set of candidate entries
 func (p *EpAll) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
 	if len(entries) == 0 {
 		return nil, fs.ErrorObjectNotFound
@@ -106,7 +106,7 @@ func (p *EpMfs) Search(ctx context.Context, upstreams []*upstream.Fs, path strin
 	return p.mfs(upstreams)
 }
 
-// SearchEntries is SEARCH category policy but receivng a set of candidate entries
+// SearchEntries is SEARCH category policy but receiving a set of candidate entries
 func (p *EpMfs) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
 	if len(entries) == 0 {
 		return nil, fs.ErrorObjectNotFound
Some files were not shown because too many files have changed in this diff.