Mirror of https://github.com/rclone/rclone.git, synced 2025-12-06 00:03:32 +00:00.

Compare commits: fix-s3-end ... fix-linux- (413 commits)
[Commit list omitted: the compare view listed the 413 commits in this range, but only abbreviated SHA1s survived extraction; the author, date and message columns were empty.]
.github/FUNDING.yml (vendored, 4 lines removed)

@@ -1,4 +0,0 @@
github: [ncw]
patreon: njcw
liberapay: ncw
custom: ["https://rclone.org/donate/"]

.github/dependabot.yml (vendored, new file, 6 lines)

@@ -0,0 +1,6 @@
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "daily"
.github/workflows/build.yml (vendored, 49 changed lines)

@@ -8,9 +8,9 @@ name: build
on:
push:
branches:
- '*'
- '**'
tags:
- '*'
- '**'
pull_request:
workflow_dispatch:
inputs:

@@ -27,12 +27,12 @@ jobs:
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.18', 'go1.19']
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.19', 'go1.20']

include:
- job_name: linux
os: ubuntu-latest
go: '1.20'
go: '1.21.0-rc.3'
gotags: cmount
build_flags: '-include "^linux/"'
check: true

@@ -43,14 +43,14 @@ jobs:

- job_name: linux_386
os: ubuntu-latest
go: '1.20'
go: '1.21.0-rc.3'
goarch: 386
gotags: cmount
quicktest: true

- job_name: mac_amd64
os: macos-11
go: '1.20'
go: '1.21.0-rc.3'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true

@@ -59,14 +59,14 @@ jobs:

- job_name: mac_arm64
os: macos-11
go: '1.20'
go: '1.21.0-rc.3'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true

- job_name: windows
os: windows-latest
go: '1.20'
go: '1.21.0-rc.3'
gotags: cmount
cgo: '0'
build_flags: '-include "^windows/"'

@@ -76,23 +76,23 @@ jobs:

- job_name: other_os
os: ubuntu-latest
go: '1.20'
go: '1.21.0-rc.3'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true

- job_name: go1.18
os: ubuntu-latest
go: '1.18'
quicktest: true
racequicktest: true

- job_name: go1.19
os: ubuntu-latest
go: '1.19'
quicktest: true
racequicktest: true

- job_name: go1.20
os: ubuntu-latest
go: '1.20'
quicktest: true
racequicktest: true

name: ${{ matrix.job_name }}

runs-on: ${{ matrix.os }}

@@ -104,7 +104,7 @@ jobs:
fetch-depth: 0

- name: Install Go
uses: actions/setup-go@v3
uses: actions/setup-go@v4
with:
go-version: ${{ matrix.go }}
check-latest: true

@@ -130,6 +130,11 @@ jobs:
- name: Install Libraries on macOS
shell: bash
run: |
# https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788
# https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008
unset HOMEBREW_NO_INSTALL_FROM_API
brew untap --force homebrew/core
brew untap --force homebrew/cask
brew update
brew install --cask macfuse
if: matrix.os == 'macos-11'

@@ -217,7 +222,7 @@ jobs:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# working-directory: '$(modulePath)'
# Deploy binaries if enabled in config && not a PR && not a fork
if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'

lint:
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}

@@ -237,9 +242,9 @@ jobs:

# Run govulncheck on the latest go version, the one we build binaries with
- name: Install Go
uses: actions/setup-go@v3
uses: actions/setup-go@v4
with:
go-version: '1.20'
go-version: '1.21.0-rc.3'
check-latest: true

- name: Install govulncheck

@@ -262,9 +267,9 @@ jobs:

# Upgrade together with NDK version
- name: Set up Go
uses: actions/setup-go@v3
uses: actions/setup-go@v4
with:
go-version: '1.20'
go-version: '1.21.0-rc.3'

- name: Go module cache
uses: actions/cache@v3

@@ -352,4 +357,4 @@ jobs:
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# Upload artifacts if not a PR && not a fork
if: github.head_ref == '' && github.repository == 'rclone/rclone'
if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone'
.github/workflows/build_publish_beta_docker_image.yml (vendored, new file, 61 lines)

@@ -0,0 +1,61 @@
name: Docker beta build

on:
  push:
    branches:
      - master
jobs:
  build:
    if: github.repository == 'rclone/rclone'
    runs-on: ubuntu-latest
    name: Build image job
    steps:
      - name: Checkout master
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ghcr.io/${{ github.repository }}
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          # This is the user that triggered the Workflow. In this case, it will
          # either be the user whom created the Release or manually triggered
          # the workflow_dispatch.
          username: ${{ github.actor }}
          # `secrets.GITHUB_TOKEN` is a secret that's automatically generated by
          # GitHub Actions at the start of a workflow run to identify the job.
          # This is used to authenticate against GitHub Container Registry.
          # See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
          # for more detailed information.
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and publish image
        uses: docker/build-push-action@v4
        with:
          file: Dockerfile
          context: .
          push: true # push the image to ghcr
          tags: |
            ghcr.io/rclone/rclone:beta
            rclone/rclone:beta
          labels: ${{ steps.meta.outputs.labels }}
          platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
          cache-from: type=gha
          cache-to: type=gha,mode=max
          provenance: false
# Eventually cache will need to be cleared if builds more frequent than once a week
# https://github.com/docker/build-push-action/issues/252
.github/workflows/build_publish_docker_image.yml (vendored, 26 lines removed)

@@ -1,26 +0,0 @@
name: Docker beta build

on:
  push:
    branches:
      - master

jobs:
  build:
    if: github.repository == 'rclone/rclone'
    runs-on: ubuntu-latest
    name: Build image job
    steps:
      - name: Checkout master
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Build and publish image
        uses: ilteoood/docker_buildx@1.1.0
        with:
          tag: beta
          imageName: rclone/rclone
          platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
          publish: true
          dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
          dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
.github/workflows/winget.yml (vendored, new file, 14 lines)

@@ -0,0 +1,14 @@
name: Publish to Winget
on:
  release:
    types: [released]

jobs:
  publish:
    runs-on: windows-latest # Action can only run on Windows
    steps:
      - uses: vedantmgoyal2009/winget-releaser@v2
        with:
          identifier: Rclone.Rclone
          installers-regex: '-windows-\w+\.zip$'
          token: ${{ secrets.WINGET_TOKEN }}
@@ -2,15 +2,17 @@

linters:
enable:
- deadcode
- errcheck
- goimports
- revive
- ineffassign
- structcheck
- varcheck
- govet
- unconvert
- staticcheck
- gosimple
- stylecheck
- unused
- misspell
#- prealloc
#- maligned
disable-all: true

@@ -25,6 +27,30 @@ issues:
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0

exclude-rules:

- linters:
- staticcheck
text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'

run:
# timeout for analysis, e.g. 30s, 5m, default is 1m
timeout: 10m

linters-settings:
revive:
rules:
- name: unreachable-code
disabled: true
- name: unused-parameter
disabled: true
- name: empty-block
disabled: true
- name: redefines-builtin-id
disabled: true
- name: superfluous-else
disabled: true
stylecheck:
# Only enable the checks performed by the staticcheck stand-alone tool,
# as documented here: https://staticcheck.io/docs/configuration/options/#checks
checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
@@ -11,7 +11,7 @@ RUN ./rclone version
# Begin final image
FROM alpine:latest

RUN apk --no-cache add ca-certificates fuse tzdata && \
RUN apk --no-cache add ca-certificates fuse3 tzdata && \
echo "user_allow_other" >> /etc/fuse.conf

COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
@@ -16,6 +16,8 @@ Current active maintainers of rclone are:
| Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend |
| Caleb Case | @calebcase | storj backend |
| wiserain | @wiserain | pikpak backend |
| albertony | @albertony | |

**This is a work in progress Draft**
MANUAL.html (generated, 3933 changed lines): file diff suppressed because it is too large.
MANUAL.txt (generated, 4405 changed lines): file diff suppressed because it is too large.
Makefile (2 changed lines)

@@ -96,7 +96,7 @@ build_dep:

# Get the release dependencies we only install on linux
release_dep_linux:
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64\.tar\.gz'
go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest

# Get the release dependencies we only install on Windows
release_dep_windows:
@@ -25,18 +25,19 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
* Box [:page_facing_up:](https://rclone.org/box/)
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
* China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
* Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
* Arvan Cloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)

@@ -50,6 +51,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)

@@ -60,12 +62,15 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
* OVH [:page_facing_up:](https://rclone.org/swift/)
* Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)

@@ -80,6 +85,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* Storj [:page_facing_up:](https://rclone.org/storj/)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
@@ -10,7 +10,7 @@ This file describes how to make the various kinds of releases
## Making a release

* git checkout master # see below for stable branch
* git pull
* git pull # IMPORTANT
* git status - make sure everything is checked in
* Check GitHub actions build for master is Green
* make test # see integration test server or run locally

@@ -21,6 +21,7 @@ This file describes how to make the various kinds of releases
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX.0"
* make retag
* git push origin # without --follow-tags so it doesn't push the tag if it fails
* git push --follow-tags origin
* # Wait for the GitHub builds to complete then...
* make fetch_binaries
@@ -36,6 +36,7 @@ import (
_ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/oracleobjectstorage"
_ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/pikpak"
_ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor"
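Note: the pikpak line above, in rclone's list of blank backend imports, is enough on its own because each backend registers itself from an init() function when its package is imported. Below is a minimal, hypothetical sketch of that registration pattern; it is not the real pikpak code, which lives in backend/pikpak and defines many more options.

```go
// Sketch of why a blank import is enough to enable a backend: the package
// registers itself with fs.Register from init(). Names are illustrative only.
package pikpak

import (
	"context"
	"errors"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

func init() {
	// Runs as a side effect of `_ "github.com/rclone/rclone/backend/pikpak"`.
	fs.Register(&fs.RegInfo{
		Name:        "pikpak",
		Description: "PikPak",
		NewFs:       NewFs,
	})
}

// NewFs would build the remote from its parsed configuration; stubbed here.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	return nil, errors.New("sketch only, not implemented")
}
```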
@@ -1,5 +1,5 @@
//go:build !plan9 && !solaris && !js && go1.18
// +build !plan9,!solaris,!js,go1.18
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

// Package azureblob provides an interface to the Microsoft Azure blob object storage system
package azureblob
@@ -58,6 +58,8 @@ const (
|
||||
decayConstant = 1 // bigger for slower decay, exponential
|
||||
maxListChunkSize = 5000 // number of items to read at once
|
||||
modTimeKey = "mtime"
|
||||
dirMetaKey = "hdi_isfolder"
|
||||
dirMetaValue = "true"
|
||||
timeFormatIn = time.RFC3339
|
||||
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
|
||||
storageDefaultBaseURL = "blob.core.windows.net"
|
||||
@@ -93,6 +95,7 @@ Leave blank to use SAS URL or Emulator, otherwise it needs to be set.
|
||||
If this is blank and if env_auth is set it will be read from the
|
||||
environment variable ` + "`AZURE_STORAGE_ACCOUNT_NAME`" + ` if possible.
|
||||
`,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "env_auth",
|
||||
Help: `Read credentials from runtime (environment variables, CLI or MSI).
|
||||
@@ -104,11 +107,13 @@ See the [authentication docs](/azureblob#authentication) for full info.`,
|
||||
Help: `Storage Account Shared Key.
|
||||
|
||||
Leave blank to use SAS URL or Emulator.`,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "sas_url",
|
||||
Help: `SAS URL for container level access only.
|
||||
|
||||
Leave blank if using account/key or Emulator.`,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "tenant",
|
||||
Help: `ID of the service principal's tenant. Also called its directory ID.
|
||||
@@ -118,6 +123,7 @@ Set this if using
|
||||
- Service principal with certificate
|
||||
- User with username and password
|
||||
`,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "client_id",
|
||||
Help: `The ID of the client in use.
|
||||
@@ -127,6 +133,7 @@ Set this if using
|
||||
- Service principal with certificate
|
||||
- User with username and password
|
||||
`,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "client_secret",
|
||||
Help: `One of the service principal's client secrets
|
||||
@@ -134,6 +141,7 @@ Set this if using
|
||||
Set this if using
|
||||
- Service principal with client secret
|
||||
`,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "client_certificate_path",
|
||||
Help: `Path to a PEM or PKCS12 certificate file including the private key.
|
||||
@@ -171,7 +179,8 @@ Optionally set this if using
|
||||
Set this if using
|
||||
- User with username and password
|
||||
`,
|
||||
Advanced: true,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "password",
|
||||
Help: `The user's password
|
||||
@@ -214,17 +223,20 @@ msi_client_id, or msi_mi_res_id parameters.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "msi_object_id",
|
||||
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_mi_res_id specified.",
|
||||
Advanced: true,
|
||||
Name: "msi_object_id",
|
||||
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_mi_res_id specified.",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "msi_client_id",
|
||||
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_object_id or msi_mi_res_id specified.",
|
||||
Advanced: true,
|
||||
Name: "msi_client_id",
|
||||
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_object_id or msi_mi_res_id specified.",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "msi_mi_res_id",
|
||||
Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
|
||||
Advanced: true,
|
||||
Name: "msi_mi_res_id",
|
||||
Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "use_emulator",
|
||||
Help: "Uses local storage emulator if provided as 'true'.\n\nLeave blank if using real azure storage endpoint.",
|
||||
@@ -363,6 +375,18 @@ This option controls how often unused buffers will be removed from the pool.`,
|
||||
},
|
||||
},
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "directory_markers",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
Help: `Upload an empty object with a trailing slash when a new directory is created
|
||||
|
||||
Empty folders are unsupported for bucket based remotes, this option
|
||||
creates an empty object ending with "/", to persist the folder.
|
||||
|
||||
This object also has the metadata "` + dirMetaKey + ` = ` + dirMetaValue + `" to conform to
|
||||
the Microsoft standard.
|
||||
`,
|
||||
}, {
|
||||
Name: "no_check_container",
|
||||
Help: `If set, don't attempt to check the container exists or create it.
|
||||
@@ -412,6 +436,7 @@ type Options struct {
|
||||
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
PublicAccess string `config:"public_access"`
|
||||
DirectoryMarkers bool `config:"directory_markers"`
|
||||
NoCheckContainer bool `config:"no_check_container"`
|
||||
NoHeadObject bool `config:"no_head_object"`
|
||||
}
|
||||
@@ -486,7 +511,7 @@ func parsePath(path string) (root string) {
|
||||
// split returns container and containerPath from the rootRelativePath
|
||||
// relative to f.root
|
||||
func (f *Fs) split(rootRelativePath string) (containerName, containerPath string) {
|
||||
containerName, containerPath = bucket.Split(path.Join(f.root, rootRelativePath))
|
||||
containerName, containerPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
|
||||
return f.opt.Enc.FromStandardName(containerName), f.opt.Enc.FromStandardPath(containerPath)
|
||||
}
|
||||
|
||||
@@ -664,6 +689,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
SetTier: true,
|
||||
GetTier: true,
|
||||
}).Fill(ctx, f)
|
||||
if opt.DirectoryMarkers {
|
||||
f.features.CanHaveEmptyDirectories = true
|
||||
fs.Debugf(f, "Using directory markers")
|
||||
}
|
||||
|
||||
// Client options specifying our own transport
|
||||
policyClientOptions := policy.ClientOptions{
|
||||
@@ -690,7 +719,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
cred, err = azidentity.NewDefaultAzureCredential(&options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("create azure enviroment credential failed: %w", err)
|
||||
return nil, fmt.Errorf("create azure environment credential failed: %w", err)
|
||||
}
|
||||
case opt.UseEmulator:
|
||||
if opt.Account == "" {
|
||||
@@ -906,7 +935,7 @@ func (f *Fs) cntSVC(containerName string) (containerClient *container.Client) {
|
||||
// Return an Object from a path
|
||||
//
|
||||
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) newObjectWithInfo(remote string, info *container.BlobItem) (fs.Object, error) {
|
||||
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *container.BlobItem) (fs.Object, error) {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
@@ -917,7 +946,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *container.BlobItem) (fs.Obje
|
||||
return nil, err
|
||||
}
|
||||
} else if !o.fs.opt.NoHeadObject {
|
||||
err := o.readMetaData() // reads info and headers, returning an error
|
||||
err := o.readMetaData(ctx) // reads info and headers, returning an error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -928,7 +957,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *container.BlobItem) (fs.Obje
|
||||
// NewObject finds the Object at remote. If it can't be found
|
||||
// it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
return f.newObjectWithInfo(remote, nil)
|
||||
return f.newObjectWithInfo(ctx, remote, nil)
|
||||
}
|
||||
|
||||
// getBlobSVC creates a blob client
|
||||
@@ -953,7 +982,7 @@ func (o *Object) updateMetadataWithModTime(modTime time.Time) {
|
||||
}
|
||||
|
||||
// Returns whether file is a directory marker or not
|
||||
func isDirectoryMarker(size int64, metadata map[string]string, remote string) bool {
|
||||
func isDirectoryMarker(size int64, metadata map[string]*string, remote string) bool {
|
||||
// Directory markers are 0 length
|
||||
if size == 0 {
|
||||
endsWithSlash := strings.HasSuffix(remote, "/")
|
||||
@@ -964,31 +993,7 @@ func isDirectoryMarker(size int64, metadata map[string]string, remote string) bo
|
||||
// defacto standard for marking blobs as directories.
|
||||
// Note also that the metadata hasn't been normalised to lower case yet
|
||||
for k, v := range metadata {
|
||||
if strings.EqualFold(k, "hdi_isfolder") && v == "true" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Returns whether file is a directory marker or not using metadata
|
||||
// with pointers to strings as the SDK seems to use both forms rather
|
||||
// annoyingly.
|
||||
//
|
||||
// NB This is a duplicate of isDirectoryMarker
|
||||
func isDirectoryMarkerP(size int64, metadata map[string]*string, remote string) bool {
|
||||
// Directory markers are 0 length
|
||||
if size == 0 {
|
||||
endsWithSlash := strings.HasSuffix(remote, "/")
|
||||
if endsWithSlash || remote == "" {
|
||||
return true
|
||||
}
|
||||
// Note that metadata with hdi_isfolder = true seems to be a
|
||||
// defacto standard for marking blobs as directories.
|
||||
// Note also that the metadata hasn't been normalised to lower case yet
|
||||
for k, pv := range metadata {
|
||||
if strings.EqualFold(k, "hdi_isfolder") && pv != nil && *pv == "true" {
|
||||
if v != nil && strings.EqualFold(k, dirMetaKey) && *v == dirMetaValue {
|
||||
return true
|
||||
}
|
||||
}
|
||||
@@ -1033,6 +1038,7 @@ func (f *Fs) list(ctx context.Context, containerName, directory, prefix string,
|
||||
Prefix: &directory,
|
||||
MaxResults: &maxResults,
|
||||
})
|
||||
foundItems := 0
|
||||
for pager.More() {
|
||||
var response container.ListBlobsHierarchyResponse
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
@@ -1051,6 +1057,7 @@ func (f *Fs) list(ctx context.Context, containerName, directory, prefix string,
|
||||
}
|
||||
// Advance marker to next
|
||||
// marker = response.NextMarker
|
||||
foundItems += len(response.Segment.BlobItems)
|
||||
for i := range response.Segment.BlobItems {
|
||||
file := response.Segment.BlobItems[i]
|
||||
// Finish if file name no longer has prefix
|
||||
@@ -1066,20 +1073,27 @@ func (f *Fs) list(ctx context.Context, containerName, directory, prefix string,
|
||||
fs.Debugf(f, "Odd name received %q", remote)
|
||||
continue
|
||||
}
|
||||
remote = remote[len(prefix):]
|
||||
if isDirectoryMarkerP(*file.Properties.ContentLength, file.Metadata, remote) {
|
||||
continue // skip directory marker
|
||||
isDirectory := isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote)
|
||||
if isDirectory {
|
||||
// Don't insert the root directory
|
||||
if remote == directory {
|
||||
continue
|
||||
}
|
||||
// process directory markers as directories
|
||||
remote = strings.TrimRight(remote, "/")
|
||||
}
|
||||
remote = remote[len(prefix):]
|
||||
if addContainer {
|
||||
remote = path.Join(containerName, remote)
|
||||
}
|
||||
// Send object
|
||||
err = fn(remote, file, false)
|
||||
err = fn(remote, file, isDirectory)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Send the subdirectories
|
||||
foundItems += len(response.Segment.BlobPrefixes)
|
||||
for _, remote := range response.Segment.BlobPrefixes {
|
||||
if remote.Name == nil {
|
||||
fs.Debugf(f, "Nil prefix received")
|
||||
@@ -1102,16 +1116,26 @@ func (f *Fs) list(ctx context.Context, containerName, directory, prefix string,
|
||||
}
|
||||
}
|
||||
}
|
||||
if f.opt.DirectoryMarkers && foundItems == 0 && directory != "" {
|
||||
// Determine whether the directory exists or not by whether it has a marker
|
||||
_, err := f.readMetaData(ctx, containerName, directory)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
return fs.ErrorDirNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert a list item into a DirEntry
|
||||
func (f *Fs) itemToDirEntry(remote string, object *container.BlobItem, isDirectory bool) (fs.DirEntry, error) {
|
||||
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *container.BlobItem, isDirectory bool) (fs.DirEntry, error) {
|
||||
if isDirectory {
|
||||
d := fs.NewDir(remote, time.Time{})
|
||||
return d, nil
|
||||
}
|
||||
o, err := f.newObjectWithInfo(remote, object)
|
||||
o, err := f.newObjectWithInfo(ctx, remote, object)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1139,7 +1163,7 @@ func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix strin
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
err = f.list(ctx, containerName, directory, prefix, addContainer, false, int32(f.opt.ListChunkSize), func(remote string, object *container.BlobItem, isDirectory bool) error {
|
||||
entry, err := f.itemToDirEntry(remote, object, isDirectory)
|
||||
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1220,7 +1244,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
|
||||
list := walk.NewListRHelper(callback)
|
||||
listR := func(containerName, directory, prefix string, addContainer bool) error {
|
||||
return f.list(ctx, containerName, directory, prefix, addContainer, true, int32(f.opt.ListChunkSize), func(remote string, object *container.BlobItem, isDirectory bool) error {
|
||||
entry, err := f.itemToDirEntry(remote, object, isDirectory)
|
||||
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1314,10 +1338,71 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
|
||||
return f.Put(ctx, in, src, options...)
|
||||
}
|
||||
|
||||
// Create directory marker file and parents
|
||||
func (f *Fs) createDirectoryMarker(ctx context.Context, container, dir string) error {
|
||||
if !f.opt.DirectoryMarkers || container == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Object to be uploaded
|
||||
o := &Object{
|
||||
fs: f,
|
||||
modTime: time.Now(),
|
||||
meta: map[string]string{
|
||||
dirMetaKey: dirMetaValue,
|
||||
},
|
||||
}
|
||||
|
||||
for {
|
||||
_, containerPath := f.split(dir)
|
||||
// Don't create the directory marker if it is the bucket or at the very root
|
||||
if containerPath == "" {
|
||||
break
|
||||
}
|
||||
o.remote = dir + "/"
|
||||
|
||||
// Check to see if object already exists
|
||||
_, err := f.readMetaData(ctx, container, containerPath+"/")
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Upload it if not
|
||||
fs.Debugf(o, "Creating directory marker")
|
||||
content := io.Reader(strings.NewReader(""))
|
||||
err = o.Update(ctx, content, o)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating directory marker failed: %w", err)
|
||||
}
|
||||
|
||||
// Now check parent directory exists
|
||||
dir = path.Dir(dir)
|
||||
if dir == "/" || dir == "." {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Mkdir creates the container if it doesn't exist
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
container, _ := f.split(dir)
|
||||
return f.makeContainer(ctx, container)
|
||||
e := f.makeContainer(ctx, container)
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
return f.createDirectoryMarker(ctx, container, dir)
|
||||
}
|
||||
|
||||
// mkdirParent creates the parent bucket/directory if it doesn't exist
|
||||
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
|
||||
remote = strings.TrimRight(remote, "/")
|
||||
dir := path.Dir(remote)
|
||||
if dir == "/" || dir == "." {
|
||||
dir = ""
|
||||
}
|
||||
return f.Mkdir(ctx, dir)
|
||||
}
|
||||
|
||||
// makeContainer creates the container if it doesn't exist
|
||||
@@ -1417,6 +1502,18 @@ func (f *Fs) deleteContainer(ctx context.Context, containerName string) error {
|
||||
// Returns an error if it isn't empty
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
container, directory := f.split(dir)
|
||||
// Remove directory marker file
|
||||
if f.opt.DirectoryMarkers && container != "" && dir != "" {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: dir + "/",
|
||||
}
|
||||
fs.Debugf(o, "Removing directory marker")
|
||||
err := o.Remove(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("removing directory marker failed: %w", err)
|
||||
}
|
||||
}
|
||||
if container == "" || directory != "" {
|
||||
return nil
|
||||
}
|
||||
@@ -1458,7 +1555,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
dstContainer, dstPath := f.split(remote)
|
||||
err := f.makeContainer(ctx, dstContainer)
|
||||
err := f.mkdirParent(ctx, remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1471,9 +1568,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
srcBlobSVC := srcObj.getBlobSVC()
|
||||
srcURL := srcBlobSVC.URL()
|
||||
|
||||
tier := blob.AccessTier(f.opt.AccessTier)
|
||||
options := blob.StartCopyFromURLOptions{
|
||||
Tier: &tier,
|
||||
Tier: parseTier(f.opt.AccessTier),
|
||||
}
|
||||
var startCopy blob.StartCopyFromURLResponse
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
@@ -1552,12 +1648,15 @@ func (o *Object) Size() int64 {
|
||||
return o.size
|
||||
}
|
||||
|
||||
func (o *Object) setMetadata(metadata map[string]string) {
|
||||
// Set o.metadata from metadata
|
||||
func (o *Object) setMetadata(metadata map[string]*string) {
|
||||
if len(metadata) > 0 {
|
||||
// Lower case the metadata
|
||||
o.meta = make(map[string]string, len(metadata))
|
||||
for k, v := range metadata {
|
||||
o.meta[strings.ToLower(k)] = v
|
||||
if v != nil {
|
||||
o.meta[strings.ToLower(k)] = *v
|
||||
}
|
||||
}
|
||||
// Set o.modTime from metadata if it exists and
|
||||
// UseServerModTime isn't in use.
|
||||
@@ -1573,20 +1672,17 @@ func (o *Object) setMetadata(metadata map[string]string) {
|
||||
}
|
||||
}
|
||||
|
||||
// Duplicte of setMetadata but taking pointers to strings
|
||||
func (o *Object) setMetadataP(metadata map[string]*string) {
|
||||
if len(metadata) > 0 {
|
||||
// Convert the format of the metadata
|
||||
newMeta := make(map[string]string, len(metadata))
|
||||
for k, v := range metadata {
|
||||
if v != nil {
|
||||
newMeta[k] = *v
|
||||
}
|
||||
}
|
||||
o.setMetadata(newMeta)
|
||||
} else {
|
||||
o.meta = nil
|
||||
// Get metadata from o.meta
|
||||
func (o *Object) getMetadata() (metadata map[string]*string) {
|
||||
if len(o.meta) == 0 {
|
||||
return nil
|
||||
}
|
||||
metadata = make(map[string]*string, len(o.meta))
|
||||
for k, v := range o.meta {
|
||||
v := v
|
||||
metadata[k] = &v
|
||||
}
|
||||
return metadata
|
||||
}
|
||||
|
||||
// decodeMetaDataFromPropertiesResponse sets the metadata from the data passed in
|
||||
@@ -1696,7 +1792,7 @@ func (o *Object) decodeMetaDataFromBlob(info *container.BlobItem) (err error) {
|
||||
} else {
|
||||
size = *info.Properties.ContentLength
|
||||
}
|
||||
if isDirectoryMarkerP(size, metadata, o.remote) {
|
||||
if isDirectoryMarker(size, metadata, o.remote) {
|
||||
return fs.ErrorNotAFile
|
||||
}
|
||||
// NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
|
||||
@@ -1718,7 +1814,7 @@ func (o *Object) decodeMetaDataFromBlob(info *container.BlobItem) (err error) {
|
||||
} else {
|
||||
o.accessTier = *info.Properties.AccessTier
|
||||
}
|
||||
o.setMetadataP(metadata)
|
||||
o.setMetadata(metadata)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1729,17 +1825,34 @@ func (o *Object) getBlobSVC() *blob.Client {
|
||||
return o.fs.getBlobSVC(container, directory)
|
||||
}
|
||||
|
||||
// getBlockBlobSVC creates a block blob client
|
||||
func (o *Object) getBlockBlobSVC() *blockblob.Client {
|
||||
container, directory := o.split()
|
||||
return o.fs.getBlockBlobSVC(container, directory)
|
||||
}
|
||||
|
||||
// clearMetaData clears enough metadata so readMetaData will re-read it
|
||||
func (o *Object) clearMetaData() {
|
||||
o.modTime = time.Time{}
|
||||
}
|
||||
|
||||
// readMetaData gets the metadata if it hasn't already been fetched
|
||||
func (f *Fs) readMetaData(ctx context.Context, container, containerPath string) (blobProperties blob.GetPropertiesResponse, err error) {
|
||||
if !f.containerOK(container) {
|
||||
return blobProperties, fs.ErrorObjectNotFound
|
||||
}
|
||||
blb := f.getBlobSVC(container, containerPath)
|
||||
|
||||
// Read metadata (this includes metadata)
|
||||
options := blob.GetPropertiesOptions{}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
blobProperties, err = blb.GetProperties(ctx, &options)
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
// On directories - GetProperties does not work and current SDK does not populate service code correctly hence check regular http response as well
|
||||
if storageErr, ok := err.(*azcore.ResponseError); ok && (storageErr.ErrorCode == string(bloberror.BlobNotFound) || storageErr.StatusCode == http.StatusNotFound) {
|
||||
return blobProperties, fs.ErrorObjectNotFound
|
||||
}
|
||||
return blobProperties, err
|
||||
}
|
||||
return blobProperties, nil
|
||||
}
|
||||
|
||||
// readMetaData gets the metadata if it hasn't already been fetched
|
||||
//
|
||||
// Sets
|
||||
@@ -1748,33 +1861,15 @@ func (o *Object) clearMetaData() {
|
||||
// o.modTime
|
||||
// o.size
|
||||
// o.md5
|
||||
func (o *Object) readMetaData() (err error) {
|
||||
container, _ := o.split()
|
||||
if !o.fs.containerOK(container) {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
func (o *Object) readMetaData(ctx context.Context) (err error) {
|
||||
if !o.modTime.IsZero() {
|
||||
return nil
|
||||
}
|
||||
blb := o.getBlobSVC()
|
||||
// fs.Debugf(o, "Blob URL = %q", blb.URL())
|
||||
|
||||
// Read metadata (this includes metadata)
|
||||
options := blob.GetPropertiesOptions{}
|
||||
ctx := context.Background()
|
||||
var blobProperties blob.GetPropertiesResponse
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
blobProperties, err = blb.GetProperties(ctx, &options)
|
||||
return o.fs.shouldRetry(ctx, err)
|
||||
})
|
||||
container, containerPath := o.split()
|
||||
blobProperties, err := o.fs.readMetaData(ctx, container, containerPath)
|
||||
if err != nil {
|
||||
// On directories - GetProperties does not work and current SDK does not populate service code correctly hence check regular http response as well
|
||||
if storageErr, ok := err.(*azcore.ResponseError); ok && (storageErr.ErrorCode == string(bloberror.BlobNotFound) || storageErr.StatusCode == http.StatusNotFound) {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
return o.decodeMetaDataFromPropertiesResponse(&blobProperties)
|
||||
}
|
||||
|
||||
@@ -1784,7 +1879,7 @@ func (o *Object) readMetaData() (err error) {
|
||||
// LastModified returned in the http headers
|
||||
func (o *Object) ModTime(ctx context.Context) (result time.Time) {
|
||||
// The error is logged in readMetaData
|
||||
_ = o.readMetaData()
|
||||
_ = o.readMetaData(ctx)
|
||||
return o.modTime
|
||||
}
|
||||
|
||||
@@ -1800,7 +1895,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
blb := o.getBlobSVC()
|
||||
opt := blob.SetMetadataOptions{}
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
_, err := blb.SetMetadata(ctx, o.meta, &opt)
|
||||
_, err := blb.SetMetadata(ctx, o.getMetadata(), &opt)
|
||||
return o.fs.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1871,48 +1966,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
return downloadResponse.Body, nil
|
||||
}
|
||||
|
||||
// poolWrapper wraps a pool.Pool as an azblob.TransferManager
|
||||
type poolWrapper struct {
|
||||
pool *pool.Pool
|
||||
bufToken chan struct{}
|
||||
runToken chan struct{}
|
||||
}
|
||||
|
||||
// newPoolWrapper creates an azblob.TransferManager that will use a
|
||||
// pool.Pool with maximum concurrency as specified.
|
||||
func (f *Fs) newPoolWrapper(concurrency int) *poolWrapper {
|
||||
return &poolWrapper{
|
||||
pool: f.pool,
|
||||
bufToken: make(chan struct{}, concurrency),
|
||||
runToken: make(chan struct{}, concurrency),
|
||||
}
|
||||
}
|
||||
|
||||
// Get implements TransferManager.Get().
|
||||
func (pw *poolWrapper) Get() []byte {
|
||||
pw.bufToken <- struct{}{}
|
||||
return pw.pool.Get()
|
||||
}
|
||||
|
||||
// Put implements TransferManager.Put().
|
||||
func (pw *poolWrapper) Put(b []byte) {
|
||||
pw.pool.Put(b)
|
||||
<-pw.bufToken
|
||||
}
|
||||
|
||||
// Run implements TransferManager.Run().
|
||||
func (pw *poolWrapper) Run(f func()) {
|
||||
pw.runToken <- struct{}{}
|
||||
go func() {
|
||||
f()
|
||||
<-pw.runToken
|
||||
}()
|
||||
}
|
||||
|
||||
// Close implements TransferManager.Close().
|
||||
func (pw *poolWrapper) Close() {
|
||||
}
|
||||
|
||||
// Converts a string into a pointer to a string
|
||||
func pString(s string) *string {
|
||||
return &s
|
||||
@@ -2094,10 +2147,9 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
|
||||
return err
|
||||
}
|
||||
|
||||
tier := blob.AccessTier(o.fs.opt.AccessTier)
|
||||
options := blockblob.CommitBlockListOptions{
|
||||
Metadata: o.meta,
|
||||
Tier: &tier,
|
||||
Metadata: o.getMetadata(),
|
||||
Tier: parseTier(o.fs.opt.AccessTier),
|
||||
HTTPHeaders: httpHeaders,
|
||||
}
|
||||
|
||||
@@ -2141,10 +2193,9 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
|
||||
b := bytes.NewReader(buf[:n])
|
||||
rs := &readSeekCloser{Reader: b, Seeker: b}
|
||||
|
||||
tier := blob.AccessTier(o.fs.opt.AccessTier)
|
||||
options := blockblob.UploadOptions{
|
||||
Metadata: o.meta,
|
||||
Tier: &tier,
|
||||
Metadata: o.getMetadata(),
|
||||
Tier: parseTier(o.fs.opt.AccessTier),
|
||||
HTTPHeaders: httpHeaders,
|
||||
}
|
||||
|
||||
@@ -2174,12 +2225,17 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
if container == "" || containerPath == "" {
|
||||
return fmt.Errorf("can't upload to root - need a container")
|
||||
}
|
||||
err = o.fs.makeContainer(ctx, container)
|
||||
if err != nil {
|
||||
return err
|
||||
// Create parent dir/bucket if not saving directory marker
|
||||
_, isDirMarker := o.meta[dirMetaKey]
|
||||
if !isDirMarker {
|
||||
err = o.fs.mkdirParent(ctx, o.remote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Update Mod time
|
||||
fs.Debugf(nil, "o.meta = %+v", o.meta)
|
||||
o.updateMetadataWithModTime(src.ModTime(ctx))
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -2227,6 +2283,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
size := src.Size()
|
||||
multipartUpload := size < 0 || size > o.fs.poolSize
|
||||
|
||||
fs.Debugf(nil, "o.meta = %+v", o.meta)
|
||||
if multipartUpload {
|
||||
err = o.uploadMultipart(ctx, in, size, blb, &httpHeaders)
|
||||
} else {
|
||||
@@ -2237,10 +2294,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
}
|
||||
|
||||
// Refresh metadata on object
|
||||
o.clearMetaData()
|
||||
err = o.readMetaData()
|
||||
if err != nil {
|
||||
return err
|
||||
if !isDirMarker {
|
||||
o.clearMetaData()
|
||||
err = o.readMetaData(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// If tier is not changed or not specified, do not attempt to invoke `SetBlobTier` operation
|
||||
@@ -2314,6 +2373,14 @@ func (o *Object) GetTier() string {
return string(o.accessTier)
}

func parseTier(tier string) *blob.AccessTier {
if tier == "" {
return nil
}
msTier := blob.AccessTier(tier)
return &msTier
}

// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
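For context on the metadata changes in the azureblob hunks above (the duplicated setMetadataP/isDirectoryMarkerP helpers are replaced by setMetadata/getMetadata working on the SDK's map[string]*string form), here is a small self-contained sketch of the conversion the backend now relies on. It assumes nothing beyond the standard library; the function names mirror the diff but the program itself is illustrative.

```go
// Minimal sketch: rclone keeps object metadata as map[string]string, while
// the Azure SDK wants map[string]*string, so helpers convert both ways.
package main

import (
	"fmt"
	"strings"
)

// toPointerMap mirrors getMetadata: copy each value into its own variable so
// every pointer is distinct, and return nil for empty metadata.
func toPointerMap(meta map[string]string) map[string]*string {
	if len(meta) == 0 {
		return nil
	}
	out := make(map[string]*string, len(meta))
	for k, v := range meta {
		v := v // new variable per iteration so &v is unique
		out[k] = &v
	}
	return out
}

// fromPointerMap mirrors setMetadata: lower-case the keys and skip nil
// values, which the SDK can return.
func fromPointerMap(meta map[string]*string) map[string]string {
	out := make(map[string]string, len(meta))
	for k, v := range meta {
		if v != nil {
			out[strings.ToLower(k)] = *v
		}
	}
	return out
}

func main() {
	in := map[string]string{"Mtime": "2023-07-17T00:00:00Z", "hdi_isfolder": "true"}
	ptrs := toPointerMap(in)
	fmt.Println(fromPointerMap(ptrs)) // map[hdi_isfolder:true mtime:2023-07-17T00:00:00Z]
}
```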
@@ -1,5 +1,5 @@
//go:build !plan9 && !solaris && !js && go1.18
// +build !plan9,!solaris,!js,go1.18
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package azureblob
@@ -1,7 +1,7 @@
// Test AzureBlob filesystem interface

//go:build !plan9 && !solaris && !js && go1.18
// +build !plan9,!solaris,!js,go1.18
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package azureblob

@@ -9,6 +9,7 @@ import (
"testing"

"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
)

@@ -25,6 +26,25 @@ func TestIntegration(t *testing.T) {
})
}

// TestIntegration2 runs integration tests against the remote
func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
name := "TestAzureBlob:"
fstests.Run(t, &fstests.Opt{
RemoteName: name,
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize,
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "directory_markers", Value: "true"},
},
})
}

func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
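The new TestIntegration2 above reruns the standard integration suite with directory_markers enabled. As a rough illustration of the marker convention being exercised (a zero-length object with a trailing slash and hdi_isfolder=true metadata), here is a standalone sketch that mirrors the backend's detection logic; it is not part of the diff itself.

```go
// Sketch of the directory-marker convention used by the directory_markers
// option: markers are empty objects flagged either by a trailing "/" in the
// name or by the hdi_isfolder=true metadata key.
package main

import (
	"fmt"
	"strings"
)

const (
	dirMetaKey   = "hdi_isfolder"
	dirMetaValue = "true"
)

// isDirectoryMarker mirrors the backend's check: zero size plus either a
// trailing slash or the hdi_isfolder metadata entry.
func isDirectoryMarker(size int64, metadata map[string]*string, remote string) bool {
	if size != 0 {
		return false
	}
	if strings.HasSuffix(remote, "/") || remote == "" {
		return true
	}
	for k, v := range metadata {
		if v != nil && strings.EqualFold(k, dirMetaKey) && *v == dirMetaValue {
			return true
		}
	}
	return false
}

func main() {
	v := dirMetaValue
	meta := map[string]*string{dirMetaKey: &v}
	fmt.Println(isDirectoryMarker(0, meta, "photos/2023"))    // true: metadata marker
	fmt.Println(isDirectoryMarker(0, nil, "photos/2023/"))    // true: trailing slash
	fmt.Println(isDirectoryMarker(42, meta, "photos/a.jpg"))  // false: non-empty object
}
```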
@@ -1,7 +1,7 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "

//go:build plan9 || solaris || js || !go1.18
// +build plan9 solaris js !go1.18
//go:build plan9 || solaris || js
// +build plan9 solaris js

package azureblob
@@ -75,13 +75,15 @@
Description: "Backblaze B2",
NewFs: NewFs,
Options: []fs.Option{{
Name: "account",
Help: "Account ID or Application Key ID.",
Required: true,
Name: "account",
Help: "Account ID or Application Key ID.",
Required: true,
Sensitive: true,
}, {
Name: "key",
Help: "Application Key.",
Required: true,
Name: "key",
Help: "Application Key.",
Required: true,
Sensitive: true,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.",

@@ -1221,7 +1223,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
fs.Errorf(object.Name, "Can't create object %v", err)
continue
}
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "deleting")
err = f.deleteByID(ctx, object.ID, object.Name)
checkErr(err)
tr.Done(ctx, err)

@@ -1235,7 +1237,7 @@
if err != nil {
fs.Errorf(object, "Can't create object %+v", err)
}
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
if oldOnly && last != remote {
// Check current version of the file
if object.Action == "hide" {
@@ -27,6 +27,7 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/golang-jwt/jwt/v4"
|
||||
"github.com/rclone/rclone/backend/box/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
@@ -45,7 +46,6 @@ import (
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"github.com/youmark/pkcs8"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/jws"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -76,6 +76,11 @@ var (
|
||||
}
|
||||
)
|
||||
|
||||
type boxCustomClaims struct {
|
||||
jwt.StandardClaims
|
||||
BoxSubType string `json:"box_sub_type,omitempty"`
|
||||
}
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
@@ -102,16 +107,18 @@ func init() {
|
||||
return nil, nil
|
||||
},
|
||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||
Name: "root_folder_id",
|
||||
Help: "Fill in for rclone to use a non root folder as its starting point.",
|
||||
Default: "0",
|
||||
Advanced: true,
|
||||
Name: "root_folder_id",
|
||||
Help: "Fill in for rclone to use a non root folder as its starting point.",
|
||||
Default: "0",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "box_config_file",
|
||||
Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
|
||||
}, {
|
||||
Name: "access_token",
|
||||
Help: "Box App Primary Access Token\n\nLeave blank normally.",
|
||||
Name: "access_token",
|
||||
Help: "Box App Primary Access Token\n\nLeave blank normally.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "box_sub_type",
|
||||
Default: "user",
|
||||
@@ -178,7 +185,7 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
|
||||
signingHeaders := getSigningHeaders(boxConfig)
|
||||
queryParams := getQueryParams(boxConfig)
|
||||
client := fshttp.NewClient(ctx)
|
||||
err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
|
||||
err = jwtutil.Config("box", name, tokenURL, *claims, signingHeaders, queryParams, privateKey, m, client)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -194,34 +201,31 @@ func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
|
||||
return boxConfig, nil
|
||||
}
|
||||
|
||||
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
|
||||
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomClaims, err error) {
|
||||
val, err := jwtutil.RandomHex(20)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err)
|
||||
}
|
||||
|
||||
claims = &jws.ClaimSet{
|
||||
Iss: boxConfig.BoxAppSettings.ClientID,
|
||||
Sub: boxConfig.EnterpriseID,
|
||||
Aud: tokenURL,
|
||||
Exp: time.Now().Add(time.Second * 45).Unix(),
|
||||
PrivateClaims: map[string]interface{}{
|
||||
"box_sub_type": boxSubType,
|
||||
"aud": tokenURL,
|
||||
"jti": val,
|
||||
claims = &boxCustomClaims{
|
||||
//lint:ignore SA1019 since we need to use jwt.StandardClaims even if deprecated in jwt-go v4 until a more permanent solution is ready in time before jwt-go v5 where it is removed entirely
|
||||
//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1019
|
||||
StandardClaims: jwt.StandardClaims{
|
||||
Id: val,
|
||||
Issuer: boxConfig.BoxAppSettings.ClientID,
|
||||
Subject: boxConfig.EnterpriseID,
|
||||
Audience: tokenURL,
|
||||
ExpiresAt: time.Now().Add(time.Second * 45).Unix(),
|
||||
},
|
||||
BoxSubType: boxSubType,
|
||||
}
|
||||
|
||||
return claims, nil
|
||||
}
|
||||
|
||||
func getSigningHeaders(boxConfig *api.ConfigJSON) *jws.Header {
|
||||
signingHeaders := &jws.Header{
|
||||
Algorithm: "RS256",
|
||||
Typ: "JWT",
|
||||
KeyID: boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
|
||||
func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]interface{} {
|
||||
signingHeaders := map[string]interface{}{
|
||||
"kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
|
||||
}
|
||||
|
||||
return signingHeaders
|
||||
}
|
||||
|
||||
|
||||
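The Box changes above replace golang.org/x/oauth2/jws claim sets with custom claims built on github.com/golang-jwt/jwt/v4. A self-contained sketch of signing such claims with RS256; the RSA key is generated on the fly and the issuer, subject and jti values are placeholders (the real backend reads them from Box's config.json and a random hex string):

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

// boxClaims mirrors the pattern in the diff: registered claims plus the
// Box-specific box_sub_type field.
type boxClaims struct {
	jwt.StandardClaims
	BoxSubType string `json:"box_sub_type,omitempty"`
}

func main() {
	// Throwaway key for illustration only.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	claims := boxClaims{
		StandardClaims: jwt.StandardClaims{
			Issuer:    "client-id",     // placeholder
			Subject:   "enterprise-id", // placeholder
			Audience:  "https://api.box.com/oauth2/token",
			ExpiresAt: time.Now().Add(45 * time.Second).Unix(),
			Id:        "random-jti", // placeholder
		},
		BoxSubType: "enterprise",
	}

	token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
	token.Header["kid"] = "public-key-id" // same role as the "kid" signing header in the diff
	signed, err := token.SignedString(key)
	if err != nil {
		panic(err)
	}
	fmt.Printf("assertion of %d bytes ready to POST to the token URL\n", len(signed))
}
```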
16
backend/cache/cache.go
vendored
@@ -76,17 +76,19 @@ func init() {
|
||||
Name: "plex_url",
|
||||
Help: "The URL of the Plex server.",
|
||||
}, {
|
||||
Name: "plex_username",
|
||||
Help: "The username of the Plex user.",
|
||||
Name: "plex_username",
|
||||
Help: "The username of the Plex user.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "plex_password",
|
||||
Help: "The password of the Plex user.",
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "plex_token",
|
||||
Help: "The plex token for authentication - auto set normally.",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Advanced: true,
|
||||
Name: "plex_token",
|
||||
Help: "The plex token for authentication - auto set normally.",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "plex_insecure",
|
||||
Help: "Skip all certificate verification when connecting to the Plex server.",
|
||||
@@ -1787,7 +1789,7 @@ func (f *Fs) CleanUpCache(ignoreLastTs bool) {
|
||||
}
|
||||
}
|
||||
|
||||
// StopBackgroundRunners will signall all the runners to stop their work
|
||||
// StopBackgroundRunners will signal all the runners to stop their work
|
||||
// can be triggered from a terminate signal or from testing between runs
|
||||
func (f *Fs) StopBackgroundRunners() {
|
||||
f.cleanupChan <- false
|
||||
|
||||
21
backend/cache/cache_internal_test.go
vendored
@@ -1098,27 +1098,6 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error)
|
||||
return l, err
|
||||
}
|
||||
|
||||
func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
|
||||
in, err := os.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
_ = in.Close()
|
||||
}()
|
||||
|
||||
out, err := os.Create(dst)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
_ = out.Close()
|
||||
}()
|
||||
|
||||
_, err = io.Copy(out, in)
|
||||
return err
|
||||
}
|
||||
|
||||
func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
|
||||
var err error
|
||||
|
||||
|
||||
4
backend/cache/cache_upload_test.go
vendored
@@ -160,11 +160,11 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
|
||||
minSize := 5242880
|
||||
maxSize := 10485760
|
||||
totalFiles := 10
|
||||
rand.Seed(time.Now().Unix())
|
||||
randInstance := rand.New(rand.NewSource(time.Now().Unix()))
|
||||
|
||||
lastFile := ""
|
||||
for i := 0; i < totalFiles; i++ {
|
||||
size := int64(rand.Intn(maxSize-minSize) + minSize)
|
||||
size := int64(randInstance.Intn(maxSize-minSize) + minSize)
|
||||
testReader := runInstance.randomReader(t, size)
|
||||
remote := "test/" + strconv.Itoa(i) + ".bin"
|
||||
runInstance.writeRemoteReader(t, rootFs, remote, testReader)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// Package combine implents a backend to combine multiple remotes in a directory tree
|
||||
// Package combine implements a backend to combine multiple remotes in a directory tree
|
||||
package combine
|
||||
|
||||
/*
|
||||
@@ -233,6 +233,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: true,
|
||||
PartialUploads: true,
|
||||
}).Fill(ctx, f)
|
||||
canMove := true
|
||||
for _, u := range f.upstreams {
|
||||
@@ -289,6 +290,16 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
|
||||
}
|
||||
}
|
||||
|
||||
// Enable CleanUp when any upstreams support it
|
||||
if features.CleanUp == nil {
|
||||
for _, u := range f.upstreams {
|
||||
if u.f.Features().CleanUp != nil {
|
||||
features.CleanUp = f.CleanUp
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Enable ChangeNotify when any upstreams support it
|
||||
if features.ChangeNotify == nil {
|
||||
for _, u := range f.upstreams {
|
||||
@@ -299,6 +310,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
|
||||
}
|
||||
}
|
||||
|
||||
// show that we wrap other backends
|
||||
features.Overlay = true
|
||||
|
||||
f.features = features
|
||||
|
||||
// Get common intersection of hashes
|
||||
@@ -351,7 +365,7 @@ func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream
|
||||
return g.Wait()
|
||||
}
|
||||
|
||||
// join the elements together but unline path.Join return empty string
|
||||
// join the elements together but unlike path.Join return empty string
|
||||
func join(elem ...string) string {
|
||||
result := path.Join(elem...)
|
||||
if result == "." {
|
||||
@@ -887,6 +901,100 @@ func (f *Fs) Shutdown(ctx context.Context) error {
|
||||
})
|
||||
}
|
||||
|
||||
// PublicLink generates a public link to the remote path (usually readable by anyone)
|
||||
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
|
||||
u, uRemote, err := f.findUpstream(remote)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
do := u.f.Features().PublicLink
|
||||
if do == nil {
|
||||
return "", fs.ErrorNotImplemented
|
||||
}
|
||||
return do(ctx, uRemote, expire, unlink)
|
||||
}
|
||||
|
||||
// Put in to the remote path with the modTime given of the given size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
//
|
||||
// May create duplicates or return errors if src already
|
||||
// exists.
|
||||
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
srcPath := src.Remote()
|
||||
u, uRemote, err := f.findUpstream(srcPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
do := u.f.Features().PutUnchecked
|
||||
if do == nil {
|
||||
return nil, fs.ErrorNotImplemented
|
||||
}
|
||||
uSrc := fs.NewOverrideRemote(src, uRemote)
|
||||
return do(ctx, in, uSrc, options...)
|
||||
}
|
||||
|
||||
// MergeDirs merges the contents of all the directories passed
|
||||
// in into the first one and rmdirs the other directories.
|
||||
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
|
||||
if len(dirs) == 0 {
|
||||
return nil
|
||||
}
|
||||
var (
|
||||
u *upstream
|
||||
uDirs []fs.Directory
|
||||
)
|
||||
for _, dir := range dirs {
|
||||
uNew, uDir, err := f.findUpstream(dir.Remote())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if u == nil {
|
||||
u = uNew
|
||||
} else if u != uNew {
|
||||
return fmt.Errorf("can't merge directories from different upstreams")
|
||||
}
|
||||
uDirs = append(uDirs, fs.NewOverrideDirectory(dir, uDir))
|
||||
}
|
||||
do := u.f.Features().MergeDirs
|
||||
if do == nil {
|
||||
return fs.ErrorNotImplemented
|
||||
}
|
||||
return do(ctx, uDirs)
|
||||
}
|
||||
|
||||
// CleanUp the trash in the Fs
|
||||
//
|
||||
// Implement this if you have a way of emptying the trash or
|
||||
// otherwise cleaning up old versions of files.
|
||||
func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
return f.multithread(ctx, func(ctx context.Context, u *upstream) error {
|
||||
if do := u.f.Features().CleanUp; do != nil {
|
||||
return do(ctx)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// OpenWriterAt opens with a handle for random access writes
|
||||
//
|
||||
// Pass in the remote desired and the size if known.
|
||||
//
|
||||
// It truncates any existing object
|
||||
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
|
||||
u, uRemote, err := f.findUpstream(remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
do := u.f.Features().OpenWriterAt
|
||||
if do == nil {
|
||||
return nil, fs.ErrorNotImplemented
|
||||
}
|
||||
return do(ctx, uRemote, size)
|
||||
}
|
||||
|
||||
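Each of the new combine methods above follows the same delegation shape: resolve which upstream owns the path, check whether that upstream implements the optional feature, and forward the call with the adjusted remote. A compact sketch of that shape with stand-in types (not the rclone interfaces):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

var errNotImplemented = errors.New("optional feature not implemented")

// upstream stands in for the combine backend's upstream: a name plus an
// optional implementation of one feature.
type upstream struct {
	name       string
	publicLink func(path string) (string, error)
}

// findUpstream picks the upstream owning the first path segment and
// returns the remainder of the path.
func findUpstream(ups map[string]*upstream, remote string) (*upstream, string, error) {
	dir, rest, _ := strings.Cut(remote, "/")
	u, ok := ups[dir]
	if !ok {
		return nil, "", fmt.Errorf("no upstream for %q", dir)
	}
	return u, rest, nil
}

func publicLink(ups map[string]*upstream, remote string) (string, error) {
	u, uRemote, err := findUpstream(ups, remote)
	if err != nil {
		return "", err
	}
	if u.publicLink == nil { // feature not supported by this upstream
		return "", errNotImplemented
	}
	return u.publicLink(uRemote)
}

func main() {
	ups := map[string]*upstream{
		"drive": {name: "drive", publicLink: func(p string) (string, error) {
			return "https://example.invalid/share/" + p, nil
		}},
		"local": {name: "local"}, // no PublicLink support
	}
	fmt.Println(publicLink(ups, "drive/file.txt"))
	fmt.Println(publicLink(ups, "local/file.txt"))
}
```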
// Object describes a wrapped Object
|
||||
//
|
||||
// This is a wrapped Object which knows its path prefix
|
||||
@@ -916,7 +1024,7 @@ func (o *Object) String() string {
|
||||
func (o *Object) Remote() string {
|
||||
newPath, err := o.u.pathAdjustment.do(o.Object.String())
|
||||
if err != nil {
|
||||
fs.Errorf(o, "Bad object: %v", err)
|
||||
fs.Errorf(o.Object, "Bad object: %v", err)
|
||||
return err.Error()
|
||||
}
|
||||
return newPath
|
||||
@@ -988,5 +1096,10 @@ var (
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.ListRer = (*Fs)(nil)
|
||||
_ fs.Shutdowner = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.PutUncheckeder = (*Fs)(nil)
|
||||
_ fs.MergeDirser = (*Fs)(nil)
|
||||
_ fs.CleanUpper = (*Fs)(nil)
|
||||
_ fs.OpenWriterAter = (*Fs)(nil)
|
||||
_ fs.FullObject = (*Object)(nil)
|
||||
)
|
||||
|
||||
@@ -10,6 +10,11 @@ import (
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
var (
|
||||
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect"}
|
||||
unimplementableObjectMethods = []string{}
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
if *fstest.RemoteName == "" {
|
||||
@@ -17,8 +22,8 @@ func TestIntegration(t *testing.T) {
|
||||
}
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: *fstest.RemoteName,
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
|
||||
UnimplementableObjectMethods: []string{"MimeType"},
|
||||
UnimplementableFsMethods: unimplementableFsMethods,
|
||||
UnimplementableObjectMethods: unimplementableObjectMethods,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -35,7 +40,9 @@ func TestLocal(t *testing.T) {
|
||||
{Name: name, Key: "type", Value: "combine"},
|
||||
{Name: name, Key: "upstreams", Value: upstreams},
|
||||
},
|
||||
QuickTestOK: true,
|
||||
QuickTestOK: true,
|
||||
UnimplementableFsMethods: unimplementableFsMethods,
|
||||
UnimplementableObjectMethods: unimplementableObjectMethods,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -51,7 +58,9 @@ func TestMemory(t *testing.T) {
|
||||
{Name: name, Key: "type", Value: "combine"},
|
||||
{Name: name, Key: "upstreams", Value: upstreams},
|
||||
},
|
||||
QuickTestOK: true,
|
||||
QuickTestOK: true,
|
||||
UnimplementableFsMethods: unimplementableFsMethods,
|
||||
UnimplementableObjectMethods: unimplementableObjectMethods,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -68,6 +77,8 @@ func TestMixed(t *testing.T) {
|
||||
{Name: name, Key: "type", Value: "combine"},
|
||||
{Name: name, Key: "upstreams", Value: upstreams},
|
||||
},
|
||||
UnimplementableFsMethods: unimplementableFsMethods,
|
||||
UnimplementableObjectMethods: unimplementableObjectMethods,
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -186,6 +186,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: true,
|
||||
PartialUploads: true,
|
||||
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
||||
// We support reading MIME types no matter the wrapped fs
|
||||
f.features.ReadMimeType = true
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"github.com/rclone/rclone/backend/crypt/pkcs7"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/rclone/rclone/lib/version"
|
||||
"github.com/rfjakob/eme"
|
||||
"golang.org/x/crypto/nacl/secretbox"
|
||||
@@ -37,7 +38,6 @@ const (
|
||||
blockHeaderSize = secretbox.Overhead
|
||||
blockDataSize = 64 * 1024
|
||||
blockSize = blockHeaderSize + blockDataSize
|
||||
encryptedSuffix = ".bin" // when file name encryption is off we add this suffix to make sure the cloud provider doesn't process the file
|
||||
)
|
||||
|
||||
// Errors returned by cipher
|
||||
@@ -53,8 +53,9 @@ var (
|
||||
ErrorEncryptedBadBlock = errors.New("failed to authenticate decrypted block - bad password?")
|
||||
ErrorBadBase32Encoding = errors.New("bad base32 filename encoding")
|
||||
ErrorFileClosed = errors.New("file already closed")
|
||||
ErrorNotAnEncryptedFile = errors.New("not an encrypted file - no \"" + encryptedSuffix + "\" suffix")
|
||||
ErrorNotAnEncryptedFile = errors.New("not an encrypted file - does not match suffix")
|
||||
ErrorBadSeek = errors.New("Seek beyond end of file")
|
||||
ErrorSuffixMissingDot = errors.New("suffix config setting should include a '.'")
|
||||
defaultSalt = []byte{0xA8, 0x0D, 0xF4, 0x3A, 0x8F, 0xBD, 0x03, 0x08, 0xA7, 0xCA, 0xB8, 0x3E, 0x58, 0x1F, 0x86, 0xB1}
|
||||
obfuscQuoteRune = '!'
|
||||
)
|
||||
@@ -169,27 +170,30 @@ func NewNameEncoding(s string) (enc fileNameEncoding, err error) {
|
||||
|
||||
// Cipher defines an encoding and decoding cipher for the crypt backend
|
||||
type Cipher struct {
|
||||
dataKey [32]byte // Key for secretbox
|
||||
nameKey [32]byte // 16,24 or 32 bytes
|
||||
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
|
||||
block gocipher.Block
|
||||
mode NameEncryptionMode
|
||||
fileNameEnc fileNameEncoding
|
||||
buffers sync.Pool // encrypt/decrypt buffers
|
||||
cryptoRand io.Reader // read crypto random numbers from here
|
||||
dirNameEncrypt bool
|
||||
dataKey [32]byte // Key for secretbox
|
||||
nameKey [32]byte // 16,24 or 32 bytes
|
||||
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
|
||||
block gocipher.Block
|
||||
mode NameEncryptionMode
|
||||
fileNameEnc fileNameEncoding
|
||||
buffers sync.Pool // encrypt/decrypt buffers
|
||||
cryptoRand io.Reader // read crypto random numbers from here
|
||||
dirNameEncrypt bool
|
||||
passBadBlocks bool // if set passed bad blocks as zeroed blocks
|
||||
encryptedSuffix string
|
||||
}
|
||||
|
||||
// newCipher initialises the cipher. If salt is "" then it uses a built in salt val
|
||||
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool, enc fileNameEncoding) (*Cipher, error) {
|
||||
c := &Cipher{
|
||||
mode: mode,
|
||||
fileNameEnc: enc,
|
||||
cryptoRand: rand.Reader,
|
||||
dirNameEncrypt: dirNameEncrypt,
|
||||
mode: mode,
|
||||
fileNameEnc: enc,
|
||||
cryptoRand: rand.Reader,
|
||||
dirNameEncrypt: dirNameEncrypt,
|
||||
encryptedSuffix: ".bin",
|
||||
}
|
||||
c.buffers.New = func() interface{} {
|
||||
return make([]byte, blockSize)
|
||||
return new([blockSize]byte)
|
||||
}
|
||||
err := c.Key(password, salt)
|
||||
if err != nil {
|
||||
@@ -198,11 +202,29 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// setEncryptedSuffix set suffix, or an empty string
|
||||
func (c *Cipher) setEncryptedSuffix(suffix string) {
|
||||
if strings.EqualFold(suffix, "none") {
|
||||
c.encryptedSuffix = ""
|
||||
return
|
||||
}
|
||||
if !strings.HasPrefix(suffix, ".") {
|
||||
fs.Errorf(nil, "crypt: bad suffix: %v", ErrorSuffixMissingDot)
|
||||
suffix = "." + suffix
|
||||
}
|
||||
c.encryptedSuffix = suffix
|
||||
}
|
||||
|
||||
// Call to set bad block pass through
|
||||
func (c *Cipher) setPassBadBlocks(passBadBlocks bool) {
|
||||
c.passBadBlocks = passBadBlocks
|
||||
}
|
||||
|
||||
// Key creates all the internal keys from the password passed in using
|
||||
// scrypt.
|
||||
//
|
||||
// If salt is "" we use a fixed salt just to make attackers lives
|
||||
// slighty harder than using no salt.
|
||||
// slightly harder than using no salt.
|
||||
//
|
||||
// Note that empty password makes all 0x00 keys which is used in the
|
||||
// tests.
|
||||
@@ -230,15 +252,12 @@ func (c *Cipher) Key(password, salt string) (err error) {
}

// getBlock gets a block from the pool of size blockSize
func (c *Cipher) getBlock() []byte {
	return c.buffers.Get().([]byte)
func (c *Cipher) getBlock() *[blockSize]byte {
	return c.buffers.Get().(*[blockSize]byte)
}

// putBlock returns a block to the pool of size blockSize
func (c *Cipher) putBlock(buf []byte) {
	if len(buf) != blockSize {
		panic("bad blocksize returned to pool")
	}
func (c *Cipher) putBlock(buf *[blockSize]byte) {
	c.buffers.Put(buf)
}

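The getBlock/putBlock change above switches the cipher's buffer pool from []byte values to *[blockSize]byte pointers. A small sketch, independent of rclone and with an illustrative size constant, of why pooling pointers to fixed-size arrays is preferred: putting a plain slice into a sync.Pool boxes the slice header into an interface{} and allocates on every Put (the pattern staticcheck flags as SA6002), whereas a pointer does not:

```go
package main

import (
	"fmt"
	"sync"
)

// Illustrative size only - the crypt backend uses 64 KiB of data plus the
// secretbox overhead.
const blockSize = 64*1024 + 16

// Pooling *[blockSize]byte means Put stores a single pointer, avoiding a
// fresh allocation for the slice header on every round trip.
var blockPool = sync.Pool{
	New: func() interface{} { return new([blockSize]byte) },
}

func withBlock(fn func(buf []byte)) {
	buf := blockPool.Get().(*[blockSize]byte)
	defer blockPool.Put(buf)
	fn((*buf)[:]) // callers still see an ordinary slice over the array
}

func main() {
	withBlock(func(buf []byte) {
		fmt.Println("reusable buffer of", len(buf), "bytes")
	})
}
```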
@@ -508,7 +527,7 @@ func (c *Cipher) encryptFileName(in string) string {
|
||||
// EncryptFileName encrypts a file path
|
||||
func (c *Cipher) EncryptFileName(in string) string {
|
||||
if c.mode == NameEncryptionOff {
|
||||
return in + encryptedSuffix
|
||||
return in + c.encryptedSuffix
|
||||
}
|
||||
return c.encryptFileName(in)
|
||||
}
|
||||
@@ -568,8 +587,8 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
|
||||
// DecryptFileName decrypts a file path
|
||||
func (c *Cipher) DecryptFileName(in string) (string, error) {
|
||||
if c.mode == NameEncryptionOff {
|
||||
remainingLength := len(in) - len(encryptedSuffix)
|
||||
if remainingLength == 0 || !strings.HasSuffix(in, encryptedSuffix) {
|
||||
remainingLength := len(in) - len(c.encryptedSuffix)
|
||||
if remainingLength == 0 || !strings.HasSuffix(in, c.encryptedSuffix) {
|
||||
return "", ErrorNotAnEncryptedFile
|
||||
}
|
||||
decrypted := in[:remainingLength]
|
||||
@@ -609,7 +628,7 @@ func (n *nonce) pointer() *[fileNonceSize]byte {
|
||||
// fromReader fills the nonce from an io.Reader - normally the OSes
|
||||
// crypto random number generator
|
||||
func (n *nonce) fromReader(in io.Reader) error {
|
||||
read, err := io.ReadFull(in, (*n)[:])
|
||||
read, err := readers.ReadFill(in, (*n)[:])
|
||||
if read != fileNonceSize {
|
||||
return fmt.Errorf("short read of nonce: %w", err)
|
||||
}
|
||||
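The hunk above (and several below) replace io.ReadFull with readers.ReadFill from rclone's lib/readers. Assuming ReadFill keeps reading until the buffer is full or the reader errors, and hands back the raw error (io.EOF included) instead of converting a short read into io.ErrUnexpectedEOF, a simplified stand-in looks like this:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// readFill is a simplified stand-in for readers.ReadFill: keep calling
// Read until buf is full or an error arrives, and report both the byte
// count and that error untranslated.
func readFill(r io.Reader, buf []byte) (n int, err error) {
	for n < len(buf) && err == nil {
		var nn int
		nn, err = r.Read(buf[n:])
		n += nn
	}
	return n, err
}

func main() {
	buf := make([]byte, 8)
	n, err := readFill(strings.NewReader("hi"), buf)
	// Unlike io.ReadFull, a short read reports io.EOF rather than
	// io.ErrUnexpectedEOF, so callers can inspect n and decide, as the
	// decrypter header check in the diff does.
	fmt.Printf("n=%d err=%v\n", n, err)
}
```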
@@ -664,8 +683,8 @@ type encrypter struct {
|
||||
in io.Reader
|
||||
c *Cipher
|
||||
nonce nonce
|
||||
buf []byte
|
||||
readBuf []byte
|
||||
buf *[blockSize]byte
|
||||
readBuf *[blockSize]byte
|
||||
bufIndex int
|
||||
bufSize int
|
||||
err error
|
||||
@@ -690,9 +709,9 @@ func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
|
||||
}
|
||||
}
|
||||
// Copy magic into buffer
|
||||
copy(fh.buf, fileMagicBytes)
|
||||
copy((*fh.buf)[:], fileMagicBytes)
|
||||
// Copy nonce into buffer
|
||||
copy(fh.buf[fileMagicSize:], fh.nonce[:])
|
||||
copy((*fh.buf)[fileMagicSize:], fh.nonce[:])
|
||||
return fh, nil
|
||||
}
|
||||
|
||||
@@ -707,22 +726,20 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
|
||||
if fh.bufIndex >= fh.bufSize {
|
||||
// Read data
|
||||
// FIXME should overlap the reads with a go-routine and 2 buffers?
|
||||
readBuf := fh.readBuf[:blockDataSize]
|
||||
n, err = io.ReadFull(fh.in, readBuf)
|
||||
readBuf := (*fh.readBuf)[:blockDataSize]
|
||||
n, err = readers.ReadFill(fh.in, readBuf)
|
||||
if n == 0 {
|
||||
// err can't be nil since:
|
||||
// n == len(buf) if and only if err == nil.
|
||||
return fh.finish(err)
|
||||
}
|
||||
// possibly err != nil here, but we will process the
|
||||
// data and the next call to ReadFull will return 0, err
|
||||
// data and the next call to ReadFill will return 0, err
|
||||
// Encrypt the block using the nonce
|
||||
secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
|
||||
secretbox.Seal((*fh.buf)[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
|
||||
fh.bufIndex = 0
|
||||
fh.bufSize = blockHeaderSize + n
|
||||
fh.nonce.increment()
|
||||
}
|
||||
n = copy(p, fh.buf[fh.bufIndex:fh.bufSize])
|
||||
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufSize])
|
||||
fh.bufIndex += n
|
||||
return n, nil
|
||||
}
|
||||
@@ -763,8 +780,8 @@ type decrypter struct {
|
||||
nonce nonce
|
||||
initialNonce nonce
|
||||
c *Cipher
|
||||
buf []byte
|
||||
readBuf []byte
|
||||
buf *[blockSize]byte
|
||||
readBuf *[blockSize]byte
|
||||
bufIndex int
|
||||
bufSize int
|
||||
err error
|
||||
@@ -782,12 +799,12 @@ func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
|
||||
limit: -1,
|
||||
}
|
||||
// Read file header (magic + nonce)
|
||||
readBuf := fh.readBuf[:fileHeaderSize]
|
||||
_, err := io.ReadFull(fh.rc, readBuf)
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
readBuf := (*fh.readBuf)[:fileHeaderSize]
|
||||
n, err := readers.ReadFill(fh.rc, readBuf)
|
||||
if n < fileHeaderSize && err == io.EOF {
|
||||
// This read from 0..fileHeaderSize-1 bytes
|
||||
return nil, fh.finishAndClose(ErrorEncryptedFileTooShort)
|
||||
} else if err != nil {
|
||||
} else if err != io.EOF && err != nil {
|
||||
return nil, fh.finishAndClose(err)
|
||||
}
|
||||
// check the magic
|
||||
@@ -845,10 +862,8 @@ func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offse
|
||||
func (fh *decrypter) fillBuffer() (err error) {
|
||||
// FIXME should overlap the reads with a go-routine and 2 buffers?
|
||||
readBuf := fh.readBuf
|
||||
n, err := io.ReadFull(fh.rc, readBuf)
|
||||
n, err := readers.ReadFill(fh.rc, (*readBuf)[:])
|
||||
if n == 0 {
|
||||
// err can't be nil since:
|
||||
// n == len(buf) if and only if err == nil.
|
||||
return err
|
||||
}
|
||||
// possibly err != nil here, but we will process the data and
|
||||
@@ -856,18 +871,25 @@ func (fh *decrypter) fillBuffer() (err error) {
|
||||
|
||||
// Check header + 1 byte exists
|
||||
if n <= blockHeaderSize {
|
||||
if err != nil {
|
||||
if err != nil && err != io.EOF {
|
||||
return err // return pending error as it is likely more accurate
|
||||
}
|
||||
return ErrorEncryptedFileBadHeader
|
||||
}
|
||||
// Decrypt the block using the nonce
|
||||
_, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
|
||||
_, ok := secretbox.Open((*fh.buf)[:0], (*readBuf)[:n], fh.nonce.pointer(), &fh.c.dataKey)
|
||||
if !ok {
|
||||
if err != nil {
|
||||
if err != nil && err != io.EOF {
|
||||
return err // return pending error as it is likely more accurate
|
||||
}
|
||||
return ErrorEncryptedBadBlock
|
||||
if !fh.c.passBadBlocks {
|
||||
return ErrorEncryptedBadBlock
|
||||
}
|
||||
fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
|
||||
// Zero out the bad block and continue
|
||||
for i := range (*fh.buf)[:n] {
|
||||
(*fh.buf)[i] = 0
|
||||
}
|
||||
}
|
||||
fh.bufIndex = 0
|
||||
fh.bufSize = n - blockHeaderSize
|
||||
@@ -893,7 +915,7 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
|
||||
if fh.limit >= 0 && fh.limit < int64(toCopy) {
|
||||
toCopy = int(fh.limit)
|
||||
}
|
||||
n = copy(p, fh.buf[fh.bufIndex:fh.bufIndex+toCopy])
|
||||
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufIndex+toCopy])
|
||||
fh.bufIndex += n
|
||||
if fh.limit >= 0 {
|
||||
fh.limit -= int64(n)
|
||||
@@ -904,9 +926,8 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// calculateUnderlying converts an (offset, limit) in a crypted file
|
||||
// into an (underlyingOffset, underlyingLimit) for the underlying
|
||||
// file.
|
||||
// calculateUnderlying converts an (offset, limit) in an encrypted file
|
||||
// into an (underlyingOffset, underlyingLimit) for the underlying file.
|
||||
//
|
||||
// It also returns number of bytes to discard after reading the first
|
||||
// block and number of blocks this is from the start so the nonce can
|
||||
|
||||
@@ -27,14 +27,14 @@ func TestNewNameEncryptionMode(t *testing.T) {
|
||||
{"off", NameEncryptionOff, ""},
|
||||
{"standard", NameEncryptionStandard, ""},
|
||||
{"obfuscate", NameEncryptionObfuscated, ""},
|
||||
{"potato", NameEncryptionOff, "Unknown file name encryption mode \"potato\""},
|
||||
{"potato", NameEncryptionOff, "unknown file name encryption mode \"potato\""},
|
||||
} {
|
||||
actual, actualErr := NewNameEncryptionMode(test.in)
|
||||
assert.Equal(t, actual, test.expected)
|
||||
if test.expectedErr == "" {
|
||||
assert.NoError(t, actualErr)
|
||||
} else {
|
||||
assert.Error(t, actualErr, test.expectedErr)
|
||||
assert.EqualError(t, actualErr, test.expectedErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -405,6 +405,13 @@ func TestNonStandardEncryptFileName(t *testing.T) {
|
||||
// Off mode
|
||||
c, _ := newCipher(NameEncryptionOff, "", "", true, nil)
|
||||
assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
|
||||
// Off mode with custom suffix
|
||||
c, _ = newCipher(NameEncryptionOff, "", "", true, nil)
|
||||
c.setEncryptedSuffix(".jpg")
|
||||
assert.Equal(t, "1/12/123.jpg", c.EncryptFileName("1/12/123"))
|
||||
// Off mode with empty suffix
|
||||
c.setEncryptedSuffix("none")
|
||||
assert.Equal(t, "1/12/123", c.EncryptFileName("1/12/123"))
|
||||
// Obfuscation mode
|
||||
c, _ = newCipher(NameEncryptionObfuscated, "", "", true, nil)
|
||||
assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
|
||||
@@ -483,21 +490,27 @@ func TestNonStandardDecryptFileName(t *testing.T) {
|
||||
in string
|
||||
expected string
|
||||
expectedErr error
|
||||
customSuffix string
|
||||
}{
|
||||
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
|
||||
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
|
||||
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
|
||||
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
|
||||
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
|
||||
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
|
||||
{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
|
||||
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
|
||||
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
|
||||
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
|
||||
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
|
||||
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
|
||||
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil, ""},
|
||||
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile, ""},
|
||||
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile, ""},
|
||||
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil, ""},
|
||||
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil, ""},
|
||||
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil, ""},
|
||||
{NameEncryptionOff, true, "1/12/123.jpg", "1/12/123", nil, ".jpg"},
|
||||
{NameEncryptionOff, true, "1/12/123", "1/12/123", nil, "none"},
|
||||
{NameEncryptionObfuscated, true, "!.hello", "hello", nil, ""},
|
||||
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile, ""},
|
||||
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil, ""},
|
||||
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil, ""},
|
||||
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil, ""},
|
||||
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil, ""},
|
||||
} {
|
||||
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
|
||||
if test.customSuffix != "" {
|
||||
c.setEncryptedSuffix(test.customSuffix)
|
||||
}
|
||||
actual, actualErr := c.DecryptFileName(test.in)
|
||||
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
|
||||
assert.Equal(t, test.expected, actual, what)
|
||||
@@ -726,7 +739,7 @@ func TestNonceFromReader(t *testing.T) {
|
||||
assert.Equal(t, nonce{'1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o'}, x)
|
||||
buf = bytes.NewBufferString("123456789abcdefghijklmn")
|
||||
err = x.fromReader(buf)
|
||||
assert.Error(t, err, "short read of nonce")
|
||||
assert.EqualError(t, err, "short read of nonce: EOF")
|
||||
}
|
||||
|
||||
func TestNonceFromBuf(t *testing.T) {
|
||||
@@ -1050,7 +1063,7 @@ func TestRandomSource(t *testing.T) {
|
||||
_, _ = source.Read(buf)
|
||||
sink = newRandomSource(1e8)
|
||||
_, err = io.Copy(sink, source)
|
||||
assert.Error(t, err, "Error in stream")
|
||||
assert.EqualError(t, err, "Error in stream at 1")
|
||||
}
|
||||
|
||||
type zeroes struct{}
|
||||
@@ -1167,13 +1180,13 @@ func TestNewEncrypter(t *testing.T) {
|
||||
fh, err := c.newEncrypter(z, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, nonce{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.nonce)
|
||||
assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.buf[:32])
|
||||
assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, (*fh.buf)[:32])
|
||||
|
||||
// Test error path
|
||||
c.cryptoRand = bytes.NewBufferString("123456789abcdefghijklmn")
|
||||
fh, err = c.newEncrypter(z, nil)
|
||||
assert.Nil(t, fh)
|
||||
assert.Error(t, err, "short read of nonce")
|
||||
assert.EqualError(t, err, "short read of nonce: EOF")
|
||||
}
|
||||
|
||||
// Test the stream returning 0, io.ErrUnexpectedEOF - this used to
|
||||
@@ -1224,7 +1237,7 @@ func TestNewDecrypter(t *testing.T) {
|
||||
cd := newCloseDetector(bytes.NewBuffer(file0[:i]))
|
||||
fh, err = c.newDecrypter(cd)
|
||||
assert.Nil(t, fh)
|
||||
assert.Error(t, err, ErrorEncryptedFileTooShort.Error())
|
||||
assert.EqualError(t, err, ErrorEncryptedFileTooShort.Error())
|
||||
assert.Equal(t, 1, cd.closed)
|
||||
}
|
||||
|
||||
@@ -1232,7 +1245,7 @@ func TestNewDecrypter(t *testing.T) {
|
||||
cd = newCloseDetector(er)
|
||||
fh, err = c.newDecrypter(cd)
|
||||
assert.Nil(t, fh)
|
||||
assert.Error(t, err, "potato")
|
||||
assert.EqualError(t, err, "potato")
|
||||
assert.Equal(t, 1, cd.closed)
|
||||
|
||||
// bad magic
|
||||
@@ -1243,7 +1256,7 @@ func TestNewDecrypter(t *testing.T) {
|
||||
cd := newCloseDetector(bytes.NewBuffer(file0copy))
|
||||
fh, err := c.newDecrypter(cd)
|
||||
assert.Nil(t, fh)
|
||||
assert.Error(t, err, ErrorEncryptedBadMagic.Error())
|
||||
assert.EqualError(t, err, ErrorEncryptedBadMagic.Error())
|
||||
file0copy[i] ^= 0x1
|
||||
assert.Equal(t, 1, cd.closed)
|
||||
}
|
||||
@@ -1495,8 +1508,10 @@ func TestDecrypterRead(t *testing.T) {
|
||||
case i == fileHeaderSize:
|
||||
// This would normally produce an error *except* on the first block
|
||||
expectedErr = nil
|
||||
case i <= fileHeaderSize+blockHeaderSize:
|
||||
expectedErr = ErrorEncryptedFileBadHeader
|
||||
default:
|
||||
expectedErr = io.ErrUnexpectedEOF
|
||||
expectedErr = ErrorEncryptedBadBlock
|
||||
}
|
||||
if expectedErr != nil {
|
||||
assert.EqualError(t, err, expectedErr.Error(), what)
|
||||
@@ -1514,7 +1529,7 @@ func TestDecrypterRead(t *testing.T) {
|
||||
fh, err := c.newDecrypter(cd)
|
||||
assert.NoError(t, err)
|
||||
_, err = io.ReadAll(fh)
|
||||
assert.Error(t, err, "potato")
|
||||
assert.EqualError(t, err, "potato")
|
||||
assert.Equal(t, 0, cd.closed)
|
||||
|
||||
// Test corrupting the input
|
||||
@@ -1525,15 +1540,26 @@ func TestDecrypterRead(t *testing.T) {
|
||||
file16copy[i] ^= 0xFF
|
||||
fh, err := c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
|
||||
if i < fileMagicSize {
|
||||
assert.Error(t, err, ErrorEncryptedBadMagic.Error())
|
||||
assert.EqualError(t, err, ErrorEncryptedBadMagic.Error())
|
||||
assert.Nil(t, fh)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
_, err = io.ReadAll(fh)
|
||||
assert.Error(t, err, ErrorEncryptedFileBadHeader.Error())
|
||||
assert.EqualError(t, err, ErrorEncryptedBadBlock.Error())
|
||||
}
|
||||
file16copy[i] ^= 0xFF
|
||||
}
|
||||
|
||||
// Test that we can corrupt a byte and read zeroes if
|
||||
// passBadBlocks is set
|
||||
copy(file16copy, file16)
|
||||
file16copy[len(file16copy)-1] ^= 0xFF
|
||||
c.passBadBlocks = true
|
||||
fh, err = c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
|
||||
assert.NoError(t, err)
|
||||
buf, err := io.ReadAll(fh)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, make([]byte, 16), buf)
|
||||
}
|
||||
|
||||
func TestDecrypterClose(t *testing.T) {
|
||||
@@ -1554,7 +1580,7 @@ func TestDecrypterClose(t *testing.T) {
|
||||
|
||||
// double close
|
||||
err = fh.Close()
|
||||
assert.Error(t, err, ErrorFileClosed.Error())
|
||||
assert.EqualError(t, err, ErrorFileClosed.Error())
|
||||
assert.Equal(t, 1, cd.closed)
|
||||
|
||||
// try again reading the file this time
|
||||
@@ -1581,8 +1607,6 @@ func TestPutGetBlock(t *testing.T) {
|
||||
block := c.getBlock()
|
||||
c.putBlock(block)
|
||||
c.putBlock(block)
|
||||
|
||||
assert.Panics(t, func() { c.putBlock(block[:len(block)-1]) })
|
||||
}
|
||||
|
||||
func TestKey(t *testing.T) {
|
||||
|
||||
@@ -48,7 +48,7 @@ func init() {
|
||||
Help: "Very simple filename obfuscation.",
|
||||
}, {
|
||||
Value: "off",
|
||||
Help: "Don't encrypt the file names.\nAdds a \".bin\" extension only.",
|
||||
Help: "Don't encrypt the file names.\nAdds a \".bin\", or \"suffix\" extension only.",
|
||||
},
|
||||
},
|
||||
}, {
|
||||
@@ -79,7 +79,9 @@ NB If filename_encryption is "off" then this option will do nothing.`,
|
||||
}, {
|
||||
Name: "server_side_across_configs",
|
||||
Default: false,
|
||||
Help: `Allow server-side operations (e.g. copy) to work across different crypt configs.
|
||||
Help: `Deprecated: use --server-side-across-configs instead.
|
||||
|
||||
Allow server-side operations (e.g. copy) to work across different crypt configs.
|
||||
|
||||
Normally this option is not what you want, but if you have two crypts
|
||||
pointing to the same backend you can use it.
|
||||
@@ -119,6 +121,15 @@ names, or for debugging purposes.`,
|
||||
Help: "Encrypt file data.",
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Name: "pass_bad_blocks",
|
||||
Help: `If set this will pass bad blocks through as all 0.
|
||||
|
||||
This should not be set in normal operation, it should only be set if
|
||||
trying to recover an encrypted file with errors and it is desired to
|
||||
recover as much of the file as possible.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "filename_encoding",
|
||||
Help: `How to encode the encrypted filename to text string.
|
||||
@@ -138,10 +149,18 @@ length and if it's case sensitive.`,
|
||||
},
|
||||
{
|
||||
Value: "base32768",
|
||||
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive)",
|
||||
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive, Dropbox)",
|
||||
},
|
||||
},
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "suffix",
|
||||
Help: `If this is set it will override the default suffix of ".bin".
|
||||
|
||||
Setting suffix to "none" will result in an empty suffix. This may be useful
|
||||
when the path length is critical.`,
|
||||
Default: ".bin",
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
@@ -174,6 +193,8 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to make cipher: %w", err)
|
||||
}
|
||||
cipher.setEncryptedSuffix(opt.Suffix)
|
||||
cipher.setPassBadBlocks(opt.PassBadBlocks)
|
||||
return cipher, nil
|
||||
}
|
||||
|
||||
@@ -247,6 +268,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: true,
|
||||
PartialUploads: true,
|
||||
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
||||
|
||||
return f, err
|
||||
@@ -262,7 +284,9 @@ type Options struct {
|
||||
Password2 string `config:"password2"`
|
||||
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
|
||||
ShowMapping bool `config:"show_mapping"`
|
||||
PassBadBlocks bool `config:"pass_bad_blocks"`
|
||||
FilenameEncoding string `config:"filename_encoding"`
|
||||
Suffix string `config:"suffix"`
|
||||
}
|
||||
|
||||
// Fs represents a wrapped fs.Fs
|
||||
@@ -454,7 +478,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
|
||||
if err != nil {
|
||||
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
|
||||
}
|
||||
return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
|
||||
return nil, fmt.Errorf("corrupted on transfer: %v encrypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
|
||||
}
|
||||
fs.Debugf(src, "%v = %s OK", ht, srcHash)
|
||||
}
|
||||
|
||||
@@ -202,7 +202,7 @@ func init() {
|
||||
m.Set("root_folder_id", "appDataFolder")
|
||||
}
|
||||
|
||||
if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" {
|
||||
if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" && !opt.EnvAuth {
|
||||
return oauthutil.ConfigOut("teamdrive", &oauthutil.Options{
|
||||
OAuth2Config: driveConfig,
|
||||
})
|
||||
@@ -277,20 +277,23 @@ Leave blank normally.
|
||||
Fill in to access "Computers" folders (see docs), or for rclone to use
|
||||
a non root folder as its starting point.
|
||||
`,
|
||||
Advanced: true,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "service_account_file",
|
||||
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
|
||||
}, {
|
||||
Name: "service_account_credentials",
|
||||
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
Advanced: true,
|
||||
Name: "service_account_credentials",
|
||||
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "team_drive",
|
||||
Help: "ID of the Shared Drive (Team Drive).",
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
Advanced: true,
|
||||
Name: "team_drive",
|
||||
Help: "ID of the Shared Drive (Team Drive).",
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "auth_owner_only",
|
||||
Default: false,
|
||||
@@ -416,10 +419,11 @@ date is used.`,
|
||||
Help: "Size of listing chunk 100-1000, 0 to disable.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "impersonate",
|
||||
Default: "",
|
||||
Help: `Impersonate this user when using a service account.`,
|
||||
Advanced: true,
|
||||
Name: "impersonate",
|
||||
Default: "",
|
||||
Help: `Impersonate this user when using a service account.`,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "alternate_export",
|
||||
Default: false,
|
||||
@@ -499,7 +503,9 @@ need to use --ignore size also.`,
|
||||
}, {
|
||||
Name: "server_side_across_configs",
|
||||
Default: false,
|
||||
Help: `Allow server-side operations (e.g. copy) to work across different drive configs.
|
||||
Help: `Deprecated: use --server-side-across-configs instead.
|
||||
|
||||
Allow server-side operations (e.g. copy) to work across different drive configs.
|
||||
|
||||
This can be useful if you wish to do a server-side copy between two
|
||||
different Google drives. Note that this isn't enabled by default
|
||||
@@ -590,7 +596,8 @@ Note also that opening the folder once in the web interface (with the
|
||||
user you've authenticated rclone with) seems to be enough so that the
|
||||
resource key is no needed.
|
||||
`,
|
||||
Advanced: true,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -598,6 +605,18 @@ resource key is no needed.
|
||||
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
|
||||
// Don't encode / as it's a valid name character in drive.
|
||||
Default: encoder.EncodeInvalidUtf8,
|
||||
}, {
|
||||
Name: "env_auth",
|
||||
Help: "Get IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "false",
|
||||
Help: "Enter credentials in the next step.",
|
||||
}, {
|
||||
Value: "true",
|
||||
Help: "Get GCP IAM credentials from the environment (env vars or IAM).",
|
||||
}},
|
||||
}}...),
|
||||
})
|
||||
|
||||
@@ -654,6 +673,7 @@ type Options struct {
|
||||
SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"`
|
||||
ResourceKey string `config:"resource_key"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
EnvAuth bool `config:"env_auth"`
|
||||
}
|
||||
|
||||
// Fs represents a remote drive server
|
||||
@@ -761,7 +781,7 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
|
||||
} else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" {
|
||||
fs.Errorf(f, "Received download limit error: %v", err)
|
||||
return false, fserrors.FatalError(err)
|
||||
} else if f.opt.StopOnUploadLimit && reason == "quotaExceeded" {
|
||||
} else if f.opt.StopOnUploadLimit && (reason == "quotaExceeded" || reason == "storageQuotaExceeded") {
|
||||
fs.Errorf(f, "Received upload limit error: %v", err)
|
||||
return false, fserrors.FatalError(err)
|
||||
} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
|
||||
@@ -1122,6 +1142,12 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create oauth client from service account: %w", err)
|
||||
}
|
||||
} else if opt.EnvAuth {
|
||||
scopes := driveScopes(opt.Scope)
|
||||
oAuthClient, err = google.DefaultClient(ctx, scopes...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create client from environment: %w", err)
|
||||
}
|
||||
} else {
|
||||
oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(ctx, opt))
|
||||
if err != nil {
|
||||
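The new env_auth branch above builds the Drive client from Google Application Default Credentials. A minimal sketch of the same call outside rclone; the scope and the test request are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()
	// DefaultClient walks the Application Default Credentials chain:
	// GOOGLE_APPLICATION_CREDENTIALS, gcloud user credentials, then the
	// metadata server when running on GCP.
	client, err := google.DefaultClient(ctx, "https://www.googleapis.com/auth/drive")
	if err != nil {
		log.Fatalf("failed to create client from environment: %v", err)
	}
	resp, err := client.Get("https://www.googleapis.com/drive/v3/about?fields=user")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```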
@@ -1493,6 +1519,9 @@ func (f *Fs) newObjectWithExportInfo(
|
||||
// NewObject finds the Object at remote. If it can't be found
|
||||
// it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
if strings.HasSuffix(remote, "/") {
|
||||
return nil, fs.ErrorIsDir
|
||||
}
|
||||
info, extension, exportName, exportMimeType, isDocument, err := f.getRemoteInfoWithExport(ctx, remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -2880,6 +2909,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
|
||||
if f.rootFolderID == "appDataFolder" {
|
||||
changesCall.Spaces("appDataFolder")
|
||||
}
|
||||
changesCall.RestrictToMyDrive(!f.opt.SharedWithMe)
|
||||
changeList, err = changesCall.Context(ctx).Do()
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
@@ -3861,7 +3891,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newO, err := o.fs.newObjectWithInfo(ctx, src.Remote(), info)
|
||||
newO, err := o.fs.newObjectWithInfo(ctx, o.remote, info)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -243,6 +243,15 @@ func (f *Fs) InternalTestShouldRetry(t *testing.T) {
|
||||
quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403)
|
||||
assert.False(t, quotaExceededRetry)
|
||||
assert.Equal(t, quotaExceededError, expectedQuotaError)
|
||||
|
||||
sqEItem := googleapi.ErrorItem{
|
||||
Reason: "storageQuotaExceeded",
|
||||
}
|
||||
generic403.Errors[0] = sqEItem
|
||||
expectedStorageQuotaError := fserrors.FatalError(&generic403)
|
||||
storageQuotaExceededRetry, storageQuotaExceededError := f.shouldRetry(ctx, &generic403)
|
||||
assert.False(t, storageQuotaExceededRetry)
|
||||
assert.Equal(t, storageQuotaExceededError, expectedStorageQuotaError)
|
||||
}
|
||||
|
||||
func (f *Fs) InternalTestDocumentImport(t *testing.T) {
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/async"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
@@ -140,55 +139,12 @@ func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionF
|
||||
return complete, nil
|
||||
}
|
||||
|
||||
// finishBatchJobStatus waits for the batch to complete returning completed entries
|
||||
func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *files.UploadSessionFinishBatchLaunch) (complete *files.UploadSessionFinishBatchResult, err error) {
|
||||
if launchBatchStatus.AsyncJobId == "" {
|
||||
return nil, errors.New("wait for batch completion: empty job ID")
|
||||
}
|
||||
var batchStatus *files.UploadSessionFinishBatchJobStatus
|
||||
sleepTime := 100 * time.Millisecond
|
||||
const maxSleepTime = 1 * time.Second
|
||||
startTime := time.Now()
|
||||
try := 1
|
||||
for {
|
||||
remaining := time.Duration(b.f.opt.BatchCommitTimeout) - time.Since(startTime)
|
||||
if remaining < 0 {
|
||||
break
|
||||
}
|
||||
err = b.f.pacer.Call(func() (bool, error) {
|
||||
batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
|
||||
AsyncJobId: launchBatchStatus.AsyncJobId,
|
||||
})
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d remaining %v", sleepTime, err, try, remaining)
|
||||
} else {
|
||||
if batchStatus.Tag == "complete" {
|
||||
fs.Debugf(b.f, "Upload batch completed in %v", time.Since(startTime))
|
||||
return batchStatus.Complete, nil
|
||||
}
|
||||
fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d remaining %v", sleepTime, batchStatus.Tag, try, remaining)
|
||||
}
|
||||
time.Sleep(sleepTime)
|
||||
sleepTime *= 2
|
||||
if sleepTime > maxSleepTime {
|
||||
sleepTime = maxSleepTime
|
||||
}
|
||||
try++
|
||||
}
|
||||
if err == nil {
|
||||
err = errors.New("batch didn't complete")
|
||||
}
|
||||
return nil, fmt.Errorf("wait for batch failed after %d tries in %v: %w", try, time.Since(startTime), err)
|
||||
}
|
||||
|
||||
// commit a batch
|
||||
func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
|
||||
// If commit fails then signal clients if sync
|
||||
var signalled = b.async
|
||||
defer func() {
|
||||
if err != nil && signalled {
|
||||
if err != nil && !signalled {
|
||||
// Signal to clients that there was an error
|
||||
for _, result := range results {
|
||||
result <- batcherResponse{err: err}
|
||||
|
||||
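The one-character fix above (signalled → !signalled) restores the intended behaviour: when the commit fails, notify only the waiters that have not already been answered. A stripped-down sketch of that defer-based signalling pattern with illustrative types (not the rclone batcher):

```go
package main

import (
	"errors"
	"fmt"
)

type response struct{ err error }

// commit answers every waiter exactly once: the happy path replies inline
// and flips signalled; the deferred handler only fires for waiters that
// were not yet answered when an error escaped early.
func commit(waiters []chan response, fail bool) (err error) {
	signalled := false
	defer func() {
		if err != nil && !signalled {
			for _, w := range waiters {
				w <- response{err: err}
			}
		}
	}()

	if fail {
		return errors.New("commit failed")
	}
	for _, w := range waiters {
		w <- response{}
	}
	signalled = true
	return nil
}

func main() {
	w := make(chan response, 1)
	_ = commit([]chan response{w}, true)
	fmt.Println("waiter got:", (<-w).err)
}
```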
@@ -58,7 +58,7 @@ import (
|
||||
const (
|
||||
rcloneClientID = "5jcck7diasz0rqy"
|
||||
rcloneEncryptedClientSecret = "fRS5vVLr2v6FbyXYnIgjwBuUAt0osq_QZTXAEcmZ7g"
|
||||
minSleep = 10 * time.Millisecond
|
||||
defaultMinSleep = fs.Duration(10 * time.Millisecond)
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
// Upload chunk size - setting too small makes uploads slow.
|
||||
@@ -182,8 +182,9 @@ client_secret) to use this option as currently rclone's default set of
|
||||
permissions doesn't include "members.read". This can be added once
|
||||
v1.55 or later is in use everywhere.
|
||||
`,
|
||||
Default: "",
|
||||
Advanced: true,
|
||||
Default: "",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "shared_files",
|
||||
Help: `Instructs rclone to work on individual shared files.
|
||||
@@ -260,8 +261,8 @@ uploaded.
|
||||
The default for this is 0 which means rclone will choose a sensible
|
||||
default based on the batch_mode in use.
|
||||
|
||||
- batch_mode: async - default batch_timeout is 500ms
|
||||
- batch_mode: sync - default batch_timeout is 10s
|
||||
- batch_mode: async - default batch_timeout is 10s
|
||||
- batch_mode: sync - default batch_timeout is 500ms
|
||||
- batch_mode: off - not in use
|
||||
`,
|
||||
Default: fs.Duration(0),
|
||||
@@ -271,6 +272,11 @@ default based on the batch_mode in use.
|
||||
Help: `Max time to wait for a batch to finish committing`,
|
||||
Default: fs.Duration(10 * time.Minute),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "pacer_min_sleep",
|
||||
Default: defaultMinSleep,
|
||||
Help: "Minimum time to sleep between API calls.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -299,6 +305,7 @@ type Options struct {
|
||||
BatchTimeout fs.Duration `config:"batch_timeout"`
|
||||
BatchCommitTimeout fs.Duration `config:"batch_commit_timeout"`
|
||||
AsyncBatch bool `config:"async_batch"`
|
||||
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
@@ -442,7 +449,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
name: name,
|
||||
opt: *opt,
|
||||
ci: ci,
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
}
|
||||
f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout))
|
||||
if err != nil {
|
||||
@@ -536,7 +543,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
// if the moint failed we have to abort here
|
||||
// if the mount failed we have to abort here
|
||||
}
|
||||
// if the mount succeeded it's now a normal folder in the users root namespace
|
||||
// we disable shared folder mode and proceed normally
|
||||
@@ -719,7 +726,7 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
|
||||
}
|
||||
for _, entry := range res.Entries {
|
||||
leaf := f.opt.Enc.ToStandardName(entry.Name)
|
||||
d := fs.NewDir(leaf, time.Now()).SetID(entry.SharedFolderId)
|
||||
d := fs.NewDir(leaf, time.Time{}).SetID(entry.SharedFolderId)
|
||||
entries = append(entries, d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -906,7 +913,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
leaf := f.opt.Enc.ToStandardName(path.Base(entryPath))
|
||||
remote := path.Join(dir, leaf)
|
||||
if folderInfo != nil {
|
||||
d := fs.NewDir(remote, time.Now()).SetID(folderInfo.Id)
|
||||
d := fs.NewDir(remote, time.Time{}).SetID(folderInfo.Id)
|
||||
entries = append(entries, d)
|
||||
} else if fileInfo != nil {
|
||||
o, err := f.newObjectWithInfo(ctx, remote, fileInfo)
|
||||
|
||||
@@ -118,6 +118,9 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
|
||||
Single: 1,
|
||||
Pass: f.opt.FilePassword,
|
||||
}
|
||||
if f.opt.CDN {
|
||||
request.CDN = 1
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/download/get_token.cgi",
|
||||
@@ -473,7 +476,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("didnt got an upload node: %w", err)
return nil, fmt.Errorf("didn't get an upload node: %w", err)
}

// fs.Debugf(f, "Got Upload node")

@@ -38,8 +38,9 @@ func init() {
|
||||
Description: "1Fichier",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
|
||||
Name: "api_key",
|
||||
Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
|
||||
Name: "api_key",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Help: "If you want to download a shared folder, add this parameter.",
|
||||
Name: "shared_folder",
|
||||
@@ -54,6 +55,11 @@ func init() {
|
||||
Name: "folder_password",
|
||||
Advanced: true,
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Help: "Set if you wish to use CDN download links.",
|
||||
Name: "cdn",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -89,6 +95,7 @@ type Options struct {
|
||||
SharedFolder string `config:"shared_folder"`
|
||||
FilePassword string `config:"file_password"`
|
||||
FolderPassword string `config:"folder_password"`
|
||||
CDN bool `config:"cdn"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
@@ -333,7 +340,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
// checking to see if there is one already - use Put() for that.
|
||||
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
|
||||
if size > int64(300e9) {
|
||||
return nil, errors.New("File too big, cant upload")
|
||||
return nil, errors.New("File too big, can't upload")
|
||||
} else if size == 0 {
|
||||
return nil, fs.ErrorCantUploadEmptyFiles
|
||||
}
|
||||
|
||||
@@ -20,6 +20,7 @@ type DownloadRequest struct {
|
||||
URL string `json:"url"`
|
||||
Single int `json:"single"`
|
||||
Pass string `json:"pass,omitempty"`
|
||||
CDN int `json:"cdn,omitempty"`
|
||||
}
|
||||
|
||||
// RemoveFolderRequest is the request structure of the corresponding request
|
||||
|
||||
@@ -84,6 +84,7 @@ Leave blank normally.
|
||||
|
||||
Fill in to make rclone start with directory of a given ID.
|
||||
`,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "permanent_token",
|
||||
Help: `Permanent Authentication Token.
|
||||
@@ -97,6 +98,7 @@ These tokens are normally valid for several years.
|
||||
|
||||
For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens
|
||||
`,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "token",
|
||||
Help: `Session Token.
|
||||
@@ -106,7 +108,8 @@ usually valid for 1 hour.
|
||||
|
||||
Don't set this value - rclone will set it automatically.
|
||||
`,
|
||||
Advanced: true,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "token_expiry",
|
||||
Help: `Token expiry time.
|
||||
|
||||
@@ -48,13 +48,15 @@ func init() {
|
||||
Description: "FTP",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "host",
|
||||
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
|
||||
Required: true,
|
||||
Name: "host",
|
||||
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "FTP username.",
|
||||
Default: currentUser,
|
||||
Name: "user",
|
||||
Help: "FTP username.",
|
||||
Default: currentUser,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "port",
|
||||
Help: "FTP port number.",
|
||||
@@ -315,18 +317,33 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
|
||||
return len(p), nil
|
||||
}

// Return a *textproto.Error if err contains one or nil otherwise
func textprotoError(err error) (errX *textproto.Error) {
if errors.As(err, &errX) {
return errX
}
return nil
}

// returns true if this FTP error should be retried
func isRetriableFtpError(err error) bool {
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusNotAvailable, ftp.StatusTransfertAborted:
return true
}
}
return false
}

// shouldRetry returns a boolean as to whether this err deserve to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusNotAvailable:
return true, err
}
if isRetriableFtpError(err) {
return true, err
}
return fserrors.ShouldRetry(err), err
}
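The textprotoError helper introduced above uses errors.As so that a *textproto.Error is still found when it has been wrapped (for example with fmt.Errorf and %w), which the old type switch in shouldRetry could not do. A small self-contained illustration of the difference, using a made-up wrapped error rather than anything from the FTP backend:

    package main

    import (
        "errors"
        "fmt"
        "net/textproto"
    )

    func main() {
        base := &textproto.Error{Code: 421, Msg: "service not available"}
        wrapped := fmt.Errorf("open: %w", base)

        // A type switch only matches the outer wrapper, so this arm never fires.
        switch wrapped.(type) {
        case *textproto.Error:
            fmt.Println("type switch matched")
        default:
            fmt.Println("type switch missed the wrapped error")
        }

        // errors.As walks the wrap chain and recovers the protocol error and its code.
        var tpErr *textproto.Error
        if errors.As(wrapped, &tpErr) {
            fmt.Println("errors.As found code", tpErr.Code)
        }
    }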
@@ -463,8 +480,7 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
|
||||
*pc = nil
|
||||
if err != nil {
|
||||
// If not a regular FTP error code then check the connection
|
||||
var tpErr *textproto.Error
|
||||
if !errors.As(err, &tpErr) {
|
||||
if tpErr := textprotoError(err); tpErr != nil {
|
||||
nopErr := c.NoOp()
|
||||
if nopErr != nil {
|
||||
fs.Debugf(f, "Connection failed, closing: %v", nopErr)
|
||||
@@ -566,6 +582,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
PartialUploads: true,
|
||||
}).Fill(ctx, f)
|
||||
// set the pool drainer timer going
|
||||
if f.opt.IdleTimeout > 0 {
|
||||
@@ -613,8 +630,7 @@ func (f *Fs) Shutdown(ctx context.Context) error {
|
||||
|
||||
// translateErrorFile turns FTP errors into rclone errors if possible for a file
|
||||
func translateErrorFile(err error) error {
|
||||
switch errX := err.(type) {
|
||||
case *textproto.Error:
|
||||
if errX := textprotoError(err); errX != nil {
|
||||
switch errX.Code {
|
||||
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
|
||||
err = fs.ErrorObjectNotFound
|
||||
@@ -625,8 +641,7 @@ func translateErrorFile(err error) error {
|
||||
|
||||
// translateErrorDir turns FTP errors into rclone errors if possible for a directory
|
||||
func translateErrorDir(err error) error {
|
||||
switch errX := err.(type) {
|
||||
case *textproto.Error:
|
||||
if errX := textprotoError(err); errX != nil {
|
||||
switch errX.Code {
|
||||
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
|
||||
err = fs.ErrorDirNotFound
|
||||
@@ -680,6 +695,12 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
return nil, nil
|
||||
}
|
||||
if errX := textprotoError(err); errX != nil {
|
||||
switch errX.Code {
|
||||
case ftp.StatusBadArguments:
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
if entry != nil {
|
||||
@@ -917,8 +938,7 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
|
||||
}
|
||||
err = c.MakeDir(f.dirFromStandardPath(abspath))
|
||||
f.putFtpConnection(&c, err)
|
||||
switch errX := err.(type) {
|
||||
case *textproto.Error:
|
||||
if errX := textprotoError(err); errX != nil {
|
||||
switch errX.Code {
|
||||
case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
|
||||
err = nil
|
||||
@@ -1087,7 +1107,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
// SetModTime sets the modification time of the object
|
||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
if !o.fs.fSetTime {
|
||||
fs.Errorf(o.fs, "SetModTime is not supported")
|
||||
fs.Debugf(o.fs, "SetModTime is not supported")
|
||||
return nil
|
||||
}
|
||||
c, err := o.fs.getFtpConnection(ctx)
|
||||
@@ -1159,8 +1179,7 @@ func (f *ftpReadCloser) Close() error {
|
||||
// mask the error if it was caused by a premature close
|
||||
// NB StatusAboutToSend is to work around a bug in pureftpd
|
||||
// See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
|
||||
switch errX := err.(type) {
|
||||
case *textproto.Error:
|
||||
if errX := textprotoError(err); errX != nil {
|
||||
switch errX.Code {
|
||||
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend:
|
||||
err = nil
|
||||
@@ -1186,15 +1205,26 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
|
||||
}
|
||||
}
|
||||
}
|
||||
c, err := o.fs.getFtpConnection(ctx)
|
||||
|
||||
var (
|
||||
fd *ftp.Response
|
||||
c *ftp.ServerConn
|
||||
)
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
c, err = o.fs.getFtpConnection(ctx)
|
||||
if err != nil {
|
||||
return false, err // getFtpConnection has retries already
|
||||
}
|
||||
fd, err = c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
|
||||
if err != nil {
|
||||
o.fs.putFtpConnection(&c, err)
|
||||
}
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("open: %w", err)
|
||||
}
|
||||
fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
|
||||
if err != nil {
|
||||
o.fs.putFtpConnection(&c, err)
|
||||
return nil, fmt.Errorf("open: %w", err)
|
||||
}
|
||||
|
||||
rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
|
||||
return rc, nil
|
||||
}
|
||||
@@ -1227,13 +1257,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
}
|
||||
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
|
||||
// Ignore error 250 here - send by some servers
|
||||
if err != nil {
|
||||
switch errX := err.(type) {
|
||||
case *textproto.Error:
|
||||
switch errX.Code {
|
||||
case ftp.StatusRequestedFileActionOK:
|
||||
err = nil
|
||||
}
|
||||
if errX := textprotoError(err); errX != nil {
|
||||
switch errX.Code {
|
||||
case ftp.StatusRequestedFileActionOK:
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
|
||||
@@ -82,7 +82,8 @@ func init() {
|
||||
saFile, _ := m.Get("service_account_file")
|
||||
saCreds, _ := m.Get("service_account_credentials")
|
||||
anonymous, _ := m.Get("anonymous")
|
||||
if saFile != "" || saCreds != "" || anonymous == "true" {
|
||||
envAuth, _ := m.Get("env_auth")
|
||||
if saFile != "" || saCreds != "" || anonymous == "true" || envAuth == "true" {
|
||||
return nil, nil
|
||||
}
|
||||
return oauthutil.ConfigOut("", &oauthutil.Options{
|
||||
@@ -90,15 +91,21 @@ func init() {
|
||||
})
|
||||
},
|
||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||
Name: "project_number",
|
||||
Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
|
||||
Name: "project_number",
|
||||
Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "user_project",
|
||||
Help: "User project.\n\nOptional - needed only for requester pays.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "service_account_file",
|
||||
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
|
||||
}, {
|
||||
Name: "service_account_credentials",
|
||||
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Name: "service_account_credentials",
|
||||
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "anonymous",
|
||||
Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
|
||||
@@ -297,6 +304,15 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
|
||||
Value: "DURABLE_REDUCED_AVAILABILITY",
|
||||
Help: "Durable reduced availability storage class",
|
||||
}},
|
||||
}, {
|
||||
Name: "directory_markers",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
Help: `Upload an empty object with a trailing slash when a new directory is created
|
||||
|
||||
Empty folders are unsupported for bucket based remotes, this option creates an empty
|
||||
object ending with "/", to persist the folder.
|
||||
`,
|
||||
}, {
|
||||
Name: "no_check_bucket",
|
||||
Help: `If set, don't attempt to check the bucket exists or create it.
|
||||
@@ -330,6 +346,17 @@ can't check the size and hash but the file contents will be decompressed.
|
||||
Default: (encoder.Base |
|
||||
encoder.EncodeCrLf |
|
||||
encoder.EncodeInvalidUtf8),
|
||||
}, {
|
||||
Name: "env_auth",
|
||||
Help: "Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
|
||||
Default: false,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "false",
|
||||
Help: "Enter credentials in the next step.",
|
||||
}, {
|
||||
Value: "true",
|
||||
Help: "Get GCP IAM credentials from the environment (env vars or IAM).",
|
||||
}},
|
||||
}}...),
|
||||
})
|
||||
}
|
||||
@@ -337,6 +364,7 @@ can't check the size and hash but the file contents will be decompressed.
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
ProjectNumber string `config:"project_number"`
|
||||
UserProject string `config:"user_project"`
|
||||
ServiceAccountFile string `config:"service_account_file"`
|
||||
ServiceAccountCredentials string `config:"service_account_credentials"`
|
||||
Anonymous bool `config:"anonymous"`
|
||||
@@ -349,6 +377,8 @@ type Options struct {
|
||||
Decompress bool `config:"decompress"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
EnvAuth bool `config:"env_auth"`
|
||||
DirectoryMarkers bool `config:"directory_markers"`
|
||||
}
|
||||
|
||||
// Fs represents a remote storage server
|
||||
@@ -444,7 +474,7 @@ func parsePath(path string) (root string) {
|
||||
// split returns bucket and bucketPath from the rootRelativePath
|
||||
// relative to f.root
|
||||
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
|
||||
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
|
||||
bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
|
||||
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
|
||||
}
|
||||
|
||||
@@ -500,6 +530,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err)
|
||||
}
|
||||
} else if opt.EnvAuth {
|
||||
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
|
||||
}
|
||||
} else {
|
||||
oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
|
||||
if err != nil {
|
||||
@@ -525,6 +560,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
BucketBased: true,
|
||||
BucketBasedRootOK: true,
|
||||
}).Fill(ctx, f)
|
||||
if opt.DirectoryMarkers {
|
||||
f.features.CanHaveEmptyDirectories = true
|
||||
}
|
||||
|
||||
// Create a new authorized Drive client.
|
||||
f.client = oAuthClient
|
||||
@@ -541,7 +579,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
// Check to see if the object exists
|
||||
encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
|
||||
get := f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx)
|
||||
if f.opt.UserProject != "" {
|
||||
get = get.UserProject(f.opt.UserProject)
|
||||
}
|
||||
_, err = get.Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err == nil {
|
||||
@@ -601,9 +643,13 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
|
||||
directory += "/"
|
||||
}
|
||||
list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks)
|
||||
if f.opt.UserProject != "" {
|
||||
list = list.UserProject(f.opt.UserProject)
|
||||
}
|
||||
if !recurse {
|
||||
list = list.Delimiter("/")
|
||||
}
|
||||
foundItems := 0
|
||||
for {
|
||||
var objects *storage.Objects
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
@@ -619,6 +665,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
|
||||
return err
|
||||
}
|
||||
if !recurse {
|
||||
foundItems += len(objects.Prefixes)
|
||||
var object storage.Object
|
||||
for _, remote := range objects.Prefixes {
|
||||
if !strings.HasSuffix(remote, "/") {
|
||||
@@ -639,22 +686,29 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
|
||||
}
|
||||
}
|
||||
}
|
||||
foundItems += len(objects.Items)
|
||||
for _, object := range objects.Items {
|
||||
remote := f.opt.Enc.ToStandardPath(object.Name)
|
||||
if !strings.HasPrefix(remote, prefix) {
|
||||
fs.Logf(f, "Odd name received %q", object.Name)
|
||||
continue
|
||||
}
|
||||
remote = remote[len(prefix):]
|
||||
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
|
||||
// is this a directory marker?
|
||||
if isDirectory {
|
||||
// Don't insert the root directory
|
||||
if remote == directory {
|
||||
continue
|
||||
}
|
||||
// process directory markers as directories
|
||||
remote = strings.TrimRight(remote, "/")
|
||||
}
|
||||
remote = remote[len(prefix):]
|
||||
if addBucket {
|
||||
remote = path.Join(bucket, remote)
|
||||
}
|
||||
// is this a directory marker?
|
||||
if isDirectory {
|
||||
continue // skip directory marker
|
||||
}
|
||||
err = fn(remote, object, false)
|
||||
|
||||
err = fn(remote, object, isDirectory)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -664,6 +718,17 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
|
||||
}
|
||||
list.PageToken(objects.NextPageToken)
|
||||
}
|
||||
if f.opt.DirectoryMarkers && foundItems == 0 && directory != "" {
|
||||
// Determine whether the directory exists or not by whether it has a marker
|
||||
_, err := f.readObjectInfo(ctx, bucket, directory)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
return fs.ErrorDirNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -707,6 +772,9 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
|
||||
return nil, errors.New("can't list buckets without project number")
|
||||
}
|
||||
listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks)
|
||||
if f.opt.UserProject != "" {
|
||||
listBuckets = listBuckets.UserProject(f.opt.UserProject)
|
||||
}
|
||||
for {
|
||||
var buckets *storage.Buckets
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
@@ -824,10 +892,69 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
|
||||
return f.Put(ctx, in, src, options...)
|
||||
}
|
||||
|
||||
// Create directory marker file and parents
|
||||
func (f *Fs) createDirectoryMarker(ctx context.Context, bucket, dir string) error {
|
||||
if !f.opt.DirectoryMarkers || bucket == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Object to be uploaded
|
||||
o := &Object{
|
||||
fs: f,
|
||||
modTime: time.Now(),
|
||||
}
|
||||
|
||||
for {
|
||||
_, bucketPath := f.split(dir)
|
||||
// Don't create the directory marker if it is the bucket or at the very root
|
||||
if bucketPath == "" {
|
||||
break
|
||||
}
|
||||
o.remote = dir + "/"
|
||||
|
||||
// Check to see if object already exists
|
||||
_, err := o.readObjectInfo(ctx)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Upload it if not
|
||||
fs.Debugf(o, "Creating directory marker")
|
||||
content := io.Reader(strings.NewReader(""))
|
||||
err = o.Update(ctx, content, o)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating directory marker failed: %w", err)
|
||||
}
|
||||
|
||||
// Now check parent directory exists
|
||||
dir = path.Dir(dir)
|
||||
if dir == "/" || dir == "." {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Mkdir creates the bucket if it doesn't exist
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
|
||||
bucket, _ := f.split(dir)
|
||||
return f.makeBucket(ctx, bucket)
|
||||
e := f.checkBucket(ctx, bucket)
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
return f.createDirectoryMarker(ctx, bucket, dir)
|
||||
|
||||
}
|
||||
|
||||
// mkdirParent creates the parent bucket/directory if it doesn't exist
|
||||
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
|
||||
remote = strings.TrimRight(remote, "/")
|
||||
dir := path.Dir(remote)
|
||||
if dir == "/" || dir == "." {
|
||||
dir = ""
|
||||
}
|
||||
return f.Mkdir(ctx, dir)
|
||||
}
|
||||
|
||||
// makeBucket creates the bucket if it doesn't exist
|
||||
@@ -836,7 +963,11 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
|
||||
// List something from the bucket to see if it exists. Doing it like this enables the use of a
|
||||
// service account that only has the "Storage Object Admin" role. See #2193 for details.
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
|
||||
list := f.svc.Objects.List(bucket).MaxResults(1).Context(ctx)
|
||||
if f.opt.UserProject != "" {
|
||||
list = list.UserProject(f.opt.UserProject)
|
||||
}
|
||||
_, err = list.Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err == nil {
|
||||
@@ -871,7 +1002,11 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
|
||||
if !f.opt.BucketPolicyOnly {
|
||||
insertBucket.PredefinedAcl(f.opt.BucketACL)
|
||||
}
|
||||
_, err = insertBucket.Context(ctx).Do()
|
||||
insertBucket = insertBucket.Context(ctx)
|
||||
if f.opt.UserProject != "" {
|
||||
insertBucket = insertBucket.UserProject(f.opt.UserProject)
|
||||
}
|
||||
_, err = insertBucket.Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
}, nil)
|
||||
@@ -891,12 +1026,28 @@ func (f *Fs) checkBucket(ctx context.Context, bucket string) error {
|
||||
// to delete was not empty.
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
|
||||
bucket, directory := f.split(dir)
|
||||
// Remove directory marker file
|
||||
if f.opt.DirectoryMarkers && bucket != "" && dir != "" {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: dir + "/",
|
||||
}
|
||||
fs.Debugf(o, "Removing directory marker")
|
||||
err := o.Remove(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("removing directory marker failed: %w", err)
|
||||
}
|
||||
}
|
||||
if bucket == "" || directory != "" {
|
||||
return nil
|
||||
}
|
||||
return f.cache.Remove(bucket, func() error {
|
||||
return f.pacer.Call(func() (bool, error) {
|
||||
err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
|
||||
deleteBucket := f.svc.Buckets.Delete(bucket).Context(ctx)
|
||||
if f.opt.UserProject != "" {
|
||||
deleteBucket = deleteBucket.UserProject(f.opt.UserProject)
|
||||
}
|
||||
err = deleteBucket.Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
})
|
||||
@@ -918,7 +1069,7 @@ func (f *Fs) Precision() time.Duration {
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
dstBucket, dstPath := f.split(remote)
|
||||
err := f.checkBucket(ctx, dstBucket)
|
||||
err := f.mkdirParent(ctx, remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -942,7 +1093,11 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
var rewriteResponse *storage.RewriteResponse
|
||||
for {
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
rewriteResponse, err = rewriteRequest.Context(ctx).Do()
|
||||
rewriteRequest = rewriteRequest.Context(ctx)
|
||||
if f.opt.UserProject != "" {
|
||||
rewriteRequest.UserProject(f.opt.UserProject)
|
||||
}
|
||||
rewriteResponse, err = rewriteRequest.Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1052,8 +1207,17 @@ func (o *Object) setMetaData(info *storage.Object) {
|
||||
// readObjectInfo reads the definition for an object
|
||||
func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) {
|
||||
bucket, bucketPath := o.split()
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
|
||||
return o.fs.readObjectInfo(ctx, bucket, bucketPath)
|
||||
}
|
||||
|
||||
// readObjectInfo reads the definition for an object
|
||||
func (f *Fs) readObjectInfo(ctx context.Context, bucket, bucketPath string) (object *storage.Object, err error) {
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
get := f.svc.Objects.Get(bucket, bucketPath).Context(ctx)
|
||||
if f.opt.UserProject != "" {
|
||||
get = get.UserProject(f.opt.UserProject)
|
||||
}
|
||||
object, err = get.Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1125,7 +1289,11 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
|
||||
if !o.fs.opt.BucketPolicyOnly {
|
||||
copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
|
||||
}
|
||||
newObject, err = copyObject.Context(ctx).Do()
|
||||
copyObject = copyObject.Context(ctx)
|
||||
if o.fs.opt.UserProject != "" {
|
||||
copyObject = copyObject.UserProject(o.fs.opt.UserProject)
|
||||
}
|
||||
newObject, err = copyObject.Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1142,6 +1310,9 @@ func (o *Object) Storable() bool {
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
if o.fs.opt.UserProject != "" {
|
||||
o.url = o.url + "&userProject=" + o.fs.opt.UserProject
|
||||
}
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -1185,11 +1356,14 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
// Update the object with the contents of the io.Reader, modTime and size
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
bucket, bucketPath := o.split()
|
||||
err := o.fs.checkBucket(ctx, bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
// Create parent dir/bucket if not saving directory marker
|
||||
if !strings.HasSuffix(o.remote, "/") {
|
||||
err = o.fs.mkdirParent(ctx, o.remote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
modTime := src.ModTime(ctx)
|
||||
|
||||
@@ -1234,7 +1408,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
if !o.fs.opt.BucketPolicyOnly {
|
||||
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
|
||||
}
|
||||
newObject, err = insertObject.Context(ctx).Do()
|
||||
insertObject = insertObject.Context(ctx)
|
||||
if o.fs.opt.UserProject != "" {
|
||||
insertObject = insertObject.UserProject(o.fs.opt.UserProject)
|
||||
}
|
||||
newObject, err = insertObject.Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1249,7 +1427,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
func (o *Object) Remove(ctx context.Context) (err error) {
|
||||
bucket, bucketPath := o.split()
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
|
||||
deleteBucket := o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx)
|
||||
if o.fs.opt.UserProject != "" {
|
||||
deleteBucket = deleteBucket.UserProject(o.fs.opt.UserProject)
|
||||
}
|
||||
err = deleteBucket.Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
return err
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/backend/googlecloudstorage"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
@@ -16,3 +17,17 @@ func TestIntegration(t *testing.T) {
|
||||
NilObject: (*googlecloudstorage.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
func TestIntegration2(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
name := "TestGoogleCloudStorage"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: name + ":",
|
||||
NilObject: (*googlecloudstorage.Object)(nil),
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "directory_markers", Value: "true"},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
@@ -161,7 +161,7 @@ func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bo
|
||||
if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil {
|
||||
fs.Errorf(nil, "%s: failed to import: %v", remote, err)
|
||||
}
|
||||
accounting.Stats(ctx).NewCheckingTransfer(obj).Done(ctx, err)
|
||||
accounting.Stats(ctx).NewCheckingTransfer(obj, "importing").Done(ctx, err)
|
||||
doneCount++
|
||||
}
|
||||
})
|
||||
|
||||
@@ -166,6 +166,7 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: true,
|
||||
PartialUploads: true,
|
||||
}
|
||||
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)
|
||||
|
||||
|
||||
@@ -19,9 +19,10 @@ func init() {
|
||||
Description: "Hadoop distributed file system",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "namenode",
|
||||
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
|
||||
Required: true,
|
||||
Name: "namenode",
|
||||
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "username",
|
||||
Help: "Hadoop user name.",
|
||||
@@ -29,6 +30,7 @@ func init() {
|
||||
Value: "root",
|
||||
Help: "Connect to hdfs as root.",
|
||||
}},
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "service_principal_name",
|
||||
Help: `Kerberos service principal name for the namenode.
|
||||
@@ -36,15 +38,16 @@ func init() {
|
||||
Enables KERBEROS authentication. Specifies the Service Principal Name
|
||||
(SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
|
||||
for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
|
||||
Advanced: true,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "data_transfer_protection",
|
||||
Help: `Kerberos data transfer protection: authentication|integrity|privacy.
|
||||
|
||||
Specifies whether or not authentication, data signature integrity
|
||||
checks, and wire encryption is required when communicating the the
|
||||
datanodes. Possible values are 'authentication', 'integrity' and
|
||||
'privacy'. Used only with KERBEROS enabled.`,
|
||||
checks, and wire encryption are required when communicating with
|
||||
the datanodes. Possible values are 'authentication', 'integrity'
|
||||
and 'privacy'. Used only with KERBEROS enabled.`,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "privacy",
|
||||
Help: "Ensure authentication, integrity and encryption enabled.",
|
||||
|
||||
@@ -294,15 +294,6 @@ func (f *Fs) copyOrMove(ctx context.Context, isDirectory bool, operationType Cop
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// copyDirectory moves the directory at the source-path to the destination-path and
|
||||
// returns the resulting api-object if successful.
|
||||
//
|
||||
// The operation will only be successful
|
||||
// if the parent-directory of the destination-path exists.
|
||||
func (f *Fs) copyDirectory(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
|
||||
return f.copyOrMove(ctx, true, CopyOriginalPreserveModTime, source, destination, onExist)
|
||||
}
|
||||
|
||||
// moveDirectory moves the directory at the source-path to the destination-path and
|
||||
// returns the resulting api-object if successful.
|
||||
//
|
||||
|
||||
@@ -2,7 +2,7 @@
package hidrive

// FIXME HiDrive only supports file or folder names of 255 characters or less.
// Operations that create files oder folder with longer names will throw a HTTP error:
// Operations that create files or folders with longer names will throw an HTTP error:
// - 422 Unprocessable Entity
// A more graceful way for rclone to handle this may be desirable.

@@ -338,7 +338,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, fmt.Errorf("could not access root-prefix: %w", err)
}
if item.Type != api.HiDriveObjectTypeDirectory {
return nil, errors.New("The root-prefix needs to point to a valid directory or be empty")
return nil, errors.New("the root-prefix needs to point to a valid directory or be empty")
}
}
|
||||
|
||||
|
||||
@@ -495,7 +495,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
add(file)
|
||||
case fs.ErrorNotAFile:
|
||||
// ...found a directory not a file
|
||||
add(fs.NewDir(remote, timeUnset))
|
||||
add(fs.NewDir(remote, time.Time{}))
|
||||
default:
|
||||
fs.Debugf(remote, "skipping because of error: %v", err)
|
||||
}
|
||||
@@ -507,7 +507,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
name = strings.TrimRight(name, "/")
|
||||
remote := path.Join(dir, name)
|
||||
if isDir {
|
||||
add(fs.NewDir(remote, timeUnset))
|
||||
add(fs.NewDir(remote, time.Time{}))
|
||||
} else {
|
||||
in <- remote
|
||||
}
|
||||
|
||||
@@ -133,11 +133,13 @@ Owner is able to add custom keys. Metadata feature grabs all the keys including
|
||||
},
|
||||
|
||||
Options: []fs.Option{{
|
||||
Name: "access_key_id",
|
||||
Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php",
|
||||
Name: "access_key_id",
|
||||
Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "secret_access_key",
|
||||
Help: "IAS3 Secret Key (password).\n\nLeave blank for anonymous access.",
|
||||
Name: "secret_access_key",
|
||||
Help: "IAS3 Secret Key (password).\n\nLeave blank for anonymous access.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
// their official client (https://github.com/jjjake/internetarchive) hardcodes following the two
|
||||
Name: "endpoint",
|
||||
|
||||
@@ -74,6 +74,10 @@ const (
|
||||
tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
|
||||
tele2CloudAuthURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
|
||||
tele2CloudClientID = "desktop"
|
||||
|
||||
onlimeCloudTokenURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/token"
|
||||
onlimeCloudAuthURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/auth"
|
||||
onlimeCloudClientID = "desktop"
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
@@ -84,7 +88,7 @@ func init() {
|
||||
Description: "Jottacloud",
|
||||
NewFs: NewFs,
|
||||
Config: Config,
|
||||
Options: []fs.Option{{
|
||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||
Name: "md5_memory_limit",
|
||||
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
|
||||
Default: fs.SizeSuffix(10 * 1024 * 1024),
|
||||
@@ -119,7 +123,7 @@ func init() {
|
||||
Default: (encoder.Display |
|
||||
encoder.EncodeWin | // :?"*<>|
|
||||
encoder.EncodeInvalidUtf8),
|
||||
}},
|
||||
}}...),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -139,6 +143,9 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
|
||||
}, {
|
||||
Value: "tele2",
|
||||
Help: "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
|
||||
}, {
|
||||
Value: "onlime",
|
||||
Help: "Onlime Cloud authentication.\nUse this if you are using Onlime Cloud.",
|
||||
}})
|
||||
case "auth_type_done":
|
||||
// Jump to next state according to config chosen
|
||||
@@ -261,6 +268,21 @@ machines.`)
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
},
|
||||
})
|
||||
case "onlime": // onlime cloud config
|
||||
m.Set("configVersion", fmt.Sprint(configVersion))
|
||||
m.Set(configClientID, onlimeCloudClientID)
|
||||
m.Set(configTokenURL, onlimeCloudTokenURL)
|
||||
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
|
||||
OAuth2Config: &oauth2.Config{
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: onlimeCloudAuthURL,
|
||||
TokenURL: onlimeCloudTokenURL,
|
||||
},
|
||||
ClientID: onlimeCloudClientID,
|
||||
Scopes: []string{"openid", "jotta-default", "offline_access"},
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
},
|
||||
})
|
||||
case "choose_device":
|
||||
return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", `Use a non-standard device/mountpoint?
|
||||
Choosing no, the default, will let you access the storage used for the archive
|
||||
@@ -1838,12 +1860,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
if err == nil {
|
||||
// if the object exists delete it
|
||||
err = o.remove(ctx, true)
|
||||
if err != nil {
|
||||
if err != nil && err != fs.ErrorObjectNotFound {
|
||||
// if delete failed then report that, unless it was because the file did not exist after all
|
||||
return fmt.Errorf("failed to remove old object: %w", err)
|
||||
}
|
||||
}
|
||||
// if the object does not exist we can just continue but if the error is something different we should report that
|
||||
if err != fs.ErrorObjectNotFound {
|
||||
} else if err != fs.ErrorObjectNotFound {
|
||||
// if the object does not exist we can just continue but if the error is something different we should report that
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -1930,7 +1952,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
o.md5 = result.Md5
|
||||
o.modTime = time.Unix(result.Modified/1000, 0)
|
||||
} else {
|
||||
// If the file state is COMPLETE we don't need to upload it because the file was already found but we still ned to update our metadata
|
||||
// If the file state is COMPLETE we don't need to upload it because the file was already found but we still need to update our metadata
|
||||
return o.readMetaData(ctx, true)
|
||||
}
|
||||
|
||||
@@ -1951,10 +1973,17 @@ func (o *Object) remove(ctx context.Context, hard bool) error {
|
||||
opts.Parameters.Set("dl", "true")
|
||||
}
|
||||
|
||||
return o.fs.pacer.Call(func() (bool, error) {
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err := o.fs.jfsSrv.CallXML(ctx, &opts, nil, nil)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if apiErr, ok := err.(*api.Error); ok {
|
||||
// attempting to hard delete will fail if path does not exist, but standard delete will succeed
|
||||
if apiErr.StatusCode == http.StatusNotFound {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
|
||||
@@ -61,9 +61,10 @@ func init() {
|
||||
Default: true,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "Your user name.",
|
||||
Required: true,
|
||||
Name: "user",
|
||||
Help: "Your user name.",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "password",
|
||||
Help: "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
|
||||
@@ -376,7 +377,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
for i, file := range files {
|
||||
remote := path.Join(dir, f.opt.Enc.ToStandardName(file.Name))
|
||||
if file.Type == "dir" {
|
||||
entries[i] = fs.NewDir(remote, time.Unix(0, 0))
|
||||
entries[i] = fs.NewDir(remote, time.Time{})
|
||||
} else {
|
||||
entries[i] = &Object{
|
||||
fs: f,
|
||||
|
||||
@@ -266,7 +266,10 @@ type Object struct {
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
|
||||
var (
|
||||
errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
|
||||
errLinksNeedsSuffix = errors.New("need \"" + linkSuffix + "\" suffix to refer to symlink when using -l/--links")
|
||||
)
|
||||
|
||||
// NewFs constructs an Fs from the path
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
@@ -300,6 +303,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
WriteMetadata: true,
|
||||
UserMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported
|
||||
FilterAware: true,
|
||||
PartialUploads: true,
|
||||
}).Fill(ctx, f)
|
||||
if opt.FollowSymlinks {
|
||||
f.lstat = os.Stat
|
||||
@@ -310,7 +314,16 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if err == nil {
|
||||
f.dev = readDevice(fi, f.opt.OneFileSystem)
|
||||
}
|
||||
// Check to see if this is a .rclonelink if not found
|
||||
hasLinkSuffix := strings.HasSuffix(f.root, linkSuffix)
|
||||
if hasLinkSuffix && opt.TranslateSymlinks && os.IsNotExist(err) {
|
||||
fi, err = f.lstat(strings.TrimSuffix(f.root, linkSuffix))
|
||||
}
|
||||
if err == nil && f.isRegular(fi.Mode()) {
|
||||
// Handle the odd case, that a symlink was specified by name without the link suffix
|
||||
if !hasLinkSuffix && opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
|
||||
return nil, errLinksNeedsSuffix
|
||||
}
|
||||
// It is a file, so use the parent as the root
|
||||
f.root = filepath.Dir(f.root)
|
||||
// return an error with an fs which points to the parent
|
||||
@@ -503,7 +516,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
continue
|
||||
}
|
||||
}
|
||||
err = fmt.Errorf("failed to read directory %q: %w", namepath, fierr)
|
||||
fierr = fmt.Errorf("failed to get info about directory entry %q: %w", namepath, fierr)
|
||||
fs.Errorf(dir, "%v", fierr)
|
||||
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
|
||||
continue
|
||||
@@ -524,6 +537,10 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
|
||||
localPath := filepath.Join(fsDirPath, name)
|
||||
fi, err = os.Stat(localPath)
|
||||
// Quietly skip errors on excluded files and directories
|
||||
if err != nil && useFilter && !filter.IncludeRemote(newRemote) {
|
||||
continue
|
||||
}
|
||||
if os.IsNotExist(err) || isCircularSymlinkError(err) {
|
||||
// Skip bad symlinks and circular symlinks
|
||||
err = fserrors.NoRetryError(fmt.Errorf("symlink: %w", err))
|
||||
@@ -536,11 +553,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
mode = fi.Mode()
}
// Don't include non directory if not included
// we leave directory filtering to the layer above
if useFilter && !fi.IsDir() && !filter.IncludeRemote(newRemote) {
continue
}
if fi.IsDir() {
// Ignore directories which are symlinks. These are junction points under windows which
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
@@ -553,6 +565,11 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
newRemote += linkSuffix
}
// Don't include non directory if not included
// we leave directory filtering to the layer above
if useFilter && !filter.IncludeRemote(newRemote) {
continue
}
fso, err := f.newObjectWithInfo(newRemote, fi)
if err != nil {
return nil, err

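The second hunk above moves the filter check to after linkSuffix has been appended, so that in --links mode include rules written against the ".rclonelink" names can actually match. A short sketch using rclone's filter package, mirroring the rules from the test further down (error returns ignored for brevity):

    package main

    import (
        "context"
        "fmt"

        "github.com/rclone/rclone/fs/filter"
    )

    func main() {
        _, fi := filter.AddConfig(context.Background())
        _ = fi.AddRule("+ included.file.link.rclonelink")
        _ = fi.AddRule("- *")

        // Checking the bare name, as the old code effectively did, rejects the entry...
        fmt.Println(fi.IncludeRemote("included.file.link")) // false
        // ...checking after the suffix has been appended accepts it.
        fmt.Println(fi.IncludeRemote("included.file.link.rclonelink")) // true
    }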
@@ -14,10 +14,12 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/object"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/lib/file"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
@@ -145,6 +147,20 @@ func TestSymlink(t *testing.T) {
|
||||
_, err = r.Flocal.NewObject(ctx, "symlink2.txt")
|
||||
require.Equal(t, fs.ErrorObjectNotFound, err)
|
||||
|
||||
// Check that NewFs works with the suffixed version and --links
|
||||
f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+linkSuffix), configmap.Simple{
|
||||
"links": "true",
|
||||
})
|
||||
require.Equal(t, fs.ErrorIsFile, err)
|
||||
require.Equal(t, dir, f2.(*Fs).root)
|
||||
|
||||
// Check that NewFs doesn't see the non suffixed version with --links
|
||||
f2, err = NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"), configmap.Simple{
|
||||
"links": "true",
|
||||
})
|
||||
require.Equal(t, errLinksNeedsSuffix, err)
|
||||
require.Nil(t, f2)
|
||||
|
||||
// Check reading the object
|
||||
in, err := o.Open(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -395,3 +411,147 @@ func TestFilter(t *testing.T) {
|
||||
sort.Sort(entries)
|
||||
require.Equal(t, "[included]", fmt.Sprint(entries))
|
||||
}
|
||||
|
||||
func testFilterSymlink(t *testing.T, copyLinks bool) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
when := time.Now()
|
||||
f := r.Flocal.(*Fs)
|
||||
|
||||
// Create a file, a directory, a symlink to a file, a symlink to a directory and a dangling symlink
|
||||
r.WriteFile("included.file", "included file", when)
|
||||
r.WriteFile("included.dir/included.sub.file", "included sub file", when)
|
||||
require.NoError(t, os.Symlink("included.file", filepath.Join(r.LocalName, "included.file.link")))
|
||||
require.NoError(t, os.Symlink("included.dir", filepath.Join(r.LocalName, "included.dir.link")))
|
||||
require.NoError(t, os.Symlink("dangling", filepath.Join(r.LocalName, "dangling.link")))
|
||||
|
||||
defer func() {
|
||||
// Reset -L/-l mode
|
||||
f.opt.FollowSymlinks = false
|
||||
f.opt.TranslateSymlinks = false
|
||||
f.lstat = os.Lstat
|
||||
}()
|
||||
if copyLinks {
|
||||
// Set fs into "-L" mode
|
||||
f.opt.FollowSymlinks = true
|
||||
f.opt.TranslateSymlinks = false
|
||||
f.lstat = os.Stat
|
||||
} else {
|
||||
// Set fs into "-l" mode
|
||||
f.opt.FollowSymlinks = false
|
||||
f.opt.TranslateSymlinks = true
|
||||
f.lstat = os.Lstat
|
||||
}
|
||||
|
||||
// Check set up for filtering
|
||||
assert.True(t, f.Features().FilterAware)
|
||||
|
||||
// Reset global error count
|
||||
accounting.Stats(ctx).ResetErrors()
|
||||
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
|
||||
|
||||
// Add a filter
|
||||
ctx, fi := filter.AddConfig(ctx)
|
||||
require.NoError(t, fi.AddRule("+ included.file"))
|
||||
require.NoError(t, fi.AddRule("+ included.dir/**"))
|
||||
if copyLinks {
|
||||
require.NoError(t, fi.AddRule("+ included.file.link"))
|
||||
require.NoError(t, fi.AddRule("+ included.dir.link/**"))
|
||||
} else {
|
||||
require.NoError(t, fi.AddRule("+ included.file.link.rclonelink"))
|
||||
require.NoError(t, fi.AddRule("+ included.dir.link.rclonelink"))
|
||||
}
|
||||
require.NoError(t, fi.AddRule("- *"))
|
||||
|
||||
// Check listing without use filter flag
|
||||
entries, err := f.List(ctx, "")
|
||||
require.NoError(t, err)
|
||||
|
||||
if copyLinks {
|
||||
// Check 1 global errors one for each dangling symlink
|
||||
assert.Equal(t, int64(1), accounting.Stats(ctx).GetErrors(), "global errors found")
|
||||
} else {
|
||||
// Check 0 global errors as dangling symlink copied properly
|
||||
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
|
||||
}
|
||||
accounting.Stats(ctx).ResetErrors()
|
||||
|
||||
sort.Sort(entries)
|
||||
if copyLinks {
|
||||
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
|
||||
} else {
|
||||
require.Equal(t, "[dangling.link.rclonelink included.dir included.dir.link.rclonelink included.file included.file.link.rclonelink]", fmt.Sprint(entries))
|
||||
}
|
||||
|
||||
// Add user filter flag
|
||||
ctx = filter.SetUseFilter(ctx, true)
|
||||
|
||||
// Check listing with use filter flag
|
||||
entries, err = f.List(ctx, "")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
|
||||
|
||||
sort.Sort(entries)
|
||||
if copyLinks {
|
||||
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
|
||||
} else {
|
||||
require.Equal(t, "[included.dir included.dir.link.rclonelink included.file included.file.link.rclonelink]", fmt.Sprint(entries))
|
||||
}
|
||||
|
||||
// Check listing through a symlink still works
|
||||
entries, err = f.List(ctx, "included.dir")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
|
||||
|
||||
sort.Sort(entries)
|
||||
require.Equal(t, "[included.dir/included.sub.file]", fmt.Sprint(entries))
|
||||
}
|
||||
|
||||
func TestFilterSymlinkCopyLinks(t *testing.T) {
|
||||
testFilterSymlink(t, true)
|
||||
}
|
||||
|
||||
func TestFilterSymlinkLinks(t *testing.T) {
|
||||
testFilterSymlink(t, false)
|
||||
}
|
||||
|
||||
func TestCopySymlink(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
when := time.Now()
|
||||
f := r.Flocal.(*Fs)
|
||||
|
||||
// Create a file and a symlink to it
|
||||
r.WriteFile("src/file.txt", "hello world", when)
|
||||
require.NoError(t, os.Symlink("file.txt", filepath.Join(r.LocalName, "src", "link.txt")))
|
||||
defer func() {
|
||||
// Reset -L/-l mode
|
||||
f.opt.FollowSymlinks = false
|
||||
f.opt.TranslateSymlinks = false
|
||||
f.lstat = os.Lstat
|
||||
}()
|
||||
|
||||
// Set fs into "-l/--links" mode
|
||||
f.opt.FollowSymlinks = false
|
||||
f.opt.TranslateSymlinks = true
|
||||
f.lstat = os.Lstat
|
||||
|
||||
// Create dst
|
||||
require.NoError(t, f.Mkdir(ctx, "dst"))
|
||||
|
||||
// Do copy from src into dst
|
||||
src, err := f.NewObject(ctx, "src/link.txt.rclonelink")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, src)
|
||||
dst, err := operations.Copy(ctx, f, nil, "dst/link.txt.rclonelink", src)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, dst)
|
||||
|
||||
// Test that we made a symlink and it has the right contents
|
||||
dstPath := filepath.Join(r.LocalName, "dst", "link.txt")
|
||||
linkContents, err := os.Readlink(dstPath)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "file.txt", linkContents)
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ package local
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -23,7 +24,7 @@ func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
|
||||
// Check statx() is available as it was only introduced in kernel 4.11
|
||||
// If not, fall back to fstatat() which was introduced in 2.6.16 which is guaranteed for all Go versions
|
||||
var stat unix.Statx_t
|
||||
if unix.Statx(unix.AT_FDCWD, ".", 0, unix.STATX_ALL, &stat) != unix.ENOSYS {
|
||||
if runtime.GOOS != "android" && unix.Statx(unix.AT_FDCWD, ".", 0, unix.STATX_ALL, &stat) != unix.ENOSYS {
|
||||
readMetadataFromFileFn = readMetadataFromFileStatx
|
||||
} else {
|
||||
readMetadataFromFileFn = readMetadataFromFileFstatat
|
||||
@@ -91,7 +92,7 @@ func readMetadataFromFileFstatat(o *Object, m *fs.Metadata) (err error) {
|
||||
// The types of t.Sec and t.Nsec vary from int32 to int64 on
|
||||
// different Linux architectures so we need to cast them to
|
||||
// int64 here and hence need to quiet the linter about
|
||||
// unecessary casts.
|
||||
// unnecessary casts.
|
||||
//
|
||||
// nolint: unconvert
|
||||
m.Set(key, time.Unix(int64(t.Sec), int64(t.Nsec)).Format(metadataTimeFormat))
|
||||
|
||||
@@ -85,10 +85,11 @@ func init() {
|
||||
Name: "mailru",
|
||||
Description: "Mail.ru Cloud",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "user",
|
||||
Help: "User name (usually email).",
|
||||
Required: true,
|
||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||
Name: "user",
|
||||
Help: "User name (usually email).",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: `Password.
|
||||
@@ -213,7 +214,7 @@ Supported quirks: atomicmkdir binlist unknowndirs`,
|
||||
encoder.EncodeWin | // :?"*<>|
|
||||
encoder.EncodeBackSlash |
|
||||
encoder.EncodeInvalidUtf8),
|
||||
}},
|
||||
}}...),
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -58,9 +58,10 @@ func init() {
|
||||
Description: "Mega",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "user",
|
||||
Help: "User name.",
|
||||
Required: true,
|
||||
Name: "user",
|
||||
Help: "User name.",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: "Password.",
|
||||
@@ -83,6 +84,17 @@ than permanently deleting them. If you specify this then rclone will
|
||||
permanently delete objects instead.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "use_https",
|
||||
Help: `Use HTTPS for transfers.
|
||||
|
||||
MEGA uses plain text HTTP connections by default.
|
||||
Some ISPs throttle HTTP connections, this causes transfers to become very slow.
|
||||
Enabling this will force MEGA to use HTTPS for all transfers.
|
||||
HTTPS is normally not necessary since all data is already encrypted anyway.
|
||||
Enabling it will increase CPU usage and add network overhead.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -100,6 +112,7 @@ type Options struct {
|
||||
Pass string `config:"pass"`
|
||||
Debug bool `config:"debug"`
|
||||
HardDelete bool `config:"hard_delete"`
|
||||
UseHTTPS bool `config:"use_https"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
@@ -204,6 +217,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if srv == nil {
|
||||
srv = mega.New().SetClient(fshttp.NewClient(ctx))
|
||||
srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
|
||||
srv.SetHTTPS(opt.UseHTTPS)
|
||||
srv.SetLogger(func(format string, v ...interface{}) {
|
||||
fs.Infof("*go-mega*", format, v...)
|
||||
})
|
||||
|
||||
@@ -65,11 +65,13 @@ HTTP is provided primarily for debugging purposes.`,
|
||||
Help: `Domain+path of NetStorage host to connect to.
|
||||
|
||||
Format should be ` + "`<domain>/<internal folders>`",
|
||||
Required: true,
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "account",
|
||||
Help: "Set the NetStorage account name",
|
||||
Required: true,
|
||||
Name: "account",
|
||||
Help: "Set the NetStorage account name",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "secret",
|
||||
Help: `Set the NetStorage account secret/G2O key for authentication.
|
||||
@@ -819,6 +821,8 @@ func (f *Fs) getAuth(req *http.Request) error {
|
||||
// Set Authorization header
|
||||
dataHeader := generateDataHeader(f)
|
||||
path := req.URL.RequestURI()
|
||||
//lint:ignore SA1008 false positive when running staticcheck, the header name is according to docs even if not canonical
|
||||
//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1008
|
||||
actionHeader := req.Header["X-Akamai-ACS-Action"][0]
|
||||
fs.Debugf(nil, "NetStorage API %s call %s for path %q", req.Method, actionHeader, path)
|
||||
req.Header.Set("X-Akamai-ACS-Auth-Data", dataHeader)
|
||||
|
||||
@@ -126,6 +126,7 @@ type HashesType struct {
|
||||
Sha1Hash string `json:"sha1Hash"` // hex encoded SHA1 hash for the contents of the file (if available)
|
||||
Crc32Hash string `json:"crc32Hash"` // hex encoded CRC32 value of the file (if available)
|
||||
QuickXorHash string `json:"quickXorHash"` // base64 encoded QuickXorHash value of the file (if available)
|
||||
Sha256Hash string `json:"sha256Hash"` // hex encoded SHA256 value of the file (if available)
|
||||
}
|
||||
|
||||
// FileFacet groups file-related data on OneDrive into a single structure.
|
||||
|
||||
@@ -131,10 +131,11 @@ Note that the chunks will be buffered into memory.`,
|
||||
Default: defaultChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "drive_id",
|
||||
Help: "The ID of the drive to use.",
|
||||
Default: "",
|
||||
Advanced: true,
|
||||
Name: "drive_id",
|
||||
Help: "The ID of the drive to use.",
|
||||
Default: "",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "drive_type",
|
||||
Help: "The type of the drive (" + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + ").",
|
||||
@@ -148,7 +149,8 @@ This isn't normally needed, but in special circumstances you might
|
||||
know the folder ID that you wish to access but not be able to get
|
||||
there through a path traversal.
|
||||
`,
|
||||
Advanced: true,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "access_scopes",
|
||||
Help: `Set scopes to be requested by rclone.
|
||||
@@ -196,7 +198,9 @@ listing, set this option.`,
|
||||
}, {
|
||||
Name: "server_side_across_configs",
|
||||
Default: false,
|
||||
Help: `Allow server-side operations (e.g. copy) to work across different onedrive configs.
|
||||
Help: `Deprecated: use --server-side-across-configs instead.
|
||||
|
||||
Allow server-side operations (e.g. copy) to work across different onedrive configs.
|
||||
|
||||
This will only work if you are copying between two OneDrive *Personal* drives AND
|
||||
the files to copy are already shared between them. In other cases, rclone will
|
||||
@@ -257,6 +261,67 @@ this flag there.
|
||||
Help: `Set the password for links created by the link command.
|
||||
|
||||
At the time of writing this only works with OneDrive personal paid accounts.
|
||||
`,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "hash_type",
|
||||
Default: "auto",
|
||||
Help: `Specify the hash in use for the backend.
|
||||
|
||||
This specifies the hash type in use. If set to "auto" it will use the
|
||||
default hash which is QuickXorHash.
|
||||
|
||||
Before rclone 1.62 an SHA1 hash was used by default for Onedrive
|
||||
Personal. For 1.62 and later the default is to use a QuickXorHash for
|
||||
all onedrive types. If an SHA1 hash is desired then set this option
|
||||
accordingly.
|
||||
|
||||
From July 2023 QuickXorHash will be the only available hash for
|
||||
both OneDrive for Business and OneDrive Personal.
|
||||
|
||||
This can be set to "none" to not use any hashes.
|
||||
|
||||
If the hash requested does not exist on the object, it will be
|
||||
returned as an empty string which is treated as a missing hash by
|
||||
rclone.
|
||||
`,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "auto",
|
||||
Help: "Rclone chooses the best hash",
|
||||
}, {
|
||||
Value: "quickxor",
|
||||
Help: "QuickXor",
|
||||
}, {
|
||||
Value: "sha1",
|
||||
Help: "SHA1",
|
||||
}, {
|
||||
Value: "sha256",
|
||||
Help: "SHA256",
|
||||
}, {
|
||||
Value: "crc32",
|
||||
Help: "CRC32",
|
||||
}, {
|
||||
Value: "none",
|
||||
Help: "None - don't use any hashes",
|
||||
}},
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "av_override",
|
||||
Default: false,
|
||||
Help: `Allows download of files the server thinks have a virus.
|
||||
|
||||
The onedrive/sharepoint server may check files uploaded with an Anti
|
||||
Virus checker. If it detects any potential viruses or malware it will
|
||||
block download of the file.
|
||||
|
||||
In this case you will see a message like this
|
||||
|
||||
server reports this file is infected with a virus - use --onedrive-av-override to download anyway: Infected (name of virus): 403 Forbidden:
|
||||
|
||||
If you are 100% sure you want to download this file anyway then use
|
||||
the --onedrive-av-override flag, or av_override = true in the config
|
||||
file.
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
@@ -597,6 +662,8 @@ type Options struct {
|
||||
LinkScope string `config:"link_scope"`
|
||||
LinkType string `config:"link_type"`
|
||||
LinkPassword string `config:"link_password"`
|
||||
HashType string `config:"hash_type"`
|
||||
AVOverride bool `config:"av_override"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
@@ -613,6 +680,7 @@ type Fs struct {
|
||||
tokenRenewer *oauthutil.Renew // renew the token on expiry
|
||||
driveID string // ID to use for querying Microsoft Graph
|
||||
driveType string // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
|
||||
hashType hash.Type // type of the hash we are using
|
||||
}
|
||||
|
||||
// Object describes a OneDrive object
|
||||
@@ -626,8 +694,7 @@ type Object struct {
|
||||
size int64 // size of the object
|
||||
modTime time.Time // modification time of the object
|
||||
id string // ID of the object
|
||||
sha1 string // SHA-1 of the object content
|
||||
quickxorhash string // QuickXorHash of the object content
|
||||
hash string // Hash of the content, usually QuickXorHash but set as hash_type
|
||||
mimeType string // Content-Type of object from server (may not be as uploaded)
|
||||
}
|
||||
|
||||
@@ -882,6 +949,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
driveType: opt.DriveType,
|
||||
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
hashType: QuickXorHashType,
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: true,
|
||||
@@ -891,6 +959,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}).Fill(ctx, f)
|
||||
f.srv.SetErrorHandler(errorHandler)
|
||||
|
||||
// Set the user defined hash
|
||||
if opt.HashType == "auto" || opt.HashType == "" {
|
||||
opt.HashType = QuickXorHashType.String()
|
||||
}
|
||||
err = f.hashType.Set(opt.HashType)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Disable change polling in China region
|
||||
// See: https://github.com/rclone/rclone/issues/6444
|
||||
if f.opt.Region == regionCN {
|
||||
@@ -1556,10 +1633,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
if f.driveType == driveTypePersonal {
|
||||
return hash.Set(hash.SHA1)
|
||||
}
|
||||
return hash.Set(QuickXorHashType)
|
||||
return hash.Set(f.hashType)
|
||||
}
|
||||
|
||||
// PublicLink returns a link for downloading without account.
|
||||
@@ -1674,6 +1748,10 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
token := make(chan struct{}, f.ci.Checkers)
|
||||
var wg sync.WaitGroup
|
||||
err := walk.Walk(ctx, f, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
|
||||
if err != nil {
|
||||
fs.Errorf(f, "Failed to list %q: %v", path, err)
|
||||
return nil
|
||||
}
|
||||
err = entries.ForObjectError(func(obj fs.Object) error {
|
||||
o, ok := obj.(*Object)
|
||||
if !ok {
|
||||
@@ -1768,14 +1846,8 @@ func (o *Object) rootPath() string {
|
||||
|
||||
// Hash returns the SHA-1 of an object returning a lowercase hex string
|
||||
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
if o.fs.driveType == driveTypePersonal {
|
||||
if t == hash.SHA1 {
|
||||
return o.sha1, nil
|
||||
}
|
||||
} else {
|
||||
if t == QuickXorHashType {
|
||||
return o.quickxorhash, nil
|
||||
}
|
||||
if t == o.fs.hashType {
|
||||
return o.hash, nil
|
||||
}
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
@@ -1806,16 +1878,23 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
|
||||
file := info.GetFile()
|
||||
if file != nil {
|
||||
o.mimeType = file.MimeType
|
||||
if file.Hashes.Sha1Hash != "" {
|
||||
o.sha1 = strings.ToLower(file.Hashes.Sha1Hash)
|
||||
}
|
||||
if file.Hashes.QuickXorHash != "" {
|
||||
h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
|
||||
if err != nil {
|
||||
fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
|
||||
} else {
|
||||
o.quickxorhash = hex.EncodeToString(h)
|
||||
o.hash = ""
|
||||
switch o.fs.hashType {
|
||||
case QuickXorHashType:
|
||||
if file.Hashes.QuickXorHash != "" {
|
||||
h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
|
||||
if err != nil {
|
||||
fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
|
||||
} else {
|
||||
o.hash = hex.EncodeToString(h)
|
||||
}
|
||||
}
|
||||
case hash.SHA1:
|
||||
o.hash = strings.ToLower(file.Hashes.Sha1Hash)
|
||||
case hash.SHA256:
|
||||
o.hash = strings.ToLower(file.Hashes.Sha256Hash)
|
||||
case hash.CRC32:
|
||||
o.hash = strings.ToLower(file.Hashes.Crc32Hash)
|
||||
}
|
||||
}
|
||||
fileSystemInfo := info.GetFileSystemInfo()
|
||||
@@ -1911,12 +1990,20 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
var resp *http.Response
|
||||
opts := o.fs.newOptsCall(o.id, "GET", "/content")
|
||||
opts.Options = options
|
||||
if o.fs.opt.AVOverride {
|
||||
opts.Parameters = url.Values{"AVOverride": {"1"}}
|
||||
}
|
||||
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
if resp != nil {
|
||||
if virus := resp.Header.Get("X-Virus-Infected"); virus != "" {
|
||||
err = fmt.Errorf("server reports this file is infected with a virus - use --onedrive-av-override to download anyway: %s: %w", virus, err)
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
||||
@@ -61,7 +61,7 @@ func New() hash.Hash {
|
||||
func (q *quickXorHash) Write(p []byte) (n int, err error) {
|
||||
var i int
|
||||
// fill last remain
|
||||
lastRemain := int(q.size) % dataSize
|
||||
lastRemain := q.size % dataSize
|
||||
if lastRemain != 0 {
|
||||
i += xorBytes(q.data[lastRemain:], p)
|
||||
}
|
||||
|
||||
@@ -42,9 +42,10 @@ func init() {
|
||||
Description: "OpenDrive",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "username",
|
||||
Help: "Username.",
|
||||
Required: true,
|
||||
Name: "username",
|
||||
Help: "Username.",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "password",
|
||||
Help: "Password.",
|
||||
|
||||
@@ -92,14 +92,16 @@ func newOptions() []fs.Option {
|
||||
Help: noAuthHelpText,
|
||||
}},
|
||||
}, {
|
||||
Name: "namespace",
|
||||
Help: "Object storage namespace",
|
||||
Required: true,
|
||||
Name: "namespace",
|
||||
Help: "Object storage namespace",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "compartment",
|
||||
Help: "Object storage compartment OCID",
|
||||
Provider: "!no_auth",
|
||||
Required: true,
|
||||
Name: "compartment",
|
||||
Help: "Object storage compartment OCID",
|
||||
Provider: "!no_auth",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Object storage Region",
|
||||
@@ -289,7 +291,7 @@ Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/using
|
||||
}},
|
||||
}, {
|
||||
Name: "sse_kms_key_id",
|
||||
Help: `if using using your own master key in vault, this header specifies the
|
||||
Help: `if using your own master key in vault, this header specifies the
|
||||
OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call
|
||||
the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key.
|
||||
Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.`,
|
||||
|
||||
@@ -589,12 +589,7 @@ func (f *Fs) cleanUpBucket(ctx context.Context, bucket string, maxAge time.Durat
|
||||
if operations.SkipDestructive(ctx, what, "remove pending upload") {
|
||||
continue
|
||||
}
|
||||
ignoreErr := f.abortMultiPartUpload(ctx, *upload.Bucket, *upload.Object, *upload.UploadId)
|
||||
if ignoreErr != nil {
|
||||
// fs.Debugf(f, "ignoring error %s", ignoreErr)
|
||||
}
|
||||
} else {
|
||||
// fs.Debugf(f, "ignoring %s", what)
|
||||
_ = f.abortMultiPartUpload(ctx, *upload.Bucket, *upload.Object, *upload.UploadId)
|
||||
}
|
||||
} else {
|
||||
fs.Infof(f, "MultipartUpload doesn't have sufficient details to abort.")
|
||||
|
||||
@@ -110,10 +110,11 @@ func init() {
|
||||
encoder.EncodeBackSlash |
|
||||
encoder.EncodeInvalidUtf8),
|
||||
}, {
|
||||
Name: "root_folder_id",
|
||||
Help: "Fill in for rclone to use a non root folder as its starting point.",
|
||||
Default: "d0",
|
||||
Advanced: true,
|
||||
Name: "root_folder_id",
|
||||
Help: "Fill in for rclone to use a non root folder as its starting point.",
|
||||
Default: "d0",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "hostname",
|
||||
Help: `Hostname to connect to.
|
||||
@@ -138,7 +139,8 @@ with rclone authorize.
|
||||
This is only required when you want to use the cleanup command. Due to a bug
|
||||
in the pcloud API the required API does not support OAuth authentication so
|
||||
we have to rely on user password authentication for it.`,
|
||||
Advanced: true,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "password",
|
||||
Help: "Your pcloud password.",
|
||||
|
||||
536
backend/pikpak/api/types.go
Normal file
@@ -0,0 +1,536 @@
|
||||
// Package api has type definitions for pikpak
|
||||
//
|
||||
// Manually obtained from the API responses using Browse Dev. Tool and https://mholt.github.io/json-to-go/
|
||||
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// "2022-09-17T14:31:06.056+08:00"
|
||||
timeFormat = `"` + time.RFC3339 + `"`
|
||||
)
|
||||
|
||||
// Time represents date and time information for the pikpak API, by using RFC3339
|
||||
type Time time.Time
|
||||
|
||||
// MarshalJSON turns a Time into JSON (in UTC)
|
||||
func (t *Time) MarshalJSON() (out []byte, err error) {
|
||||
timeString := (*time.Time)(t).Format(timeFormat)
|
||||
return []byte(timeString), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON turns JSON into a Time
|
||||
func (t *Time) UnmarshalJSON(data []byte) error {
|
||||
if string(data) == "null" || string(data) == `""` {
|
||||
return nil
|
||||
}
|
||||
newT, err := time.Parse(timeFormat, string(data))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*t = Time(newT)
|
||||
return nil
|
||||
}
|
||||
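The quoted timeFormat above makes these methods round-trip RFC3339 strings straight from the raw JSON bytes. A minimal sketch of that round trip (struct and variable names are illustrative, and the standard encoding/json, fmt and time imports are assumed):

var meta struct {
	Created Time `json:"created_time"`
}
// the raw bytes still carry their quotes, which the quoted layout expects
_ = json.Unmarshal([]byte(`{"created_time":"2022-09-17T14:31:06.056+08:00"}`), &meta)
fmt.Println(time.Time(meta.Created).UTC()) // 2022-09-17 06:31:06.056 +0000 UTC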
|
||||
// Types of things in Item
|
||||
const (
|
||||
KindOfFolder = "drive#folder"
|
||||
KindOfFile = "drive#file"
|
||||
KindOfFileList = "drive#fileList"
|
||||
KindOfResumable = "drive#resumable"
|
||||
KindOfForm = "drive#form"
|
||||
ThumbnailSizeS = "SIZE_SMALL"
|
||||
ThumbnailSizeM = "SIZE_MEDIUM"
|
||||
ThumbnailSizeL = "SIZE_LARGE"
|
||||
PhaseTypeComplete = "PHASE_TYPE_COMPLETE"
|
||||
PhaseTypeRunning = "PHASE_TYPE_RUNNING"
|
||||
PhaseTypeError = "PHASE_TYPE_ERROR"
|
||||
PhaseTypePending = "PHASE_TYPE_PENDING"
|
||||
UploadTypeForm = "UPLOAD_TYPE_FORM"
|
||||
UploadTypeResumable = "UPLOAD_TYPE_RESUMABLE"
|
||||
ListLimit = 100
|
||||
)
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Error details api error from pikpak
|
||||
type Error struct {
|
||||
Reason string `json:"error"` // short description of the reason, e.g. "file_name_empty" "invalid_request"
|
||||
Code int `json:"error_code"`
|
||||
URL string `json:"error_url,omitempty"`
|
||||
Message string `json:"error_description,omitempty"`
|
||||
// can have either of `error_details` or `details`
|
||||
ErrorDetails []*ErrorDetails `json:"error_details,omitempty"`
|
||||
Details []*ErrorDetails `json:"details,omitempty"`
|
||||
}
|
||||
|
||||
// ErrorDetails contains further details of api error
|
||||
type ErrorDetails struct {
|
||||
Type string `json:"@type,omitempty"`
|
||||
Reason string `json:"reason,omitempty"`
|
||||
Domain string `json:"domain,omitempty"`
|
||||
Metadata struct {
|
||||
} `json:"metadata,omitempty"` // TODO: undiscovered yet
|
||||
Locale string `json:"locale,omitempty"` // e.g. "en"
|
||||
Message string `json:"message,omitempty"`
|
||||
StackEntries []interface{} `json:"stack_entries,omitempty"` // TODO: undiscovered yet
|
||||
Detail string `json:"detail,omitempty"`
|
||||
}
|
||||
|
||||
// Error returns a string for the error and satisfies the error interface
|
||||
func (e *Error) Error() string {
|
||||
out := fmt.Sprintf("Error %q (%d)", e.Reason, e.Code)
|
||||
if e.Message != "" {
|
||||
out += ": " + e.Message
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// Check Error satisfies the error interface
|
||||
var _ error = (*Error)(nil)
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Filters contains parameters for filters when listing.
|
||||
//
|
||||
// possible operators
|
||||
// * in: a list of comma-separated string
|
||||
// * eq: "true" or "false"
|
||||
// * gt or lt: time format string, e.g. "2023-01-28T10:56:49.757+08:00"
|
||||
type Filters struct {
|
||||
Phase map[string]string `json:"phase,omitempty"` // "in" or "eq"
|
||||
Trashed map[string]bool `json:"trashed,omitempty"` // "eq"
|
||||
Kind map[string]string `json:"kind,omitempty"` // "eq"
|
||||
Starred map[string]bool `json:"starred,omitempty"` // "eq"
|
||||
ModifiedTime map[string]string `json:"modified_time,omitempty"` // "gt" or "lt"
|
||||
}
|
||||
|
||||
// Set sets filter values using field name, operator and corresponding value
|
||||
func (f *Filters) Set(field, operator, value string) {
|
||||
if value == "" {
|
||||
// UNSET for empty values
|
||||
return
|
||||
}
|
||||
r := reflect.ValueOf(f)
|
||||
fd := reflect.Indirect(r).FieldByName(field)
|
||||
if v, err := strconv.ParseBool(value); err == nil {
|
||||
fd.Set(reflect.ValueOf(map[string]bool{operator: v}))
|
||||
} else {
|
||||
fd.Set(reflect.ValueOf(map[string]string{operator: value}))
|
||||
}
|
||||
}
|
||||
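Set relies on reflection to pick the right map type, so boolean-looking values land in map[string]bool and everything else stays a string. A rough usage sketch (the field names are the struct fields above; the values are made up for the example):

filters := &Filters{}
filters.Set("Phase", "eq", PhaseTypeComplete)                      // non-bool value -> map[string]string
filters.Set("Trashed", "eq", "false")                              // parses as a bool -> map[string]bool
filters.Set("ModifiedTime", "gt", "2023-01-28T10:56:49.757+08:00") // time strings stay strings
filters.Set("Starred", "eq", "")                                   // empty value leaves the field unset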
|
||||
// ------------------------------------------------------------
|
||||
// Common Elements
|
||||
|
||||
// Link contains a download URL for opening files
|
||||
type Link struct {
|
||||
URL string `json:"url"`
|
||||
Token string `json:"token"`
|
||||
Expire Time `json:"expire"`
|
||||
Type string `json:"type,omitempty"`
|
||||
}
|
||||
|
||||
// Valid reports whether l is non-nil, has an URL, and is not expired.
|
||||
func (l *Link) Valid() bool {
|
||||
return l != nil && l.URL != "" && time.Now().Add(10*time.Second).Before(time.Time(l.Expire))
|
||||
}
|
||||
|
||||
// URL is a basic form of URL
|
||||
type URL struct {
|
||||
Kind string `json:"kind,omitempty"` // e.g. "upload#url"
|
||||
URL string `json:"url,omitempty"`
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
// Base Elements
|
||||
|
||||
// FileList contains a list of File elements
|
||||
type FileList struct {
|
||||
Kind string `json:"kind,omitempty"` // drive#fileList
|
||||
Files []*File `json:"files,omitempty"`
|
||||
NextPageToken string `json:"next_page_token"`
|
||||
Version string `json:"version,omitempty"`
|
||||
VersionOutdated bool `json:"version_outdated,omitempty"`
|
||||
}
|
||||
|
||||
// File is a basic element representing a single file object
|
||||
//
|
||||
// There are two types of download links,
|
||||
// 1) one from File.WebContentLink or File.Links.ApplicationOctetStream.URL and
|
||||
// 2) the other from File.Medias[].Link.URL.
|
||||
// Empirically, 2) is less restrictive to multiple concurrent range-requests
|
||||
// for a single file, i.e. supports for higher `--multi-thread-streams=N`.
|
||||
// However, it is not generally applicable as it is only for media.
|
||||
type File struct {
|
||||
Apps []*FileApp `json:"apps,omitempty"`
|
||||
Audit *FileAudit `json:"audit,omitempty"`
|
||||
Collection string `json:"collection,omitempty"` // TODO
|
||||
CreatedTime Time `json:"created_time,omitempty"`
|
||||
DeleteTime Time `json:"delete_time,omitempty"`
|
||||
FileCategory string `json:"file_category,omitempty"`
|
||||
FileExtension string `json:"file_extension,omitempty"`
|
||||
FolderType string `json:"folder_type,omitempty"`
|
||||
Hash string `json:"hash,omitempty"` // sha1 but NOT a valid file hash. looks like a torrent hash
|
||||
IconLink string `json:"icon_link,omitempty"`
|
||||
ID string `json:"id,omitempty"`
|
||||
Kind string `json:"kind,omitempty"` // "drive#file"
|
||||
Links *FileLinks `json:"links,omitempty"`
|
||||
Md5Checksum string `json:"md5_checksum,omitempty"`
|
||||
Medias []*Media `json:"medias,omitempty"`
|
||||
MimeType string `json:"mime_type,omitempty"`
|
||||
ModifiedTime Time `json:"modified_time,omitempty"` // updated when renamed or moved
|
||||
Name string `json:"name,omitempty"`
|
||||
OriginalFileIndex int `json:"original_file_index,omitempty"` // TODO
|
||||
OriginalURL string `json:"original_url,omitempty"`
|
||||
Params *FileParams `json:"params,omitempty"`
|
||||
ParentID string `json:"parent_id,omitempty"`
|
||||
Phase string `json:"phase,omitempty"`
|
||||
Revision int `json:"revision,omitempty,string"`
|
||||
Size int64 `json:"size,omitempty,string"`
|
||||
SortName string `json:"sort_name,omitempty"`
|
||||
Space string `json:"space,omitempty"`
|
||||
SpellName []interface{} `json:"spell_name,omitempty"` // TODO maybe list of something?
|
||||
Starred bool `json:"starred,omitempty"`
|
||||
ThumbnailLink string `json:"thumbnail_link,omitempty"`
|
||||
Trashed bool `json:"trashed,omitempty"`
|
||||
UserID string `json:"user_id,omitempty"`
|
||||
UserModifiedTime Time `json:"user_modified_time,omitempty"`
|
||||
WebContentLink string `json:"web_content_link,omitempty"`
|
||||
Writable bool `json:"writable,omitempty"`
|
||||
}
|
||||
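As the File doc comment above notes, a media link usually tolerates more concurrent range requests than web_content_link but only exists for media files. A hedged sketch of a selection policy based on that (illustrative only, not necessarily what the backend itself does):

downloadURL := file.WebContentLink
for _, media := range file.Medias {
	if media.IsOrigin && media.Link.Valid() {
		// prefer the original media link while it is still valid
		downloadURL = media.Link.URL
		break
	}
}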
|
||||
// FileLinks includes links to file at backend
|
||||
type FileLinks struct {
|
||||
ApplicationOctetStream *Link `json:"application/octet-stream,omitempty"`
|
||||
}
|
||||
|
||||
// FileAudit contains audit information for the file
|
||||
type FileAudit struct {
|
||||
Status string `json:"status,omitempty"` // "STATUS_OK"
|
||||
Message string `json:"message,omitempty"`
|
||||
Title string `json:"title,omitempty"`
|
||||
}
|
||||
|
||||
// Media contains info about supported version of media, e.g. original, transcoded, etc
|
||||
type Media struct {
|
||||
MediaID string `json:"media_id,omitempty"`
|
||||
MediaName string `json:"media_name,omitempty"`
|
||||
Video struct {
|
||||
Height int `json:"height,omitempty"`
|
||||
Width int `json:"width,omitempty"`
|
||||
Duration int64 `json:"duration,omitempty"`
|
||||
BitRate int `json:"bit_rate,omitempty"`
|
||||
FrameRate int `json:"frame_rate,omitempty"`
|
||||
VideoCodec string `json:"video_codec,omitempty"` // "h264", "hevc"
|
||||
AudioCodec string `json:"audio_codec,omitempty"` // "pcm_bluray", "aac"
|
||||
VideoType string `json:"video_type,omitempty"` // "mpegts"
|
||||
HdrType string `json:"hdr_type,omitempty"`
|
||||
} `json:"video,omitempty"`
|
||||
Link *Link `json:"link,omitempty"`
|
||||
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
|
||||
VipTypes []interface{} `json:"vip_types,omitempty"` // TODO maybe list of something?
|
||||
RedirectLink string `json:"redirect_link,omitempty"`
|
||||
IconLink string `json:"icon_link,omitempty"`
|
||||
IsDefault bool `json:"is_default,omitempty"`
|
||||
Priority int `json:"priority,omitempty"`
|
||||
IsOrigin bool `json:"is_origin,omitempty"`
|
||||
ResolutionName string `json:"resolution_name,omitempty"`
|
||||
IsVisible bool `json:"is_visible,omitempty"`
|
||||
Category string `json:"category,omitempty"`
|
||||
}
|
||||
|
||||
// FileParams includes parameters for instant open
|
||||
type FileParams struct {
|
||||
Duration int64 `json:"duration,omitempty,string"` // in seconds
|
||||
Height int `json:"height,omitempty,string"`
|
||||
Platform string `json:"platform,omitempty"` // "Upload"
|
||||
PlatformIcon string `json:"platform_icon,omitempty"`
|
||||
URL string `json:"url,omitempty"`
|
||||
Width int `json:"width,omitempty,string"`
|
||||
}
|
||||
|
||||
// FileApp includes parameters for instant open
|
||||
type FileApp struct {
|
||||
ID string `json:"id,omitempty"` // "decompress" for rar files
|
||||
Name string `json:"name,omitempty"` // "decompress" for rar files
|
||||
Access []interface{} `json:"access,omitempty"`
|
||||
Link string `json:"link,omitempty"` // "https://mypikpak.com/drive/decompression/{File.Id}?gcid={File.Hash}\u0026wv-style=topbar%3Ahide"
|
||||
RedirectLink string `json:"redirect_link,omitempty"`
|
||||
VipTypes []interface{} `json:"vip_types,omitempty"`
|
||||
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
|
||||
IconLink string `json:"icon_link,omitempty"`
|
||||
IsDefault bool `json:"is_default,omitempty"`
|
||||
Params struct {
|
||||
} `json:"params,omitempty"` // TODO
|
||||
CategoryIds []interface{} `json:"category_ids,omitempty"`
|
||||
AdSceneType int `json:"ad_scene_type,omitempty"`
|
||||
Space string `json:"space,omitempty"`
|
||||
Links struct {
|
||||
} `json:"links,omitempty"` // TODO
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// TaskList contains a list of Task elements
|
||||
type TaskList struct {
|
||||
Tasks []*Task `json:"tasks,omitempty"` // "drive#task"
|
||||
NextPageToken string `json:"next_page_token"`
|
||||
ExpiresIn int `json:"expires_in,omitempty"`
|
||||
}
|
||||
|
||||
// Task is a basic element representing a single task such as offline download and upload
|
||||
type Task struct {
|
||||
Kind string `json:"kind,omitempty"` // "drive#task"
|
||||
ID string `json:"id,omitempty"` // task id?
|
||||
Name string `json:"name,omitempty"` // torrent name?
|
||||
Type string `json:"type,omitempty"` // "offline"
|
||||
UserID string `json:"user_id,omitempty"`
|
||||
Statuses []interface{} `json:"statuses,omitempty"` // TODO
|
||||
StatusSize int `json:"status_size,omitempty"` // TODO
|
||||
Params *TaskParams `json:"params,omitempty"` // TODO
|
||||
FileID string `json:"file_id,omitempty"`
|
||||
FileName string `json:"file_name,omitempty"`
|
||||
FileSize string `json:"file_size,omitempty"`
|
||||
Message string `json:"message,omitempty"` // e.g. "Saving"
|
||||
CreatedTime Time `json:"created_time,omitempty"`
|
||||
UpdatedTime Time `json:"updated_time,omitempty"`
|
||||
ThirdTaskID string `json:"third_task_id,omitempty"` // TODO
|
||||
Phase string `json:"phase,omitempty"` // e.g. "PHASE_TYPE_RUNNING"
|
||||
Progress int `json:"progress,omitempty"`
|
||||
IconLink string `json:"icon_link,omitempty"`
|
||||
Callback string `json:"callback,omitempty"`
|
||||
ReferenceResource interface{} `json:"reference_resource,omitempty"` // TODO
|
||||
Space string `json:"space,omitempty"`
|
||||
}
|
||||
|
||||
// TaskParams includes parameters informing status of Task
|
||||
type TaskParams struct {
|
||||
Age string `json:"age,omitempty"`
|
||||
PredictSpeed string `json:"predict_speed,omitempty"`
|
||||
PredictType string `json:"predict_type,omitempty"`
|
||||
URL string `json:"url,omitempty"`
|
||||
}
|
||||
|
||||
// Form contains parameters for upload by multipart/form-data
|
||||
type Form struct {
|
||||
Headers struct{} `json:"headers"`
|
||||
Kind string `json:"kind"` // "drive#form"
|
||||
Method string `json:"method"` // "POST"
|
||||
MultiParts struct {
|
||||
OSSAccessKeyID string `json:"OSSAccessKeyId"`
|
||||
Signature string `json:"Signature"`
|
||||
Callback string `json:"callback"`
|
||||
Key string `json:"key"`
|
||||
Policy string `json:"policy"`
|
||||
XUserData string `json:"x:user_data"`
|
||||
} `json:"multi_parts"`
|
||||
URL string `json:"url"`
|
||||
}
|
||||
|
||||
// Resumable contains parameters for upload by resumable
|
||||
type Resumable struct {
|
||||
Kind string `json:"kind,omitempty"` // "drive#resumable"
|
||||
Provider string `json:"provider,omitempty"` // e.g. "PROVIDER_ALIYUN"
|
||||
Params *ResumableParams `json:"params,omitempty"`
|
||||
}
|
||||
|
||||
// ResumableParams specifies resumable parameters
|
||||
type ResumableParams struct {
|
||||
AccessKeyID string `json:"access_key_id,omitempty"`
|
||||
AccessKeySecret string `json:"access_key_secret,omitempty"`
|
||||
Bucket string `json:"bucket,omitempty"`
|
||||
Endpoint string `json:"endpoint,omitempty"`
|
||||
Expiration Time `json:"expiration,omitempty"`
|
||||
Key string `json:"key,omitempty"`
|
||||
SecurityToken string `json:"security_token,omitempty"`
|
||||
}
|
||||
|
||||
// FileInArchive is a basic element in archive
|
||||
type FileInArchive struct {
|
||||
Index int `json:"index,omitempty"`
|
||||
Filename string `json:"filename,omitempty"`
|
||||
Filesize string `json:"filesize,omitempty"`
|
||||
MimeType string `json:"mime_type,omitempty"`
|
||||
Gcid string `json:"gcid,omitempty"`
|
||||
Kind string `json:"kind,omitempty"`
|
||||
IconLink string `json:"icon_link,omitempty"`
|
||||
Path string `json:"path,omitempty"`
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// NewFile is a response to RequestNewFile
|
||||
type NewFile struct {
|
||||
File *File `json:"file,omitempty"`
|
||||
Form *Form `json:"form,omitempty"`
|
||||
Resumable *Resumable `json:"resumable,omitempty"`
|
||||
Task *Task `json:"task,omitempty"` // null in this case
|
||||
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_FORM" or "UPLOAD_TYPE_RESUMABLE"
|
||||
}
|
||||
|
||||
// NewTask is a response to RequestNewTask
|
||||
type NewTask struct {
|
||||
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_URL"
|
||||
File *File `json:"file,omitempty"` // null in this case
|
||||
Task *Task `json:"task,omitempty"`
|
||||
URL *URL `json:"url,omitempty"` // {"kind": "upload#url"}
|
||||
}
|
||||
|
||||
// About informs drive status
|
||||
type About struct {
|
||||
Kind string `json:"kind,omitempty"` // "drive#about"
|
||||
Quota *Quota `json:"quota,omitempty"`
|
||||
ExpiresAt string `json:"expires_at,omitempty"`
|
||||
Quotas struct {
|
||||
} `json:"quotas,omitempty"` // maybe []*Quota?
|
||||
}
|
||||
|
||||
// Quota informs drive quota
|
||||
type Quota struct {
|
||||
Kind string `json:"kind,omitempty"` // "drive#quota"
|
||||
Limit int64 `json:"limit,omitempty,string"` // limit in bytes
|
||||
Usage int64 `json:"usage,omitempty,string"` // bytes in use
|
||||
UsageInTrash int64 `json:"usage_in_trash,omitempty,string"` // bytes in trash but this seems not working
|
||||
PlayTimesLimit string `json:"play_times_limit,omitempty"` // maybe in seconds
|
||||
PlayTimesUsage string `json:"play_times_usage,omitempty"` // maybe in seconds
|
||||
}
|
||||
|
||||
// Share is a response to RequestShare
|
||||
//
|
||||
// used in PublicLink()
|
||||
type Share struct {
|
||||
ShareID string `json:"share_id,omitempty"`
|
||||
ShareURL string `json:"share_url,omitempty"`
|
||||
PassCode string `json:"pass_code,omitempty"`
|
||||
ShareText string `json:"share_text,omitempty"`
|
||||
}
|
||||
|
||||
// User contains user account information
|
||||
//
|
||||
// GET https://user.mypikpak.com/v1/user/me
|
||||
type User struct {
|
||||
Sub string `json:"sub,omitempty"` // userid for internal use
|
||||
Name string `json:"name,omitempty"` // Username
|
||||
Picture string `json:"picture,omitempty"` // URL to Avatar image
|
||||
Email string `json:"email,omitempty"` // redacted email address
|
||||
Providers *[]UserProvider `json:"providers,omitempty"` // OAuth provider
|
||||
PhoneNumber string `json:"phone_number,omitempty"`
|
||||
Password string `json:"password,omitempty"` // "SET" if configured
|
||||
Status string `json:"status,omitempty"` // "ACTIVE"
|
||||
CreatedAt Time `json:"created_at,omitempty"`
|
||||
PasswordUpdatedAt Time `json:"password_updated_at,omitempty"`
|
||||
}
|
||||
|
||||
// UserProvider details third-party authentication
|
||||
type UserProvider struct {
|
||||
ID string `json:"id,omitempty"` // e.g. "google.com"
|
||||
ProviderUserID string `json:"provider_user_id,omitempty"`
|
||||
Name string `json:"name,omitempty"` // username
|
||||
}
|
||||
|
||||
// VIP includes subscription details about premium account
|
||||
//
|
||||
// GET https://api-drive.mypikpak.com/drive/v1/privilege/vip
|
||||
type VIP struct {
|
||||
Result string `json:"result,omitempty"` // "ACCEPTED"
|
||||
Message string `json:"message,omitempty"`
|
||||
RedirectURI string `json:"redirect_uri,omitempty"`
|
||||
Data struct {
|
||||
Expire Time `json:"expire,omitempty"`
|
||||
Status string `json:"status,omitempty"` // "invalid" or "ok"
|
||||
Type string `json:"type,omitempty"` // "novip" or "platinum"
|
||||
UserID string `json:"user_id,omitempty"` // same as User.Sub
|
||||
} `json:"data,omitempty"`
|
||||
}
|
||||
|
||||
// DecompressResult is a response to RequestDecompress
|
||||
type DecompressResult struct {
|
||||
Status string `json:"status,omitempty"` // "OK"
|
||||
StatusText string `json:"status_text,omitempty"`
|
||||
TaskID string `json:"task_id,omitempty"` // same as File.Id
|
||||
FilesNum int `json:"files_num,omitempty"` // number of files in archive
|
||||
RedirectLink string `json:"redirect_link,omitempty"`
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// RequestShare is to request for file share
|
||||
type RequestShare struct {
|
||||
FileIds []string `json:"file_ids,omitempty"`
|
||||
ShareTo string `json:"share_to,omitempty"` // "publiclink",
|
||||
ExpirationDays int `json:"expiration_days,omitempty"` // -1 = 'forever'
|
||||
PassCodeOption string `json:"pass_code_option,omitempty"` // "NOT_REQUIRED"
|
||||
}
|
||||
|
||||
// RequestBatch is to request for batch actions
|
||||
type RequestBatch struct {
|
||||
Ids []string `json:"ids,omitempty"`
|
||||
To map[string]string `json:"to,omitempty"`
|
||||
}
|
||||
|
||||
// RequestNewFile is to request for creating a new `drive#folder` or `drive#file`
|
||||
type RequestNewFile struct {
|
||||
// always required
|
||||
Kind string `json:"kind"` // "drive#folder" or "drive#file"
|
||||
Name string `json:"name"`
|
||||
ParentID string `json:"parent_id"`
|
||||
FolderType string `json:"folder_type"`
|
||||
// only when uploading a new file
|
||||
Hash string `json:"hash,omitempty"` // sha1sum
|
||||
Resumable map[string]string `json:"resumable,omitempty"` // {"provider": "PROVIDER_ALIYUN"}
|
||||
Size int64 `json:"size,omitempty"`
|
||||
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_FORM" or "UPLOAD_TYPE_RESUMABLE"
|
||||
}
|
||||
|
||||
// RequestNewTask is to request for creating a new task like offline downloads
|
||||
//
|
||||
// Name and ParentID can be left empty.
|
||||
type RequestNewTask struct {
|
||||
Kind string `json:"kind,omitempty"` // "drive#file"
|
||||
Name string `json:"name,omitempty"`
|
||||
ParentID string `json:"parent_id,omitempty"`
|
||||
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_URL"
|
||||
URL *URL `json:"url,omitempty"` // {"url": downloadUrl}
|
||||
FolderType string `json:"folder_type,omitempty"` // "" if parent_id else "DOWNLOAD"
|
||||
}
|
||||
|
||||
// RequestDecompress is to request for decompress of archive files
|
||||
type RequestDecompress struct {
|
||||
Gcid string `json:"gcid,omitempty"` // same as File.Hash
|
||||
Password string `json:"password,omitempty"` // ""
|
||||
FileID string `json:"file_id,omitempty"`
|
||||
Files []*FileInArchive `json:"files,omitempty"` // can request selected files to be decompressed
|
||||
DefaultParent bool `json:"default_parent,omitempty"`
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// NOT implemented YET
|
||||
|
||||
// RequestArchiveFileList is to request for a list of files in archive
|
||||
//
|
||||
// POST https://api-drive.mypikpak.com/decompress/v1/list
|
||||
type RequestArchiveFileList struct {
|
||||
Gcid string `json:"gcid,omitempty"` // same as api.File.Hash
|
||||
Path string `json:"path,omitempty"` // "" by default
|
||||
Password string `json:"password,omitempty"` // "" by default
|
||||
FileID string `json:"file_id,omitempty"`
|
||||
}
|
||||
|
||||
// ArchiveFileList is a response to RequestArchiveFileList
|
||||
type ArchiveFileList struct {
|
||||
Status string `json:"status,omitempty"` // "OK"
|
||||
StatusText string `json:"status_text,omitempty"` // ""
|
||||
TaskID string `json:"task_id,omitempty"` // ""
|
||||
CurrentPath string `json:"current_path,omitempty"` // ""
|
||||
Title string `json:"title,omitempty"`
|
||||
FileSize int64 `json:"file_size,omitempty"`
|
||||
Gcid string `json:"gcid,omitempty"` // same as File.Hash
|
||||
Files []*FileInArchive `json:"files,omitempty"`
|
||||
}
|
||||
253
backend/pikpak/helper.go
Normal file
@@ -0,0 +1,253 @@
|
||||
package pikpak
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/rclone/rclone/backend/pikpak/api"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
// Globals
|
||||
const (
|
||||
cachePrefix = "rclone-pikpak-sha1sum-"
|
||||
)
|
||||
|
||||
// requestDecompress requests decompress of compressed files
|
||||
func (f *Fs) requestDecompress(ctx context.Context, file *api.File, password string) (info *api.DecompressResult, err error) {
|
||||
req := &api.RequestDecompress{
|
||||
Gcid: file.Hash,
|
||||
Password: password,
|
||||
FileID: file.ID,
|
||||
Files: []*api.FileInArchive{},
|
||||
DefaultParent: true,
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/decompress/v1/decompress",
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// getUserInfo gets UserInfo from API
|
||||
func (f *Fs) getUserInfo(ctx context.Context) (info *api.User, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: "https://user.mypikpak.com/v1/user/me",
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get userinfo: %w", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// getVIPInfo gets VIPInfo from API
|
||||
func (f *Fs) getVIPInfo(ctx context.Context) (info *api.VIP, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: "https://api-drive.mypikpak.com/drive/v1/privilege/vip",
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get vip info: %w", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// requestBatchAction requests batch actions to API
|
||||
//
|
||||
// action can be one of batch{Copy,Delete,Trash,Untrash}
|
||||
func (f *Fs) requestBatchAction(ctx context.Context, action string, req *api.RequestBatch) (err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/drive/v1/files:" + action,
|
||||
NoResponse: true, // Only returns `{"task_id":""}`
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, &req, nil)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("batch action %q failed: %w", action, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
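For illustration, moving a file to the trash could be issued through this helper roughly like so (the file ID is a placeholder; batchTrash is one of the actions named in the comment above):

req := &api.RequestBatch{Ids: []string{"VNabc123"}} // placeholder file ID
if err := f.requestBatchAction(ctx, "batchTrash", req); err != nil {
	return err // already wrapped as `batch action "batchTrash" failed: ...`
}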
|
||||
// requestNewTask requests a new api.NewTask and returns api.Task
|
||||
func (f *Fs) requestNewTask(ctx context.Context, req *api.RequestNewTask) (info *api.Task, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/drive/v1/files",
|
||||
}
|
||||
var newTask api.NewTask
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, &req, &newTask)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newTask.Task, nil
|
||||
}
|
||||
|
||||
// requestNewFile requests a new api.NewFile and returns api.File
|
||||
func (f *Fs) requestNewFile(ctx context.Context, req *api.RequestNewFile) (info *api.NewFile, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/drive/v1/files",
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// getFile gets api.File from API for the ID passed
|
||||
// and returns rich information containing additional fields below
|
||||
// * web_content_link
|
||||
// * thumbnail_link
|
||||
// * links
|
||||
// * medias
|
||||
func (f *Fs) getFile(ctx context.Context, ID string) (info *api.File, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/drive/v1/files/" + ID,
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
|
||||
if err == nil && info.Phase != api.PhaseTypeComplete {
|
||||
// could be pending right after file is created/uploaded.
|
||||
return true, errors.New("not PHASE_TYPE_COMPLETE")
|
||||
}
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// patchFile updates attributes of the file by ID
|
||||
//
|
||||
// currently known patchable fields are
|
||||
// * name
|
||||
func (f *Fs) patchFile(ctx context.Context, ID string, req *api.File) (info *api.File, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "PATCH",
|
||||
Path: "/drive/v1/files/" + ID,
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
return
|
||||
}
|
||||
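Since name is the only field noted as patchable, a rename boils down to a single call. A minimal sketch (fileID and newName are placeholders):

updated, err := f.patchFile(ctx, fileID, &api.File{Name: newName})
if err != nil {
	return fmt.Errorf("rename failed: %w", err)
}
_ = updated // updated.Name now carries the new name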
|
||||
// getAbout gets drive#quota information from server
|
||||
func (f *Fs) getAbout(ctx context.Context) (info *api.About, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/drive/v1/about",
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// requestShare returns information about shareable links
|
||||
func (f *Fs) requestShare(ctx context.Context, req *api.RequestShare) (info *api.Share, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/drive/v1/share",
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Read the sha1 of in returning a reader which will read the same contents
|
||||
//
|
||||
// The cleanup function should be called when out is finished with
|
||||
// regardless of whether this function returned an error or not.
|
||||
func readSHA1(in io.Reader, size, threshold int64) (sha1sum string, out io.Reader, cleanup func(), err error) {
|
||||
// we need an SHA1
|
||||
hash := sha1.New()
|
||||
// use the teeReader to write to the local file AND calculate the SHA1 while doing so
|
||||
teeReader := io.TeeReader(in, hash)
|
||||
|
||||
// nothing to clean up by default
|
||||
cleanup = func() {}
|
||||
|
||||
// don't cache small files on disk to reduce wear of the disk
|
||||
if size > threshold {
|
||||
var tempFile *os.File
|
||||
|
||||
// create the cache file
|
||||
tempFile, err = os.CreateTemp("", cachePrefix)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
_ = os.Remove(tempFile.Name()) // Delete the file - may not work on Windows
|
||||
|
||||
// clean up the file after we are done downloading
|
||||
cleanup = func() {
|
||||
// the file should normally already be closed, but just to make sure
|
||||
_ = tempFile.Close()
|
||||
_ = os.Remove(tempFile.Name()) // delete the cache file after we are done - may be deleted already
|
||||
}
|
||||
|
||||
// copy the ENTIRE file to disc and calculate the SHA1 in the process
|
||||
if _, err = io.Copy(tempFile, teeReader); err != nil {
|
||||
return
|
||||
}
|
||||
// jump to the start of the local file so we can pass it along
|
||||
if _, err = tempFile.Seek(0, 0); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// replace the already read source with a reader of our cached file
|
||||
out = tempFile
|
||||
} else {
|
||||
// that's a small file, just read it into memory
|
||||
var inData []byte
|
||||
inData, err = io.ReadAll(teeReader)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// set the reader to our read memory block
|
||||
out = bytes.NewReader(inData)
|
||||
}
|
||||
return hex.EncodeToString(hash.Sum(nil)), out, cleanup, nil
|
||||
}
|
||||
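A hedged sketch of the intended call pattern for readSHA1 (variable names and the threshold value are placeholders, not the backend's actual option fields):

sha1sum, rd, cleanup, err := readSHA1(in, size, 10*1024*1024)
defer cleanup() // release the temp file or buffer even if an error occurred
if err != nil {
	return nil, err
}
// sha1sum is what RequestNewFile.Hash expects for the upload request,
// while rd replays the same bytes for the actual upload.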
1718
backend/pikpak/pikpak.go
Normal file
File diff suppressed because it is too large
17
backend/pikpak/pikpak_test.go
Normal file
@@ -0,0 +1,17 @@
|
||||
// Test PikPak filesystem interface
|
||||
package pikpak_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/backend/pikpak"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestPikPak:",
|
||||
NilObject: (*pikpak.Object)(nil),
|
||||
})
|
||||
}
|
||||
@@ -82,14 +82,15 @@ func init() {
|
||||
OAuth2Config: oauthConfig,
|
||||
})
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||
Name: "api_key",
|
||||
Help: `API Key.
|
||||
|
||||
This is not normally used - use oauth instead.
|
||||
`,
|
||||
Hide: fs.OptionHideBoth,
|
||||
Default: "",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Default: "",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -99,7 +100,7 @@ This is not normally used - use oauth instead.
|
||||
encoder.EncodeBackSlash |
|
||||
encoder.EncodeDoubleQuote |
|
||||
encoder.EncodeInvalidUtf8),
|
||||
}},
|
||||
}}...),
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
"github.com/rclone/rclone/lib/dircache"
|
||||
"github.com/rclone/rclone/lib/oauthutil"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
)
|
||||
|
||||
@@ -252,9 +253,12 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
// This will create a duplicate if we upload a new file without
|
||||
// checking to see if there is one already - use Put() for that.
|
||||
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
|
||||
return f.putUnchecked(ctx, in, src, src.Remote(), options...)
|
||||
}
|
||||
|
||||
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options ...fs.OpenOption) (o fs.Object, err error) {
|
||||
// defer log.Trace(f, "src=%+v", src)("o=%+v, err=%v", &o, &err)
|
||||
size := src.Size()
|
||||
remote := src.Remote()
|
||||
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -540,24 +544,59 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Objec
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
modTime := src.ModTime(ctx)
|
||||
var resp struct {
|
||||
File putio.File `json:"file"`
|
||||
}
|
||||
// For some unknown reason the API sometimes returns the name
|
||||
// already exists unless we upload to a temporary name and
|
||||
// rename
|
||||
//
|
||||
// {"error_id":null,"error_message":"Name already exist","error_type":"NAME_ALREADY_EXIST","error_uri":"http://api.put.io/v2/docs","extra":{},"status":"ERROR","status_code":400}
|
||||
suffix := "." + random.String(8)
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
params := url.Values{}
|
||||
params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10))
|
||||
params.Set("parent_id", directoryID)
|
||||
params.Set("name", f.opt.Enc.FromStandardName(leaf))
|
||||
params.Set("name", f.opt.Enc.FromStandardName(leaf+suffix))
|
||||
|
||||
req, err := f.client.NewRequest(ctx, "POST", "/v2/files/copy", strings.NewReader(params.Encode()))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||
// fs.Debugf(f, "copying file (%d) to parent_id: %s", srcObj.file.ID, directoryID)
|
||||
_, err = f.client.Do(req, nil)
|
||||
_, err = f.client.Do(req, &resp)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.NewObject(ctx, remote)
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
params := url.Values{}
|
||||
params.Set("file_id", strconv.FormatInt(resp.File.ID, 10))
|
||||
params.Set("name", f.opt.Enc.FromStandardName(leaf))
|
||||
|
||||
req, err := f.client.NewRequest(ctx, "POST", "/v2/files/rename", strings.NewReader(params.Encode()))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||
_, err = f.client.Do(req, &resp)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
o, err = f.newObjectWithInfo(ctx, remote, resp.File)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = o.SetModTime(ctx, modTime)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// Move src to this remote using server-side move operations.
|
||||
@@ -579,6 +618,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (o fs.Objec
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
modTime := src.ModTime(ctx)
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
params := url.Values{}
|
||||
params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10))
|
||||
@@ -596,7 +636,15 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (o fs.Objec
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.NewObject(ctx, remote)
|
||||
o, err = f.NewObject(ctx, remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = o.SetModTime(ctx, modTime)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||
|
||||
@@ -275,7 +275,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newObj, err := o.fs.PutUnchecked(ctx, in, src, options...)
|
||||
newObj, err := o.fs.putUnchecked(ctx, in, src, o.remote, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -67,7 +67,7 @@ func init() {
|
||||
NoOffline: true,
|
||||
})
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
Advanced: true,
|
||||
@@ -77,7 +77,7 @@ func init() {
|
||||
Default: (encoder.Display |
|
||||
encoder.EncodeBackSlash |
|
||||
encoder.EncodeInvalidUtf8),
|
||||
}},
|
||||
}}...),
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -49,11 +49,13 @@ func init() {
|
||||
Help: "Get QingStor credentials from the environment (env vars or IAM).",
|
||||
}},
|
||||
}, {
|
||||
Name: "access_key_id",
|
||||
Help: "QingStor Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
|
||||
Name: "access_key_id",
|
||||
Help: "QingStor Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "secret_access_key",
|
||||
Help: "QingStor Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
|
||||
Name: "secret_access_key",
|
||||
Help: "QingStor Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Enter an endpoint URL to connection QingStor API.\n\nLeave blank will use the default value \"https://qingstor.com:443\".",
|
||||
|
||||
513
backend/s3/s3.go
@@ -1,4 +1,4 @@
|
||||
// Package s3 provides an interface to Amazon S3 oject storage
|
||||
// Package s3 provides an interface to Amazon S3 object storage
|
||||
package s3
|
||||
|
||||
//go:generate go run gen_setfrom.go -o setfrom.go
|
||||
@@ -66,7 +66,7 @@ import (
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "s3",
|
||||
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi",
|
||||
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, China Mobile, Cloudflare, GCS, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Leviia, Liara, Lyve Cloud, Minio, Netease, Petabox, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS, Qiniu and Wasabi",
|
||||
NewFs: NewFs,
|
||||
CommandHelp: commandHelp,
|
||||
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||
@@ -91,6 +91,9 @@ func init() {
|
||||
}, {
|
||||
Value: "Alibaba",
|
||||
Help: "Alibaba Cloud Object Storage System (OSS) formerly Aliyun",
|
||||
}, {
|
||||
Value: "ArvanCloud",
|
||||
Help: "Arvan Cloud Object Storage (AOS)",
|
||||
}, {
|
||||
Value: "Ceph",
|
||||
Help: "Ceph Object Storage",
|
||||
@@ -100,15 +103,15 @@ func init() {
|
||||
}, {
|
||||
Value: "Cloudflare",
|
||||
Help: "Cloudflare R2 Storage",
|
||||
}, {
|
||||
Value: "ArvanCloud",
|
||||
Help: "Arvan Cloud Object Storage (AOS)",
|
||||
}, {
|
||||
Value: "DigitalOcean",
|
||||
Help: "DigitalOcean Spaces",
|
||||
}, {
|
||||
Value: "Dreamhost",
|
||||
Help: "Dreamhost DreamObjects",
|
||||
}, {
|
||||
Value: "GCS",
|
||||
Help: "Google Cloud Storage",
|
||||
}, {
|
||||
Value: "HuaweiOBS",
|
||||
Help: "Huawei Object Storage Service",
|
||||
@@ -124,6 +127,9 @@ func init() {
|
||||
}, {
|
||||
Value: "LyveCloud",
|
||||
Help: "Seagate Lyve Cloud",
|
||||
}, {
|
||||
Value: "Leviia",
|
||||
Help: "Leviia Object Storage",
|
||||
}, {
|
||||
Value: "Liara",
|
||||
Help: "Liara Object Storage",
|
||||
@@ -133,6 +139,9 @@ func init() {
}, {
Value: "Netease",
Help: "Netease Object Storage (NOS)",
}, {
Value: "Petabox",
Help: "Petabox Object Storage",
}, {
Value: "RackCorp",
Help: "RackCorp Object Storage",
@@ -148,6 +157,9 @@ func init() {
}, {
Value: "Storj",
Help: "Storj (S3 Compatible Gateway)",
}, {
Value: "Synology",
Help: "Synology C2 Object Storage",
}, {
Value: "TencentCOS",
Help: "Tencent Cloud Object Storage (COS)",
@@ -173,11 +185,13 @@ func init() {
Help: "Get AWS credentials from the environment (env vars or IAM).",
}},
}, {
Name: "access_key_id",
Help: "AWS Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
Name: "access_key_id",
Help: "AWS Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
Sensitive: true,
}, {
Name: "secret_access_key",
Help: "AWS Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
Name: "secret_access_key",
Help: "AWS Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
Sensitive: true,
}, {
// References:
|
||||
// 1. https://docs.aws.amazon.com/general/latest/gr/rande.html
|
||||
@@ -437,10 +451,50 @@ func init() {
|
||||
Value: "eu-south-2",
|
||||
Help: "Logrono, Spain",
|
||||
}},
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region where your bucket will be created and your data stored.\n",
|
||||
Provider: "Petabox",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "us-east-1",
|
||||
Help: "US East (N. Virginia)",
|
||||
}, {
|
||||
Value: "eu-central-1",
|
||||
Help: "Europe (Frankfurt)",
|
||||
}, {
|
||||
Value: "ap-southeast-1",
|
||||
Help: "Asia Pacific (Singapore)",
|
||||
}, {
|
||||
Value: "me-south-1",
|
||||
Help: "Middle East (Bahrain)",
|
||||
}, {
|
||||
Value: "sa-east-1",
|
||||
Help: "South America (São Paulo)",
|
||||
}},
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region where your data stored.\n",
|
||||
Provider: "Synology",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "eu-001",
|
||||
Help: "Europe Region 1",
|
||||
}, {
|
||||
Value: "eu-002",
|
||||
Help: "Europe Region 2",
|
||||
}, {
|
||||
Value: "us-001",
|
||||
Help: "US Region 1",
|
||||
}, {
|
||||
Value: "us-002",
|
||||
Help: "US Region 2",
|
||||
}, {
|
||||
Value: "tw-001",
|
||||
Help: "Asia (Taiwan)",
|
||||
}},
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
|
||||
Provider: "!AWS,Alibaba,ChinaMobile,Cloudflare,IONOS,ArvanCloud,Liara,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive",
|
||||
Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,IONOS,Petabox,Liara,Qiniu,RackCorp,Scaleway,Storj,Synology,TencentCOS,HuaweiOBS,IDrive",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "Use this if unsure.\nWill use v4 signatures and an empty region.",
|
||||
@@ -549,15 +603,15 @@ func init() {
|
||||
Help: "Anhui China (Huainan)",
|
||||
}},
|
||||
}, {
|
||||
// ArvanCloud endpoints: https://www.arvancloud.com/en/products/cloud-storage
|
||||
// ArvanCloud endpoints: https://www.arvancloud.ir/en/products/cloud-storage
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for Arvan Cloud Object Storage (AOS) API.",
|
||||
Provider: "ArvanCloud",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "s3.ir-thr-at1.arvanstorage.com",
|
||||
Help: "The default endpoint - a good choice if you are unsure.\nTehran Iran (Asiatech)",
|
||||
Value: "s3.ir-thr-at1.arvanstorage.ir",
|
||||
Help: "The default endpoint - a good choice if you are unsure.\nTehran Iran (Simin)",
|
||||
}, {
|
||||
Value: "s3.ir-tbz-sh1.arvanstorage.com",
|
||||
Value: "s3.ir-tbz-sh1.arvanstorage.ir",
|
||||
Help: "Tabriz Iran (Shahriar)",
|
||||
}},
|
||||
}, {
|
||||
@@ -765,6 +819,39 @@ func init() {
|
||||
Value: "s3-eu-south-2.ionoscloud.com",
|
||||
Help: "Logrono, Spain",
|
||||
}},
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for Petabox S3 Object Storage.\n\nSpecify the endpoint from the same region.",
|
||||
Provider: "Petabox",
|
||||
Required: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "s3.petabox.io",
|
||||
Help: "US East (N. Virginia)",
|
||||
}, {
|
||||
Value: "s3.us-east-1.petabox.io",
|
||||
Help: "US East (N. Virginia)",
|
||||
}, {
|
||||
Value: "s3.eu-central-1.petabox.io",
|
||||
Help: "Europe (Frankfurt)",
|
||||
}, {
|
||||
Value: "s3.ap-southeast-1.petabox.io",
|
||||
Help: "Asia Pacific (Singapore)",
|
||||
}, {
|
||||
Value: "s3.me-south-1.petabox.io",
|
||||
Help: "Middle East (Bahrain)",
|
||||
}, {
|
||||
Value: "s3.sa-east-1.petabox.io",
|
||||
Help: "South America (São Paulo)",
|
||||
}},
|
||||
}, {
|
||||
// Leviia endpoints: https://www.leviia.com/object-storage/
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for Leviia Object Storage API.",
|
||||
Provider: "Leviia",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "s3.leviia.com",
|
||||
Help: "The default endpoint\nLeviia",
|
||||
}},
|
||||
}, {
|
||||
// Liara endpoints: https://liara.ir/landing/object-storage
|
||||
Name: "endpoint",
|
||||
@@ -934,6 +1021,14 @@ func init() {
Value: "s3.eu-central-1.stackpathstorage.com",
Help: "EU Endpoint",
}},
}, {
Name: "endpoint",
Help: "Endpoint for Google Cloud Storage.",
Provider: "GCS",
Examples: []fs.OptionExample{{
Value: "https://storage.googleapis.com",
Help: "Google Cloud Storage endpoint",
}},
}, {
Name: "endpoint",
|
||||
Help: "Endpoint for Storj Gateway.",
|
||||
@@ -942,6 +1037,26 @@ func init() {
|
||||
Value: "gateway.storjshare.io",
|
||||
Help: "Global Hosted Gateway",
|
||||
}},
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for Synology C2 Object Storage API.",
|
||||
Provider: "Synology",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "eu-001.s3.synologyc2.net",
|
||||
Help: "EU Endpoint 1",
|
||||
}, {
|
||||
Value: "eu-002.s3.synologyc2.net",
|
||||
Help: "EU Endpoint 2",
|
||||
}, {
|
||||
Value: "us-001.s3.synologyc2.net",
|
||||
Help: "US Endpoint 1",
|
||||
}, {
|
||||
Value: "us-002.s3.synologyc2.net",
|
||||
Help: "US Endpoint 2",
|
||||
}, {
|
||||
Value: "tw-001.s3.synologyc2.net",
|
||||
Help: "TW Endpoint 1",
|
||||
}},
|
||||
}, {
|
||||
// cos endpoints: https://intl.cloud.tencent.com/document/product/436/6224
|
||||
Name: "endpoint",
|
||||
@@ -1098,7 +1213,7 @@ func init() {
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
|
||||
Provider: "!AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,Liara,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu",
|
||||
Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Scaleway,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "objects-us-east-1.dream.io",
|
||||
Help: "Dream Objects endpoint",
|
||||
@@ -1200,8 +1315,12 @@ func init() {
|
||||
Help: "Liara Iran endpoint",
|
||||
Provider: "Liara",
|
||||
}, {
|
||||
Value: "s3.ir-thr-at1.arvanstorage.com",
|
||||
Help: "ArvanCloud Tehran Iran (Asiatech) endpoint",
|
||||
Value: "s3.ir-thr-at1.arvanstorage.ir",
|
||||
Help: "ArvanCloud Tehran Iran (Simin) endpoint",
|
||||
Provider: "ArvanCloud",
|
||||
}, {
|
||||
Value: "s3.ir-tbz-sh1.arvanstorage.ir",
|
||||
Help: "ArvanCloud Tabriz Iran (Shahriar) endpoint",
|
||||
Provider: "ArvanCloud",
|
||||
}},
|
||||
}, {
|
||||
@@ -1385,7 +1504,7 @@ func init() {
|
||||
Provider: "ArvanCloud",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "ir-thr-at1",
|
||||
Help: "Tehran Iran (Asiatech)",
|
||||
Help: "Tehran Iran (Simin)",
|
||||
}, {
|
||||
Value: "ir-tbz-sh1",
|
||||
Help: "Tabriz Iran (Shahriar)",
|
||||
@@ -1582,7 +1701,7 @@ func init() {
|
||||
}, {
|
||||
Name: "location_constraint",
|
||||
Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
|
||||
Provider: "!AWS,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Liara,ArvanCloud,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS",
|
||||
Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Leviia,Liara,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS,Petabox",
|
||||
}, {
|
||||
Name: "acl",
|
||||
Help: `Canned ACL used when creating buckets and storing or copying objects.
|
||||
@@ -1597,7 +1716,7 @@ doesn't copy the ACL from the source but rather writes a fresh one.
|
||||
If the acl is an empty string then no X-Amz-Acl: header is added and
|
||||
the default (private) will be used.
|
||||
`,
|
||||
Provider: "!Storj,Cloudflare",
|
||||
Provider: "!Storj,Synology,Cloudflare",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "default",
|
||||
Help: "Owner gets Full_CONTROL.\nNo one else has access rights (default).",
|
||||
@@ -1713,6 +1832,7 @@ header is added and the default (private) will be used.
|
||||
Value: "arn:aws:kms:us-east-1:*",
|
||||
Help: "arn:aws:kms:*",
|
||||
}},
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "sse_customer_key",
|
||||
Help: `To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.
|
||||
@@ -1724,6 +1844,7 @@ Alternatively you can provide --sse-customer-key-base64.`,
|
||||
Value: "",
|
||||
Help: "None",
|
||||
}},
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "sse_customer_key_base64",
|
||||
Help: `If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.
|
||||
@@ -1735,6 +1856,7 @@ Alternatively you can provide --sse-customer-key.`,
|
||||
Value: "",
|
||||
Help: "None",
|
||||
}},
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "sse_customer_key_md5",
|
||||
Help: `If using SSE-C you may provide the secret encryption key MD5 checksum (optional).
|
||||
@@ -1747,6 +1869,7 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
|
||||
Value: "",
|
||||
Help: "None",
|
||||
}},
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "storage_class",
|
||||
Help: "The storage class to use when storing new objects in S3.",
|
||||
@@ -1825,7 +1948,7 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
|
||||
Help: "Standard storage class",
|
||||
}},
|
||||
}, {
|
||||
// Mapping from here: https://www.arvancloud.com/en/products/cloud-storage
|
||||
// Mapping from here: https://www.arvancloud.ir/en/products/cloud-storage
|
||||
Name: "storage_class",
|
||||
Help: "The storage class to use when storing new objects in ArvanCloud.",
|
||||
Provider: "ArvanCloud",
|
||||
@@ -1852,7 +1975,7 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
|
||||
Help: "Infrequent access storage mode",
|
||||
}},
|
||||
}, {
|
||||
// Mapping from here: https://www.scaleway.com/en/docs/object-storage-glacier/#-Scaleway-Storage-Classes
|
||||
// Mapping from here: https://www.scaleway.com/en/docs/storage/object/quickstart/
|
||||
Name: "storage_class",
|
||||
Help: "The storage class to use when storing new objects in S3.",
|
||||
Provider: "Scaleway",
|
||||
@@ -1861,10 +1984,13 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
|
||||
Help: "Default.",
|
||||
}, {
|
||||
Value: "STANDARD",
|
||||
Help: "The Standard class for any upload.\nSuitable for on-demand content like streaming or CDN.",
|
||||
Help: "The Standard class for any upload.\nSuitable for on-demand content like streaming or CDN.\nAvailable in all regions.",
|
||||
}, {
|
||||
Value: "GLACIER",
|
||||
Help: "Archived storage.\nPrices are lower, but it needs to be restored first to be accessed.",
|
||||
Help: "Archived storage.\nPrices are lower, but it needs to be restored first to be accessed.\nAvailable in FR-PAR and NL-AMS regions.",
|
||||
}, {
|
||||
Value: "ONEZONE_IA",
|
||||
Help: "One Zone - Infrequent Access.\nA good choice for storing secondary backup copies or easily re-creatable data.\nAvailable in the FR-PAR region only.",
|
||||
}},
|
||||
}, {
|
||||
// Mapping from here: https://developer.qiniu.com/kodo/5906/storage-type
|
||||
@@ -1985,9 +2111,10 @@ If empty it will default to the environment variable "AWS_PROFILE" or
`,
Advanced: true,
}, {
Name: "session_token",
Help: "An AWS session token.",
Advanced: true,
Name: "session_token",
Help: "An AWS session token.",
Advanced: true,
Sensitive: true,
}, {
Name: "upload_concurrency",
|
||||
Help: `Concurrency for multipart uploads.
|
||||
@@ -2182,6 +2309,15 @@ See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rcl
|
||||
This is usually set to a CloudFront CDN URL as AWS S3 offers
|
||||
cheaper egress for data downloaded through the CloudFront network.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "directory_markers",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
Help: `Upload an empty object with a trailing slash when a new directory is created
|
||||
|
||||
Empty folders are unsupported for bucket based remotes, this option creates an empty
|
||||
object ending with "/", to persist the folder.
|
||||
`,
|
||||
}, {
|
||||
Name: "use_multipart_etag",
|
||||
Help: `Whether to use ETag in multipart uploads for verification
|
||||
@@ -2258,6 +2394,24 @@ will decompress the object on the fly.
|
||||
If this is set to unset (the default) then rclone will choose
|
||||
according to the provider setting what to apply, but you can override
|
||||
rclone's choice here.
|
||||
`, "|", "`"),
|
||||
Default: fs.Tristate{},
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "use_accept_encoding_gzip",
|
||||
Help: strings.ReplaceAll(`Whether to send |Accept-Encoding: gzip| header.
|
||||
|
||||
By default, rclone will append |Accept-Encoding: gzip| to the request to download
|
||||
compressed objects whenever possible.
|
||||
|
||||
However some providers such as Google Cloud Storage may alter the HTTP headers, breaking
|
||||
the signature of the request.
|
||||
|
||||
A symptom of this would be receiving errors like
|
||||
|
||||
SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided.
|
||||
|
||||
In this case, you might want to try disabling this option.
|
||||
`, "|", "`"),
|
||||
Default: fs.Tristate{},
|
||||
Advanced: true,
|
||||
@@ -2266,6 +2420,11 @@ rclone's choice here.
|
||||
Help: `Suppress setting and reading of system metadata`,
|
||||
Advanced: true,
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "sts_endpoint",
|
||||
Help: "Endpoint for STS.\n\nLeave blank if using AWS to use the default endpoint for the region.",
|
||||
Provider: "AWS",
|
||||
Advanced: true,
|
||||
},
|
||||
}})
|
||||
}
|
||||
@@ -2352,6 +2511,7 @@ type Options struct {
|
||||
SecretAccessKey string `config:"secret_access_key"`
|
||||
Region string `config:"region"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
STSEndpoint string `config:"sts_endpoint"`
|
||||
LocationConstraint string `config:"location_constraint"`
|
||||
ACL string `config:"acl"`
|
||||
BucketACL string `config:"bucket_acl"`
|
||||
@@ -2387,12 +2547,14 @@ type Options struct {
|
||||
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
|
||||
DisableHTTP2 bool `config:"disable_http2"`
|
||||
DownloadURL string `config:"download_url"`
|
||||
DirectoryMarkers bool `config:"directory_markers"`
|
||||
UseMultipartEtag fs.Tristate `config:"use_multipart_etag"`
|
||||
UsePresignedRequest bool `config:"use_presigned_request"`
|
||||
Versions bool `config:"versions"`
|
||||
VersionAt fs.Time `config:"version_at"`
|
||||
Decompress bool `config:"decompress"`
|
||||
MightGzip fs.Tristate `config:"might_gzip"`
|
||||
UseAcceptEncodingGzip fs.Tristate `config:"use_accept_encoding_gzip"`
|
||||
NoSystemMetadata bool `config:"no_system_metadata"`
|
||||
}
|
||||
|
||||
@@ -2528,7 +2690,7 @@ func parsePath(path string) (root string) {
|
||||
// split returns bucket and bucketPath from the rootRelativePath
|
||||
// relative to f.root
|
||||
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
|
||||
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
|
||||
bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
|
||||
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
|
||||
}
|
||||
|
||||
@@ -2560,6 +2722,38 @@ func getClient(ctx context.Context, opt *Options) *http.Client {
|
||||
}
|
||||
}
|
||||
|
||||
// Default name resolver
|
||||
var defaultResolver = endpoints.DefaultResolver()
|
||||
|
||||
// resolve (service, region) to endpoint
|
||||
//
|
||||
// Used to set endpoint for s3 services and not for other services
|
||||
type resolver map[string]string
|
||||
|
||||
// Add a service to the resolver, ignoring empty urls
|
||||
func (r resolver) addService(service, url string) {
|
||||
if url == "" {
|
||||
return
|
||||
}
|
||||
if !strings.HasPrefix(url, "http") {
|
||||
url = "https://" + url
|
||||
}
|
||||
r[service] = url
|
||||
}
|
||||
|
||||
// EndpointFor return the endpoint for s3 if set or the default if not
|
||||
func (r resolver) EndpointFor(service, region string, opts ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
|
||||
fs.Debugf(nil, "Resolving service %q region %q", service, region)
|
||||
url, ok := r[service]
|
||||
if ok {
|
||||
return endpoints.ResolvedEndpoint{
|
||||
URL: url,
|
||||
SigningRegion: region,
|
||||
}, nil
|
||||
}
|
||||
return defaultResolver.EndpointFor(service, region, opts...)
|
||||
}
|
||||
|
||||
// s3Connection makes a connection to s3
|
||||
func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S3, *session.Session, error) {
|
||||
ci := fs.GetConfig(ctx)
|
||||
@@ -2638,8 +2832,12 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
|
||||
if opt.Region != "" {
|
||||
awsConfig.WithRegion(opt.Region)
|
||||
}
|
||||
if opt.Endpoint != "" {
|
||||
awsConfig.WithEndpoint(opt.Endpoint)
|
||||
if opt.Endpoint != "" || opt.STSEndpoint != "" {
|
||||
// If endpoints are set, override the relevant services only
|
||||
r := make(resolver)
|
||||
r.addService("s3", opt.Endpoint)
|
||||
r.addService("sts", opt.STSEndpoint)
|
||||
awsConfig.WithEndpointResolver(r)
|
||||
}
|
||||
|
||||
// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
|
||||
@@ -2749,11 +2947,12 @@ func setEndpointValueForIDriveE2(m configmap.Mapper) (err error) {
|
||||
// These should be differences from AWS S3
|
||||
func setQuirks(opt *Options) {
|
||||
var (
|
||||
listObjectsV2 = true
|
||||
virtualHostStyle = true
|
||||
urlEncodeListings = true
|
||||
useMultipartEtag = true
|
||||
mightGzip = true // assume all providers might gzip until proven otherwise
|
||||
listObjectsV2 = true
|
||||
virtualHostStyle = true
|
||||
urlEncodeListings = true
|
||||
useMultipartEtag = true
|
||||
useAcceptEncodingGzip = true
|
||||
mightGzip = true // assume all providers might gzip until proven otherwise
|
||||
)
|
||||
switch opt.Provider {
|
||||
case "AWS":
|
||||
@@ -2795,6 +2994,8 @@ func setQuirks(opt *Options) {
|
||||
// listObjectsV2 supported - https://api.ionos.com/docs/s3/#Basic-Operations-get-Bucket-list-type-2
|
||||
virtualHostStyle = false
|
||||
urlEncodeListings = false
|
||||
case "Petabox":
|
||||
// No quirks
|
||||
case "Liara":
|
||||
virtualHostStyle = false
|
||||
urlEncodeListings = false
|
||||
@@ -2830,14 +3031,23 @@ func setQuirks(opt *Options) {
|
||||
if opt.ChunkSize < 64*fs.Mebi {
|
||||
opt.ChunkSize = 64 * fs.Mebi
|
||||
}
|
||||
case "Synology":
|
||||
useMultipartEtag = false
|
||||
case "TencentCOS":
|
||||
listObjectsV2 = false // untested
|
||||
useMultipartEtag = false // untested
|
||||
case "Wasabi":
|
||||
// No quirks
|
||||
case "Leviia":
|
||||
// No quirks
|
||||
case "Qiniu":
useMultipartEtag = false
urlEncodeListings = false
virtualHostStyle = false
case "GCS":
// Google break request Signature by mutating accept-encoding HTTP header
// https://github.com/rclone/rclone/issues/6670
useAcceptEncodingGzip = false
case "Other":
listObjectsV2 = false
virtualHostStyle = false
@@ -2882,6 +3092,12 @@ func setQuirks(opt *Options) {
|
||||
opt.MightGzip.Valid = true
|
||||
opt.MightGzip.Value = mightGzip
|
||||
}
|
||||
|
||||
// set UseAcceptEncodingGzip if not manually set
|
||||
if !opt.UseAcceptEncodingGzip.Valid {
|
||||
opt.UseAcceptEncodingGzip.Valid = true
|
||||
opt.UseAcceptEncodingGzip.Value = useAcceptEncodingGzip
|
||||
}
|
||||
}
|
||||
|
||||
// setRoot changes the root of the Fs
|
||||
@@ -2906,6 +3122,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fs.Debugf(nil, "name = %q, root = %q, opt = %#v", name, root, opt)
|
||||
err = checkUploadChunkSize(opt.ChunkSize)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("s3: chunk size: %w", err)
|
||||
@@ -2915,7 +3132,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
return nil, fmt.Errorf("s3: upload cutoff: %w", err)
|
||||
}
|
||||
if opt.Versions && opt.VersionAt.IsSet() {
|
||||
return nil, errors.New("s3: cant use --s3-versions and --s3-version-at at the same time")
|
||||
return nil, errors.New("s3: can't use --s3-versions and --s3-version-at at the same time")
|
||||
}
|
||||
if opt.BucketACL == "" {
|
||||
opt.BucketACL = opt.ACL
|
||||
@@ -2996,6 +3213,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if opt.Provider == "IDrive" {
|
||||
f.features.SetTier = false
|
||||
}
|
||||
if opt.DirectoryMarkers {
|
||||
f.features.CanHaveEmptyDirectories = true
|
||||
}
|
||||
// f.listMultipartUploads()
|
||||
|
||||
if f.rootBucket != "" && f.rootDirectory != "" && !opt.NoHeadObject && !strings.HasSuffix(root, "/") {
|
||||
@@ -3036,6 +3256,7 @@ func (f *Fs) getMetaDataListing(ctx context.Context, wantRemote string) (info *s
|
||||
err = f.list(ctx, listOpt{
|
||||
bucket: bucket,
|
||||
directory: bucketPath,
|
||||
prefix: f.rootDirectory,
|
||||
recurse: true,
|
||||
withVersions: f.opt.Versions,
|
||||
findFile: true,
|
||||
@@ -3426,24 +3647,25 @@ var errEndList = errors.New("end list")
|
||||
|
||||
// list options
|
||||
type listOpt struct {
|
||||
bucket string // bucket to list
|
||||
directory string // directory with bucket
|
||||
prefix string // prefix to remove from listing
|
||||
addBucket bool // if set, the bucket is added to the start of the remote
|
||||
recurse bool // if set, recurse to read sub directories
|
||||
withVersions bool // if set, versions are produced
|
||||
hidden bool // if set, return delete markers as objects with size == isDeleteMarker
|
||||
findFile bool // if set, it will look for files called (bucket, directory)
|
||||
versionAt fs.Time // if set only show versions <= this time
|
||||
bucket string // bucket to list
|
||||
directory string // directory with bucket
|
||||
prefix string // prefix to remove from listing
|
||||
addBucket bool // if set, the bucket is added to the start of the remote
|
||||
recurse bool // if set, recurse to read sub directories
|
||||
withVersions bool // if set, versions are produced
|
||||
hidden bool // if set, return delete markers as objects with size == isDeleteMarker
|
||||
findFile bool // if set, it will look for files called (bucket, directory)
|
||||
versionAt fs.Time // if set only show versions <= this time
|
||||
noSkipMarkers bool // if set return dir marker objects
|
||||
}
|
||||
|
||||
// list lists the objects into the function supplied with the opt
|
||||
// supplied.
|
||||
func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
|
||||
if opt.prefix != "" {
|
||||
opt.prefix += "/"
|
||||
}
|
||||
if !opt.findFile {
|
||||
if opt.prefix != "" {
|
||||
opt.prefix += "/"
|
||||
}
|
||||
if opt.directory != "" {
|
||||
opt.directory += "/"
|
||||
}
|
||||
@@ -3486,6 +3708,7 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
|
||||
default:
|
||||
listBucket = f.newV2List(&req)
|
||||
}
|
||||
foundItems := 0
|
||||
for {
|
||||
var resp *s3.ListObjectsV2Output
|
||||
var err error
|
||||
@@ -3527,6 +3750,7 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
|
||||
return err
|
||||
}
|
||||
if !opt.recurse {
|
||||
foundItems += len(resp.CommonPrefixes)
|
||||
for _, commonPrefix := range resp.CommonPrefixes {
|
||||
if commonPrefix.Prefix == nil {
|
||||
fs.Logf(f, "Nil common prefix received")
|
||||
@@ -3547,7 +3771,7 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
|
||||
}
|
||||
remote = remote[len(opt.prefix):]
|
||||
if opt.addBucket {
|
||||
remote = path.Join(opt.bucket, remote)
|
||||
remote = bucket.Join(opt.bucket, remote)
|
||||
}
|
||||
remote = strings.TrimSuffix(remote, "/")
|
||||
err = fn(remote, &s3.Object{Key: &remote}, nil, true)
|
||||
@@ -3559,6 +3783,7 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
|
||||
}
|
||||
}
|
||||
}
|
||||
foundItems += len(resp.Contents)
|
||||
for i, object := range resp.Contents {
|
||||
remote := aws.StringValue(object.Key)
|
||||
if urlEncodeListings {
|
||||
@@ -3573,19 +3798,29 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
|
||||
fs.Logf(f, "Odd name received %q", remote)
|
||||
continue
|
||||
}
|
||||
remote = remote[len(opt.prefix):]
|
||||
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
|
||||
if opt.addBucket {
|
||||
remote = path.Join(opt.bucket, remote)
|
||||
}
|
||||
isDirectory := (remote == "" || strings.HasSuffix(remote, "/")) && object.Size != nil && *object.Size == 0
|
||||
// is this a directory marker?
|
||||
if isDirectory && object.Size != nil && *object.Size == 0 {
|
||||
continue // skip directory marker
|
||||
if isDirectory {
|
||||
if opt.noSkipMarkers {
|
||||
// process directory markers as files
|
||||
isDirectory = false
|
||||
} else {
|
||||
// Don't insert the root directory
|
||||
if remote == opt.directory {
|
||||
continue
|
||||
}
|
||||
// process directory markers as directories
|
||||
remote = strings.TrimRight(remote, "/")
|
||||
}
|
||||
}
|
||||
remote = remote[len(opt.prefix):]
|
||||
if opt.addBucket {
|
||||
remote = bucket.Join(opt.bucket, remote)
|
||||
}
|
||||
if versionIDs != nil {
|
||||
err = fn(remote, object, versionIDs[i], false)
|
||||
err = fn(remote, object, versionIDs[i], isDirectory)
|
||||
} else {
|
||||
err = fn(remote, object, nil, false)
|
||||
err = fn(remote, object, nil, isDirectory)
|
||||
}
|
||||
if err != nil {
|
||||
if err == errEndList {
|
||||
@@ -3598,6 +3833,20 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
|
||||
break
|
||||
}
|
||||
}
|
||||
if f.opt.DirectoryMarkers && foundItems == 0 && opt.directory != "" {
|
||||
// Determine whether the directory exists or not by whether it has a marker
|
||||
req := s3.HeadObjectInput{
|
||||
Bucket: &opt.bucket,
|
||||
Key: &opt.directory,
|
||||
}
|
||||
_, err := f.headObject(ctx, &req)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
return fs.ErrorDirNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -3788,10 +4037,70 @@ func (f *Fs) bucketExists(ctx context.Context, bucket string) (bool, error) {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Create directory marker file and parents
|
||||
func (f *Fs) createDirectoryMarker(ctx context.Context, bucket, dir string) error {
|
||||
if !f.opt.DirectoryMarkers || bucket == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Object to be uploaded
|
||||
o := &Object{
|
||||
fs: f,
|
||||
meta: map[string]string{
|
||||
metaMtime: swift.TimeToFloatString(time.Now()),
|
||||
},
|
||||
}
|
||||
|
||||
for {
|
||||
_, bucketPath := f.split(dir)
|
||||
// Don't create the directory marker if it is the bucket or at the very root
|
||||
if bucketPath == "" {
|
||||
break
|
||||
}
|
||||
o.remote = dir + "/"
|
||||
|
||||
// Check to see if object already exists
|
||||
_, err := o.headObject(ctx)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Upload it if not
|
||||
fs.Debugf(o, "Creating directory marker")
|
||||
content := io.Reader(strings.NewReader(""))
|
||||
err = o.Update(ctx, content, o)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating directory marker failed: %w", err)
|
||||
}
|
||||
|
||||
// Now check parent directory exists
|
||||
dir = path.Dir(dir)
|
||||
if dir == "/" || dir == "." {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
bucket, _ := f.split(dir)
return f.makeBucket(ctx, bucket)
e := f.makeBucket(ctx, bucket)
if e != nil {
return e
}
return f.createDirectoryMarker(ctx, bucket, dir)
}

// mkdirParent creates the parent bucket/directory if it doesn't exist
|
||||
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
|
||||
remote = strings.TrimRight(remote, "/")
|
||||
dir := path.Dir(remote)
|
||||
if dir == "/" || dir == "." {
|
||||
dir = ""
|
||||
}
|
||||
return f.Mkdir(ctx, dir)
|
||||
}
|
||||
|
||||
// makeBucket creates the bucket if it doesn't exist
|
||||
@@ -3832,6 +4141,18 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
|
||||
// Returns an error if it isn't empty
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
bucket, directory := f.split(dir)
|
||||
// Remove directory marker file
|
||||
if f.opt.DirectoryMarkers && bucket != "" && dir != "" {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: dir + "/",
|
||||
}
|
||||
fs.Debugf(o, "Removing directory marker")
|
||||
err := o.Remove(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("removing directory marker failed: %w", err)
|
||||
}
|
||||
}
|
||||
if bucket == "" || directory != "" {
|
||||
return nil
|
||||
}
|
||||
@@ -3869,7 +4190,7 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
|
||||
req.Bucket = &dstBucket
|
||||
req.ACL = stringPointerOrNil(f.opt.ACL)
|
||||
req.Key = &dstPath
|
||||
source := pathEscape(path.Join(srcBucket, srcPath))
|
||||
source := pathEscape(bucket.Join(srcBucket, srcPath))
|
||||
if src.versionID != nil {
|
||||
source += fmt.Sprintf("?versionId=%s", *src.versionID)
|
||||
}
|
||||
@@ -4030,7 +4351,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
return nil, errNotWithVersionAt
|
||||
}
|
||||
dstBucket, dstPath := f.split(remote)
|
||||
err := f.makeBucket(ctx, dstBucket)
|
||||
err := f.mkdirParent(ctx, remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -4526,13 +4847,14 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
|
||||
delErr <- operations.DeleteFiles(ctx, delChan)
|
||||
}()
|
||||
checkErr(f.list(ctx, listOpt{
|
||||
bucket: bucket,
|
||||
directory: directory,
|
||||
prefix: f.rootDirectory,
|
||||
addBucket: f.rootBucket == "",
|
||||
recurse: true,
|
||||
withVersions: versioned,
|
||||
hidden: true,
|
||||
bucket: bucket,
|
||||
directory: directory,
|
||||
prefix: f.rootDirectory,
|
||||
addBucket: f.rootBucket == "",
|
||||
recurse: true,
|
||||
withVersions: versioned,
|
||||
hidden: true,
|
||||
noSkipMarkers: true,
|
||||
}, func(remote string, object *s3.Object, versionID *string, isDirectory bool) error {
|
||||
if isDirectory {
|
||||
return nil
|
||||
@@ -4542,7 +4864,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
|
||||
fs.Errorf(object, "Can't create object %+v", err)
|
||||
return nil
|
||||
}
|
||||
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
|
||||
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
|
||||
// Work out whether the file is the current version or not
|
||||
isCurrentVersion := !versioned || !version.Match(remote)
|
||||
fs.Debugf(nil, "%q version %v", remote, version.Match(remote))
|
||||
@@ -4656,22 +4978,26 @@ func (o *Object) headObject(ctx context.Context) (resp *s3.HeadObjectOutput, err
|
||||
Key: &bucketPath,
|
||||
VersionId: o.versionID,
|
||||
}
|
||||
if o.fs.opt.RequesterPays {
|
||||
return o.fs.headObject(ctx, &req)
|
||||
}
|
||||
|
||||
func (f *Fs) headObject(ctx context.Context, req *s3.HeadObjectInput) (resp *s3.HeadObjectOutput, err error) {
|
||||
if f.opt.RequesterPays {
|
||||
req.RequestPayer = aws.String(s3.RequestPayerRequester)
|
||||
}
|
||||
if o.fs.opt.SSECustomerAlgorithm != "" {
|
||||
req.SSECustomerAlgorithm = &o.fs.opt.SSECustomerAlgorithm
|
||||
if f.opt.SSECustomerAlgorithm != "" {
|
||||
req.SSECustomerAlgorithm = &f.opt.SSECustomerAlgorithm
|
||||
}
|
||||
if o.fs.opt.SSECustomerKey != "" {
|
||||
req.SSECustomerKey = &o.fs.opt.SSECustomerKey
|
||||
if f.opt.SSECustomerKey != "" {
|
||||
req.SSECustomerKey = &f.opt.SSECustomerKey
|
||||
}
|
||||
if o.fs.opt.SSECustomerKeyMD5 != "" {
|
||||
req.SSECustomerKeyMD5 = &o.fs.opt.SSECustomerKeyMD5
|
||||
if f.opt.SSECustomerKeyMD5 != "" {
|
||||
req.SSECustomerKeyMD5 = &f.opt.SSECustomerKeyMD5
|
||||
}
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
resp, err = o.fs.c.HeadObjectWithContext(ctx, &req)
|
||||
return o.fs.shouldRetry(ctx, err)
|
||||
resp, err = f.c.HeadObjectWithContext(ctx, req)
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
if awsErr, ok := err.(awserr.RequestFailure); ok {
|
||||
@@ -4681,7 +5007,9 @@ func (o *Object) headObject(ctx context.Context) (resp *s3.HeadObjectOutput, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
o.fs.cache.MarkOK(bucket)
|
||||
if req.Bucket != nil {
|
||||
f.cache.MarkOK(*req.Bucket)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
@@ -4918,7 +5246,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
|
||||
// Override the automatic decompression in the transport to
|
||||
// download compressed files as-is
|
||||
httpReq.HTTPRequest.Header.Set("Accept-Encoding", "gzip")
|
||||
if o.fs.opt.UseAcceptEncodingGzip.Value {
|
||||
httpReq.HTTPRequest.Header.Set("Accept-Encoding", "gzip")
|
||||
}
|
||||
|
||||
for _, option := range options {
|
||||
switch option.(type) {
|
||||
@@ -5034,7 +5364,9 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
|
||||
}
|
||||
uid := cout.UploadId
|
||||
|
||||
uploadCtx, cancel := context.WithCancel(ctx)
|
||||
defer atexit.OnError(&err, func() {
|
||||
cancel()
|
||||
if o.fs.opt.LeavePartsOnError {
|
||||
return
|
||||
}
|
||||
@@ -5054,7 +5386,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
|
||||
})()
|
||||
|
||||
var (
|
||||
g, gCtx = errgroup.WithContext(ctx)
|
||||
g, gCtx = errgroup.WithContext(uploadCtx)
|
||||
finished = false
|
||||
partsMu sync.Mutex // to protect parts
|
||||
parts []*s3.CompletedPart
|
||||
@@ -5136,7 +5468,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
|
||||
uout, err := f.c.UploadPartWithContext(gCtx, uploadPartReq)
|
||||
if err != nil {
|
||||
if partNum <= int64(concurrency) {
|
||||
return f.shouldRetry(ctx, err)
|
||||
return f.shouldRetry(gCtx, err)
|
||||
}
|
||||
// retry all chunks once have done the first batch
|
||||
return true, err
|
||||
@@ -5168,7 +5500,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
|
||||
|
||||
var resp *s3.CompleteMultipartUploadOutput
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.c.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{
|
||||
resp, err = f.c.CompleteMultipartUploadWithContext(uploadCtx, &s3.CompleteMultipartUploadInput{
|
||||
Bucket: req.Bucket,
|
||||
Key: req.Key,
|
||||
MultipartUpload: &s3.CompletedMultipartUpload{
|
||||
@@ -5177,7 +5509,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
|
||||
RequestPayer: req.RequestPayer,
|
||||
UploadId: uid,
|
||||
})
|
||||
return f.shouldRetry(ctx, err)
|
||||
return f.shouldRetry(uploadCtx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return wantETag, gotETag, nil, fmt.Errorf("multipart upload failed to finalise: %w", err)
|
||||
@@ -5326,9 +5658,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return errNotWithVersionAt
|
||||
}
|
||||
bucket, bucketPath := o.split()
|
||||
err := o.fs.makeBucket(ctx, bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
// Create parent dir/bucket if not saving directory marker
|
||||
if !strings.HasSuffix(o.remote, "/") {
|
||||
err := o.fs.mkdirParent(ctx, o.remote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
modTime := src.ModTime(ctx)
|
||||
size := src.Size()
|
||||
@@ -5651,7 +5986,7 @@ func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error)
|
||||
setMetadata("content-disposition", o.contentDisposition)
|
||||
setMetadata("content-encoding", o.contentEncoding)
|
||||
setMetadata("content-language", o.contentLanguage)
|
||||
setMetadata("tier", o.storageClass)
|
||||
metadata["tier"] = o.GetTier()
|
||||
|
||||
return metadata, nil
|
||||
}
|
||||
|
||||
@@ -6,15 +6,19 @@ import (
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"fmt"
|
||||
"path"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
"github.com/rclone/rclone/lib/bucket"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/rclone/rclone/lib/version"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -250,7 +254,8 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
// Create an object
|
||||
const fileName = "test-versions.txt"
|
||||
const dirName = "versions"
|
||||
const fileName = dirName + "/" + "test-versions.txt"
|
||||
contents := random.String(100)
|
||||
item := fstest.NewItem(fileName, contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
|
||||
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
||||
@@ -280,11 +285,12 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
|
||||
}()
|
||||
|
||||
// Read the contents
|
||||
entries, err := f.List(ctx, "")
|
||||
entries, err := f.List(ctx, dirName)
|
||||
require.NoError(t, err)
|
||||
tests := 0
|
||||
var fileNameVersion string
|
||||
for _, entry := range entries {
|
||||
t.Log(entry)
|
||||
remote := entry.Remote()
|
||||
if remote == fileName {
|
||||
t.Run("ReadCurrent", func(t *testing.T) {
|
||||
@@ -309,6 +315,23 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
|
||||
require.NotNil(t, o)
|
||||
assert.Equal(t, int64(100), o.Size(), o.Remote())
|
||||
})
|
||||
|
||||
// Check we can make a NewFs from that object with a version suffix
|
||||
t.Run("NewFs", func(t *testing.T) {
|
||||
newPath := bucket.Join(fs.ConfigStringFull(f), fileNameVersion)
|
||||
// Make sure --s3-versions is set in the config of the new remote
|
||||
fs.Debugf(nil, "oldPath = %q", newPath)
|
||||
lastColon := strings.LastIndex(newPath, ":")
|
||||
require.True(t, lastColon >= 0)
|
||||
newPath = newPath[:lastColon] + ",versions" + newPath[lastColon:]
|
||||
fs.Debugf(nil, "newPath = %q", newPath)
|
||||
fNew, err := cache.Get(ctx, newPath)
|
||||
// This should return pointing to a file
|
||||
require.Equal(t, fs.ErrorIsFile, err)
|
||||
require.NotNil(t, fNew)
|
||||
// With the directory the directory above
|
||||
assert.Equal(t, dirName, path.Base(fs.ConfigStringFull(fNew)))
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("VersionAt", func(t *testing.T) {
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
@@ -20,6 +21,24 @@ func TestIntegration(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestIntegration2(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("skipping as -remote is set")
|
||||
}
|
||||
name := "TestS3"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: name + ":",
|
||||
NilObject: (*Object)(nil),
|
||||
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
MinChunkSize: minChunkSize,
|
||||
},
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "directory_markers", Value: "true"},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadChunkSize(cs)
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
|
||||
// URL parameters that need to be added to the signature
|
||||
var s3ParamsToSign = map[string]struct{}{
|
||||
"delete": {},
|
||||
"acl": {},
|
||||
"location": {},
|
||||
"logging": {},
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package seafile
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -17,19 +16,19 @@ func TestShouldAllowShutdownTwice(t *testing.T) {
|
||||
renew.Shutdown()
|
||||
}
|
||||
|
||||
func TestRenewal(t *testing.T) {
|
||||
func TestRenewalInTimeLimit(t *testing.T) {
|
||||
var count int64
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(2) // run the renewal twice
|
||||
renew := NewRenew(time.Millisecond, func() error {
|
||||
renew := NewRenew(100*time.Millisecond, func() error {
|
||||
atomic.AddInt64(&count, 1)
|
||||
wg.Done()
|
||||
return nil
|
||||
})
|
||||
wg.Wait()
|
||||
time.Sleep(time.Second)
|
||||
renew.Shutdown()
|
||||
|
||||
// it is technically possible that a third renewal gets triggered between Wait() and Shutdown()
|
||||
assert.GreaterOrEqual(t, atomic.LoadInt64(&count), int64(2))
|
||||
// there's no guarantee the CI agent can handle a simple goroutine
|
||||
renewCount := atomic.LoadInt64(&count)
|
||||
t.Logf("renew count = %d", renewCount)
|
||||
assert.Greater(t, renewCount, int64(0))
|
||||
assert.Less(t, renewCount, int64(11))
|
||||
}
|
||||
|
||||
@@ -67,15 +67,18 @@ func init() {
|
||||
Value: "https://cloud.seafile.com/",
|
||||
Help: "Connect to cloud.seafile.com.",
|
||||
}},
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: configUser,
|
||||
Help: "User name (usually email address).",
|
||||
Required: true,
|
||||
Name: configUser,
|
||||
Help: "User name (usually email address).",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
// Password is not required, it will be left blank for 2FA
|
||||
Name: configPassword,
|
||||
Help: "Password.",
|
||||
IsPassword: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: config2FA,
|
||||
Help: "Two-factor authentication ('true' if the account has 2FA enabled).",
|
||||
@@ -87,6 +90,7 @@ func init() {
|
||||
Name: configLibraryKey,
|
||||
Help: "Library password (for encrypted libraries only).\n\nLeave blank if you pass it through the command line.",
|
||||
IsPassword: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: configCreateLibrary,
|
||||
Help: "Should rclone create a library if it doesn't exist.",
|
||||
@@ -94,9 +98,10 @@ func init() {
|
||||
Default: false,
|
||||
}, {
|
||||
// Keep the authentication token after entering the 2FA code
|
||||
Name: configAuthToken,
|
||||
Help: "Authentication token.",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Name: configAuthToken,
|
||||
Help: "Authentication token.",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
|
||||
@@ -953,11 +953,9 @@ func (f *Fs) emptyLibraryTrash(ctx context.Context, libraryID string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// === API v2 from the official documentation, but that have been replaced by the much better v2.1 (undocumented as of Apr 2020)
|
||||
// === getDirectoryEntriesAPIv2 is needed to keep compatibility with seafile v6,
|
||||
// === the others can probably be removed after the API v2.1 is documented
|
||||
|
||||
func (f *Fs) getDirectoryEntriesAPIv2(ctx context.Context, libraryID, dirPath string) ([]api.DirEntry, error) {
|
||||
// API v2 from the official documentation, but that have been replaced by the much better v2.1 (undocumented as of Apr 2020)
|
||||
// getDirectoryEntriesAPIv2 is needed to keep compatibility with seafile v6.
|
||||
// API Documentation
|
||||
// https://download.seafile.com/published/web-api/v2.1/directories.md#user-content-List%20Items%20in%20Directory
|
||||
if libraryID == "" {
|
||||
@@ -1001,95 +999,3 @@ func (f *Fs) getDirectoryEntriesAPIv2(ctx context.Context, libraryID, dirPath st
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (f *Fs) copyFileAPIv2(ctx context.Context, srcLibraryID, srcPath, dstLibraryID, dstPath string) (*api.FileInfo, error) {
|
||||
// API Documentation
|
||||
// https://download.seafile.com/published/web-api/v2.1/file.md#user-content-Copy%20File
|
||||
if srcLibraryID == "" || dstLibraryID == "" {
|
||||
return nil, errors.New("libraryID and/or file path argument(s) missing")
|
||||
}
|
||||
srcPath = path.Join("/", srcPath)
|
||||
dstPath = path.Join("/", dstPath)
|
||||
|
||||
// Older API does not seem to accept JSON input here either
|
||||
postParameters := url.Values{
|
||||
"operation": {"copy"},
|
||||
"dst_repo": {dstLibraryID},
|
||||
"dst_dir": {f.opt.Enc.FromStandardPath(dstPath)},
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: APIv20 + srcLibraryID + "/file/",
|
||||
Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(srcPath)}},
|
||||
ContentType: "application/x-www-form-urlencoded",
|
||||
Body: bytes.NewBuffer([]byte(postParameters.Encode())),
|
||||
}
|
||||
result := &api.FileInfo{}
|
||||
var resp *http.Response
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.Call(ctx, &opts)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
if resp != nil {
|
||||
if resp.StatusCode == 401 || resp.StatusCode == 403 {
|
||||
return nil, fs.ErrorPermissionDenied
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("failed to copy file %s:'%s' to %s:'%s': %w", srcLibraryID, srcPath, dstLibraryID, dstPath, err)
|
||||
}
|
||||
err = rest.DecodeJSON(resp, &result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.decodeFileInfo(result), nil
|
||||
}
|
||||
|
||||
func (f *Fs) renameFileAPIv2(ctx context.Context, libraryID, filePath, newname string) error {
|
||||
// API Documentation
|
||||
// https://download.seafile.com/published/web-api/v2.1/file.md#user-content-Rename%20File
|
||||
if libraryID == "" || newname == "" {
|
||||
return errors.New("libraryID and/or file path argument(s) missing")
|
||||
}
|
||||
filePath = path.Join("/", filePath)
|
||||
|
||||
// No luck with JSON input with the older api2
|
||||
postParameters := url.Values{
|
||||
"operation": {"rename"},
|
||||
"reloaddir": {"true"}, // This is an undocumented trick to avoid an http code 301 response (found in https://github.com/haiwen/seahub/blob/master/seahub/api2/views.py)
|
||||
"newname": {f.opt.Enc.FromStandardName(newname)},
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: APIv20 + libraryID + "/file/",
|
||||
Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(filePath)}},
|
||||
ContentType: "application/x-www-form-urlencoded",
|
||||
Body: bytes.NewBuffer([]byte(postParameters.Encode())),
|
||||
NoRedirect: true,
|
||||
NoResponse: true,
|
||||
}
|
||||
var resp *http.Response
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.Call(ctx, &opts)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
if resp != nil {
|
||||
if resp.StatusCode == 301 {
|
||||
// This is the normal response from the server
|
||||
return nil
|
||||
}
|
||||
if resp.StatusCode == 401 || resp.StatusCode == 403 {
|
||||
return fs.ErrorPermissionDenied
|
||||
}
|
||||
if resp.StatusCode == 404 {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("failed to rename file: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
iofs "io/fs"
|
||||
"os"
|
||||
"path"
|
||||
"regexp"
|
||||
@@ -58,13 +59,15 @@ func init() {
|
||||
Description: "SSH/SFTP",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "host",
|
||||
Help: "SSH host to connect to.\n\nE.g. \"example.com\".",
|
||||
Required: true,
|
||||
Name: "host",
|
||||
Help: "SSH host to connect to.\n\nE.g. \"example.com\".",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "SSH username.",
|
||||
Default: currentUser,
|
||||
Name: "user",
|
||||
Help: "SSH username.",
|
||||
Default: currentUser,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "port",
|
||||
Help: "SSH port number.",
|
||||
@@ -74,8 +77,9 @@ func init() {
|
||||
Help: "SSH password, leave blank to use ssh-agent.",
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "key_pem",
|
||||
Help: "Raw PEM-encoded private key.\n\nIf specified, will override key_file parameter.",
|
||||
Name: "key_pem",
|
||||
Help: "Raw PEM-encoded private key.\n\nIf specified, will override key_file parameter.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "key_file",
|
||||
Help: "Path to PEM-encoded private key file.\n\nLeave blank or set key-use-agent to use ssh-agent." + env.ShellExpandHelp,
|
||||
@@ -86,6 +90,7 @@ func init() {
|
||||
Only PEM encrypted key files (old OpenSSH format) are supported. Encrypted keys
|
||||
in the new OpenSSH format can't be used.`,
|
||||
IsPassword: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "pubkey_file",
|
||||
Help: `Optional path to public key file.
|
||||
@@ -323,7 +328,7 @@ Pass multiple variables space separated, eg
|
||||
|
||||
VAR1=value VAR2=value
|
||||
|
||||
and pass variables with spaces in in quotes, eg
|
||||
and pass variables with spaces in quotes, eg
|
||||
|
||||
"VAR3=value with space" "VAR4=value with space" VAR5=nospacehere
|
||||
|
||||
@@ -367,6 +372,20 @@ At least one must match with server configuration. This can be checked for examp
|
||||
Example:
|
||||
|
||||
umac-64-etm@openssh.com umac-128-etm@openssh.com hmac-sha2-256-etm@openssh.com
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "host_key_algorithms",
|
||||
Default: fs.SpaceSepList{},
|
||||
Help: `Space separated list of host key algorithms, ordered by preference.
|
||||
|
||||
At least one must match with server configuration. This can be checked for example using ssh -Q HostKeyAlgorithms.
|
||||
|
||||
Note: This can affect the outcome of key negotiation with the server even if server host key validation is not enabled.
|
||||
|
||||
Example:
|
||||
|
||||
ssh-ed25519 ssh-rsa ssh-dss
|
||||
`,
|
||||
Advanced: true,
|
||||
}},
|
||||
@@ -407,6 +426,7 @@ type Options struct {
|
||||
Ciphers fs.SpaceSepList `config:"ciphers"`
|
||||
KeyExchange fs.SpaceSepList `config:"key_exchange"`
|
||||
MACs fs.SpaceSepList `config:"macs"`
|
||||
HostKeyAlgorithms fs.SpaceSepList `config:"host_key_algorithms"`
|
||||
}
|
||||
|
||||
// Fs stores the interface to the remote SFTP files
|
||||
@@ -739,6 +759,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
ClientVersion: "SSH-2.0-" + f.ci.UserAgent,
|
||||
}
|
||||
|
||||
if len(opt.HostKeyAlgorithms) != 0 {
|
||||
sshConfig.HostKeyAlgorithms = []string(opt.HostKeyAlgorithms)
|
||||
}
|
||||
|
||||
if opt.KnownHostsFile != "" {
|
||||
hostcallback, err := knownhosts.New(env.ShellExpand(opt.KnownHostsFile))
|
||||
if err != nil {
|
||||
@@ -782,10 +806,32 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
return nil, fmt.Errorf("couldn't read ssh agent signers: %w", err)
|
||||
}
|
||||
if keyFile != "" {
|
||||
// If `opt.KeyUseAgent` is false, then it's expected that `opt.KeyFile` contains the private key
|
||||
// and `${opt.KeyFile}.pub` contains the public key.
|
||||
//
|
||||
// If `opt.KeyUseAgent` is true, then it's expected that `opt.KeyFile` contains the public key.
|
||||
// This is how it works with openssh; the `IdentityFile` in openssh config points to the public key.
|
||||
// It's not necessary to specify the public key explicitly when using ssh-agent, since openssh and rclone
|
||||
// will try all the keys they find in the ssh-agent until they find one that works. But just like
|
||||
// `IdentityFile` is used in openssh config to limit the search to one specific key, so does
|
||||
// `opt.KeyFile` in rclone config limit the search to that specific key.
|
||||
//
|
||||
// However, previous versions of rclone would always expect to find the public key in
|
||||
// `${opt.KeyFile}.pub` even if `opt.KeyUseAgent` was true. So for the sake of backward compatibility
|
||||
// we still first attempt to read the public key from `${opt.KeyFile}.pub`. But if it fails with
|
||||
// an `fs.ErrNotExist` then we also try to read the public key from `opt.KeyFile`.
|
||||
pubBytes, err := os.ReadFile(keyFile + ".pub")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read public key file: %w", err)
|
||||
if errors.Is(err, iofs.ErrNotExist) && opt.KeyUseAgent {
|
||||
pubBytes, err = os.ReadFile(keyFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read public key file: %w", err)
|
||||
}
|
||||
} else {
|
||||
return nil, fmt.Errorf("failed to read public key file: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
pub, _, _, _, err := ssh.ParseAuthorizedKey(pubBytes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse public key file: %w", err)
|
||||
@@ -807,8 +853,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
}
|
||||
|
||||
// Load key file if specified
|
||||
if keyFile != "" || opt.KeyPem != "" {
|
||||
// Load key file as a private key, if specified. This is only needed when not using an ssh agent.
|
||||
if (keyFile != "" && !opt.KeyUseAgent) || opt.KeyPem != "" {
|
||||
var key []byte
|
||||
if opt.KeyPem == "" {
|
||||
key, err = os.ReadFile(keyFile)
|
||||
@@ -952,6 +998,7 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
|
||||
f.features = (&fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
SlowHash: true,
|
||||
PartialUploads: true,
|
||||
}).Fill(ctx, f)
|
||||
// Make a connection and pool it to return errors early
|
||||
c, err := f.getSftpConnection(ctx)
|
||||
@@ -1023,7 +1070,7 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
|
||||
}
|
||||
}
|
||||
f.putSftpConnection(&c, err)
|
||||
if root != "" {
|
||||
if root != "" && !strings.HasSuffix(root, "/") {
|
||||
// Check to see if the root is actually an existing file,
|
||||
// and if so change the filesystem root to its parent directory.
|
||||
oldAbsRoot := f.absRoot
|
||||
@@ -1126,13 +1173,6 @@ func (f *Fs) dirExists(ctx context.Context, dir string) (bool, error) {
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
root := path.Join(f.absRoot, dir)
|
||||
ok, err := f.dirExists(ctx, root)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("List failed: %w", err)
|
||||
}
|
||||
if !ok {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
sftpDir := root
|
||||
if sftpDir == "" {
|
||||
sftpDir = "."
|
||||
@@ -1144,6 +1184,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
infos, err := c.sftpClient.ReadDir(sftpDir)
|
||||
f.putSftpConnection(&c, err)
|
||||
if err != nil {
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
return nil, fmt.Errorf("error listing %q: %w", dir, err)
|
||||
}
|
||||
for _, info := range infos {
|
||||
@@ -1287,10 +1330,17 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Move: %w", err)
|
||||
}
|
||||
	srcPath, dstPath := srcObj.path(), path.Join(f.absRoot, remote)
	if _, ok := c.sftpClient.HasExtension("posix-rename@openssh.com"); ok {
		err = c.sftpClient.PosixRename(srcPath, dstPath)
	} else {
		// If we haven't got PosixRename then remove the destination first before renaming
		err = c.sftpClient.Remove(dstPath)
		if err != nil && !errors.Is(err, iofs.ErrNotExist) {
			fs.Errorf(f, "Move: Failed to remove existing file %q: %v", dstPath, err)
		}
		err = c.sftpClient.Rename(srcPath, dstPath)
	}
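	// Design note: a plain SFTP rename refuses to overwrite an existing destination on most
	// servers, whereas the posix-rename@openssh.com extension replaces it atomically. Deleting
	// the destination first (ignoring "not exist" errors) makes the fallback behave like an
	// overwriting move, at the cost of a short window where neither file exists.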
|
||||
f.putSftpConnection(&c, err)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Move Rename failed: %w", err)
|
||||
|
||||
@@ -155,7 +155,7 @@ func init() {
|
||||
CheckAuth: checkAuth,
|
||||
})
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||
Name: "upload_cutoff",
|
||||
Help: "Cutoff for switching to multipart upload.",
|
||||
Default: defaultUploadCutoff,
|
||||
@@ -182,6 +182,7 @@ standard values here or any folder ID (long hex number ID).`,
|
||||
Value: "top",
|
||||
Help: "Access the home, favorites, and shared folders as well as the connectors.",
|
||||
}},
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Default: defaultChunkSize,
|
||||
@@ -216,7 +217,7 @@ be set manually to something like: https://XXX.sharefile.com
|
||||
encoder.EncodeLeftSpace |
|
||||
encoder.EncodeLeftPeriod |
|
||||
encoder.EncodeInvalidUtf8),
|
||||
}},
|
||||
}}...),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -775,8 +776,13 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
}
|
||||
}
|
||||
|
||||
// FIXMEPutStream uploads to the remote path with the modTime given of indeterminate size
//
// PutStream no longer appears to work - the streamed uploads need the
// size specified at the start otherwise we get this error:
//
// upload failed: file size does not match (-2)
func (f *Fs) FIXMEPutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}
|
||||
|
||||
@@ -1453,12 +1459,12 @@ func (o *Object) ID() string {
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
	_ fs.Fs       = (*Fs)(nil)
	_ fs.Purger   = (*Fs)(nil)
	_ fs.Mover    = (*Fs)(nil)
	_ fs.DirMover = (*Fs)(nil)
	_ fs.Copier   = (*Fs)(nil)
	// _ fs.PutStreamer     = (*Fs)(nil)
	_ fs.DirCacheFlusher = (*Fs)(nil)
	_ fs.Object          = (*Object)(nil)
	_ fs.IDer            = (*Object)(nil)
|
||||
|
||||
@@ -45,7 +45,8 @@ func init() {
|
||||
|
||||
Note that siad must run with --disable-api-security to open API port for other hosts (not recommended).
|
||||
Keep default if Sia daemon runs on localhost.`,
|
||||
Default:   "http://127.0.0.1:9980",
Sensitive: true,
|
||||
}, {
|
||||
Name: "api_password",
|
||||
Help: `Sia Daemon API Password.
|
||||
|
||||
@@ -34,9 +34,10 @@ func (f *Fs) dial(ctx context.Context, network, addr string) (*conn, error) {
|
||||
|
||||
	d := &smb2.Dialer{
		Initiator: &smb2.NTLMInitiator{
			User:      f.opt.User,
			Password:  pass,
			Domain:    f.opt.Domain,
			TargetSPN: f.opt.SPN,
		},
	}
|
||||
|
||||
@@ -105,9 +106,9 @@ func (f *Fs) getSessions() int32 {
|
||||
func (f *Fs) newConnection(ctx context.Context, share string) (c *conn, err error) {
|
||||
// As we are pooling these connections we need to decouple
|
||||
// them from the current context
|
||||
ctx = context.Background()
|
||||
bgCtx := context.Background()
|
||||
|
||||
c, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port)
|
||||
c, err = f.dial(bgCtx, "tcp", f.opt.Host+":"+f.opt.Port)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't connect SMB: %w", err)
|
||||
}
|
||||
@@ -118,7 +119,7 @@ func (f *Fs) newConnection(ctx context.Context, share string) (c *conn, err erro
|
||||
_ = c.smbSession.Logoff()
|
||||
return nil, fmt.Errorf("couldn't initialize SMB: %w", err)
|
||||
}
|
||||
c.smbShare = c.smbShare.WithContext(ctx)
|
||||
c.smbShare = c.smbShare.WithContext(bgCtx)
|
||||
}
|
||||
return c, nil
|
||||
}
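// Design note: the dial and the share handle are bound to context.Background() (bgCtx) rather
// than the caller's ctx because these connections live in a pool and outlive the request that
// created them; otherwise cancelling one operation could invalidate a connection that is later
// reused by other operations.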
|
||||
|
||||
@@ -41,13 +41,15 @@ func init() {
|
||||
NewFs: NewFs,
|
||||
|
||||
Options: []fs.Option{{
|
||||
Name: "host",
|
||||
Help: "SMB server hostname to connect to.\n\nE.g. \"example.com\".",
|
||||
Required: true,
|
||||
Name: "host",
|
||||
Help: "SMB server hostname to connect to.\n\nE.g. \"example.com\".",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "SMB username.",
|
||||
Default: currentUser,
|
||||
Name: "user",
|
||||
Help: "SMB username.",
|
||||
Default: currentUser,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "port",
|
||||
Help: "SMB port number.",
|
||||
@@ -57,9 +59,22 @@ func init() {
|
||||
Help: "SMB password.",
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "domain",
|
||||
Help: "Domain name for NTLM authentication.",
|
||||
Default: "WORKGROUP",
|
||||
Name: "domain",
|
||||
Help: "Domain name for NTLM authentication.",
|
||||
Default: "WORKGROUP",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "spn",
|
||||
Help: `Service principal name.
|
||||
|
||||
Rclone presents this name to the server. Some servers use this as further
|
||||
authentication, and it often needs to be set for clusters. For example:
|
||||
|
||||
cifs/remotehost:1020
|
||||
|
||||
Leave blank if not sure.
|
||||
`,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "idle_timeout",
|
||||
Default: fs.Duration(60 * time.Second),
|
||||
@@ -109,6 +124,7 @@ type Options struct {
|
||||
User string `config:"user"`
|
||||
Pass string `config:"pass"`
|
||||
Domain string `config:"domain"`
|
||||
SPN string `config:"spn"`
|
||||
HideSpecial bool `config:"hide_special_share"`
|
||||
CaseInsensitive bool `config:"case_insensitive"`
|
||||
IdleTimeout fs.Duration `config:"idle_timeout"`
|
||||
@@ -435,7 +451,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
func (f *Fs) About(ctx context.Context) (_ *fs.Usage, err error) {
|
||||
share, dir := f.split("/")
|
||||
if share == "" {
|
||||
return nil, fs.ErrorListBucketRequired
|
||||
// Just return empty info rather than an error if called on the root
|
||||
return &fs.Usage{}, nil
|
||||
}
|
||||
dir = f.toSambaPath(dir)
|
||||
|
||||
|
||||
@@ -98,9 +98,10 @@ func init() {
|
||||
},
|
||||
}},
|
||||
{
|
||||
Name: "access_grant",
|
||||
Help: "Access grant.",
|
||||
Provider: "existing",
|
||||
Name: "access_grant",
|
||||
Help: "Access grant.",
|
||||
Provider: "existing",
|
||||
Sensitive: true,
|
||||
},
|
||||
{
|
||||
Name: "satellite_address",
|
||||
@@ -120,14 +121,16 @@ func init() {
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "api_key",
|
||||
Help: "API key.",
|
||||
Provider: newProvider,
|
||||
Name: "api_key",
|
||||
Help: "API key.",
|
||||
Provider: newProvider,
|
||||
Sensitive: true,
|
||||
},
|
||||
{
|
||||
Name: "passphrase",
|
||||
Help: "Encryption passphrase.\n\nTo access existing objects enter passphrase used for uploading.",
|
||||
Provider: newProvider,
|
||||
Name: "passphrase",
|
||||
Help: "Encryption passphrase.\n\nTo access existing objects enter passphrase used for uploading.",
|
||||
Provider: newProvider,
|
||||
Sensitive: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
@@ -528,7 +531,11 @@ func (f *Fs) NewObject(ctx context.Context, relative string) (_ fs.Object, err e
|
||||
// May create the object even if it returns an error - if so will return the
|
||||
// object and the error, otherwise will return nil and the error
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (_ fs.Object, err error) {
|
||||
fs.Debugf(f, "cp input ./%s # %+v %d", src.Remote(), options, src.Size())
|
||||
return f.put(ctx, in, src, src.Remote(), options...)
|
||||
}
|
||||
|
||||
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options ...fs.OpenOption) (_ fs.Object, err error) {
|
||||
fs.Debugf(f, "cp input ./%s # %+v %d", remote, options, src.Size())
|
||||
|
||||
// Reject options we don't support.
|
||||
for _, option := range options {
|
||||
@@ -539,7 +546,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
}
|
||||
}
|
||||
|
||||
bucketName, bucketPath := f.absolute(src.Remote())
|
||||
bucketName, bucketPath := f.absolute(remote)
|
||||
|
||||
upload, err := f.project.UploadObject(ctx, bucketName, bucketPath, nil)
|
||||
if err != nil {
|
||||
@@ -549,7 +556,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
if err != nil {
|
||||
aerr := upload.Abort()
|
||||
if aerr != nil && !errors.Is(aerr, uplink.ErrUploadDone) {
|
||||
fs.Errorf(f, "cp input ./%s %+v: %+v", src.Remote(), options, aerr)
|
||||
fs.Errorf(f, "cp input ./%s %+v: %+v", remote, options, aerr)
|
||||
}
|
||||
}
|
||||
}()
|
||||
@@ -574,7 +581,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
}
|
||||
|
||||
err = fserrors.RetryError(err)
|
||||
fs.Errorf(f, "cp input ./%s %+v: %+v\n", src.Remote(), options, err)
|
||||
fs.Errorf(f, "cp input ./%s %+v: %+v\n", remote, options, err)
|
||||
|
||||
return nil, err
|
||||
}
|
||||
@@ -589,11 +596,19 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
return nil, err
|
||||
}
|
||||
err = fserrors.RetryError(errors.New("bucket was not available, now created, the upload must be retried"))
|
||||
} else if errors.Is(err, uplink.ErrTooManyRequests) {
|
||||
// Storj has a rate limit of 1 per second of uploading to the same file.
|
||||
// This produces ErrTooManyRequests here, so we wait 1 second and retry.
|
||||
//
|
||||
// See: https://github.com/storj/uplink/issues/149
|
||||
fs.Debugf(f, "uploading too fast - sleeping for 1 second: %v", err)
|
||||
time.Sleep(time.Second)
|
||||
err = fserrors.RetryError(err)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return newObjectFromUplink(f, src.Remote(), upload.Info()), nil
|
||||
return newObjectFromUplink(f, remote, upload.Info()), nil
|
||||
}
|
||||
|
||||
// PutStream uploads to the remote path with the modTime given of indeterminate
|
||||
|
||||
@@ -176,9 +176,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (_ io.ReadC
|
||||
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
|
||||
// return an error or update the object properly (rather than e.g. calling panic).
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
fs.Debugf(o, "cp input ./%s %+v", src.Remote(), options)
|
||||
fs.Debugf(o, "cp input ./%s %+v", o.Remote(), options)
|
||||
|
||||
oNew, err := o.fs.Put(ctx, in, src, options...)
|
||||
oNew, err := o.fs.put(ctx, in, src, o.Remote(), options...)
|
||||
|
||||
if err == nil {
|
||||
*o = *(oNew.(*Object))
|
||||
|
||||
@@ -132,42 +132,50 @@ func init() {
|
||||
}
|
||||
return nil, fmt.Errorf("unknown state %q", config.State)
|
||||
}, Options: []fs.Option{{
|
||||
Name: "app_id",
|
||||
Help: "Sugarsync App ID.\n\nLeave blank to use rclone's.",
|
||||
Name: "app_id",
|
||||
Help: "Sugarsync App ID.\n\nLeave blank to use rclone's.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "access_key_id",
|
||||
Help: "Sugarsync Access Key ID.\n\nLeave blank to use rclone's.",
|
||||
Name: "access_key_id",
|
||||
Help: "Sugarsync Access Key ID.\n\nLeave blank to use rclone's.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "private_access_key",
|
||||
Help: "Sugarsync Private Access Key.\n\nLeave blank to use rclone's.",
|
||||
Name: "private_access_key",
|
||||
Help: "Sugarsync Private Access Key.\n\nLeave blank to use rclone's.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "hard_delete",
|
||||
Help: "Permanently delete files if true\notherwise put them in the deleted files.",
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "refresh_token",
|
||||
Help: "Sugarsync refresh token.\n\nLeave blank normally, will be auto configured by rclone.",
|
||||
Advanced: true,
|
||||
Name: "refresh_token",
|
||||
Help: "Sugarsync refresh token.\n\nLeave blank normally, will be auto configured by rclone.",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "authorization",
|
||||
Help: "Sugarsync authorization.\n\nLeave blank normally, will be auto configured by rclone.",
|
||||
Advanced: true,
|
||||
Name: "authorization",
|
||||
Help: "Sugarsync authorization.\n\nLeave blank normally, will be auto configured by rclone.",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "authorization_expiry",
|
||||
Help: "Sugarsync authorization expiry.\n\nLeave blank normally, will be auto configured by rclone.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "Sugarsync user.\n\nLeave blank normally, will be auto configured by rclone.",
|
||||
Advanced: true,
|
||||
Name: "user",
|
||||
Help: "Sugarsync user.\n\nLeave blank normally, will be auto configured by rclone.",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "root_id",
|
||||
Help: "Sugarsync root id.\n\nLeave blank normally, will be auto configured by rclone.",
|
||||
Advanced: true,
|
||||
Name: "root_id",
|
||||
Help: "Sugarsync root id.\n\nLeave blank normally, will be auto configured by rclone.",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "deleted_id",
|
||||
Help: "Sugarsync deleted folder id.\n\nLeave blank normally, will be auto configured by rclone.",
|
||||
Advanced: true,
|
||||
Name: "deleted_id",
|
||||
Help: "Sugarsync deleted folder id.\n\nLeave blank normally, will be auto configured by rclone.",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -101,7 +100,7 @@ but other operations such as Remove and Copy will fail.
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "swift",
|
||||
Description: "OpenStack Swift (Rackspace Cloud Files, Memset Memstore, OVH)",
|
||||
Description: "OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)",
|
||||
NewFs: NewFs,
|
||||
Options: append([]fs.Option{{
|
||||
Name: "env_auth",
|
||||
@@ -117,11 +116,13 @@ func init() {
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "User name to log in (OS_USERNAME).",
|
||||
Name: "user",
|
||||
Help: "User name to log in (OS_USERNAME).",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "key",
|
||||
Help: "API key or password (OS_PASSWORD).",
|
||||
Name: "key",
|
||||
Help: "API key or password (OS_PASSWORD).",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "auth",
|
||||
Help: "Authentication URL for server (OS_AUTH_URL).",
|
||||
@@ -143,22 +144,30 @@ func init() {
|
||||
}, {
|
||||
Value: "https://auth.cloud.ovh.net/v3",
|
||||
Help: "OVH",
|
||||
}, {
|
||||
Value: "https://authenticate.ain.net",
|
||||
Help: "Blomp Cloud Storage",
|
||||
}},
|
||||
}, {
|
||||
Name: "user_id",
|
||||
Help: "User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).",
|
||||
Name: "user_id",
|
||||
Help: "User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "domain",
|
||||
Help: "User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)",
|
||||
Name: "domain",
|
||||
Help: "User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "tenant",
|
||||
Help: "Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME).",
|
||||
Name: "tenant",
|
||||
Help: "Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME).",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "tenant_id",
|
||||
Help: "Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID).",
|
||||
Name: "tenant_id",
|
||||
Help: "Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID).",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "tenant_domain",
|
||||
Help: "Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME).",
|
||||
Name: "tenant_domain",
|
||||
Help: "Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME).",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region name - optional (OS_REGION_NAME).",
|
||||
@@ -166,17 +175,21 @@ func init() {
|
||||
Name: "storage_url",
|
||||
Help: "Storage URL - optional (OS_STORAGE_URL).",
|
||||
}, {
|
||||
Name: "auth_token",
|
||||
Help: "Auth Token from alternate authentication - optional (OS_AUTH_TOKEN).",
|
||||
Name: "auth_token",
|
||||
Help: "Auth Token from alternate authentication - optional (OS_AUTH_TOKEN).",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "application_credential_id",
|
||||
Help: "Application Credential ID (OS_APPLICATION_CREDENTIAL_ID).",
|
||||
Name: "application_credential_id",
|
||||
Help: "Application Credential ID (OS_APPLICATION_CREDENTIAL_ID).",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "application_credential_name",
|
||||
Help: "Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME).",
|
||||
Name: "application_credential_name",
|
||||
Help: "Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME).",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "application_credential_secret",
|
||||
Help: "Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET).",
|
||||
Name: "application_credential_secret",
|
||||
Help: "Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET).",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "auth_version",
|
||||
Help: "AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION).",
|
||||
@@ -1328,23 +1341,6 @@ func (o *Object) removeSegmentsLargeObject(ctx context.Context, containerSegment
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Object) getSegmentsDlo(ctx context.Context) (segmentsContainer string, prefix string, err error) {
|
||||
if err = o.readMetaData(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
dirManifest := o.headers["X-Object-Manifest"]
|
||||
dirManifest, err = url.PathUnescape(dirManifest)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
delimiter := strings.Index(dirManifest, "/")
|
||||
if len(dirManifest) == 0 || delimiter < 0 {
|
||||
err = errors.New("missing or wrong structure of manifest of Dynamic large object")
|
||||
return
|
||||
}
|
||||
return dirManifest[:delimiter], dirManifest[delimiter+1:], nil
|
||||
}
|
||||
|
||||
// urlEncode encodes a string so that it is a valid URL
|
||||
//
|
||||
// We don't use any of Go's standard methods as we need `/` not
|
||||
@@ -1576,6 +1572,10 @@ func (o *Object) Remove(ctx context.Context) (err error) {
|
||||
// Remove file/manifest first
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
err = o.fs.c.ObjectDelete(ctx, container, containerPath)
|
||||
if err == swift.ObjectNotFound {
|
||||
fs.Errorf(o, "Dangling object - ignoring: %v", err)
|
||||
err = nil
|
||||
}
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -49,8 +49,7 @@ func (e Errors) Error() string {
|
||||
|
||||
	if len(e) == 0 {
		buf.WriteString("no error")
	} else if len(e) == 1 {
		buf.WriteString("1 error: ")
	} else {
		fmt.Fprintf(&buf, "%d errors: ", len(e))
|
||||
@@ -61,8 +60,17 @@ func (e Errors) Error() string {
|
||||
buf.WriteString("; ")
|
||||
}
|
||||
|
||||
		if err != nil {
			buf.WriteString(err.Error())
		} else {
			buf.WriteString("nil error")
		}
	}
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// Unwrap returns the wrapped errors
|
||||
func (e Errors) Unwrap() []error {
|
||||
return e
|
||||
}
|
||||
|
||||
backend/union/errors_test.go (new file, 94 lines)
@@ -0,0 +1,94 @@
|
||||
//go:build go1.20
|
||||
// +build go1.20
|
||||
|
||||
package union
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var (
|
||||
err1 = errors.New("Error 1")
|
||||
err2 = errors.New("Error 2")
|
||||
err3 = errors.New("Error 3")
|
||||
)
|
||||
|
||||
func TestErrorsMap(t *testing.T) {
|
||||
es := Errors{
|
||||
nil,
|
||||
err1,
|
||||
err2,
|
||||
}
|
||||
want := Errors{
|
||||
err2,
|
||||
}
|
||||
got := es.Map(func(e error) error {
|
||||
if e == err1 {
|
||||
return nil
|
||||
}
|
||||
return e
|
||||
})
|
||||
assert.Equal(t, want, got)
|
||||
}
|
||||
|
||||
func TestErrorsFilterNil(t *testing.T) {
|
||||
es := Errors{
|
||||
nil,
|
||||
err1,
|
||||
nil,
|
||||
err2,
|
||||
nil,
|
||||
}
|
||||
want := Errors{
|
||||
err1,
|
||||
err2,
|
||||
}
|
||||
got := es.FilterNil()
|
||||
assert.Equal(t, want, got)
|
||||
}
|
||||
|
||||
func TestErrorsErr(t *testing.T) {
|
||||
// Check not all nil case
|
||||
es := Errors{
|
||||
nil,
|
||||
err1,
|
||||
nil,
|
||||
err2,
|
||||
nil,
|
||||
}
|
||||
want := Errors{
|
||||
err1,
|
||||
err2,
|
||||
}
|
||||
got := es.Err()
|
||||
|
||||
// Check all nil case
|
||||
assert.Equal(t, want, got)
|
||||
es = Errors{
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
}
|
||||
assert.Nil(t, es.Err())
|
||||
}
|
||||
|
||||
func TestErrorsError(t *testing.T) {
|
||||
assert.Equal(t, "no error", Errors{}.Error())
|
||||
assert.Equal(t, "1 error: Error 1", Errors{err1}.Error())
|
||||
assert.Equal(t, "1 error: nil error", Errors{nil}.Error())
|
||||
assert.Equal(t, "2 errors: Error 1; Error 2", Errors{err1, err2}.Error())
|
||||
}
|
||||
|
||||
func TestErrorsUnwrap(t *testing.T) {
|
||||
es := Errors{
|
||||
err1,
|
||||
err2,
|
||||
}
|
||||
assert.Equal(t, []error{err1, err2}, es.Unwrap())
|
||||
assert.True(t, errors.Is(es, err1))
|
||||
assert.True(t, errors.Is(es, err2))
|
||||
assert.False(t, errors.Is(es, err3))
|
||||
}
|
||||
@@ -3,7 +3,6 @@ package policy
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -109,9 +108,7 @@ func findEntry(ctx context.Context, f fs.Fs, remote string) fs.DirEntry {
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
// random modtime for root
|
||||
randomNow := time.Unix(time.Now().Unix()-rand.Int63n(10000), 0)
|
||||
return fs.NewDir("", randomNow)
|
||||
return fs.NewDir("", time.Time{})
|
||||
}
|
||||
found := false
|
||||
for _, e := range entries {
|
||||
|
||||
@@ -756,14 +756,6 @@ func (f *Fs) create(ctx context.Context, path string) ([]*upstream.Fs, error) {
|
||||
return f.createPolicy.Create(ctx, f.upstreams, path)
|
||||
}
|
||||
|
||||
func (f *Fs) createEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
|
||||
return f.createPolicy.CreateEntries(entries...)
|
||||
}
|
||||
|
||||
func (f *Fs) search(ctx context.Context, path string) (*upstream.Fs, error) {
|
||||
return f.searchPolicy.Search(ctx, f.upstreams, path)
|
||||
}
|
||||
|
||||
func (f *Fs) searchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
|
||||
return f.searchPolicy.SearchEntries(entries...)
|
||||
}
|
||||
@@ -809,6 +801,24 @@ func (f *Fs) Shutdown(ctx context.Context) error {
|
||||
return errs.Err()
|
||||
}
|
||||
|
||||
// CleanUp the trash in the Fs
|
||||
//
|
||||
// Implement this if you have a way of emptying the trash or
|
||||
// otherwise cleaning up old versions of files.
|
||||
func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
errs := Errors(make([]error, len(f.upstreams)))
|
||||
multithread(len(f.upstreams), func(i int) {
|
||||
u := f.upstreams[i]
|
||||
if do := u.Features().CleanUp; do != nil {
|
||||
err := do(ctx)
|
||||
if err != nil {
|
||||
errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
|
||||
}
|
||||
}
|
||||
})
|
||||
return errs.Err()
|
||||
}
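// Usage note: this is what `rclone cleanup union:` ends up calling - each upstream that
// implements CleanUp is invoked concurrently via multithread, and per-upstream failures are
// collected into Errors so one failing member doesn't hide the result of the others.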
|
||||
|
||||
// NewFs constructs an Fs from the path.
|
||||
//
|
||||
// The returned Fs is the actual Fs, referenced by remote in the config
|
||||
@@ -892,6 +902,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: true,
|
||||
PartialUploads: true,
|
||||
}).Fill(ctx, f)
|
||||
canMove, slowHash := true, false
|
||||
for _, f := range upstreams {
|
||||
@@ -922,6 +933,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
}
|
||||
|
||||
// show that we wrap other backends
|
||||
features.Overlay = true
|
||||
|
||||
f.features = features
|
||||
|
||||
// Get common intersection of hashes
|
||||
@@ -968,4 +982,5 @@ var (
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.ListRer = (*Fs)(nil)
|
||||
_ fs.Shutdowner = (*Fs)(nil)
|
||||
_ fs.CleanUpper = (*Fs)(nil)
|
||||
)
|
||||
|
||||
@@ -11,6 +11,11 @@ import (
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
var (
|
||||
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "PublicLink", "PutUnchecked", "MergeDirs", "OpenWriterAt"}
|
||||
unimplementableObjectMethods = []string{}
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
if *fstest.RemoteName == "" {
|
||||
@@ -18,8 +23,8 @@ func TestIntegration(t *testing.T) {
|
||||
}
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: *fstest.RemoteName,
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
|
||||
UnimplementableObjectMethods: []string{"MimeType"},
|
||||
UnimplementableFsMethods: unimplementableFsMethods,
|
||||
UnimplementableObjectMethods: unimplementableObjectMethods,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -39,8 +44,8 @@ func TestStandard(t *testing.T) {
|
||||
{Name: name, Key: "create_policy", Value: "epmfs"},
|
||||
{Name: name, Key: "search_policy", Value: "ff"},
|
||||
},
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
|
||||
UnimplementableObjectMethods: []string{"MimeType"},
|
||||
UnimplementableFsMethods: unimplementableFsMethods,
|
||||
UnimplementableObjectMethods: unimplementableObjectMethods,
|
||||
QuickTestOK: true,
|
||||
})
|
||||
}
|
||||
@@ -61,8 +66,8 @@ func TestRO(t *testing.T) {
|
||||
{Name: name, Key: "create_policy", Value: "epmfs"},
|
||||
{Name: name, Key: "search_policy", Value: "ff"},
|
||||
},
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
|
||||
UnimplementableObjectMethods: []string{"MimeType"},
|
||||
UnimplementableFsMethods: unimplementableFsMethods,
|
||||
UnimplementableObjectMethods: unimplementableObjectMethods,
|
||||
QuickTestOK: true,
|
||||
})
|
||||
}
|
||||
@@ -83,8 +88,8 @@ func TestNC(t *testing.T) {
|
||||
{Name: name, Key: "create_policy", Value: "epmfs"},
|
||||
{Name: name, Key: "search_policy", Value: "ff"},
|
||||
},
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
|
||||
UnimplementableObjectMethods: []string{"MimeType"},
|
||||
UnimplementableFsMethods: unimplementableFsMethods,
|
||||
UnimplementableObjectMethods: unimplementableObjectMethods,
|
||||
QuickTestOK: true,
|
||||
})
|
||||
}
|
||||
@@ -105,8 +110,8 @@ func TestPolicy1(t *testing.T) {
|
||||
{Name: name, Key: "create_policy", Value: "lus"},
|
||||
{Name: name, Key: "search_policy", Value: "all"},
|
||||
},
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
|
||||
UnimplementableObjectMethods: []string{"MimeType"},
|
||||
UnimplementableFsMethods: unimplementableFsMethods,
|
||||
UnimplementableObjectMethods: unimplementableObjectMethods,
|
||||
QuickTestOK: true,
|
||||
})
|
||||
}
|
||||
@@ -127,8 +132,8 @@ func TestPolicy2(t *testing.T) {
|
||||
{Name: name, Key: "create_policy", Value: "rand"},
|
||||
{Name: name, Key: "search_policy", Value: "ff"},
|
||||
},
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
|
||||
UnimplementableObjectMethods: []string{"MimeType"},
|
||||
UnimplementableFsMethods: unimplementableFsMethods,
|
||||
UnimplementableObjectMethods: unimplementableObjectMethods,
|
||||
QuickTestOK: true,
|
||||
})
|
||||
}
|
||||
@@ -149,8 +154,8 @@ func TestPolicy3(t *testing.T) {
|
||||
{Name: name, Key: "create_policy", Value: "all"},
|
||||
{Name: name, Key: "search_policy", Value: "all"},
|
||||
},
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
|
||||
UnimplementableObjectMethods: []string{"MimeType"},
|
||||
UnimplementableFsMethods: unimplementableFsMethods,
|
||||
UnimplementableObjectMethods: unimplementableObjectMethods,
|
||||
QuickTestOK: true,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -43,8 +43,14 @@ func init() {
|
||||
Description: "Uptobox",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Help: "Your access token.\n\nGet it from https://uptobox.com/my_account.",
|
||||
Name: "access_token",
|
||||
Help: "Your access token.\n\nGet it from https://uptobox.com/my_account.",
|
||||
Name: "access_token",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Help: "Set to make uploaded files private",
|
||||
Name: "private",
|
||||
Advanced: true,
|
||||
Default: false,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -63,6 +69,7 @@ func init() {
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
AccessToken string `config:"access_token"`
|
||||
Private bool `config:"private"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
@@ -75,6 +82,7 @@ type Fs struct {
|
||||
srv *rest.Client
|
||||
pacer *fs.Pacer
|
||||
IDRegexp *regexp.Regexp
|
||||
public string // "0" to make objects private
|
||||
}
|
||||
|
||||
// Object represents an Uptobox object
|
||||
@@ -211,10 +219,13 @@ func NewFs(ctx context.Context, name string, root string, config configmap.Mappe
|
||||
CanHaveEmptyDirectories: true,
|
||||
ReadMimeType: false,
|
||||
}).Fill(ctx, f)
|
||||
if f.opt.Private {
|
||||
f.public = "0"
|
||||
}
|
||||
|
||||
client := fshttp.NewClient(ctx)
|
||||
f.srv = rest.NewClient(client).SetRoot(apiBaseURL)
|
||||
f.IDRegexp = regexp.MustCompile(`https://uptobox\.com/([a-zA-Z0-9]+)`)
|
||||
f.IDRegexp = regexp.MustCompile(`^https://uptobox\.com/([a-zA-Z0-9]+)`)
|
||||
|
||||
_, err = f.readMetaDataForPath(ctx, f.dirPath(""), &api.MetadataRequestOptions{Limit: 10})
|
||||
if err != nil {
|
||||
@@ -472,11 +483,11 @@ func (f *Fs) updateFileInformation(ctx context.Context, update *api.UpdateFileIn
|
||||
return err
|
||||
}
|
||||
|
||||
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
|
||||
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) error {
|
||||
if size > int64(200e9) { // max size 200GB
|
||||
return nil, errors.New("file too big, can't upload")
|
||||
return errors.New("file too big, can't upload")
|
||||
} else if size == 0 {
|
||||
return nil, fs.ErrorCantUploadEmptyFiles
|
||||
return fs.ErrorCantUploadEmptyFiles
|
||||
}
|
||||
// yes it does take 4 requests if we're uploading to root and 6+ if we're uploading to any subdir :(
|
||||
|
||||
@@ -494,19 +505,19 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
if info.StatusCode != 0 {
|
||||
return nil, fmt.Errorf("putUnchecked api error: %d - %s", info.StatusCode, info.Message)
|
||||
return fmt.Errorf("putUnchecked api error: %d - %s", info.StatusCode, info.Message)
|
||||
}
|
||||
// we need to have a safe name for the upload to work
|
||||
tmpName := "rcloneTemp" + random.String(8)
|
||||
upload, err := f.uploadFile(ctx, in, size, tmpName, info.Data.UploadLink, options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
if len(upload.Files) != 1 {
|
||||
return nil, errors.New("upload unexpected response")
|
||||
return errors.New("upload unexpected response")
|
||||
}
|
||||
match := f.IDRegexp.FindStringSubmatch(upload.Files[0].URL)
|
||||
|
||||
@@ -521,23 +532,27 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
|
||||
// this might need some more error handling. if any of the following requests fail
|
||||
// we'll leave an orphaned temporary file floating around somewhere
|
||||
// they rarely fail though
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
err = f.move(ctx, fullBase, match[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// rename file to final name
|
||||
err = f.updateFileInformation(ctx, &api.UpdateFileInformation{Token: f.opt.AccessToken, FileCode: match[1], NewName: f.opt.Enc.FromStandardName(leaf)})
|
||||
err = f.updateFileInformation(ctx, &api.UpdateFileInformation{
|
||||
Token: f.opt.AccessToken,
|
||||
FileCode: match[1],
|
||||
NewName: f.opt.Enc.FromStandardName(leaf),
|
||||
Public: f.public,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
// finally fetch the file object.
|
||||
return f.NewObject(ctx, remote)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Put in to the remote path with the modTime given of the given size
|
||||
@@ -567,7 +582,11 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
// This will create a duplicate if we upload a new file without
|
||||
// checking to see if there is one already - use Put() for that.
|
||||
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
return f.putUnchecked(ctx, in, src.Remote(), src.Size(), options...)
|
||||
err := f.putUnchecked(ctx, in, src.Remote(), src.Size(), options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.NewObject(ctx, src.Remote())
|
||||
}
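// Design note: putUnchecked now reports only an error. Callers that need the resulting object
// fetch it themselves with NewObject - PutUnchecked does so straight away, while Update waits
// until it has removed the old version so it doesn't hand back an object that is about to be
// deduplicated.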
|
||||
|
||||
// CreateDir dir creates a directory with the given parent path
|
||||
@@ -660,7 +679,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if info.Data.CurrentFolder.FileCount > 0 {
|
||||
if len(info.Data.Folders) > 0 || len(info.Data.Files) > 0 {
|
||||
return fs.ErrorDirectoryNotEmpty
|
||||
}
|
||||
|
||||
@@ -696,7 +715,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
|
||||
// rename to final name if we need to
|
||||
if needRename {
|
||||
err := f.updateFileInformation(ctx, &api.UpdateFileInformation{Token: f.opt.AccessToken, FileCode: srcObj.code, NewName: f.opt.Enc.FromStandardName(dstLeaf)})
|
||||
err := f.updateFileInformation(ctx, &api.UpdateFileInformation{
|
||||
Token: f.opt.AccessToken,
|
||||
FileCode: srcObj.code,
|
||||
NewName: f.opt.Enc.FromStandardName(dstLeaf),
|
||||
Public: f.public,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("move: failed final rename: %w", err)
|
||||
}
|
||||
@@ -888,7 +912,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
}
|
||||
|
||||
if needRename {
|
||||
err := f.updateFileInformation(ctx, &api.UpdateFileInformation{Token: f.opt.AccessToken, FileCode: newObj.(*Object).code, NewName: f.opt.Enc.FromStandardName(dstLeaf)})
|
||||
err := f.updateFileInformation(ctx, &api.UpdateFileInformation{
|
||||
Token: f.opt.AccessToken,
|
||||
FileCode: newObj.(*Object).code,
|
||||
NewName: f.opt.Enc.FromStandardName(dstLeaf),
|
||||
Public: f.public,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("copy: failed final rename: %w", err)
|
||||
}
|
||||
@@ -923,7 +952,8 @@ func (o *Object) Remote() string {
|
||||
// It attempts to read the objects mtime and if that isn't present the
|
||||
// LastModified returned in the http headers
|
||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
return time.Now()
|
||||
ci := fs.GetConfig(ctx)
|
||||
return time.Time(ci.DefaultTime)
|
||||
}
|
||||
|
||||
// Size returns the size of an object in bytes
|
||||
@@ -1000,7 +1030,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
}
|
||||
|
||||
// upload with new size but old name
|
||||
info, err := o.fs.putUnchecked(ctx, in, o.Remote(), src.Size(), options...)
|
||||
err := o.fs.putUnchecked(ctx, in, o.Remote(), src.Size(), options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1011,6 +1041,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return fmt.Errorf("failed to remove old version: %w", err)
|
||||
}
|
||||
|
||||
// Fetch new object after deleting the duplicate
|
||||
info, err := o.fs.NewObject(ctx, o.Remote())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Replace guts of old object with new one
|
||||
*o = *info.(*Object)
|
||||
|
||||
|
||||
@@ -75,6 +75,7 @@ type Prop struct {
|
||||
Size int64 `xml:"DAV: prop>getcontentlength,omitempty"`
|
||||
Modified Time `xml:"DAV: prop>getlastmodified,omitempty"`
|
||||
Checksums []string `xml:"prop>checksums>checksum,omitempty"`
|
||||
MESha1Hex *string `xml:"ME: prop>sha1hex,omitempty"` // Fastmail-specific sha1 checksum
|
||||
}
|
||||
|
||||
// Parse a status of the form "HTTP/1.1 200 OK" or "HTTP/1.1 200"
|
||||
@@ -102,22 +103,27 @@ func (p *Prop) StatusOK() bool {
|
||||
|
||||
// Hashes returns a map of all checksums - may be nil
|
||||
func (p *Prop) Hashes() (hashes map[hash.Type]string) {
|
||||
	if len(p.Checksums) > 0 {
		hashes = make(map[hash.Type]string)
		for _, checksums := range p.Checksums {
			checksums = strings.ToLower(checksums)
			for _, checksum := range strings.Split(checksums, " ") {
				switch {
				case strings.HasPrefix(checksum, "sha1:"):
					hashes[hash.SHA1] = checksum[5:]
				case strings.HasPrefix(checksum, "md5:"):
					hashes[hash.MD5] = checksum[4:]
				}
			}
		}
		return hashes
	} else if p.MESha1Hex != nil {
		hashes = make(map[hash.Type]string)
		hashes[hash.SHA1] = *p.MESha1Hex
		return hashes
	} else {
		return nil
	}
|
||||
}
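// Example (illustrative values): a checksums property of
// "SHA1:2fd4e1c67a2d28fced849ee1bb76e7391b93eb12 MD5:9e107d9d372bb6826bd81d3542a419d6"
// is lowercased and split, yielding entries for both hash.SHA1 and hash.MD5, while a Fastmail
// response carrying only the ME: sha1hex property yields a map with just the SHA1 entry.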
|
||||
|
||||
// PropValue is a tagged name and value
|
||||
|
||||
backend/webdav/chunking.go (new file, 214 lines)
@@ -0,0 +1,214 @@
|
||||
package webdav
|
||||
|
||||
/*
|
||||
chunked update for Nextcloud
|
||||
see https://docs.nextcloud.com/server/20/developer_manual/client_apis/WebDAV/chunking.html
|
||||
*/
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"path"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
func (f *Fs) shouldRetryChunkMerge(ctx context.Context, resp *http.Response, err error) (bool, error) {
|
||||
// Not found. Can be returned by NextCloud when merging chunks of an upload.
|
||||
if resp != nil && resp.StatusCode == 404 {
|
||||
return true, err
|
||||
}
|
||||
|
||||
// 423 LOCKED
|
||||
if resp != nil && resp.StatusCode == 423 {
|
||||
return false, fmt.Errorf("merging the uploaded chunks failed with 423 LOCKED. This usually happens when the chunks merging is still in progress on NextCloud, but it may also indicate a failed transfer: %w", err)
|
||||
}
|
||||
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
}
|
||||
|
||||
// set the chunk size for testing
|
||||
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
|
||||
return
|
||||
}
|
||||
|
||||
func (o *Object) getChunksUploadDir() (string, error) {
|
||||
hasher := md5.New()
|
||||
_, err := hasher.Write([]byte(o.filePath()))
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("chunked upload couldn't hash URL: %w", err)
|
||||
}
|
||||
uploadDir := "rclone-chunked-upload-" + hex.EncodeToString(hasher.Sum(nil))
|
||||
return uploadDir, nil
|
||||
}
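// Example (hypothetical path): for a file at "/remote.php/dav/files/alice/video.avi" the
// directory is "rclone-chunked-upload-" + hex(md5 of that path). Because the name is derived
// only from the destination path, a retried upload of the same file finds - and first purges -
// the temporary collection left behind by an earlier failed attempt.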
|
||||
|
||||
func (f *Fs) getChunksUploadURL() (string, error) {
|
||||
submatch := nextCloudURLRegex.FindStringSubmatch(f.endpointURL)
|
||||
if submatch == nil {
|
||||
return "", errors.New("the remote url looks incorrect. Note that nextcloud chunked uploads require you to use the /dav/files/USER endpoint instead of /webdav. Please check 'rclone config show remotename' to verify that the url field ends in /dav/files/USERNAME")
|
||||
}
|
||||
|
||||
baseURL, user := submatch[1], submatch[2]
|
||||
chunksUploadURL := fmt.Sprintf("%s/dav/uploads/%s/", baseURL, user)
|
||||
|
||||
return chunksUploadURL, nil
|
||||
}
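// Example (hypothetical host): with endpointURL
// "https://cloud.example.com/remote.php/dav/files/alice", nextCloudURLRegex captures
// "https://cloud.example.com/remote.php" and "alice", giving a chunksUploadURL of
// "https://cloud.example.com/remote.php/dav/uploads/alice/".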
|
||||
|
||||
func (o *Object) shouldUseChunkedUpload(src fs.ObjectInfo) bool {
|
||||
return o.fs.canChunk && o.fs.opt.ChunkSize > 0 && src.Size() > int64(o.fs.opt.ChunkSize)
|
||||
}
|
||||
|
||||
func (o *Object) updateChunked(ctx context.Context, in0 io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
var uploadDir string
|
||||
|
||||
// see https://docs.nextcloud.com/server/24/developer_manual/client_apis/WebDAV/chunking.html#starting-a-chunked-upload
|
||||
uploadDir, err = o.createChunksUploadDirectory(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
partObj := &Object{
|
||||
fs: o.fs,
|
||||
}
|
||||
|
||||
// see https://docs.nextcloud.com/server/24/developer_manual/client_apis/WebDAV/chunking.html#uploading-chunks
|
||||
err = o.uploadChunks(ctx, in0, src.Size(), partObj, uploadDir, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// see https://docs.nextcloud.com/server/24/developer_manual/client_apis/WebDAV/chunking.html#assembling-the-chunks
|
||||
err = o.mergeChunks(ctx, uploadDir, options, src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
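// Summary of the Nextcloud chunking protocol as used above: MKCOL a temporary upload
// collection under /dav/uploads/USER, PUT each chunk into it as OFFSET-ENDOFFSET members,
// then MOVE the special ".file" member to the final destination so the server assembles the
// chunks into the real file.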
|
||||
|
||||
func (o *Object) uploadChunks(ctx context.Context, in0 io.Reader, size int64, partObj *Object, uploadDir string, options []fs.OpenOption) error {
|
||||
chunkSize := int64(partObj.fs.opt.ChunkSize)
|
||||
|
||||
// TODO: upload chunks in parallel for faster transfer speeds
|
||||
for offset := int64(0); offset < size; offset += chunkSize {
|
||||
if err := ctx.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
contentLength := chunkSize
|
||||
|
||||
// Last chunk may be smaller
|
||||
if size-offset < contentLength {
|
||||
contentLength = size - offset
|
||||
}
|
||||
|
||||
endOffset := offset + contentLength - 1
|
||||
|
||||
partObj.remote = fmt.Sprintf("%s/%015d-%015d", uploadDir, offset, endOffset)
|
||||
// Enable low-level HTTP 2 retries.
|
||||
// 2022-04-28 15:59:06 ERROR : stuff/video.avi: Failed to copy: uploading chunk failed: Put "https://censored.com/remote.php/dav/uploads/Admin/rclone-chunked-upload-censored/000006113198080-000006123683840": http2: Transport: cannot retry err [http2: Transport received Server's graceful shutdown GOAWAY] after Request.Body was written; define Request.GetBody to avoid this error
|
||||
|
||||
buf := make([]byte, chunkSize)
|
||||
in := readers.NewRepeatableLimitReaderBuffer(in0, buf, chunkSize)
|
||||
|
||||
		getBody := func() (io.ReadCloser, error) {
			// RepeatableReader{} plays well with accounting so rewinding doesn't make the progress buggy
			if _, err := in.Seek(0, io.SeekStart); err != nil {
				return nil, err
			}
|
||||
|
||||
return io.NopCloser(in), nil
|
||||
}
|
||||
|
||||
err := partObj.updateSimple(ctx, in, getBody, partObj.remote, contentLength, "application/x-www-form-urlencoded", nil, o.fs.chunksUploadURL, options...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("uploading chunk failed: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
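// Example: with the default 10 MiB chunk size, a 25 MiB file is uploaded as three parts named
// "000000000000000-000010485759", "000010485760-000020971519" and
// "000020971520-000026214399" - the last chunk is simply smaller.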
|
||||
|
||||
func (o *Object) createChunksUploadDirectory(ctx context.Context) (string, error) {
|
||||
uploadDir, err := o.getChunksUploadDir()
|
||||
if err != nil {
|
||||
return uploadDir, err
|
||||
}
|
||||
|
||||
err = o.purgeUploadedChunks(ctx, uploadDir)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("chunked upload couldn't purge upload directory: %w", err)
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "MKCOL",
|
||||
Path: uploadDir + "/",
|
||||
NoResponse: true,
|
||||
RootURL: o.fs.chunksUploadURL,
|
||||
}
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err := o.fs.srv.Call(ctx, &opts)
|
||||
return o.fs.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("making upload directory failed: %w", err)
|
||||
}
|
||||
return uploadDir, err
|
||||
}
|
||||
|
||||
func (o *Object) mergeChunks(ctx context.Context, uploadDir string, options []fs.OpenOption, src fs.ObjectInfo) error {
|
||||
var resp *http.Response
|
||||
|
||||
// see https://docs.nextcloud.com/server/24/developer_manual/client_apis/WebDAV/chunking.html?highlight=chunk#assembling-the-chunks
|
||||
opts := rest.Opts{
|
||||
Method: "MOVE",
|
||||
Path: path.Join(uploadDir, ".file"),
|
||||
NoResponse: true,
|
||||
Options: options,
|
||||
RootURL: o.fs.chunksUploadURL,
|
||||
}
|
||||
destinationURL, err := rest.URLJoin(o.fs.endpoint, o.filePath())
|
||||
if err != nil {
|
||||
return fmt.Errorf("finalize chunked upload couldn't join URL: %w", err)
|
||||
}
|
||||
opts.ExtraHeaders = o.extraHeaders(ctx, src)
|
||||
opts.ExtraHeaders["Destination"] = destinationURL.String()
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||
return o.fs.shouldRetryChunkMerge(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("finalize chunked upload failed, destinationURL: \"%s\": %w", destinationURL, err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (o *Object) purgeUploadedChunks(ctx context.Context, uploadDir string) error {
|
||||
// clean the upload directory if it exists (this means that a previous try didn't clean up properly).
|
||||
opts := rest.Opts{
|
||||
Method: "DELETE",
|
||||
Path: uploadDir + "/",
|
||||
NoResponse: true,
|
||||
RootURL: o.fs.chunksUploadURL,
|
||||
}
|
||||
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err := o.fs.srv.CallXML(ctx, &opts, nil, nil)
|
||||
|
||||
// directory doesn't exist, no need to purge
|
||||
if resp.StatusCode == http.StatusNotFound {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return o.fs.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
"net/url"
|
||||
"os/exec"
|
||||
"path"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -42,7 +43,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
minSleep = 10 * time.Millisecond
|
||||
minSleep = fs.Duration(10 * time.Millisecond)
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
defaultDepth = "1" // depth for PROPFIND
|
||||
@@ -76,6 +77,9 @@ func init() {
|
||||
Name: "vendor",
|
||||
Help: "Name of the WebDAV site/service/software you are using.",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "fastmail",
|
||||
Help: "Fastmail Files",
|
||||
}, {
|
||||
Value: "nextcloud",
|
||||
Help: "Nextcloud",
|
||||
}, {
|
||||
@@ -92,15 +96,17 @@ func init() {
|
||||
Help: "Other site/service or software",
|
||||
}},
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "User name.\n\nIn case NTLM authentication is used, the username should be in the format 'Domain\\User'.",
|
||||
Name: "user",
|
||||
Help: "User name.\n\nIn case NTLM authentication is used, the username should be in the format 'Domain\\User'.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: "Password.",
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "bearer_token",
|
||||
Help: "Bearer token instead of user/pass (e.g. a Macaroon).",
|
||||
Name: "bearer_token",
|
||||
Help: "Bearer token instead of user/pass (e.g. a Macaroon).",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "bearer_token_command",
|
||||
Help: "Command to run to get a bearer token.",
|
||||
@@ -124,6 +130,22 @@ You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'
|
||||
`,
|
||||
Default: fs.CommaSepList{},
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "pacer_min_sleep",
|
||||
Help: "Minimum time to sleep between API calls.",
|
||||
Default: minSleep,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "nextcloud_chunk_size",
|
||||
Help: `Nextcloud upload chunk size.
|
||||
|
||||
We recommend configuring your NextCloud instance to increase the max chunk size to 1 GB for better upload performances.
|
||||
See https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/big_file_upload_configuration.html#adjust-chunk-size-on-nextcloud-side
|
||||
|
||||
Set to 0 to disable chunked uploading.
|
||||
`,
|
||||
Advanced: true,
|
||||
Default: 10 * fs.Mebi, // Default NextCloud `max_chunk_size` is `10 MiB`. See https://github.com/nextcloud/server/blob/0447b53bda9fe95ea0cbed765aa332584605d652/apps/files/lib/App.php#L57
|
||||
}},
|
||||
})
|
||||
}
|
||||
@@ -138,6 +160,8 @@ type Options struct {
|
||||
BearerTokenCommand string `config:"bearer_token_command"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
Headers fs.CommaSepList `config:"headers"`
|
||||
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
|
||||
ChunkSize fs.SizeSuffix `config:"nextcloud_chunk_size"`
|
||||
}
|
||||
|
||||
// Fs represents a remote webdav
|
||||
@@ -153,11 +177,15 @@ type Fs struct {
|
||||
precision time.Duration // mod time precision
|
||||
canStream bool // set if can stream
|
||||
useOCMtime bool // set if can use X-OC-Mtime
|
||||
propsetMtime bool // set if can use propset
|
||||
retryWithZeroDepth bool // some vendors (sharepoint) won't list files when Depth is 1 (our default)
|
||||
checkBeforePurge bool // enables extra check that directory to purge really exists
|
||||
hasMD5 bool // set if can use owncloud style checksums for MD5
|
||||
hasSHA1 bool // set if can use owncloud style checksums for SHA1
|
||||
hasOCMD5 bool // set if can use owncloud style checksums for MD5
|
||||
hasOCSHA1 bool // set if can use owncloud style checksums for SHA1
|
||||
hasMESHA1 bool // set if can use fastmail style checksums for SHA1
|
||||
ntlmAuthMu sync.Mutex // mutex to serialize NTLM auth roundtrips
|
||||
chunksUploadURL string // upload URL for nextcloud chunked
|
||||
canChunk bool // set if nextcloud and nextcloud_chunk_size is set
|
||||
}
|
||||
|
||||
// Object describes a webdav object
|
||||
@@ -278,7 +306,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, depth string)
|
||||
},
|
||||
NoRedirect: true,
|
||||
}
|
||||
if f.hasMD5 || f.hasSHA1 {
|
||||
if f.hasOCMD5 || f.hasOCSHA1 {
|
||||
opts.Body = bytes.NewBuffer(owncloudProps)
|
||||
}
|
||||
var result api.Multistatus
|
||||
@@ -411,7 +439,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
opt: *opt,
|
||||
endpoint: u,
|
||||
endpointURL: u.String(),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
precision: fs.ModTimeNotSupported,
|
||||
}
|
||||
|
||||
@@ -543,19 +571,42 @@ func (f *Fs) fetchAndSetBearerToken() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// The WebDAV url can optionally be suffixed with a path. This suffix needs to be ignored for determining the temporary upload directory of chunks.
|
||||
var nextCloudURLRegex = regexp.MustCompile(`^(.*)/dav/files/([^/]+)`)
|
||||
|
||||
// setQuirks adjusts the Fs for the vendor passed in
|
||||
func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
|
||||
switch vendor {
|
||||
case "fastmail":
|
||||
f.canStream = true
|
||||
f.precision = time.Second
|
||||
f.useOCMtime = true
|
||||
f.hasMESHA1 = true
|
||||
case "owncloud":
|
||||
f.canStream = true
|
||||
f.precision = time.Second
|
||||
f.useOCMtime = true
|
||||
f.hasMD5 = true
|
||||
f.hasSHA1 = true
|
||||
f.propsetMtime = true
|
||||
f.hasOCMD5 = true
|
||||
f.hasOCSHA1 = true
|
||||
case "nextcloud":
|
||||
f.precision = time.Second
|
||||
f.useOCMtime = true
|
||||
f.hasSHA1 = true
|
||||
f.propsetMtime = true
|
||||
f.hasOCSHA1 = true
|
||||
f.canChunk = true
|
||||
|
||||
if f.opt.ChunkSize == 0 {
|
||||
fs.Logf(nil, "Chunked uploads are disabled because nextcloud_chunk_size is set to 0")
|
||||
} else {
|
||||
chunksUploadURL, err := f.getChunksUploadURL()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f.chunksUploadURL = chunksUploadURL
|
||||
fs.Logf(nil, "Chunks temporary upload directory: %s", f.chunksUploadURL)
|
||||
}
|
||||
case "sharepoint":
|
||||
// To mount sharepoint, two Cookies are required
|
||||
// They have to be set instead of BasicAuth
|
||||
@@ -667,7 +718,7 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
|
||||
"Depth": depth,
|
||||
},
|
||||
}
|
||||
if f.hasMD5 || f.hasSHA1 {
|
||||
if f.hasOCMD5 || f.hasOCSHA1 {
|
||||
opts.Body = bytes.NewBuffer(owncloudProps)
|
||||
}
|
||||
var result api.Multistatus
|
||||
@@ -996,7 +1047,7 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
|
||||
dstPath := f.filePath(remote)
|
||||
err := f.mkParentDir(ctx, dstPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Copy mkParentDir failed: %w", err)
|
||||
return nil, fmt.Errorf("copy mkParentDir failed: %w", err)
|
||||
}
|
||||
destinationURL, err := rest.URLJoin(f.endpoint, dstPath)
|
||||
if err != nil {
|
||||
@@ -1009,7 +1060,7 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
|
||||
NoResponse: true,
|
||||
ExtraHeaders: map[string]string{
|
||||
"Destination": destinationURL.String(),
|
||||
"Overwrite": "F",
|
||||
"Overwrite": "T",
|
||||
},
|
||||
}
|
||||
if f.useOCMtime {
|
||||
@@ -1021,11 +1072,18 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
|
||||
return srcFs.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Copy call failed: %w", err)
|
||||
return nil, fmt.Errorf("copy call failed: %w", err)
|
||||
}
|
||||
dstObj, err := f.NewObject(ctx, remote)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Copy NewObject failed: %w", err)
|
||||
return nil, fmt.Errorf("copy NewObject failed: %w", err)
|
||||
}
|
||||
if f.useOCMtime && resp.Header.Get("X-OC-Mtime") != "accepted" && f.propsetMtime {
|
||||
fs.Debugf(dstObj, "Setting modtime after copy to %v", src.ModTime(ctx))
|
||||
err = dstObj.SetModTime(ctx, src.ModTime(ctx))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to set modtime: %w", err)
|
||||
}
|
||||
}
|
||||
return dstObj, nil
|
||||
}
|
||||
@@ -1109,7 +1167,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
		NoResponse: true,
		ExtraHeaders: map[string]string{
			"Destination": addSlash(destinationURL.String()),
			"Overwrite": "F",
			"Overwrite": "T",
		},
	}
	// Direct the MOVE/COPY to the source server
@@ -1126,10 +1184,10 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	hashes := hash.Set(hash.None)
	if f.hasMD5 {
	if f.hasOCMD5 {
		hashes.Add(hash.MD5)
	}
	if f.hasSHA1 {
	if f.hasOCSHA1 || f.hasMESHA1 {
		hashes.Add(hash.SHA1)
	}
	return hashes
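// Illustrative note, not part of this change: combined with setQuirks above, the
// advertised hash set follows the vendor quirks, e.g. fastmail exposes SHA1 only
// (hasMESHA1), owncloud MD5 and SHA1 (hasOCMD5/hasOCSHA1) and nextcloud SHA1
// only (hasOCSHA1). A hypothetical check:
func exampleSupportsSHA1(f *Fs) bool {
	return f.Hashes().Contains(hash.SHA1)
}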
@@ -1197,10 +1255,10 @@ func (o *Object) Remote() string {

// Hash returns the SHA1 or MD5 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t == hash.MD5 && o.fs.hasMD5 {
	if t == hash.MD5 && o.fs.hasOCMD5 {
		return o.md5, nil
	}
	if t == hash.SHA1 && o.fs.hasSHA1 {
	if t == hash.SHA1 && (o.fs.hasOCSHA1 || o.fs.hasMESHA1) {
		return o.sha1, nil
	}
	return "", hash.ErrUnsupported
@@ -1222,12 +1280,12 @@ func (o *Object) setMetaData(info *api.Prop) (err error) {
	o.hasMetaData = true
	o.size = info.Size
	o.modTime = time.Time(info.Modified)
	if o.fs.hasMD5 || o.fs.hasSHA1 {
	if o.fs.hasOCMD5 || o.fs.hasOCSHA1 || o.fs.hasMESHA1 {
		hashes := info.Hashes()
		if o.fs.hasSHA1 {
		if o.fs.hasOCSHA1 || o.fs.hasMESHA1 {
			o.sha1 = hashes[hash.SHA1]
		}
		if o.fs.hasMD5 {
		if o.fs.hasOCMD5 {
			o.md5 = hashes[hash.MD5]
		}
	}
@@ -1261,8 +1319,53 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.modTime
}

// Set modified time using propset
//
// <d:multistatus xmlns:d="DAV:" xmlns:s="http://sabredav.org/ns"><d:response><d:href>/ocm/remote.php/webdav/office/wir.jpg</d:href><d:propstat><d:prop><d:lastmodified/></d:prop><d:status>HTTP/1.1 200 OK</d:status></d:propstat></d:response></d:multistatus>
var owncloudPropset = `<?xml version="1.0" encoding="utf-8" ?>
<D:propertyupdate xmlns:D="DAV:">
<D:set>
<D:prop>
<lastmodified xmlns="DAV:">%d</lastmodified>
</D:prop>
</D:set>
</D:propertyupdate>
`

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	if o.fs.propsetMtime {
		opts := rest.Opts{
			Method: "PROPPATCH",
			Path: o.filePath(),
			NoRedirect: true,
			Body: strings.NewReader(fmt.Sprintf(owncloudPropset, modTime.Unix())),
		}
		var result api.Multistatus
		var resp *http.Response
		var err error
		err = o.fs.pacer.Call(func() (bool, error) {
			resp, err = o.fs.srv.CallXML(ctx, &opts, nil, &result)
			return o.fs.shouldRetry(ctx, resp, err)
		})
		if err != nil {
			if apiErr, ok := err.(*api.Error); ok {
				// does not exist
				if apiErr.StatusCode == http.StatusNotFound {
					return fs.ErrorObjectNotFound
				}
			}
			return fmt.Errorf("couldn't set modified time: %w", err)
		}
		// FIXME check if response is valid
		if len(result.Responses) == 1 && result.Responses[0].Props.StatusOK() {
			// update cached modtime
			o.modTime = modTime
			return nil
		}
		// fallback
		return fs.ErrorCantSetModTime
	}
	return fs.ErrorCantSetModTime
}

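// Illustrative sketch, not part of this change: the PROPPATCH body that
// owncloudPropset expands to for an example timestamp (1700000000 is arbitrary).
func examplePropsetBody() string {
	// yields the propertyupdate envelope above with
	// <lastmodified xmlns="DAV:">1700000000</lastmodified> inside it
	return fmt.Sprintf(owncloudPropset, int64(1700000000))
}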
@@ -1304,36 +1407,72 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		return fmt.Errorf("Update mkParentDir failed: %w", err)
	}

	size := src.Size()
	var resp *http.Response
	opts := rest.Opts{
		Method: "PUT",
		Path: o.filePath(),
		Body: in,
		NoResponse: true,
		ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
		ContentType: fs.MimeType(ctx, src),
		Options: options,
	if o.shouldUseChunkedUpload(src) {
		fs.Debugf(src, "Update will use the chunked upload strategy")
		err = o.updateChunked(ctx, in, src, options...)
		if err != nil {
			return err
		}
	} else {
		fs.Debugf(src, "Update will use the normal upload strategy (no chunks)")
		contentType := fs.MimeType(ctx, src)
		filePath := o.filePath()
		extraHeaders := o.extraHeaders(ctx, src)
		// TODO: define getBody() to enable low-level HTTP/2 retries
		err = o.updateSimple(ctx, in, nil, filePath, src.Size(), contentType, extraHeaders, o.fs.endpointURL, options...)
		if err != nil {
			return err
		}
	}
	if o.fs.useOCMtime || o.fs.hasMD5 || o.fs.hasSHA1 {
		opts.ExtraHeaders = map[string]string{}

	// read metadata from remote
	o.hasMetaData = false
	return o.readMetaData(ctx)
}
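// Illustrative sketch, not part of this change: shouldUseChunkedUpload is not
// shown in this hunk; one plausible implementation just compares the source size
// against the configured chunk size (the actual logic may differ):
//
//	func (o *Object) shouldUseChunkedUpload(src fs.ObjectInfo) bool {
//		return o.fs.canChunk && o.fs.opt.ChunkSize > 0 && src.Size() > int64(o.fs.opt.ChunkSize)
//	}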

func (o *Object) extraHeaders(ctx context.Context, src fs.ObjectInfo) map[string]string {
	extraHeaders := map[string]string{}
	if o.fs.useOCMtime || o.fs.hasOCMD5 || o.fs.hasOCSHA1 {
		if o.fs.useOCMtime {
			opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", src.ModTime(ctx).Unix())
			extraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", src.ModTime(ctx).Unix())
		}
		// Set one upload checksum
		// Owncloud uses one checksum only to check the upload and stores its own SHA1 and MD5
		// Nextcloud stores the checksum you supply (SHA1 or MD5) but only stores one
		if o.fs.hasSHA1 {
		if o.fs.hasOCSHA1 {
			if sha1, _ := src.Hash(ctx, hash.SHA1); sha1 != "" {
				opts.ExtraHeaders["OC-Checksum"] = "SHA1:" + sha1
				extraHeaders["OC-Checksum"] = "SHA1:" + sha1
			}
		}
		if o.fs.hasMD5 && opts.ExtraHeaders["OC-Checksum"] == "" {
		if o.fs.hasOCMD5 && extraHeaders["OC-Checksum"] == "" {
			if md5, _ := src.Hash(ctx, hash.MD5); md5 != "" {
				opts.ExtraHeaders["OC-Checksum"] = "MD5:" + md5
				extraHeaders["OC-Checksum"] = "MD5:" + md5
			}
		}
	}
	return extraHeaders
}
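// Illustrative sketch, not part of this change: the value format of the
// OC-Checksum header built above, computed for an in-memory payload. Assumes
// crypto/sha1 and encoding/hex are imported; the function name is hypothetical.
func exampleOCChecksum(data []byte) string {
	sum := sha1.Sum(data)
	return "SHA1:" + hex.EncodeToString(sum[:])
}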

// Standard update in one request (no chunks)
func (o *Object) updateSimple(ctx context.Context, body io.Reader, getBody func() (io.ReadCloser, error), filePath string, size int64, contentType string, extraHeaders map[string]string, rootURL string, options ...fs.OpenOption) (err error) {
	var resp *http.Response

	if extraHeaders == nil {
		extraHeaders = map[string]string{}
	}

	opts := rest.Opts{
		Method: "PUT",
		Path: filePath,
		GetBody: getBody,
		Body: body,
		NoResponse: true,
		ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
		ContentType: contentType,
		Options: options,
		ExtraHeaders: extraHeaders,
		RootURL: rootURL,
	}
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		resp, err = o.fs.srv.Call(ctx, &opts)
		return o.fs.shouldRetry(ctx, resp, err)
@@ -1349,9 +1488,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		_ = o.Remove(ctx)
		return err
	}
	// read metadata from remote
	o.hasMetaData = false
	return o.readMetaData(ctx)
	return nil

}

// Remove an object

@@ -1,10 +1,10 @@
// Test Webdav filesystem interface
package webdav_test
package webdav

import (
	"testing"

	"github.com/rclone/rclone/backend/webdav"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
)
@@ -13,7 +13,10 @@ import (
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestWebdavNextcloud:",
		NilObject: (*webdav.Object)(nil),
		NilObject: (*Object)(nil),
		ChunkedUpload: fstests.ChunkedUploadConfig{
			MinChunkSize: 1 * fs.Mebi,
		},
	})
}

@@ -24,7 +27,10 @@ func TestIntegration2(t *testing.T) {
	}
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestWebdavOwncloud:",
		NilObject: (*webdav.Object)(nil),
		NilObject: (*Object)(nil),
		ChunkedUpload: fstests.ChunkedUploadConfig{
			Skip: true,
		},
	})
}

@@ -35,7 +41,10 @@ func TestIntegration3(t *testing.T) {
	}
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestWebdavRclone:",
		NilObject: (*webdav.Object)(nil),
		NilObject: (*Object)(nil),
		ChunkedUpload: fstests.ChunkedUploadConfig{
			Skip: true,
		},
	})
}

@@ -46,6 +55,10 @@ func TestIntegration4(t *testing.T) {
	}
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestWebdavNTLM:",
		NilObject: (*webdav.Object)(nil),
		NilObject: (*Object)(nil),
	})
}

func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
	return f.setUploadChunkSize(cs)
}

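// Illustrative note, not part of this change: with the tests in the webdav
// package and SetUploadChunkSize exported for the test harness, the chunked
// upload paths can be exercised with the usual rclone integration test
// invocation (run from the backend directory, remote configured locally):
//
//	go test -v -remote TestWebdavNextcloud: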
Some files were not shown because too many files have changed in this diff.