Mirror of <https://github.com/rclone/rclone.git>, synced 2025-12-10 13:23:21 +00:00

Compare commits (336 commits)
.github/workflows/build.yml (vendored): 31 changed lines
@@ -95,12 +95,12 @@ jobs:

 steps:
 - name: Checkout
-uses: actions/checkout@v5
+uses: actions/checkout@v6
 with:
 fetch-depth: 0

 - name: Install Go
-uses: actions/setup-go@v5
+uses: actions/setup-go@v6
 with:
 go-version: ${{ matrix.go }}
 check-latest: true
@@ -216,15 +216,15 @@ jobs:
 echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT

 - name: Checkout
-uses: actions/checkout@v5
+uses: actions/checkout@v6
 with:
 fetch-depth: 0

 - name: Install Go
 id: setup-go
-uses: actions/setup-go@v5
+uses: actions/setup-go@v6
 with:
-go-version: '1.24'
+go-version: '>=1.24.0-rc.1'
 check-latest: true
 cache: false

@@ -239,13 +239,13 @@ jobs:
 restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-

 - name: Code quality test (Linux)
-uses: golangci/golangci-lint-action@v6
+uses: golangci/golangci-lint-action@v9
 with:
 version: latest
 skip-cache: true

 - name: Code quality test (Windows)
-uses: golangci/golangci-lint-action@v6
+uses: golangci/golangci-lint-action@v9
 env:
 GOOS: "windows"
 with:
@@ -253,7 +253,7 @@ jobs:
 skip-cache: true

 - name: Code quality test (macOS)
-uses: golangci/golangci-lint-action@v6
+uses: golangci/golangci-lint-action@v9
 env:
 GOOS: "darwin"
 with:
@@ -261,7 +261,7 @@ jobs:
 skip-cache: true

 - name: Code quality test (FreeBSD)
-uses: golangci/golangci-lint-action@v6
+uses: golangci/golangci-lint-action@v9
 env:
 GOOS: "freebsd"
 with:
@@ -269,7 +269,7 @@ jobs:
 skip-cache: true

 - name: Code quality test (OpenBSD)
-uses: golangci/golangci-lint-action@v6
+uses: golangci/golangci-lint-action@v9
 env:
 GOOS: "openbsd"
 with:
@@ -283,14 +283,17 @@ jobs:
 run: govulncheck ./...

 - name: Check Markdown format
-uses: DavidAnson/markdownlint-cli2-action@v20
+uses: DavidAnson/markdownlint-cli2-action@v22
 with:
 globs: |
 CONTRIBUTING.md
 MAINTAINERS.md
 README.md
 RELEASE.md
-docs/content/{authors,bugs,changelog,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md
+CODE_OF_CONDUCT.md
+librclone/README.md
+backend/s3/README.md
+docs/content/{_index,authors,bugs,changelog,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md

 - name: Scan edits of autogenerated files
 run: bin/check_autogenerated_edits.py 'origin/${{ github.base_ref }}'
@@ -304,13 +307,13 @@ jobs:

 steps:
 - name: Checkout
-uses: actions/checkout@v5
+uses: actions/checkout@v6
 with:
 fetch-depth: 0

 # Upgrade together with NDK version
 - name: Set up Go
-uses: actions/setup-go@v5
+uses: actions/setup-go@v6
 with:
 go-version: '>=1.25.0-rc.1'

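The hunks above bump the pinned versions of several third-party actions (actions/checkout v6, actions/setup-go v6, golangci/golangci-lint-action v9, DavidAnson/markdownlint-cli2-action v22). As a hedged aside that is not part of the change itself: workflow edits like these can be sanity-checked locally with the third-party actionlint tool, assuming it is installed.

```console
$ actionlint                               # checks every file under .github/workflows by default
$ actionlint .github/workflows/build.yml   # or limit the check to the edited workflow
```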
(another workflow file: Docker image publish job)

@@ -52,7 +52,7 @@ jobs:
 df -h .

 - name: Checkout Repository
-uses: actions/checkout@v5
+uses: actions/checkout@v6
 with:
 fetch-depth: 0

@@ -92,7 +92,7 @@ jobs:
 # There's no way around this, because "ImageOS" is only available to
 # processes, but the setup-go action uses it in its key.
 id: imageos
-uses: actions/github-script@v7
+uses: actions/github-script@v8
 with:
 result-encoding: string
 script: |
@@ -183,7 +183,7 @@ jobs:
 touch "/tmp/digests/${digest#sha256:}"

 - name: Upload Image Digest
-uses: actions/upload-artifact@v4
+uses: actions/upload-artifact@v5
 with:
 name: digests-${{ env.PLATFORM }}
 path: /tmp/digests/*
@@ -198,7 +198,7 @@ jobs:

 steps:
 - name: Download Image Digests
-uses: actions/download-artifact@v5
+uses: actions/download-artifact@v6
 with:
 path: /tmp/digests
 pattern: digests-*
(another workflow file: Docker plugin publish job)

@@ -30,7 +30,7 @@ jobs:
 sudo rm -rf /usr/share/dotnet || true
 df -h .
 - name: Checkout master
-uses: actions/checkout@v5
+uses: actions/checkout@v6
 with:
 fetch-depth: 0
 - name: Build and publish docker plugin
.golangci.yml: 270 changed lines
@@ -1,144 +1,160 @@
-# golangci-lint configuration options
+version: "2"

 linters:
+# Configure the linter set. To avoid unexpected results the implicit default
+# set is ignored and all the ones to use are explicitly enabled.
+default: none
 enable:
+# Default
 - errcheck
-- goimports
-- revive
-- ineffassign
 - govet
-- unconvert
+- ineffassign
 - staticcheck
-- gosimple
-- stylecheck
 - unused
-- misspell
+# Additional
 - gocritic
-#- prealloc
+- misspell
-#- maligned
+#- prealloc # TODO
-disable-all: true
+- revive
+- unconvert
+exclusions:
+rules:
+- linters:
+- revive
+text: 'var-naming: avoid meaningless package names'
+- linters:
+- revive
+text: 'var-naming: avoid package names that conflict with Go standard library package names'
+# Configure checks. Mostly using defaults but with some commented exceptions.
+settings:
+govet:
+enable-all: true
+disable:
+- fieldalignment
+- shadow
+staticcheck:
+# With staticcheck there is only one setting, so to extend the implicit
+# default value it must be explicitly included.
+checks:
+# Default
+- all
+- -ST1000
+- -ST1003
+- -ST1016
+- -ST1020
+- -ST1021
+- -ST1022
+# Disable quickfix checks
+- -QF*
+gocritic:
+# With gocritic there are different settings, but since enabled-checks
+# and disabled-checks cannot both be set, for full customization the
+# alternative is to disable all defaults and explicitly enable the ones
+# to use.
+disable-all: true
+enabled-checks:
+#- appendAssign # Skip default
+- argOrder
+- assignOp
+- badCall
+- badCond
+#- captLocal # Skip default
+- caseOrder
+- codegenComment
+#- commentFormatting # Skip default
+- defaultCaseOrder
+- deprecatedComment
+- dupArg
+- dupBranchBody
+- dupCase
+- dupSubExpr
+- elseif
+#- exitAfterDefer # Skip default
+- flagDeref
+- flagName
+#- ifElseChain # Skip default
+- mapKey
+- newDeref
+- offBy1
+- regexpMust
+- ruleguard # Enable additional check that are not enabled by default
+#- singleCaseSwitch # Skip default
+- sloppyLen
+- sloppyTypeAssert
+- switchTrue
+- typeSwitchVar
+- underef
+- unlambda
+- unslice
+- valSwap
+- wrapperFunc
+settings:
+ruleguard:
+rules: ${base-path}/bin/rules.go
+revive:
+# With revive there is in reality only one setting, and when at least one
+# rule are specified then only these rules will be considered, defaults
+# and all others are then implicitly disabled, so must explicitly enable
+# all rules to be used.
+rules:
+- name: blank-imports
+disabled: false
+- name: context-as-argument
+disabled: false
+- name: context-keys-type
+disabled: false
+- name: dot-imports
+disabled: false
+#- name: empty-block # Skip default
+# disabled: true
+- name: error-naming
+disabled: false
+- name: error-return
+disabled: false
+- name: error-strings
+disabled: false
+- name: errorf
+disabled: false
+- name: exported
+disabled: false
+#- name: increment-decrement # Skip default
+# disabled: true
+- name: indent-error-flow
+disabled: false
+- name: package-comments
+disabled: false
+- name: range
+disabled: false
+- name: receiver-naming
+disabled: false
+#- name: redefines-builtin-id # Skip default
+# disabled: true
+#- name: superfluous-else # Skip default
+# disabled: true
+- name: time-naming
+disabled: false
+- name: unexported-return
+disabled: false
+#- name: unreachable-code # Skip default
+# disabled: true
+#- name: unused-parameter # Skip default
+# disabled: true
+- name: var-declaration
+disabled: false
+- name: var-naming
+disabled: false

+formatters:
+enable:
+- goimports

 issues:
-# Enable some lints excluded by default
-exclude-use-default: false

 # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
 max-issues-per-linter: 0

 # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
 max-same-issues: 0

-exclude-rules:

-- linters:
-- staticcheck
-text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'

-# don't disable the revive messages about comments on exported functions
-include:
-- EXC0012
-- EXC0013
-- EXC0014
-- EXC0015

 run:
-# timeout for analysis, e.g. 30s, 5m, default is 1m
+# Timeout for total work, e.g. 30s, 5m, 5m30s. Default is 0 (disabled).
 timeout: 10m

-linters-settings:
-revive:
-# setting rules seems to disable all the rules, so re-enable them here
-rules:
-- name: blank-imports
-disabled: false
-- name: context-as-argument
-disabled: false
-- name: context-keys-type
-disabled: false
-- name: dot-imports
-disabled: false
-- name: empty-block
-disabled: true
-- name: error-naming
-disabled: false
-- name: error-return
-disabled: false
-- name: error-strings
-disabled: false
-- name: errorf
-disabled: false
-- name: exported
-disabled: false
-- name: increment-decrement
-disabled: true
-- name: indent-error-flow
-disabled: false
-- name: package-comments
-disabled: false
-- name: range
-disabled: false
-- name: receiver-naming
-disabled: false
-- name: redefines-builtin-id
-disabled: true
-- name: superfluous-else
-disabled: true
-- name: time-naming
-disabled: false
-- name: unexported-return
-disabled: false
-- name: unreachable-code
-disabled: true
-- name: unused-parameter
-disabled: true
-- name: var-declaration
-disabled: false
-- name: var-naming
-disabled: false
-stylecheck:
-# Only enable the checks performed by the staticcheck stand-alone tool,
-# as documented here: https://staticcheck.io/docs/configuration/options/#checks
-checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
-gocritic:
-# Enable all default checks with some exceptions and some additions (commented).
-# Cannot use both enabled-checks and disabled-checks, so must specify all to be used.
-disable-all: true
-enabled-checks:
-#- appendAssign # Enabled by default
-- argOrder
-- assignOp
-- badCall
-- badCond
-#- captLocal # Enabled by default
-- caseOrder
-- codegenComment
-#- commentFormatting # Enabled by default
-- defaultCaseOrder
-- deprecatedComment
-- dupArg
-- dupBranchBody
-- dupCase
-- dupSubExpr
-- elseif
-#- exitAfterDefer # Enabled by default
-- flagDeref
-- flagName
-#- ifElseChain # Enabled by default
-- mapKey
-- newDeref
-- offBy1
-- regexpMust
-- ruleguard # Not enabled by default
-#- singleCaseSwitch # Enabled by default
-- sloppyLen
-- sloppyTypeAssert
-- switchTrue
-- typeSwitchVar
-- underef
-- unlambda
-- unslice
-- valSwap
-- wrapperFunc
-settings:
-ruleguard:
-rules: "${configDir}/bin/rules.go"
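The rewritten configuration uses the golangci-lint v2 schema (`version: "2"` with separate `linters`, `formatters` and `exclusions` sections). A rough local check of the new config, assuming a golangci-lint 2.x binary is on PATH, could look like this:

```console
$ golangci-lint version    # a 2.x release is needed for the "version: 2" config schema
$ golangci-lint run ./...  # lint the module with .golangci.yml picked up from the repo root
$ make check               # repo target wrapping golangci-lint (and, with the Makefile change below, the markdown lint)
```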
(markdownlint configuration file)

@@ -41,3 +41,32 @@ single-title: # MD025
 # Markdown files we must use whatever works in the final HTML generated docs.
 # Suppress Markdownlint warning: Link fragments should be valid.
 link-fragments: false # MD051

+# Restrict the languages and language identifiers to use for code blocks.
+# We only want those supported by both Hugo and GitHub. These are documented
+# here:
+# https://gohugo.io/content-management/syntax-highlighting/#languages
+# https://docs.github.com//get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks#syntax-highlighting
+# In addition, we only want to allow identifiers (aliases) that correspond to
+# the same language in Hugo and GitHub, and preferrably also VSCode and other
+# commonly used tools, to avoid confusion. An example of this is that "shell"
+# by some are considered an identifier for shell scripts, i.e. an alias for
+# "sh", while others consider it an identifier for shell sessions, i.e. an
+# alias for "console". Although Hugo and GitHub in this case are consistent and
+# have choosen the former, using "sh" instead, and not allowing use of "shell",
+# avoids the confusion entirely.
+fenced-code-language: # MD040
+allowed_languages:
+- text
+- console
+- sh
+- bat
+- ini
+- json
+- yaml
+- go
+- python
+- c++
+- c#
+- java
+- powershell
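With this MD040 setting, a fenced code block tagged with an identifier outside the list (for example `shell`) is reported, while `sh` for scripts and `console` for shell sessions pass. A hedged way to try it locally, assuming markdownlint-cli2 is installed from npm:

```console
$ npm install --global markdownlint-cli2       # one way to get the linter; it reads the repo's markdownlint config automatically
$ markdownlint-cli2 CONTRIBUTING.md README.md  # lint specific files with the rules above applied
```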
CODE_OF_CONDUCT.md (new file): 80 lines
@@ -0,0 +1,80 @@
+# Rclone Code of Conduct
+
+Like the technical community as a whole, the Rclone team and community
+is made up of a mixture of professionals and volunteers from all over
+the world, working on every aspect of the mission - including
+mentorship, teaching, and connecting people.
+
+Diversity is one of our huge strengths, but it can also lead to
+communication issues and unhappiness. To that end, we have a few
+ground rules that we ask people to adhere to. This code applies
+equally to founders, mentors and those seeking help and guidance.
+
+This isn't an exhaustive list of things that you can't do. Rather,
+take it in the spirit in which it's intended - a guide to make it
+easier to enrich all of us and the technical communities in which we
+participate.
+
+This code of conduct applies to all spaces managed by the Rclone
+project or Rclone Services Ltd. This includes the issue tracker, the
+forum, the GitHub site, the wiki, any other online services or
+in-person events. In addition, violations of this code outside these
+spaces may affect a person's ability to participate within them.
+
+- **Be friendly and patient.**
+- **Be welcoming.** We strive to be a community that welcomes and
+supports people of all backgrounds and identities. This includes,
+but is not limited to members of any race, ethnicity, culture,
+national origin, colour, immigration status, social and economic
+class, educational level, sex, sexual orientation, gender identity
+and expression, age, size, family status, political belief,
+religion, and mental and physical ability.
+- **Be considerate.** Your work will be used by other people, and you
+in turn will depend on the work of others. Any decision you take
+will affect users and colleagues, and you should take those
+consequences into account when making decisions. Remember that we're
+a world-wide community, so you might not be communicating in someone
+else's primary language.
+- **Be respectful.** Not all of us will agree all the time, but
+disagreement is no excuse for poor behavior and poor manners. We
+might all experience some frustration now and then, but we cannot
+allow that frustration to turn into a personal attack. It's
+important to remember that a community where people feel
+uncomfortable or threatened is not a productive one. Members of the
+Rclone community should be respectful when dealing with other
+members as well as with people outside the Rclone community.
+- **Be careful in the words that you choose.** We are a community of
+professionals, and we conduct ourselves professionally. Be kind to
+others. Do not insult or put down other participants. Harassment and
+other exclusionary behavior aren't acceptable. This includes, but is
+not limited to:
+- Violent threats or language directed against another person.
+- Discriminatory jokes and language.
+- Posting sexually explicit or violent material.
+- Posting (or threatening to post) other people's personally
+identifying information ("doxing").
+- Personal insults, especially those using racist or sexist terms.
+- Unwelcome sexual attention.
+- Advocating for, or encouraging, any of the above behavior.
+- Repeated harassment of others. In general, if someone asks you to
+stop, then stop.
+- **When we disagree, try to understand why.** Disagreements, both
+social and technical, happen all the time and Rclone is no
+exception. It is important that we resolve disagreements and
+differing views constructively. Remember that we're different. The
+strength of Rclone comes from its varied community, people from a
+wide range of backgrounds. Different people have different
+perspectives on issues. Being unable to understand why someone holds
+a viewpoint doesn't mean that they're wrong. Don't forget that it is
+human to err and blaming each other doesn't get us anywhere.
+Instead, focus on helping to resolve issues and learning from
+mistakes.
+
+If you believe someone is violating the code of conduct, we ask that
+you report it by emailing [info@rclone.com](mailto:info@rclone.com).
+
+Original text courtesy of the [Speak Up! project](http://web.archive.org/web/20141109123859/http://speakup.io/coc.html).
+
+## Questions?
+
+If you have questions, please feel free to [contact us](mailto:info@rclone.com).
CONTRIBUTING.md: 134 changed lines
@@ -38,7 +38,7 @@ and [email](https://docs.github.com/en/github/setting-up-and-managing-your-githu
 Next open your terminal, change directory to your preferred folder and initialise
 your local rclone project:

-```sh
+```console
 git clone https://github.com/rclone/rclone.git
 cd rclone
 git remote rename origin upstream
@@ -53,13 +53,13 @@ executed from the rclone folder created above.

 Now [install Go](https://golang.org/doc/install) and verify your installation:

-```sh
+```console
 go version
 ```

 Great, you can now compile and execute your own version of rclone:

-```sh
+```console
 go build
 ./rclone version
 ```
@@ -68,7 +68,7 @@ go build
 more accurate version number in the executable as well as enable you to specify
 more build options.) Finally make a branch to add your new feature

-```sh
+```console
 git checkout -b my-new-feature
 ```

@@ -80,7 +80,7 @@ and a quick view on the rclone [code organisation](#code-organisation).
 When ready - test the affected functionality and run the unit tests for the
 code you changed

-```sh
+```console
 cd folder/with/changed/files
 go test -v
 ```
@@ -99,7 +99,7 @@ Make sure you

 When you are done with that push your changes to GitHub:

-```sh
+```console
 git push -u origin my-new-feature
 ```

@@ -119,7 +119,7 @@ or [squash your commits](#squashing-your-commits).

 Follow the guideline for [commit messages](#commit-messages) and then:

-```sh
+```console
 git checkout my-new-feature # To switch to your branch
 git status # To see the new and changed files
 git add FILENAME # To select FILENAME for the commit
@@ -130,7 +130,7 @@ git log # To verify the commit. Use q to quit the log

 You can modify the message or changes in the latest commit using:

-```sh
+```console
 git commit --amend
 ```

@@ -145,7 +145,7 @@ pushed to GitHub.

 Your previously pushed commits are replaced by:

-```sh
+```console
 git push --force origin my-new-feature
 ```

@@ -154,7 +154,7 @@ git push --force origin my-new-feature
 To base your changes on the latest version of the
 [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):

-```sh
+```console
 git checkout master
 git fetch upstream
 git merge --ff-only
@@ -170,7 +170,7 @@ If you rebase commits that have been pushed to GitHub, then you will have to

 To combine your commits into one commit:

-```sh
+```console
 git log # To count the commits to squash, e.g. the last 2
 git reset --soft HEAD~2 # To undo the 2 latest commits
 git status # To check everything is as expected
@@ -178,13 +178,13 @@ git status # To check everything is as expected

 If everything is fine, then make the new combined commit:

-```sh
+```console
 git commit # To commit the undone commits as one
 ```

 otherwise, you may roll back using:

-```sh
+```console
 git reflog # To check that HEAD{1} is your previous state
 git reset --soft 'HEAD@{1}' # To roll back to your previous state
 ```
@@ -219,13 +219,13 @@ to check an error return).
 rclone's tests are run from the go testing framework, so at the top
 level you can run this to run all the tests.

-```sh
+```console
 go test -v ./...
 ```

 You can also use `make`, if supported by your platform

-```sh
+```console
 make quicktest
 ```

@@ -246,7 +246,7 @@ need to make a remote called `TestDrive`.
 You can then run the unit tests in the drive directory. These tests
 are skipped if `TestDrive:` isn't defined.

-```sh
+```console
 cd backend/drive
 go test -v
 ```
@@ -255,7 +255,7 @@ You can then run the integration tests which test all of rclone's
 operations. Normally these get run against the local file system,
 but they can be run against any of the remotes.

-```sh
+```console
 cd fs/sync
 go test -v -remote TestDrive:
 go test -v -remote TestDrive: -fast-list
@@ -268,9 +268,8 @@ If you want to use the integration test framework to run these tests
 altogether with an HTML report and test retries then from the
 project root:

-```sh
+```console
-go install github.com/rclone/rclone/fstest/test_all
+go run ./fstest/test_all -backends drive
-test_all -backends drive
 ```

 ### Full integration testing
@@ -278,19 +277,19 @@ test_all -backends drive
 If you want to run all the integration tests against all the remotes,
 then change into the project root and run

-```sh
+```console
 make check
 make test
 ```

 The commands may require some extra go packages which you can install with

-```sh
+```console
 make build_dep
 ```

 The full integration tests are run daily on the integration test server. You can
-find the results at <https://pub.rclone.org/integration-tests/>
+find the results at <https://integration.rclone.org>

 ## Code Organisation

@@ -349,11 +348,13 @@ If you are adding a new feature then please update the documentation.

 The documentation sources are generally in Markdown format, in conformance
 with the CommonMark specification and compatible with GitHub Flavored
-Markdown (GFM). The markdown format is checked as part of the lint operation
-that runs automatically on pull requests, to enforce standards and consistency.
-This is based on the [markdownlint](https://github.com/DavidAnson/markdownlint)
-tool, which can also be integrated into editors so you can perform the same
-checks while writing.
+Markdown (GFM). The markdown format and style is checked as part of the lint
+operation that runs automatically on pull requests, to enforce standards and
+consistency. This is based on the [markdownlint](https://github.com/DavidAnson/markdownlint)
+tool by David Anson, which can also be integrated into editors so you can
+perform the same checks while writing. It generally follows Ciro Santilli's
+[Markdown Style Guide](https://cirosantilli.com/markdown-style-guide), which
+is good source if you want to know more.

 HTML pages, served as website <rclone.org>, are generated from the Markdown,
 using [Hugo](https://gohugo.io). Note that when generating the HTML pages,
@@ -382,7 +383,7 @@ If you add a new general flag (not for a backend), then document it in
 alphabetical order.

 If you add a new backend option/flag, then it should be documented in
-the source file in the `Help:` field.
+the source file in the `Help:` field:

 - Start with the most important information about the option,
 as a single sentence on a single line.
@@ -404,6 +405,30 @@ the source file in the `Help:` field.
 as an unordered list, therefore a single line break is enough to
 create a new list item. Also, for enumeration texts like name of
 countries, it looks better without an ending period/full stop character.
+- You can run `make backenddocs` to verify the resulting Markdown.
+- This will update the autogenerated sections of the backend docs Markdown
+files under `docs/content`.
+- It requires you to have [Python](https://www.python.org) installed.
+- The `backenddocs` make target runs the Python script `bin/make_backend_docs.py`,
+and you can also run this directly, optionally with the name of a backend
+as argument to only update the docs for a specific backend.
+- **Do not** commit the updated Markdown files. This operation is run as part of
+the release process. Since any manual changes in the autogenerated sections
+of the Markdown files will then be lost, we have a pull request check that
+reports error for any changes within the autogenerated sections. Should you
+have done manual changes outside of the autogenerated sections they must be
+committed, of course.
+- You can run `make serve` to verify the resulting website.
+- This will build the website and serve it locally, so you can open it in
+your web browser and verify that the end result looks OK. Check specifically
+any added links, also in light of the note above regarding different algorithms
+for generated header anchors.
+- It requires you to have the [Hugo](https://gohugo.io) tool available.
+- The `serve` make target depends on the `website` target, which runs the
+`hugo` command from the `docs` directory to build the website, and then
+it serves the website locally with an embedded web server using a command
+`hugo server --logLevel info -w --disableFastRender --ignoreCache`, so you
+can run similar Hugo commands directly as well.

 When writing documentation for an entirely new backend,
 see [backend documentation](#backend-documentation).
@@ -420,6 +445,11 @@ for small changes in the docs which makes it very easy. Just remember the
 caveat when linking to header anchors, noted above, which means that GitHub's
 Markdown preview may not be an entirely reliable verification of the results.

+After your changes have been merged, you can verify them on
+[tip.rclone.org](https://tip.rclone.org). This site is updated daily with the
+current state of the master branch at 07:00 UTC. The changes will be on the main
+[rclone.org](https://rclone.org) site once they have been included in a release.
+
 ## Making a release

 There are separate instructions for making a release in the RELEASE.md
@@ -478,7 +508,7 @@ To add a dependency `github.com/ncw/new_dependency` see the
 instructions below. These will fetch the dependency and add it to
 `go.mod` and `go.sum`.

-```sh
+```console
 go get github.com/ncw/new_dependency
 ```

@@ -492,7 +522,7 @@ and `go.sum` in the same commit as your other changes.

 If you need to update a dependency then run

-```sh
+```console
 go get golang.org/x/crypto
 ```

@@ -581,8 +611,7 @@ remote or an fs.
 - Add your backend to `fstest/test_all/config.yaml`
 - Once you've done that then you can use the integration test framework from
 the project root:
-- go install ./...
+- `go run ./fstest/test_all -backends remote`
-- test_all -backends remote

 Or if you want to run the integration tests manually:

@@ -621,44 +650,7 @@ in the web browser and the links (internal and external) all work.

 ## Adding a new s3 provider

-It is quite easy to add a new S3 provider to rclone.
+[Please see the guide in the S3 backend directory](backend/s3/README.md).

-You'll need to modify the following files
-
-- `backend/s3/s3.go`
-- Add the provider to `providerOption` at the top of the file
-- Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
-- Exclude your provider from generic config questions (eg `region` and `endpoint).
-- Add the provider to the `setQuirks` function - see the documentation there.
-- `docs/content/s3.md`
-- Add the provider at the top of the page.
-- Add a section about the provider linked from there.
-- Make sure this is in alphabetical order in the `Providers` section.
-- Add a transcript of a trial `rclone config` session
-- Edit the transcript to remove things which might change in subsequent versions
-- **Do not** alter or add to the autogenerated parts of `s3.md`
-- **Do not** run `make backenddocs` or `bin/make_backend_docs.py s3`
-- `README.md` - this is the home page in github
-- Add the provider and a link to the section you wrote in `docs/contents/s3.md`
-- `docs/content/_index.md` - this is the home page of rclone.org
-- Add the provider and a link to the section you wrote in `docs/contents/s3.md`
-
-When adding the provider, endpoints, quirks, docs etc keep them in
-alphabetical order by `Provider` name, but with `AWS` first and
-`Other` last.
-
-Once you've written the docs, run `make serve` and check they look OK
-in the web browser and the links (internal and external) all work.
-
-Once you've written the code, test `rclone config` works to your
-satisfaction, and check the integration tests work `go test -v -remote
-NewS3Provider:`. You may need to adjust the quirks to get them to
-pass. Some providers just can't pass the tests with control characters
-in the names so if these fail and the provider doesn't support
-`urlEncodeListings` in the quirks then ignore them. Note that the
-`SetTier` test may also fail on non AWS providers.
-
-For an example of adding an s3 provider see [eb3082a1](https://github.com/rclone/rclone/commit/eb3082a1ebdb76d5625f14cedec3f5154a5e7b10).

 ## Writing a plugin

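The new documentation bullets above describe `make backenddocs` and `make serve`; a rough local session following them, assuming Python and Hugo are installed (the backend name `drive` is only an example), might be:

```console
$ make backenddocs                           # refresh the autogenerated backend doc sections (do not commit the result)
$ python3 bin/make_backend_docs.py drive     # or regenerate the docs for a single backend only
$ make serve                                 # build the site with Hugo and serve it locally for review
$ go run ./fstest/test_all -backends drive   # the integration test framework invocation now documented above
```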
MANUAL.html (generated): 56669 changed lines; diff suppressed because it is too large
MANUAL.txt (generated): 13993 changed lines; diff suppressed because it is too large
Makefile: 15 changed lines
@@ -100,6 +100,7 @@ compiletest:
 check: rclone
 @echo "-- START CODE QUALITY REPORT -------------------------------"
 @golangci-lint run $(LINTTAGS) ./...
+@bin/markdown-lint
 @echo "-- END CODE QUALITY REPORT ---------------------------------"

 # Get the build dependencies
@@ -113,21 +114,21 @@ release_dep_linux:
 # Update dependencies
 showupdates:
 @echo "*** Direct dependencies that could be updated ***"
-@GO111MODULE=on go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null
+@go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null

 # Update direct dependencies only
 updatedirect:
-GO111MODULE=on go get -d $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
+go get $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
-GO111MODULE=on go mod tidy
+go mod tidy

 # Update direct and indirect dependencies and test dependencies
 update:
-GO111MODULE=on go get -d -u -t ./...
+go get -u -t ./...
-GO111MODULE=on go mod tidy
+go mod tidy

 # Tidy the module dependencies
 tidy:
-GO111MODULE=on go mod tidy
+go mod tidy

 doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs

@@ -144,9 +145,11 @@ MANUAL.txt: MANUAL.md
 pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt

 commanddocs: rclone
+go generate ./lib/transform
 -@rmdir -p '$$HOME/.config/rclone'
 XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
 @[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
+go run bin/make_bisync_docs.go ./docs/content/

 backenddocs: rclone bin/make_backend_docs.py
 -@rmdir -p '$$HOME/.config/rclone'
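The dropped `GO111MODULE=on` prefixes and `go get -d` flags reflect current Go defaults: module mode has been the default since Go 1.16 and `-d` is a no-op for `go get` in module mode, so the plain commands behave the same. Roughly, the updated targets map to:

```console
$ go env GO111MODULE   # empty or "on": module mode is already the default in modern Go
$ make updatedirect    # go get + go mod tidy for direct dependencies only
$ make update          # go get -u -t ./... then go mod tidy for all dependencies
$ make tidy            # plain go mod tidy
```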
@@ -34,6 +34,7 @@ directories to and from different cloud storage providers.
|
|||||||
- China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
|
 - China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
 - Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
 - Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
+- Cubbit DS3 [:page_facing_up:](https://rclone.org/s3/#Cubbit)
 - DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
 - Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
 - Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
@@ -50,6 +51,7 @@ directories to and from different cloud storage providers.
 - Google Drive [:page_facing_up:](https://rclone.org/drive/)
 - Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
 - HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
+- Hetzner Object Storage [:page_facing_up:](https://rclone.org/s3/#hetzner)
 - Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
 - HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
 - HTTP [:page_facing_up:](https://rclone.org/http/)
@@ -59,6 +61,7 @@ directories to and from different cloud storage providers.
 - Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
 - Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
 - IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
+- Intercolo Object Storage [:page_facing_up:](https://rclone.org/s3/#intercolo)
 - IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
 - Koofr [:page_facing_up:](https://rclone.org/koofr/)
 - Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
@@ -94,6 +97,7 @@ directories to and from different cloud storage providers.
 - Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
 - QingStor [:page_facing_up:](https://rclone.org/qingstor/)
 - Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
+- Rabata Cloud Storage [:page_facing_up:](https://rclone.org/s3/#Rabata)
 - Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
 - Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
 - RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
@@ -103,8 +107,11 @@ directories to and from different cloud storage providers.
 - Seagate Lyve Cloud [:page_facing_up:](https://rclone.org/s3/#lyve)
 - SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
 - Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
+- Servercore Object Storage [:page_facing_up:](https://rclone.org/s3/#servercore)
 - SFTP [:page_facing_up:](https://rclone.org/sftp/)
+- Shade [:page_facing_up:](https://rclone.org/shade/)
 - SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
+- Spectra Logic [:page_facing_up:](https://rclone.org/s3/#spectralogic)
 - StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
 - Storj [:page_facing_up:](https://rclone.org/storj/)
 - SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
@@ -125,6 +132,7 @@ Please see [the full list of all storage providers and their features](https://r
 These backends adapt or modify other storage providers

 - Alias: rename existing remotes [:page_facing_up:](https://rclone.org/alias/)
+- Archive: read archive files [:page_facing_up:](https://rclone.org/archive/)
 - Cache: cache remotes (DEPRECATED) [:page_facing_up:](https://rclone.org/cache/)
 - Chunker: split large files [:page_facing_up:](https://rclone.org/chunker/)
 - Combine: combine multiple remotes into a directory tree [:page_facing_up:](https://rclone.org/combine/)
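The Archive backend added above wraps another remote and presents archive files as if they were directories. A hedged usage sketch based on the `:archive:` on-the-fly syntax documented in backend/archive/archive.go further down in this diff; the remote name and paths are invented for illustration:

```console
# list the contents of a zip file held on another remote without unpacking it
rclone lsf :archive:myremote:backups/photos.zip
# copy a single file back out of the archive
rclone copy :archive:myremote:backups/photos.zip/IMG_0001.jpg /tmp/restore/
```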
RELEASE.md (23 changed lines)

@@ -21,6 +21,7 @@ This file describes how to make the various kinds of releases
 - make doc
 - git status - to check for new man pages - git add them
 - git commit -a -v -m "Version v1.XX.0"
+- make check
 - make retag
 - git push origin # without --follow-tags so it doesn't push the tag if it fails
 - git push --follow-tags origin
@@ -60,7 +61,7 @@ If `make updatedirect` added a `toolchain` directive then remove it.
 We don't want to force a toolchain on our users. Linux packagers are
 often using a version of Go that is a few versions out of date.

-```sh
+```console
 go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
 go get -d $(cat /tmp/potential-upgrades)
 go mod tidy -go=1.22 -compat=1.22
@@ -70,7 +71,7 @@ If the `go mod tidy` fails use the output from it to remove the
 package which can't be upgraded from `/tmp/potential-upgrades` when
 done

-```sh
+```console
 git co go.mod go.sum
 ```
@@ -102,7 +103,7 @@ The above procedure will not upgrade major versions, so v2 to v3.
 However this tool can show which major versions might need to be
 upgraded:

-```sh
+```console
 go run github.com/icholy/gomajor@latest list -major
 ```
@@ -112,7 +113,7 @@ Expect API breakage when updating major versions.

 At some point after the release run

-```sh
+```console
 bin/tidy-beta v1.55
 ```
@@ -159,7 +160,7 @@ which is a private repo containing artwork from sponsors.

 Create an update website branch based off the last release

-```sh
+```console
 git co -b update-website
 ```
@@ -167,19 +168,19 @@ If the branch already exists, double check there are no commits that need saving

 Now reset the branch to the last release

-```sh
+```console
 git reset --hard v1.64.0
 ```

 Create the changes, check them in, test with `make serve` then

-```sh
+```console
 make upload_test_website
 ```

 Check out <https://test.rclone.org> and when happy

-```sh
+```console
 make upload_website
 ```
@@ -189,14 +190,14 @@ Cherry pick any changes back to master and the stable branch if it is active.

 To do a basic build of rclone's docker image to debug builds locally:

-```sh
+```console
 docker buildx build --load -t rclone/rclone:testing --progress=plain .
 docker run --rm rclone/rclone:testing version
 ```

 To test the multiplatform build

-```sh
+```console
 docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 .
 ```
@@ -204,6 +205,6 @@ To make a full build then set the tags correctly and add `--push`

 Note that you can't only build one architecture - you need to build them all.

-```sh
+```console
 docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
 ```
@@ -4,6 +4,7 @@ package all
 import (
 	// Active file systems
 	_ "github.com/rclone/rclone/backend/alias"
+	_ "github.com/rclone/rclone/backend/archive"
 	_ "github.com/rclone/rclone/backend/azureblob"
 	_ "github.com/rclone/rclone/backend/azurefiles"
 	_ "github.com/rclone/rclone/backend/b2"
@@ -54,6 +55,7 @@ import (
 	_ "github.com/rclone/rclone/backend/s3"
 	_ "github.com/rclone/rclone/backend/seafile"
 	_ "github.com/rclone/rclone/backend/sftp"
+	_ "github.com/rclone/rclone/backend/shade"
 	_ "github.com/rclone/rclone/backend/sharefile"
 	_ "github.com/rclone/rclone/backend/sia"
 	_ "github.com/rclone/rclone/backend/smb"
backend/archive/archive.go (new file, 679 lines)
@@ -0,0 +1,679 @@
|
//go:build !plan9
|
||||||
|
|
||||||
|
// Package archive implements a backend to access archive files in a remote
|
||||||
|
package archive
|
||||||
|
|
||||||
|
// FIXME factor common code between backends out - eg VFS initialization
|
||||||
|
|
||||||
|
// FIXME can we generalize the VFS handle caching and use it in zip backend
|
||||||
|
|
||||||
|
// Factor more stuff out if possible
|
||||||
|
|
||||||
|
// Odd stats which are probably coming from the VFS
|
||||||
|
// * tensorflow.sqfs: 0% /3.074Gi, 204.426Ki/s, 4h22m46s
|
||||||
|
|
||||||
|
// FIXME this will perform poorly for unpacking as the VFS Reader is bad
|
||||||
|
// at multiple streams - need cache mode setting?
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
// Import all the required archivers here
|
||||||
|
_ "github.com/rclone/rclone/backend/archive/squashfs"
|
||||||
|
_ "github.com/rclone/rclone/backend/archive/zip"
|
||||||
|
|
||||||
|
"github.com/rclone/rclone/backend/archive/archiver"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/cache"
|
||||||
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
|
"github.com/rclone/rclone/fs/fspath"
|
||||||
|
"github.com/rclone/rclone/fs/hash"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Register with Fs
|
||||||
|
func init() {
|
||||||
|
fsi := &fs.RegInfo{
|
||||||
|
Name: "archive",
|
||||||
|
Description: "Read archives",
|
||||||
|
NewFs: NewFs,
|
||||||
|
MetadataInfo: &fs.MetadataInfo{
|
||||||
|
Help: `Any metadata supported by the underlying remote is read and written.`,
|
||||||
|
},
|
||||||
|
Options: []fs.Option{{
|
||||||
|
Name: "remote",
|
||||||
|
Help: `Remote to wrap to read archives from.
|
||||||
|
|
||||||
|
Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
|
||||||
|
"myremote:bucket" or "myremote:".
|
||||||
|
|
||||||
|
If this is left empty, then the archive backend will use the root as
|
||||||
|
the remote.
|
||||||
|
|
||||||
|
This means that you can use :archive:remote:path and it will be
|
||||||
|
equivalent to setting remote="remote:path".
|
||||||
|
`,
|
||||||
|
Required: false,
|
||||||
|
}},
|
||||||
|
}
|
||||||
|
fs.Register(fsi)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Options defines the configuration for this backend
|
||||||
|
type Options struct {
|
||||||
|
Remote string `config:"remote"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fs represents an archive of upstreams
|
||||||
|
type Fs struct {
|
||||||
|
name string // name of this remote
|
||||||
|
features *fs.Features // optional features
|
||||||
|
opt Options // options for this Fs
|
||||||
|
root string // the path we are working on
|
||||||
|
f fs.Fs // remote we are wrapping
|
||||||
|
wrapper fs.Fs // fs that wraps us
|
||||||
|
|
||||||
|
mu sync.Mutex // protects the below
|
||||||
|
archives map[string]*archive // the archives we have, by path
|
||||||
|
}
|
||||||
|
|
||||||
|
// A single open archive
|
||||||
|
type archive struct {
|
||||||
|
archiver archiver.Archiver // archiver responsible
|
||||||
|
remote string // path to the archive
|
||||||
|
prefix string // prefix to add on to listings
|
||||||
|
root string // root of the archive to remove from listings
|
||||||
|
mu sync.Mutex // protects the following variables
|
||||||
|
f fs.Fs // the archive Fs, may be nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// If remote is an archive then return it otherwise return nil
|
||||||
|
func findArchive(remote string) *archive {
|
||||||
|
// FIXME use something faster than linear search?
|
||||||
|
for _, archiver := range archiver.Archivers {
|
||||||
|
if strings.HasSuffix(remote, archiver.Extension) {
|
||||||
|
return &archive{
|
||||||
|
archiver: archiver,
|
||||||
|
remote: remote,
|
||||||
|
prefix: remote,
|
||||||
|
root: "",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find an archive buried in remote
|
||||||
|
func subArchive(remote string) *archive {
|
||||||
|
archive := findArchive(remote)
|
||||||
|
if archive != nil {
|
||||||
|
return archive
|
||||||
|
}
|
||||||
|
parent := path.Dir(remote)
|
||||||
|
if parent == "/" || parent == "." {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return subArchive(parent)
|
||||||
|
}
|
||||||
|
|
||||||
|
// If remote is an archive then return it otherwise return nil
|
||||||
|
func (f *Fs) findArchive(remote string) (archive *archive) {
|
||||||
|
archive = findArchive(remote)
|
||||||
|
if archive != nil {
|
||||||
|
f.mu.Lock()
|
||||||
|
f.archives[remote] = archive
|
||||||
|
f.mu.Unlock()
|
||||||
|
}
|
||||||
|
return archive
|
||||||
|
}
|
||||||
|
|
||||||
|
// Instantiate archive if it hasn't been instantiated yet
|
||||||
|
//
|
||||||
|
// This is done lazily so that we can list a directory full of
|
||||||
|
// archives without opening them all.
|
||||||
|
func (a *archive) init(ctx context.Context, f fs.Fs) (fs.Fs, error) {
|
||||||
|
a.mu.Lock()
|
||||||
|
defer a.mu.Unlock()
|
||||||
|
if a.f != nil {
|
||||||
|
return a.f, nil
|
||||||
|
}
|
||||||
|
newFs, err := a.archiver.New(ctx, f, a.remote, a.prefix, a.root)
|
||||||
|
if err != nil && err != fs.ErrorIsFile {
|
||||||
|
return nil, fmt.Errorf("failed to create archive %q: %w", a.remote, err)
|
||||||
|
}
|
||||||
|
a.f = newFs
|
||||||
|
return a.f, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFs constructs an Fs from the path.
|
||||||
|
//
|
||||||
|
// The returned Fs is the actual Fs, referenced by remote in the config
|
||||||
|
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs.Fs, err error) {
|
||||||
|
// defer log.Trace(nil, "name=%q, root=%q, m=%v", name, root, m)("f=%+v, err=%v", &outFs, &err)
|
||||||
|
// Parse config into Options struct
|
||||||
|
opt := new(Options)
|
||||||
|
err = configstruct.Set(m, opt)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
remote := opt.Remote
|
||||||
|
origRoot := root
|
||||||
|
|
||||||
|
// If remote is empty, use the root instead
|
||||||
|
if remote == "" {
|
||||||
|
remote = root
|
||||||
|
root = ""
|
||||||
|
}
|
||||||
|
isDirectory := strings.HasSuffix(remote, "/")
|
||||||
|
remote = strings.TrimRight(remote, "/")
|
||||||
|
if remote == "" {
|
||||||
|
remote = "/"
|
||||||
|
}
|
||||||
|
if strings.HasPrefix(remote, name+":") {
|
||||||
|
return nil, errors.New("can't point archive remote at itself - check the value of the upstreams setting")
|
||||||
|
}
|
||||||
|
|
||||||
|
_ = isDirectory
|
||||||
|
|
||||||
|
foundArchive := subArchive(remote)
|
||||||
|
if foundArchive != nil {
|
||||||
|
fs.Debugf(nil, "Found archiver for %q remote %q", foundArchive.archiver.Extension, foundArchive.remote)
|
||||||
|
// Archive path
|
||||||
|
foundArchive.root = strings.Trim(remote[len(foundArchive.remote):], "/")
|
||||||
|
// Path to the archive
|
||||||
|
archiveRemote := remote[:len(foundArchive.remote)]
|
||||||
|
// Remote is archive leaf name
|
||||||
|
foundArchive.remote = path.Base(archiveRemote)
|
||||||
|
foundArchive.prefix = ""
|
||||||
|
// Point remote to archive file
|
||||||
|
remote = archiveRemote
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make sure to remove trailing . referring to the current dir
|
||||||
|
if path.Base(root) == "." {
|
||||||
|
root = strings.TrimSuffix(root, ".")
|
||||||
|
}
|
||||||
|
remotePath := fspath.JoinRootPath(remote, root)
|
||||||
|
wrappedFs, err := cache.Get(ctx, remotePath)
|
||||||
|
if err != fs.ErrorIsFile && err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remote, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
f := &Fs{
|
||||||
|
name: name,
|
||||||
|
//root: path.Join(remotePath, root),
|
||||||
|
root: origRoot,
|
||||||
|
opt: *opt,
|
||||||
|
f: wrappedFs,
|
||||||
|
archives: make(map[string]*archive),
|
||||||
|
}
|
||||||
|
cache.PinUntilFinalized(f.f, f)
|
||||||
|
// the features here are ones we could support, and they are
|
||||||
|
// ANDed with the ones from wrappedFs
|
||||||
|
f.features = (&fs.Features{
|
||||||
|
CaseInsensitive: true,
|
||||||
|
DuplicateFiles: false,
|
||||||
|
ReadMimeType: true,
|
||||||
|
WriteMimeType: true,
|
||||||
|
CanHaveEmptyDirectories: true,
|
||||||
|
BucketBased: true,
|
||||||
|
SetTier: true,
|
||||||
|
GetTier: true,
|
||||||
|
ReadMetadata: true,
|
||||||
|
WriteMetadata: true,
|
||||||
|
UserMetadata: true,
|
||||||
|
PartialUploads: true,
|
||||||
|
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
||||||
|
|
||||||
|
if foundArchive != nil {
|
||||||
|
fs.Debugf(f, "Root is an archive")
|
||||||
|
if err != fs.ErrorIsFile {
|
||||||
|
return nil, fmt.Errorf("expecting to find a file at %q", remote)
|
||||||
|
}
|
||||||
|
return foundArchive.init(ctx, f.f)
|
||||||
|
}
|
||||||
|
// Correct root if definitely pointing to a file
|
||||||
|
if err == fs.ErrorIsFile {
|
||||||
|
f.root = path.Dir(f.root)
|
||||||
|
if f.root == "." || f.root == "/" {
|
||||||
|
f.root = ""
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return f, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name of the remote (as passed into NewFs)
|
||||||
|
func (f *Fs) Name() string {
|
||||||
|
return f.name
|
||||||
|
}
|
||||||
|
|
||||||
|
// Root of the remote (as passed into NewFs)
|
||||||
|
func (f *Fs) Root() string {
|
||||||
|
return f.root
|
||||||
|
}
|
||||||
|
|
||||||
|
// String converts this Fs to a string
|
||||||
|
func (f *Fs) String() string {
|
||||||
|
return fmt.Sprintf("archive root '%s'", f.root)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Features returns the optional features of this Fs
|
||||||
|
func (f *Fs) Features() *fs.Features {
|
||||||
|
return f.features
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rmdir removes the root directory of the Fs object
|
||||||
|
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||||
|
return f.f.Rmdir(ctx, dir)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hashes returns the hash types supported by the underlying remote
|
||||||
|
func (f *Fs) Hashes() hash.Set {
|
||||||
|
return f.f.Hashes()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mkdir makes the root directory of the Fs object
|
||||||
|
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||||
|
return f.f.Mkdir(ctx, dir)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Purge all files in the directory
|
||||||
|
//
|
||||||
|
// Implement this if you have a way of deleting all the files
|
||||||
|
// quicker than just running Remove() on the result of List()
|
||||||
|
//
|
||||||
|
// Return an error if it doesn't exist
|
||||||
|
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||||
|
do := f.f.Features().Purge
|
||||||
|
if do == nil {
|
||||||
|
return fs.ErrorCantPurge
|
||||||
|
}
|
||||||
|
return do(ctx, dir)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy src to this remote using server-side copy operations.
|
||||||
|
//
|
||||||
|
// This is stored with the remote path given.
|
||||||
|
//
|
||||||
|
// It returns the destination Object and a possible error.
|
||||||
|
//
|
||||||
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
|
//
|
||||||
|
// If it isn't possible then return fs.ErrorCantCopy
|
||||||
|
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||||
|
do := f.f.Features().Copy
|
||||||
|
if do == nil {
|
||||||
|
return nil, fs.ErrorCantCopy
|
||||||
|
}
|
||||||
|
// FIXME
|
||||||
|
// o, ok := src.(*Object)
|
||||||
|
// if !ok {
|
||||||
|
// return nil, fs.ErrorCantCopy
|
||||||
|
// }
|
||||||
|
return do(ctx, src, remote)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move src to this remote using server-side move operations.
|
||||||
|
//
|
||||||
|
// This is stored with the remote path given.
|
||||||
|
//
|
||||||
|
// It returns the destination Object and a possible error.
|
||||||
|
//
|
||||||
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
|
//
|
||||||
|
// If it isn't possible then return fs.ErrorCantMove
|
||||||
|
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||||
|
do := f.f.Features().Move
|
||||||
|
if do == nil {
|
||||||
|
return nil, fs.ErrorCantMove
|
||||||
|
}
|
||||||
|
// FIXME
|
||||||
|
// o, ok := src.(*Object)
|
||||||
|
// if !ok {
|
||||||
|
// return nil, fs.ErrorCantMove
|
||||||
|
// }
|
||||||
|
return do(ctx, src, remote)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||||
|
// using server-side move operations.
|
||||||
|
//
|
||||||
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
|
//
|
||||||
|
// If it isn't possible then return fs.ErrorCantDirMove
|
||||||
|
//
|
||||||
|
// If destination exists then return fs.ErrorDirExists
|
||||||
|
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
|
||||||
|
do := f.f.Features().DirMove
|
||||||
|
if do == nil {
|
||||||
|
return fs.ErrorCantDirMove
|
||||||
|
}
|
||||||
|
srcFs, ok := src.(*Fs)
|
||||||
|
if !ok {
|
||||||
|
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
||||||
|
return fs.ErrorCantDirMove
|
||||||
|
}
|
||||||
|
return do(ctx, srcFs.f, srcRemote, dstRemote)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChangeNotify calls the passed function with a path
|
||||||
|
// that has had changes. If the implementation
|
||||||
|
// uses polling, it should adhere to the given interval.
|
||||||
|
// At least one value will be written to the channel,
|
||||||
|
// specifying the initial value and updated values might
|
||||||
|
// follow. A 0 Duration should pause the polling.
|
||||||
|
// The ChangeNotify implementation must empty the channel
|
||||||
|
// regularly. When the channel gets closed, the implementation
|
||||||
|
// should stop polling and release resources.
|
||||||
|
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), ch <-chan time.Duration) {
|
||||||
|
do := f.f.Features().ChangeNotify
|
||||||
|
if do == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
|
||||||
|
// fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
|
||||||
|
notifyFunc(path, entryType)
|
||||||
|
}
|
||||||
|
do(ctx, wrappedNotifyFunc, ch)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DirCacheFlush resets the directory cache - used in testing
|
||||||
|
// as an optional interface
|
||||||
|
func (f *Fs) DirCacheFlush() {
|
||||||
|
do := f.f.Features().DirCacheFlush
|
||||||
|
if do != nil {
|
||||||
|
do()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
|
var o fs.Object
|
||||||
|
var err error
|
||||||
|
if stream {
|
||||||
|
o, err = f.f.Features().PutStream(ctx, in, src, options...)
|
||||||
|
} else {
|
||||||
|
o, err = f.f.Put(ctx, in, src, options...)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return o, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Put in to the remote path with the modTime given of the given size
|
||||||
|
//
|
||||||
|
// May create the object even if it returns an error - if so
|
||||||
|
// will return the object and the error, otherwise will return
|
||||||
|
// nil and the error
|
||||||
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
|
o, err := f.NewObject(ctx, src.Remote())
|
||||||
|
switch err {
|
||||||
|
case nil:
|
||||||
|
return o, o.Update(ctx, in, src, options...)
|
||||||
|
case fs.ErrorObjectNotFound:
|
||||||
|
return f.put(ctx, in, src, false, options...)
|
||||||
|
default:
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||||
|
//
|
||||||
|
// May create the object even if it returns an error - if so
|
||||||
|
// will return the object and the error, otherwise will return
|
||||||
|
// nil and the error
|
||||||
|
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
|
o, err := f.NewObject(ctx, src.Remote())
|
||||||
|
switch err {
|
||||||
|
case nil:
|
||||||
|
return o, o.Update(ctx, in, src, options...)
|
||||||
|
case fs.ErrorObjectNotFound:
|
||||||
|
return f.put(ctx, in, src, true, options...)
|
||||||
|
default:
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// About gets quota information from the Fs
|
||||||
|
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||||
|
do := f.f.Features().About
|
||||||
|
if do == nil {
|
||||||
|
return nil, errors.New("not supported by underlying remote")
|
||||||
|
}
|
||||||
|
return do(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find the Fs for the directory
|
||||||
|
func (f *Fs) findFs(ctx context.Context, dir string) (subFs fs.Fs, err error) {
|
||||||
|
f.mu.Lock()
|
||||||
|
defer f.mu.Unlock()
|
||||||
|
|
||||||
|
subFs = f.f
|
||||||
|
|
||||||
|
// FIXME should do this with a better datastructure like a prefix tree
|
||||||
|
// FIXME want to find the longest first otherwise nesting won't work
|
||||||
|
dirSlash := dir + "/"
|
||||||
|
for archiverRemote, archive := range f.archives {
|
||||||
|
subRemote := archiverRemote + "/"
|
||||||
|
if strings.HasPrefix(dirSlash, subRemote) {
|
||||||
|
subFs, err = archive.init(ctx, f.f)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return subFs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// List the objects and directories in dir into entries. The
|
||||||
|
// entries can be returned in any order but should be for a
|
||||||
|
// complete directory.
|
||||||
|
//
|
||||||
|
// dir should be "" to list the root, and should not have
|
||||||
|
// trailing slashes.
|
||||||
|
//
|
||||||
|
// This should return ErrDirNotFound if the directory isn't
|
||||||
|
// found.
|
||||||
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
|
// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
|
||||||
|
|
||||||
|
subFs, err := f.findFs(ctx, dir)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
entries, err = subFs.List(ctx, dir)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
for i, entry := range entries {
|
||||||
|
// Can only unarchive files
|
||||||
|
if o, ok := entry.(fs.Object); ok {
|
||||||
|
remote := o.Remote()
|
||||||
|
archive := f.findArchive(remote)
|
||||||
|
if archive != nil {
|
||||||
|
// Overwrite entry with directory
|
||||||
|
entries[i] = fs.NewDir(remote, o.ModTime(ctx))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return entries, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewObject creates a new remote archive file object
|
||||||
|
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||||
|
|
||||||
|
dir := path.Dir(remote)
|
||||||
|
if dir == "/" || dir == "." {
|
||||||
|
dir = ""
|
||||||
|
}
|
||||||
|
|
||||||
|
subFs, err := f.findFs(ctx, dir)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
o, err := subFs.NewObject(ctx, remote)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return o, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Precision is the greatest precision of all the archivers
|
||||||
|
func (f *Fs) Precision() time.Duration {
|
||||||
|
return time.Second
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shutdown the backend, closing any background tasks and any
|
||||||
|
// cached connections.
|
||||||
|
func (f *Fs) Shutdown(ctx context.Context) error {
|
||||||
|
if do := f.f.Features().Shutdown; do != nil {
|
||||||
|
return do(ctx)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// PublicLink generates a public link to the remote path (usually readable by anyone)
|
||||||
|
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
|
||||||
|
do := f.f.Features().PublicLink
|
||||||
|
if do == nil {
|
||||||
|
return "", errors.New("PublicLink not supported")
|
||||||
|
}
|
||||||
|
return do(ctx, remote, expire, unlink)
|
||||||
|
}
|
||||||
|
|
||||||
|
// PutUnchecked in to the remote path with the modTime given of the given size
|
||||||
|
//
|
||||||
|
// May create the object even if it returns an error - if so
|
||||||
|
// will return the object and the error, otherwise will return
|
||||||
|
// nil and the error
|
||||||
|
//
|
||||||
|
// May create duplicates or return errors if src already
|
||||||
|
// exists.
|
||||||
|
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
|
do := f.f.Features().PutUnchecked
|
||||||
|
if do == nil {
|
||||||
|
return nil, errors.New("can't PutUnchecked")
|
||||||
|
}
|
||||||
|
o, err := do(ctx, in, src, options...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return o, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MergeDirs merges the contents of all the directories passed
|
||||||
|
// in into the first one and rmdirs the other directories.
|
||||||
|
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
|
||||||
|
if len(dirs) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
do := f.f.Features().MergeDirs
|
||||||
|
if do == nil {
|
||||||
|
return errors.New("MergeDirs not supported")
|
||||||
|
}
|
||||||
|
return do(ctx, dirs)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CleanUp the trash in the Fs
|
||||||
|
//
|
||||||
|
// Implement this if you have a way of emptying the trash or
|
||||||
|
// otherwise cleaning up old versions of files.
|
||||||
|
func (f *Fs) CleanUp(ctx context.Context) error {
|
||||||
|
do := f.f.Features().CleanUp
|
||||||
|
if do == nil {
|
||||||
|
return errors.New("not supported by underlying remote")
|
||||||
|
}
|
||||||
|
return do(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpenWriterAt opens with a handle for random access writes
|
||||||
|
//
|
||||||
|
// Pass in the remote desired and the size if known.
|
||||||
|
//
|
||||||
|
// It truncates any existing object
|
||||||
|
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
|
||||||
|
do := f.f.Features().OpenWriterAt
|
||||||
|
if do == nil {
|
||||||
|
return nil, fs.ErrorNotImplemented
|
||||||
|
}
|
||||||
|
return do(ctx, remote, size)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnWrap returns the Fs that this Fs is wrapping
|
||||||
|
func (f *Fs) UnWrap() fs.Fs {
|
||||||
|
return f.f
|
||||||
|
}
|
||||||
|
|
||||||
|
// WrapFs returns the Fs that is wrapping this Fs
|
||||||
|
func (f *Fs) WrapFs() fs.Fs {
|
||||||
|
return f.wrapper
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetWrapper sets the Fs that is wrapping this Fs
|
||||||
|
func (f *Fs) SetWrapper(wrapper fs.Fs) {
|
||||||
|
f.wrapper = wrapper
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpenChunkWriter returns the chunk size and a ChunkWriter
|
||||||
|
//
|
||||||
|
// Pass in the remote and the src object
|
||||||
|
// You can also use options to hint at the desired chunk size
|
||||||
|
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
|
||||||
|
do := f.f.Features().OpenChunkWriter
|
||||||
|
if do == nil {
|
||||||
|
return info, nil, fs.ErrorNotImplemented
|
||||||
|
}
|
||||||
|
return do(ctx, remote, src, options...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UserInfo returns info about the connected user
|
||||||
|
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
|
||||||
|
do := f.f.Features().UserInfo
|
||||||
|
if do == nil {
|
||||||
|
return nil, fs.ErrorNotImplemented
|
||||||
|
}
|
||||||
|
return do(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Disconnect the current user
|
||||||
|
func (f *Fs) Disconnect(ctx context.Context) error {
|
||||||
|
do := f.f.Features().Disconnect
|
||||||
|
if do == nil {
|
||||||
|
return fs.ErrorNotImplemented
|
||||||
|
}
|
||||||
|
return do(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check the interfaces are satisfied
|
||||||
|
var (
|
||||||
|
_ fs.Fs = (*Fs)(nil)
|
||||||
|
_ fs.Purger = (*Fs)(nil)
|
||||||
|
_ fs.PutStreamer = (*Fs)(nil)
|
||||||
|
_ fs.Copier = (*Fs)(nil)
|
||||||
|
_ fs.Mover = (*Fs)(nil)
|
||||||
|
_ fs.DirMover = (*Fs)(nil)
|
||||||
|
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||||
|
_ fs.ChangeNotifier = (*Fs)(nil)
|
||||||
|
_ fs.Abouter = (*Fs)(nil)
|
||||||
|
_ fs.Shutdowner = (*Fs)(nil)
|
||||||
|
_ fs.PublicLinker = (*Fs)(nil)
|
||||||
|
_ fs.PutUncheckeder = (*Fs)(nil)
|
||||||
|
_ fs.MergeDirser = (*Fs)(nil)
|
||||||
|
_ fs.CleanUpper = (*Fs)(nil)
|
||||||
|
_ fs.OpenWriterAter = (*Fs)(nil)
|
||||||
|
_ fs.OpenChunkWriter = (*Fs)(nil)
|
||||||
|
_ fs.UserInfoer = (*Fs)(nil)
|
||||||
|
_ fs.Disconnecter = (*Fs)(nil)
|
||||||
|
// FIXME _ fs.FullObject = (*Object)(nil)
|
||||||
|
)
|
||||||
backend/archive/archive_internal_test.go (new file, 221 lines)
@@ -0,0 +1,221 @@
|
//go:build !plan9
|
||||||
|
|
||||||
|
package archive
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
_ "github.com/rclone/rclone/backend/local"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/cache"
|
||||||
|
"github.com/rclone/rclone/fs/filter"
|
||||||
|
"github.com/rclone/rclone/fs/operations"
|
||||||
|
"github.com/rclone/rclone/fstest"
|
||||||
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FIXME need to test Open with seek
|
||||||
|
|
||||||
|
// run - run a shell command
|
||||||
|
func run(t *testing.T, args ...string) {
|
||||||
|
cmd := exec.Command(args[0], args[1:]...)
|
||||||
|
fs.Debugf(nil, "run args = %v", args)
|
||||||
|
out, err := cmd.CombinedOutput()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf(`
|
||||||
|
----------------------------
|
||||||
|
Failed to run %v: %v
|
||||||
|
Command output was:
|
||||||
|
%s
|
||||||
|
----------------------------
|
||||||
|
`, args, err, out)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check the dst and src are identical
|
||||||
|
func checkTree(ctx context.Context, name string, t *testing.T, dstArchive, src string, expectedCount int) {
|
||||||
|
t.Run(name, func(t *testing.T) {
|
||||||
|
fs.Debugf(nil, "check %q vs %q", dstArchive, src)
|
||||||
|
Farchive, err := cache.Get(ctx, dstArchive)
|
||||||
|
if err != fs.ErrorIsFile {
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
Fsrc, err := cache.Get(ctx, src)
|
||||||
|
if err != fs.ErrorIsFile {
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var matches bytes.Buffer
|
||||||
|
opt := operations.CheckOpt{
|
||||||
|
Fdst: Farchive,
|
||||||
|
Fsrc: Fsrc,
|
||||||
|
Match: &matches,
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, action := range []string{"Check", "Download"} {
|
||||||
|
t.Run(action, func(t *testing.T) {
|
||||||
|
matches.Reset()
|
||||||
|
if action == "Download" {
|
||||||
|
assert.NoError(t, operations.CheckDownload(ctx, &opt))
|
||||||
|
} else {
|
||||||
|
assert.NoError(t, operations.Check(ctx, &opt))
|
||||||
|
}
|
||||||
|
if expectedCount > 0 {
|
||||||
|
assert.Equal(t, expectedCount, strings.Count(matches.String(), "\n"))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("NewObject", func(t *testing.T) {
|
||||||
|
// Check we can run NewObject on all files and read them
|
||||||
|
assert.NoError(t, operations.ListFn(ctx, Fsrc, func(srcObj fs.Object) {
|
||||||
|
if t.Failed() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
remote := srcObj.Remote()
|
||||||
|
archiveObj, err := Farchive.NewObject(ctx, remote)
|
||||||
|
require.NoError(t, err, remote)
|
||||||
|
assert.Equal(t, remote, archiveObj.Remote(), remote)
|
||||||
|
|
||||||
|
// Test that the contents are the same
|
||||||
|
archiveBuf := fstests.ReadObject(ctx, t, archiveObj, -1)
|
||||||
|
srcBuf := fstests.ReadObject(ctx, t, srcObj, -1)
|
||||||
|
assert.Equal(t, srcBuf, archiveBuf)
|
||||||
|
|
||||||
|
if len(srcBuf) < 81 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that Open works with SeekOption
|
||||||
|
assert.Equal(t, srcBuf[50:], fstests.ReadObject(ctx, t, archiveObj, -1, &fs.SeekOption{Offset: 50}), "contents differ after seek")
|
||||||
|
|
||||||
|
// Tests that Open works with RangeOption
|
||||||
|
for _, test := range []struct {
|
||||||
|
ro fs.RangeOption
|
||||||
|
wantStart, wantEnd int
|
||||||
|
}{
|
||||||
|
{fs.RangeOption{Start: 5, End: 15}, 5, 16},
|
||||||
|
{fs.RangeOption{Start: 80, End: -1}, 80, len(srcBuf)},
|
||||||
|
{fs.RangeOption{Start: 81, End: 100000}, 81, len(srcBuf)},
|
||||||
|
{fs.RangeOption{Start: -1, End: 20}, len(srcBuf) - 20, len(srcBuf)}, // if start is omitted this means get the final bytes
|
||||||
|
// {fs.RangeOption{Start: -1, End: -1}, 0, len(srcBuf)}, - this seems to work but the RFC doesn't define it
|
||||||
|
} {
|
||||||
|
got := fstests.ReadObject(ctx, t, archiveObj, -1, &test.ro)
|
||||||
|
foundAt := strings.Index(srcBuf, got)
|
||||||
|
help := fmt.Sprintf("%#v failed want [%d:%d] got [%d:%d]", test.ro, test.wantStart, test.wantEnd, foundAt, foundAt+len(got))
|
||||||
|
assert.Equal(t, srcBuf[test.wantStart:test.wantEnd], got, help)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that the modtimes are correct
|
||||||
|
fstest.AssertTimeEqualWithPrecision(t, remote, srcObj.ModTime(ctx), archiveObj.ModTime(ctx), Farchive.Precision())
|
||||||
|
|
||||||
|
// Test that the sizes are correct
|
||||||
|
assert.Equal(t, srcObj.Size(), archiveObj.Size())
|
||||||
|
|
||||||
|
// Test that Strings are OK
|
||||||
|
assert.Equal(t, srcObj.String(), archiveObj.String())
|
||||||
|
}))
|
||||||
|
})
|
||||||
|
|
||||||
|
// t.Logf("Fdst ------------- %v", Fdst)
|
||||||
|
// operations.List(ctx, Fdst, os.Stdout)
|
||||||
|
// t.Logf("Fsrc ------------- %v", Fsrc)
|
||||||
|
// operations.List(ctx, Fsrc, os.Stdout)
|
||||||
|
})
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// test creating and reading back some archives
|
||||||
|
//
|
||||||
|
// Note that this uses rclone and zip as external binaries.
|
||||||
|
func testArchive(t *testing.T, archiveName string, archiveFn func(t *testing.T, output, input string)) {
|
||||||
|
ctx := context.Background()
|
||||||
|
checkFiles := 1000
|
||||||
|
|
||||||
|
// create random test input files
|
||||||
|
inputRoot := t.TempDir()
|
||||||
|
input := filepath.Join(inputRoot, archiveName)
|
||||||
|
require.NoError(t, os.Mkdir(input, 0777))
|
||||||
|
run(t, "rclone", "test", "makefiles", "--files", strconv.Itoa(checkFiles), "--ascii", input)
|
||||||
|
|
||||||
|
// Create the archive
|
||||||
|
output := t.TempDir()
|
||||||
|
zipFile := path.Join(output, archiveName)
|
||||||
|
archiveFn(t, zipFile, input)
|
||||||
|
|
||||||
|
// Check the archive itself
|
||||||
|
checkTree(ctx, "Archive", t, ":archive:"+zipFile, input, checkFiles)
|
||||||
|
|
||||||
|
// Now check a subdirectory
|
||||||
|
fis, err := os.ReadDir(input)
|
||||||
|
require.NoError(t, err)
|
||||||
|
subDir := "NOT FOUND"
|
||||||
|
aFile := "NOT FOUND"
|
||||||
|
for _, fi := range fis {
|
||||||
|
if fi.IsDir() {
|
||||||
|
subDir = fi.Name()
|
||||||
|
} else {
|
||||||
|
aFile = fi.Name()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
checkTree(ctx, "SubDir", t, ":archive:"+zipFile+"/"+subDir, filepath.Join(input, subDir), 0)
|
||||||
|
|
||||||
|
// Now check a single file
|
||||||
|
fiCtx, fi := filter.AddConfig(ctx)
|
||||||
|
require.NoError(t, fi.AddRule("+ "+aFile))
|
||||||
|
require.NoError(t, fi.AddRule("- *"))
|
||||||
|
checkTree(fiCtx, "SingleFile", t, ":archive:"+zipFile+"/"+aFile, filepath.Join(input, aFile), 0)
|
||||||
|
|
||||||
|
// Now check the level above
|
||||||
|
checkTree(ctx, "Root", t, ":archive:"+output, inputRoot, checkFiles)
|
||||||
|
// run(t, "cp", "-a", inputRoot, output, "/tmp/test-"+archiveName)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make sure we have the executable named
|
||||||
|
func skipIfNoExe(t *testing.T, exeName string) {
|
||||||
|
_, err := exec.LookPath(exeName)
|
||||||
|
if err != nil {
|
||||||
|
t.Skipf("%s executable not installed", exeName)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test creating and reading back some archives
|
||||||
|
//
|
||||||
|
// Note that this uses rclone and zip as external binaries.
|
||||||
|
func TestArchiveZip(t *testing.T) {
|
||||||
|
fstest.Initialise()
|
||||||
|
skipIfNoExe(t, "zip")
|
||||||
|
skipIfNoExe(t, "rclone")
|
||||||
|
testArchive(t, "test.zip", func(t *testing.T, output, input string) {
|
||||||
|
oldcwd, err := os.Getwd()
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, os.Chdir(input))
|
||||||
|
defer func() {
|
||||||
|
require.NoError(t, os.Chdir(oldcwd))
|
||||||
|
}()
|
||||||
|
run(t, "zip", "-9r", output, ".")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test creating and reading back some archives
|
||||||
|
//
|
||||||
|
// Note that this uses rclone and squashfs as external binaries.
|
||||||
|
func TestArchiveSquashfs(t *testing.T) {
|
||||||
|
fstest.Initialise()
|
||||||
|
skipIfNoExe(t, "mksquashfs")
|
||||||
|
skipIfNoExe(t, "rclone")
|
||||||
|
testArchive(t, "test.sqfs", func(t *testing.T, output, input string) {
|
||||||
|
run(t, "mksquashfs", input, output)
|
||||||
|
})
|
||||||
|
}
|
||||||
backend/archive/archive_test.go (new file, 67 lines)
@@ -0,0 +1,67 @@
//go:build !plan9

// Test Archive filesystem interface
package archive_test

import (
	"testing"

	_ "github.com/rclone/rclone/backend/local"
	_ "github.com/rclone/rclone/backend/memory"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
)

var (
	unimplementableFsMethods = []string{"ListR", "ListP", "MkdirMetadata", "DirSetModTime"}
	// In these tests we receive objects from the underlying remote which don't implement these methods
	unimplementableObjectMethods = []string{"GetTier", "ID", "Metadata", "MimeType", "SetTier", "UnWrap", "SetMetadata"}
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	if *fstest.RemoteName == "" {
		t.Skip("Skipping as -remote not set")
	}
	fstests.Run(t, &fstests.Opt{
		RemoteName:                   *fstest.RemoteName,
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
	})
}

func TestLocal(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	remote := t.TempDir()
	name := "TestArchiveLocal"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":",
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "archive"},
			{Name: name, Key: "remote", Value: remote},
		},
		QuickTestOK:                  true,
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
	})
}

func TestMemory(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	remote := ":memory:"
	name := "TestArchiveMemory"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":",
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "archive"},
			{Name: name, Key: "remote", Value: remote},
		},
		QuickTestOK:                  true,
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
	})
}
backend/archive/archive_unsupported.go (new file, 7 lines)
@@ -0,0 +1,7 @@
// Build for archive for unsupported platforms to stop go complaining
// about "no buildable Go source files "

//go:build plan9

// Package archive implements a backend to access archive files in a remote
package archive
backend/archive/archiver/archiver.go (new file, 24 lines)
@@ -0,0 +1,24 @@
// Package archiver registers all the archivers
package archiver

import (
	"context"

	"github.com/rclone/rclone/fs"
)

// Archiver describes an archive package
type Archiver struct {
	// New constructs an Fs from the (wrappedFs, remote) with the objects
	// prefixed with prefix and rooted at root
	New func(ctx context.Context, f fs.Fs, remote, prefix, root string) (fs.Fs, error)
	Extension string
}

// Archivers is a slice of all registered archivers
var Archivers []Archiver

// Register adds the archivers provided to the list of known archivers
func Register(as ...Archiver) {
	Archivers = append(Archivers, as...)
}
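For orientation, a minimal sketch of how an archive format would plug itself into this registry, modelled on the squashfs and zip archivers imported by archive.go above. The `tar` package name, the extension and the stub body are hypothetical; only the `archiver.Register` / `archiver.Archiver` API shown above is taken from the diff:

```go
package tar

import (
	"context"

	"github.com/rclone/rclone/backend/archive/archiver"
	"github.com/rclone/rclone/fs"
)

func init() {
	// Register makes this format discoverable by the archive backend, which
	// matches archives by the Extension suffix (see findArchive in archive.go).
	archiver.Register(archiver.Archiver{
		New:       New,
		Extension: ".tar",
	})
}

// New would construct an fs.Fs exposing the contents of the .tar file at
// remote inside wrappedFs; this placeholder only signals it is unimplemented.
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) {
	return nil, fs.ErrorNotImplemented
}
```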
backend/archive/base/base.go (new file, 233 lines)
@@ -0,0 +1,233 @@
|
// Package base is a base archive Fs
|
||||||
|
package base
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"path"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/hash"
|
||||||
|
"github.com/rclone/rclone/vfs"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Fs represents a wrapped fs.Fs
|
||||||
|
type Fs struct {
|
||||||
|
f fs.Fs
|
||||||
|
wrapper fs.Fs
|
||||||
|
name string
|
||||||
|
features *fs.Features // optional features
|
||||||
|
vfs *vfs.VFS
|
||||||
|
node vfs.Node // archive object
|
||||||
|
remote string // remote of the archive object
|
||||||
|
prefix string // position for objects
|
||||||
|
prefixSlash string // position for objects with a slash on
|
||||||
|
root string // position to read from within the archive
|
||||||
|
}
|
||||||
|
|
||||||
|
var errNotImplemented = errors.New("internal error: method not implemented in archiver")
|
||||||
|
|
||||||
|
// New constructs an Fs from the (wrappedFs, remote) with the objects
|
||||||
|
// prefix with prefix and rooted at root
|
||||||
|
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (*Fs, error) {
|
||||||
|
// FIXME vfs cache?
|
||||||
|
// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
|
||||||
|
fs.Debugf(nil, "New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
|
||||||
|
VFS := vfs.New(wrappedFs, nil)
|
||||||
|
node, err := VFS.Stat(remote)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
f := &Fs{
|
||||||
|
f: wrappedFs,
|
||||||
|
name: path.Join(fs.ConfigString(wrappedFs), remote),
|
||||||
|
vfs: VFS,
|
||||||
|
node: node,
|
||||||
|
remote: remote,
|
||||||
|
root: root,
|
||||||
|
prefix: prefix,
|
||||||
|
prefixSlash: prefix + "/",
|
||||||
|
}
|
||||||
|
|
||||||
|
// FIXME
|
||||||
|
// the features here are ones we could support, and they are
|
||||||
|
// ANDed with the ones from wrappedFs
|
||||||
|
//
|
||||||
|
// FIXME some of these need to be forced on - CanHaveEmptyDirectories
|
||||||
|
f.features = (&fs.Features{
|
||||||
|
CaseInsensitive: false,
|
||||||
|
DuplicateFiles: false,
|
||||||
|
ReadMimeType: false, // MimeTypes not supported with gzip
|
||||||
|
WriteMimeType: false,
|
||||||
|
BucketBased: false,
|
||||||
|
CanHaveEmptyDirectories: true,
|
||||||
|
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
||||||
|
|
||||||
|
return f, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name of the remote (as passed into NewFs)
|
||||||
|
func (f *Fs) Name() string {
|
||||||
|
return f.name
|
||||||
|
}
|
||||||
|
|
||||||
|
// Root of the remote (as passed into NewFs)
|
||||||
|
func (f *Fs) Root() string {
|
||||||
|
return f.root
|
||||||
|
}
|
||||||
|
|
||||||
|
// Features returns the optional features of this Fs
|
||||||
|
func (f *Fs) Features() *fs.Features {
|
||||||
|
return f.features
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns a description of the FS
|
||||||
|
func (f *Fs) String() string {
|
||||||
|
return f.name
|
||||||
|
}
|
||||||
|
|
||||||
|
// List the objects and directories in dir into entries. The
|
||||||
|
// entries can be returned in any order but should be for a
|
||||||
|
// complete directory.
|
||||||
|
//
|
||||||
|
// dir should be "" to list the root, and should not have
|
||||||
|
// trailing slashes.
|
||||||
|
//
|
||||||
|
// This should return ErrDirNotFound if the directory isn't
|
||||||
|
// found.
|
||||||
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
|
return nil, errNotImplemented
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewObject finds the Object at remote.
|
||||||
|
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
|
||||||
|
return nil, errNotImplemented
|
||||||
|
}
|
||||||
|
|
||||||
|
// Precision of the ModTimes in this Fs
|
||||||
|
func (f *Fs) Precision() time.Duration {
|
||||||
|
return time.Second
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mkdir makes the directory (container, bucket)
|
||||||
|
//
|
||||||
|
// Shouldn't return an error if it already exists
|
||||||
|
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||||
|
return vfs.EROFS
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rmdir removes the directory (container, bucket) if empty
|
||||||
|
//
|
||||||
|
// Return an error if it doesn't exist or isn't empty
|
||||||
|
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||||
|
return vfs.EROFS
|
||||||
|
}
|
||||||
|
|
||||||
|
// Put in to the remote path with the modTime given of the given size
|
||||||
|
//
|
||||||
|
// May create the object even if it returns an error - if so
|
||||||
|
// will return the object and the error, otherwise will return
|
||||||
|
// nil and the error
|
||||||
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
|
||||||
|
return nil, vfs.EROFS
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hashes returns the supported hash sets.
|
||||||
|
func (f *Fs) Hashes() hash.Set {
|
||||||
|
return hash.Set(hash.None)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnWrap returns the Fs that this Fs is wrapping
|
||||||
|
func (f *Fs) UnWrap() fs.Fs {
|
||||||
|
return f.f
|
||||||
|
}
|
||||||
|
|
||||||
|
// WrapFs returns the Fs that is wrapping this Fs
|
||||||
|
func (f *Fs) WrapFs() fs.Fs {
|
||||||
|
return f.wrapper
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetWrapper sets the Fs that is wrapping this Fs
|
||||||
|
func (f *Fs) SetWrapper(wrapper fs.Fs) {
|
||||||
|
f.wrapper = wrapper
|
||||||
|
}
|
||||||
|
|
||||||
|
// Object describes an object to be read from the raw zip file
|
||||||
|
type Object struct {
|
||||||
|
f *Fs
|
||||||
|
remote string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fs returns read only access to the Fs that this object is part of
|
||||||
|
func (o *Object) Fs() fs.Info {
|
||||||
|
return o.f
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return a string version
|
||||||
|
func (o *Object) String() string {
|
||||||
|
if o == nil {
|
||||||
|
return "<nil>"
|
||||||
|
}
|
||||||
|
return o.Remote()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remote returns the remote path
|
||||||
|
func (o *Object) Remote() string {
|
||||||
|
return o.remote
|
||||||
|
}
|
||||||
|
|
||||||
|
// Size returns the size of the file
|
||||||
|
func (o *Object) Size() int64 {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ModTime returns the modification time of the object
|
||||||
|
//
|
||||||
|
// It attempts to read the objects mtime and if that isn't present the
|
||||||
|
// LastModified returned in the http headers
|
||||||
|
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||||
|
return time.Now()
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetModTime sets the modification time of the local fs object
|
||||||
|
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||||
|
return vfs.EROFS
|
||||||
|
}
|
||||||
|
|
||||||
|
// Storable returns a boolean indicating if this object is storable
|
||||||
|
func (o *Object) Storable() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hash returns the selected checksum of the file
|
||||||
|
// If no checksum is available it returns ""
|
||||||
|
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
|
||||||
|
return "", hash.ErrUnsupported
|
||||||
|
}
|
||||||
|
|
||||||
|
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
||||||
|
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
||||||
|
return nil, errNotImplemented
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update in to the object with the modTime given of the given size
|
||||||
|
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||||
|
return vfs.EROFS
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove an object
|
||||||
|
func (o *Object) Remove(ctx context.Context) error {
|
||||||
|
return vfs.EROFS
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check the interfaces are satisfied
|
||||||
|
var (
|
||||||
|
_ fs.Fs = (*Fs)(nil)
|
||||||
|
_ fs.UnWrapper = (*Fs)(nil)
|
||||||
|
_ fs.Wrapper = (*Fs)(nil)
|
||||||
|
_ fs.Object = (*Object)(nil)
|
||||||
|
)
|
||||||
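The `var _ fs.Fs = (*Fs)(nil)` block above is Go's compile-time interface assertion idiom: it costs nothing at runtime but turns a missing method into a build error. A minimal standalone sketch of the same pattern (the Greeter/loud names are illustrative, not from rclone):

package main

import "fmt"

// Greeter is the interface we want to guarantee that loud implements.
type Greeter interface {
    Greet() string
}

type loud struct{}

func (loud) Greet() string { return "HELLO" }

// Compile-time assertion: if loud stops satisfying Greeter,
// this line fails to build instead of failing at runtime.
var _ Greeter = (*loud)(nil)

func main() {
    fmt.Println(loud{}.Greet())
}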
backend/archive/squashfs/cache.go (new file, 165 lines)
@@ -0,0 +1,165 @@
package squashfs

// Could just be using bare object Open with RangeRequest which
// would transfer the minimum amount of data but may be slower.

import (
    "errors"
    "fmt"
    "io/fs"
    "os"
    "sync"

    "github.com/diskfs/go-diskfs/backend"
    "github.com/rclone/rclone/vfs"
)

// Cache file handles for accessing the file
type cache struct {
    node  vfs.Node
    fhsMu sync.Mutex
    fhs   []cacheHandle
}

// A cached file handle
type cacheHandle struct {
    offset int64
    fh     vfs.Handle
}

// Make a new cache
func newCache(node vfs.Node) *cache {
    return &cache{
        node: node,
    }
}

// Get a vfs.Handle from the pool or open one
//
// This tries to find an open file handle which doesn't require seeking.
func (c *cache) open(off int64) (fh vfs.Handle, err error) {
    c.fhsMu.Lock()
    defer c.fhsMu.Unlock()

    if len(c.fhs) > 0 {
        // Look for exact match first
        for i, cfh := range c.fhs {
            if cfh.offset == off {
                // fs.Debugf(nil, "CACHE MATCH")
                c.fhs = append(c.fhs[:i], c.fhs[i+1:]...)
                return cfh.fh, nil
            }
        }
        // fs.Debugf(nil, "CACHE MISS")
        // Just take the first one if not found
        cfh := c.fhs[0]
        c.fhs = c.fhs[1:]
        return cfh.fh, nil
    }

    fh, err = c.node.Open(os.O_RDONLY)
    if err != nil {
        return nil, fmt.Errorf("failed to open squashfs archive: %w", err)
    }

    return fh, nil
}

// Close a vfs.Handle or return it to the pool
//
// off should be the offset the file handle would read from without seeking
func (c *cache) close(fh vfs.Handle, off int64) {
    c.fhsMu.Lock()
    defer c.fhsMu.Unlock()

    c.fhs = append(c.fhs, cacheHandle{
        offset: off,
        fh:     fh,
    })
}

// ReadAt reads len(p) bytes into p starting at offset off in the underlying
// input source. It returns the number of bytes read (0 <= n <= len(p)) and any
// error encountered.
//
// When ReadAt returns n < len(p), it returns a non-nil error explaining why
// more bytes were not returned. In this respect, ReadAt is stricter than Read.
//
// Even if ReadAt returns n < len(p), it may use all of p as scratch
// space during the call. If some data is available but not len(p) bytes,
// ReadAt blocks until either all the data is available or an error occurs.
// In this respect ReadAt is different from Read.
//
// If the n = len(p) bytes returned by ReadAt are at the end of the input
// source, ReadAt may return either err == EOF or err == nil.
//
// If ReadAt is reading from an input source with a seek offset, ReadAt should
// not affect nor be affected by the underlying seek offset.
//
// Clients of ReadAt can execute parallel ReadAt calls on the same input
// source.
//
// Implementations must not retain p.
func (c *cache) ReadAt(p []byte, off int64) (n int, err error) {
    fh, err := c.open(off)
    if err != nil {
        return n, err
    }
    defer func() {
        c.close(fh, off+int64(len(p)))
    }()
    // fs.Debugf(nil, "ReadAt(p[%d], off=%d, fh=%p)", len(p), off, fh)
    return fh.ReadAt(p, off)
}

var errCacheNotImplemented = errors.New("internal error: squashfs cache doesn't implement method")

// WriteAt method dummy stub to satisfy interface
func (c *cache) WriteAt(p []byte, off int64) (n int, err error) {
    return 0, errCacheNotImplemented
}

// Seek method dummy stub to satisfy interface
func (c *cache) Seek(offset int64, whence int) (int64, error) {
    return 0, errCacheNotImplemented
}

// Read method dummy stub to satisfy interface
func (c *cache) Read(p []byte) (n int, err error) {
    return 0, errCacheNotImplemented
}

func (c *cache) Stat() (fs.FileInfo, error) {
    return nil, errCacheNotImplemented
}

// Close the file
func (c *cache) Close() (err error) {
    c.fhsMu.Lock()
    defer c.fhsMu.Unlock()

    // Close any open file handles
    for i := range c.fhs {
        fh := &c.fhs[i]
        newErr := fh.fh.Close()
        if err == nil {
            err = newErr
        }
    }
    c.fhs = nil
    return err
}

// Sys returns OS-specific file for ioctl calls via fd
func (c *cache) Sys() (*os.File, error) {
    return nil, errCacheNotImplemented
}

// Writable returns file for read-write operations
func (c *cache) Writable() (backend.WritableFile, error) {
    return nil, errCacheNotImplemented
}

// check interfaces
var _ backend.Storage = (*cache)(nil)
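The cache above keeps a pool of read handles keyed by the offset each one would read from next, so ReadAt can usually reuse a handle that is already positioned correctly instead of opening a fresh one. A minimal sketch of the same pooling idea over plain *os.File handles (the handlePool name and use of os.File are illustrative; the real code pools rclone vfs.Handle objects):

package main

import (
    "fmt"
    "os"
    "sync"
)

// handlePool reuses open file handles, preferring one whose next
// read offset matches the requested offset (no seek needed).
type handlePool struct {
    path string
    mu   sync.Mutex
    free map[int64][]*os.File // keyed by next read offset
}

func newHandlePool(path string) *handlePool {
    return &handlePool{path: path, free: map[int64][]*os.File{}}
}

func (p *handlePool) get(off int64) (*os.File, error) {
    p.mu.Lock()
    if fhs := p.free[off]; len(fhs) > 0 {
        fh := fhs[len(fhs)-1]
        p.free[off] = fhs[:len(fhs)-1]
        p.mu.Unlock()
        return fh, nil // cache hit: handle already at the right offset
    }
    p.mu.Unlock()
    return os.Open(p.path) // cache miss: open a fresh handle
}

// put returns a handle to the pool, recording the offset it will read from next.
func (p *handlePool) put(fh *os.File, nextOff int64) {
    p.mu.Lock()
    p.free[nextOff] = append(p.free[nextOff], fh)
    p.mu.Unlock()
}

// readAt reads len(buf) bytes at off, reusing pooled handles where possible.
func (p *handlePool) readAt(buf []byte, off int64) (int, error) {
    fh, err := p.get(off)
    if err != nil {
        return 0, err
    }
    n, err := fh.ReadAt(buf, off)
    p.put(fh, off+int64(n))
    return n, err
}

func main() {
    p := newHandlePool("/etc/hostname") // any readable file will do
    buf := make([]byte, 8)
    n, err := p.readAt(buf, 0)
    fmt.Println(n, err, string(buf[:n]))
}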
backend/archive/squashfs/squashfs.go (new file, 446 lines)
@@ -0,0 +1,446 @@
// Package squashfs implements a squashfs archiver for the archive backend
package squashfs

import (
    "context"
    "fmt"
    "io"
    "path"
    "strings"
    "time"

    "github.com/diskfs/go-diskfs/filesystem/squashfs"
    "github.com/rclone/rclone/backend/archive/archiver"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/log"
    "github.com/rclone/rclone/lib/readers"
    "github.com/rclone/rclone/vfs"
    "github.com/rclone/rclone/vfs/vfscommon"
)

func init() {
    archiver.Register(archiver.Archiver{
        New:       New,
        Extension: ".sqfs",
    })
}

// Fs represents a wrapped fs.Fs
type Fs struct {
    f           fs.Fs
    wrapper     fs.Fs
    name        string
    features    *fs.Features // optional features
    vfs         *vfs.VFS
    sqfs        *squashfs.FileSystem // interface to the squashfs
    c           *cache
    node        vfs.Node // squashfs file object - set if reading
    remote      string   // remote of the squashfs file object
    prefix      string   // position for objects
    prefixSlash string   // position for objects with a slash on
    root        string   // position to read from within the archive
}

// New constructs an Fs from the (wrappedFs, remote) with the objects
// prefix with prefix and rooted at root
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) {
    // FIXME vfs cache?
    // FIXME could factor out ReadFileHandle and just use that rather than the full VFS
    fs.Debugf(nil, "Squashfs: New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
    vfsOpt := vfscommon.Opt
    vfsOpt.ReadWait = 0
    VFS := vfs.New(wrappedFs, &vfsOpt)
    node, err := VFS.Stat(remote)
    if err != nil {
        return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
    }

    c := newCache(node)

    // FIXME blocksize
    sqfs, err := squashfs.Read(c, node.Size(), 0, 1024*1024)
    if err != nil {
        return nil, fmt.Errorf("failed to read squashfs: %w", err)
    }

    f := &Fs{
        f:           wrappedFs,
        name:        path.Join(fs.ConfigString(wrappedFs), remote),
        vfs:         VFS,
        node:        node,
        sqfs:        sqfs,
        c:           c,
        remote:      remote,
        root:        strings.Trim(root, "/"),
        prefix:      prefix,
        prefixSlash: prefix + "/",
    }
    if prefix == "" {
        f.prefixSlash = ""
    }

    singleObject := false

    // Find the directory the root points to
    if f.root != "" && !strings.HasSuffix(root, "/") {
        native, err := f.toNative("")
        if err == nil {
            native = strings.TrimRight(native, "/")
            _, err := f.newObjectNative(native)
            if err == nil {
                // If it pointed to a file, find the directory above
                f.root = path.Dir(f.root)
                if f.root == "." || f.root == "/" {
                    f.root = ""
                }
            }
        }
    }

    // FIXME
    // the features here are ones we could support, and they are
    // ANDed with the ones from wrappedFs
    //
    // FIXME some of these need to be forced on - CanHaveEmptyDirectories
    f.features = (&fs.Features{
        CaseInsensitive:         false,
        DuplicateFiles:          false,
        ReadMimeType:            false, // MimeTypes not supported with gsquashfs
        WriteMimeType:           false,
        BucketBased:             false,
        CanHaveEmptyDirectories: true,
    }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)

    if singleObject {
        return f, fs.ErrorIsFile
    }
    return f, nil
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
    return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
    return f.root
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
    return f.features
}

// String returns a description of the FS
func (f *Fs) String() string {
    return fmt.Sprintf("Squashfs %q", f.name)
}

// This turns a remote into a native path in the squashfs starting with a /
func (f *Fs) toNative(remote string) (string, error) {
    native := strings.Trim(remote, "/")
    if f.prefix == "" {
        native = "/" + native
    } else if native == f.prefix {
        native = "/"
    } else if !strings.HasPrefix(native, f.prefixSlash) {
        return "", fmt.Errorf("internal error: %q doesn't start with prefix %q", native, f.prefixSlash)
    } else {
        native = native[len(f.prefix):]
    }
    if f.root != "" {
        native = "/" + f.root + native
    }
    return native, nil
}

// Turn a (nativeDir, leaf) into a remote
func (f *Fs) fromNative(nativeDir string, leaf string) string {
    // fs.Debugf(nil, "nativeDir = %q, leaf = %q, root=%q", nativeDir, leaf, f.root)
    dir := nativeDir
    if f.root != "" {
        dir = strings.TrimPrefix(dir, "/"+f.root)
    }
    remote := f.prefixSlash + strings.Trim(path.Join(dir, leaf), "/")
    // fs.Debugf(nil, "dir = %q, remote=%q", dir, remote)
    return remote
}

// Convert a FileInfo into an Object from native dir
func (f *Fs) objectFromFileInfo(nativeDir string, item squashfs.FileStat) *Object {
    return &Object{
        fs:      f,
        remote:  f.fromNative(nativeDir, item.Name()),
        size:    item.Size(),
        modTime: item.ModTime(),
        item:    item,
    }
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    defer log.Trace(f, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)

    nativeDir, err := f.toNative(dir)
    if err != nil {
        return nil, err
    }

    items, err := f.sqfs.ReadDir(nativeDir)
    if err != nil {
        return nil, fmt.Errorf("read squashfs: couldn't read directory: %w", err)
    }

    entries = make(fs.DirEntries, 0, len(items))
    for _, fi := range items {
        item, ok := fi.(squashfs.FileStat)
        if !ok {
            return nil, fmt.Errorf("internal error: unexpected type for %q: %T", fi.Name(), fi)
        }
        // fs.Debugf(item.Name(), "entry = %#v", item)
        var entry fs.DirEntry
        if err != nil {
            return nil, fmt.Errorf("error reading item %q: %q", item.Name(), err)
        }
        if item.IsDir() {
            var remote = f.fromNative(nativeDir, item.Name())
            entry = fs.NewDir(remote, item.ModTime())
        } else {
            if item.Mode().IsRegular() {
                entry = f.objectFromFileInfo(nativeDir, item)
            } else {
                fs.Debugf(item.Name(), "FIXME Not regular file - skipping")
                continue
            }
        }
        entries = append(entries, entry)
    }

    // fs.Debugf(f, "dir=%q, entries=%v", dir, entries)
    return entries, nil
}

// newObjectNative finds the object at the native path passed in
func (f *Fs) newObjectNative(nativePath string) (o fs.Object, err error) {
    // get the path and filename
    dir, leaf := path.Split(nativePath)
    dir = strings.TrimRight(dir, "/")
    leaf = strings.Trim(leaf, "/")

    // FIXME need to detect directory not found
    fis, err := f.sqfs.ReadDir(dir)
    if err != nil {
        return nil, fs.ErrorObjectNotFound
    }

    for _, fi := range fis {
        if fi.Name() == leaf {
            if fi.IsDir() {
                return nil, fs.ErrorNotAFile
            }
            item, ok := fi.(squashfs.FileStat)
            if !ok {
                return nil, fmt.Errorf("internal error: unexpected type for %q: %T", fi.Name(), fi)
            }
            o = f.objectFromFileInfo(dir, item)
            break
        }
    }
    if o == nil {
        return nil, fs.ErrorObjectNotFound
    }
    return o, nil
}

// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
    defer log.Trace(f, "remote=%q", remote)("obj=%v, err=%v", &o, &err)

    nativePath, err := f.toNative(remote)
    if err != nil {
        return nil, err
    }
    return f.newObjectNative(nativePath)
}

// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
    return time.Second
}

// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
    return vfs.EROFS
}

// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    return vfs.EROFS
}

// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
    return nil, vfs.EROFS
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
    return hash.Set(hash.None)
}

// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
    return f.f
}

// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
    return f.wrapper
}

// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
    f.wrapper = wrapper
}

// Object describes an object to be read from the raw squashfs file
type Object struct {
    fs      *Fs
    remote  string
    size    int64
    modTime time.Time
    item    squashfs.FileStat
}

// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
    return o.fs
}

// Return a string version
func (o *Object) String() string {
    if o == nil {
        return "<nil>"
    }
    return o.Remote()
}

// Turn a squashfs path into a full path for the parent Fs
// func (o *Object) path(remote string) string {
//     return path.Join(o.fs.prefix, remote)
// }

// Remote returns the remote path
func (o *Object) Remote() string {
    return o.remote
}

// Size returns the size of the file
func (o *Object) Size() int64 {
    return o.size
}

// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
    return o.modTime
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
    return vfs.EROFS
}

// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
    return true
}

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
    return "", hash.ErrUnsupported
}

// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
    var offset, limit int64 = 0, -1
    for _, option := range options {
        switch x := option.(type) {
        case *fs.SeekOption:
            offset = x.Offset
        case *fs.RangeOption:
            offset, limit = x.Decode(o.Size())
        default:
            if option.Mandatory() {
                fs.Logf(o, "Unsupported mandatory option: %v", option)
            }
        }
    }

    remote, err := o.fs.toNative(o.remote)
    if err != nil {
        return nil, err
    }

    fs.Debugf(o, "Opening %q", remote)
    // fh, err := o.fs.sqfs.OpenFile(remote, os.O_RDONLY)
    fh, err := o.item.Open()
    if err != nil {
        return nil, err
    }

    // discard data from start as necessary
    if offset > 0 {
        _, err = fh.Seek(offset, io.SeekStart)
        if err != nil {
            return nil, err
        }
    }
    // If limited then don't return everything
    if limit >= 0 {
        fs.Debugf(nil, "limit=%d, offset=%d, options=%v", limit, offset, options)
        return readers.NewLimitedReadCloser(fh, limit), nil
    }

    return fh, nil
}

// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
    return vfs.EROFS
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
    return vfs.EROFS
}

// Check the interfaces are satisfied
var (
    _ fs.Fs        = (*Fs)(nil)
    _ fs.UnWrapper = (*Fs)(nil)
    _ fs.Wrapper   = (*Fs)(nil)
    _ fs.Object    = (*Object)(nil)
)
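toNative and fromNative above translate between rclone remote paths (which carry the archive's prefix) and absolute paths inside the squashfs image (which are re-rooted under root). A small standalone sketch of that mapping, assuming a prefix of "data.sqfs" and a root of "sub" (both names are illustrative, not taken from the source):

package main

import (
    "fmt"
    "path"
    "strings"
)

// toNative mimics the remote -> in-archive path mapping: strip the
// prefix, then re-root the result under the configured root directory.
func toNative(remote, prefix, root string) string {
    native := strings.Trim(remote, "/")
    native = strings.TrimPrefix(native, prefix)
    native = "/" + strings.Trim(native, "/")
    if root != "" {
        native = path.Join("/"+root, native)
    }
    return native
}

func main() {
    // "data.sqfs/docs/readme.txt" as seen by rclone becomes
    // "/sub/docs/readme.txt" inside the archive.
    fmt.Println(toNative("data.sqfs/docs/readme.txt", "data.sqfs", "sub"))
}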
backend/archive/zip/zip.go (new file, 385 lines)
@@ -0,0 +1,385 @@
// Package zip implements a zip archiver for the archive backend
package zip

import (
    "archive/zip"
    "context"
    "errors"
    "fmt"
    "io"
    "os"
    "path"
    "strings"
    "time"

    "github.com/rclone/rclone/backend/archive/archiver"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/dirtree"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/log"
    "github.com/rclone/rclone/lib/readers"
    "github.com/rclone/rclone/vfs"
    "github.com/rclone/rclone/vfs/vfscommon"
)

func init() {
    archiver.Register(archiver.Archiver{
        New:       New,
        Extension: ".zip",
    })
}

// Fs represents a wrapped fs.Fs
type Fs struct {
    f           fs.Fs
    wrapper     fs.Fs
    name        string
    features    *fs.Features // optional features
    vfs         *vfs.VFS
    node        vfs.Node // zip file object - set if reading
    remote      string   // remote of the zip file object
    prefix      string   // position for objects
    prefixSlash string   // position for objects with a slash on
    root        string   // position to read from within the archive
    dt          dirtree.DirTree // read from zipfile
}

// New constructs an Fs from the (wrappedFs, remote) with the objects
// prefix with prefix and rooted at root
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) {
    // FIXME vfs cache?
    // FIXME could factor out ReadFileHandle and just use that rather than the full VFS
    fs.Debugf(nil, "Zip: New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
    vfsOpt := vfscommon.Opt
    vfsOpt.ReadWait = 0
    VFS := vfs.New(wrappedFs, &vfsOpt)
    node, err := VFS.Stat(remote)
    if err != nil {
        return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
    }

    f := &Fs{
        f:           wrappedFs,
        name:        path.Join(fs.ConfigString(wrappedFs), remote),
        vfs:         VFS,
        node:        node,
        remote:      remote,
        root:        root,
        prefix:      prefix,
        prefixSlash: prefix + "/",
    }

    // Read the contents of the zip file
    singleObject, err := f.readZip()
    if err != nil {
        return nil, fmt.Errorf("failed to open zip file: %w", err)
    }

    // FIXME
    // the features here are ones we could support, and they are
    // ANDed with the ones from wrappedFs
    //
    // FIXME some of these need to be forced on - CanHaveEmptyDirectories
    f.features = (&fs.Features{
        CaseInsensitive:         false,
        DuplicateFiles:          false,
        ReadMimeType:            false, // MimeTypes not supported with gzip
        WriteMimeType:           false,
        BucketBased:             false,
        CanHaveEmptyDirectories: true,
    }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)

    if singleObject {
        return f, fs.ErrorIsFile
    }
    return f, nil
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
    return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
    return f.root
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
    return f.features
}

// String returns a description of the FS
func (f *Fs) String() string {
    return fmt.Sprintf("Zip %q", f.name)
}

// readZip reads the zip file into f
//
// Returns singleObject=true if f.root points to a file
func (f *Fs) readZip() (singleObject bool, err error) {
    if f.node == nil {
        return singleObject, fs.ErrorDirNotFound
    }
    size := f.node.Size()
    if size < 0 {
        return singleObject, errors.New("can't read from zip file with unknown size")
    }
    r, err := f.node.Open(os.O_RDONLY)
    if err != nil {
        return singleObject, fmt.Errorf("failed to open zip file: %w", err)
    }
    zr, err := zip.NewReader(r, size)
    if err != nil {
        return singleObject, fmt.Errorf("failed to read zip file: %w", err)
    }
    dt := dirtree.New()
    for _, file := range zr.File {
        remote := strings.Trim(path.Clean(file.Name), "/")
        if remote == "." {
            remote = ""
        }
        remote = path.Join(f.prefix, remote)
        if f.root != "" {
            // Ignore all files outside the root
            if !strings.HasPrefix(remote, f.root) {
                continue
            }
            if remote == f.root {
                remote = ""
            } else {
                remote = strings.TrimPrefix(remote, f.root+"/")
            }
        }
        if strings.HasSuffix(file.Name, "/") {
            dir := fs.NewDir(remote, file.Modified)
            dt.AddDir(dir)
        } else {
            if remote == "" {
                remote = path.Base(f.root)
                singleObject = true
                dt = dirtree.New()
            }
            o := &Object{
                f:      f,
                remote: remote,
                fh:     &file.FileHeader,
                file:   file,
            }
            dt.Add(o)
            if singleObject {
                break
            }
        }
    }
    dt.CheckParents("")
    dt.Sort()
    f.dt = dt
    // fs.Debugf(nil, "dt = %v", dt)
    return singleObject, nil
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    defer log.Trace(f, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
    // _, err = f.strip(dir)
    // if err != nil {
    //     return nil, err
    // }
    entries, ok := f.dt[dir]
    if !ok {
        return nil, fs.ErrorDirNotFound
    }
    fs.Debugf(f, "dir=%q, entries=%v", dir, entries)
    return entries, nil
}

// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
    defer log.Trace(f, "remote=%q", remote)("obj=%v, err=%v", &o, &err)
    if f.dt == nil {
        return nil, fs.ErrorObjectNotFound
    }
    _, entry := f.dt.Find(remote)
    if entry == nil {
        return nil, fs.ErrorObjectNotFound
    }
    o, ok := entry.(*Object)
    if !ok {
        return nil, fs.ErrorNotAFile
    }
    return o, nil
}

// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
    return time.Second
}

// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
    return vfs.EROFS
}

// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    return vfs.EROFS
}

// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
    return nil, vfs.EROFS
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
    return hash.Set(hash.CRC32)
}

// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
    return f.f
}

// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
    return f.wrapper
}

// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
    f.wrapper = wrapper
}

// Object describes an object to be read from the raw zip file
type Object struct {
    f      *Fs
    remote string
    fh     *zip.FileHeader
    file   *zip.File
}

// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
    return o.f
}

// Return a string version
func (o *Object) String() string {
    if o == nil {
        return "<nil>"
    }
    return o.Remote()
}

// Remote returns the remote path
func (o *Object) Remote() string {
    return o.remote
}

// Size returns the size of the file
func (o *Object) Size() int64 {
    return int64(o.fh.UncompressedSize64)
}

// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
    return o.fh.Modified
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
    return vfs.EROFS
}

// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
    return true
}

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
    if ht == hash.CRC32 {
        // FIXME return empty CRC if writing
        if o.f.dt == nil {
            return "", nil
        }
        return fmt.Sprintf("%08x", o.fh.CRC32), nil
    }
    return "", hash.ErrUnsupported
}

// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
    var offset, limit int64 = 0, -1
    for _, option := range options {
        switch x := option.(type) {
        case *fs.SeekOption:
            offset = x.Offset
        case *fs.RangeOption:
            offset, limit = x.Decode(o.Size())
        default:
            if option.Mandatory() {
                fs.Logf(o, "Unsupported mandatory option: %v", option)
            }
        }
    }

    rc, err = o.file.Open()
    if err != nil {
        return nil, err
    }

    // discard data from start as necessary
    if offset > 0 {
        _, err = io.CopyN(io.Discard, rc, offset)
        if err != nil {
            return nil, err
        }
    }
    // If limited then don't return everything
    if limit >= 0 {
        return readers.NewLimitedReadCloser(rc, limit), nil
    }

    return rc, nil
}

// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
    return vfs.EROFS
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
    return vfs.EROFS
}

// Check the interfaces are satisfied
var (
    _ fs.Fs        = (*Fs)(nil)
    _ fs.UnWrapper = (*Fs)(nil)
    _ fs.Wrapper   = (*Fs)(nil)
    _ fs.Object    = (*Object)(nil)
)
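readZip above refuses archives of unknown size because the zip central directory lives at the end of the file, so the standard library's zip.NewReader needs an io.ReaderAt plus the total size to locate it. A minimal standalone illustration of that requirement using archive/zip on a local file (the filename is just an example):

package main

import (
    "archive/zip"
    "fmt"
    "log"
    "os"
)

func main() {
    f, err := os.Open("example.zip")
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    fi, err := f.Stat()
    if err != nil {
        log.Fatal(err)
    }

    // zip.NewReader wants a ReaderAt and the archive size so it can
    // find the central directory at the end of the file.
    zr, err := zip.NewReader(f, fi.Size())
    if err != nil {
        log.Fatal(err)
    }
    for _, file := range zr.File {
        fmt.Printf("%s (%d bytes)\n", file.Name, file.UncompressedSize64)
    }
}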
@@ -1,4 +1,4 @@
-//go:build !plan9 && !solaris && !js && !wasm
+//go:build !plan9 && !solaris && !js
 
 // Package azureblob provides an interface to the Microsoft Azure blob object storage system
 package azureblob
@@ -86,12 +86,56 @@ var (
 	metadataMu sync.Mutex
 )
 
+// system metadata keys which this backend owns
+var systemMetadataInfo = map[string]fs.MetadataHelp{
+	"cache-control": {
+		Help:    "Cache-Control header",
+		Type:    "string",
+		Example: "no-cache",
+	},
+	"content-disposition": {
+		Help:    "Content-Disposition header",
+		Type:    "string",
+		Example: "inline",
+	},
+	"content-encoding": {
+		Help:    "Content-Encoding header",
+		Type:    "string",
+		Example: "gzip",
+	},
+	"content-language": {
+		Help:    "Content-Language header",
+		Type:    "string",
+		Example: "en-US",
+	},
+	"content-type": {
+		Help:    "Content-Type header",
+		Type:    "string",
+		Example: "text/plain",
+	},
+	"tier": {
+		Help:     "Tier of the object",
+		Type:     "string",
+		Example:  "Hot",
+		ReadOnly: true,
+	},
+	"mtime": {
+		Help:    "Time of last modification, read from rclone metadata",
+		Type:    "RFC 3339",
+		Example: "2006-01-02T15:04:05.999999999Z07:00",
+	},
+}
+
 // Register with Fs
 func init() {
 	fs.Register(&fs.RegInfo{
 		Name:        "azureblob",
 		Description: "Microsoft Azure Blob Storage",
 		NewFs:       NewFs,
+		MetadataInfo: &fs.MetadataInfo{
+			System: systemMetadataInfo,
+			Help:   `User metadata is stored as x-ms-meta- keys. Azure metadata keys are case insensitive and are always returned in lower case.`,
+		},
 		Options: []fs.Option{{
 			Name: "account",
 			Help: `Azure Storage Account Name.
@@ -810,6 +854,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	f.features = (&fs.Features{
 		ReadMimeType:      true,
 		WriteMimeType:     true,
+		ReadMetadata:      true,
+		WriteMetadata:     true,
+		UserMetadata:      true,
 		BucketBased:       true,
 		BucketBasedRootOK: true,
 		SetTier:           true,
@@ -1157,6 +1204,289 @@ func (o *Object) updateMetadataWithModTime(modTime time.Time) {
 	o.meta[modTimeKey] = modTime.Format(timeFormatOut)
 }
+
+// parseXMsTags parses the value of the x-ms-tags header into a map.
+// It expects comma-separated key=value pairs. Whitespace around keys and
+// values is trimmed. Empty pairs and empty keys are rejected.
+func parseXMsTags(s string) (map[string]string, error) {
+	if strings.TrimSpace(s) == "" {
+		return map[string]string{}, nil
+	}
+	out := make(map[string]string)
+	parts := strings.Split(s, ",")
+	for _, p := range parts {
+		p = strings.TrimSpace(p)
+		if p == "" {
+			continue
+		}
+		kv := strings.SplitN(p, "=", 2)
+		if len(kv) != 2 {
+			return nil, fmt.Errorf("invalid tag %q", p)
+		}
+		k := strings.TrimSpace(kv[0])
+		v := strings.TrimSpace(kv[1])
+		if k == "" {
+			return nil, fmt.Errorf("invalid tag key in %q", p)
+		}
+		out[k] = v
+	}
+	return out, nil
+}
+
+// mapMetadataToAzure maps a generic metadata map to Azure HTTP headers,
+// user metadata, tags and optional modTime override.
+// Reserved x-ms-* keys (except x-ms-tags) are ignored for user metadata.
+//
+// Pass a logger to surface non-fatal parsing issues (e.g. bad mtime).
+func mapMetadataToAzure(meta map[string]string, logf func(string, ...any)) (headers blob.HTTPHeaders, userMeta map[string]*string, tags map[string]string, modTime *time.Time, err error) {
+	if meta == nil {
+		return headers, nil, nil, nil, nil
+	}
+	tmp := make(map[string]string)
+	for k, v := range meta {
+		lowerKey := strings.ToLower(k)
+		switch lowerKey {
+		case "cache-control":
+			headers.BlobCacheControl = pString(v)
+		case "content-disposition":
+			headers.BlobContentDisposition = pString(v)
+		case "content-encoding":
+			headers.BlobContentEncoding = pString(v)
+		case "content-language":
+			headers.BlobContentLanguage = pString(v)
+		case "content-type":
+			headers.BlobContentType = pString(v)
+		case "x-ms-tags":
+			parsed, perr := parseXMsTags(v)
+			if perr != nil {
+				return headers, nil, nil, nil, perr
+			}
+			// allocate only if there are tags
+			if len(parsed) > 0 {
+				tags = parsed
+			}
+		case "mtime":
+			// Accept multiple layouts for tolerance
+			var parsed time.Time
+			var pErr error
+			for _, layout := range []string{time.RFC3339Nano, time.RFC3339, timeFormatOut} {
+				parsed, pErr = time.Parse(layout, v)
+				if pErr == nil {
+					modTime = &parsed
+					break
+				}
+			}
+			// Log and ignore if unparseable
+			if modTime == nil && logf != nil {
+				logf("metadata: couldn't parse mtime %q: %v", v, pErr)
+			}
+		case "tier":
+			// ignore - handled elsewhere
+		default:
+			// Filter out other reserved headers so they don't end up as user metadata
+			if strings.HasPrefix(lowerKey, "x-ms-") {
+				continue
+			}
+			tmp[lowerKey] = v
+		}
+	}
+	userMeta = toAzureMetaPtr(tmp)
+	return headers, userMeta, tags, modTime, nil
+}
+
+// toAzureMetaPtr converts a map[string]string to map[string]*string as used by Azure SDK
+func toAzureMetaPtr(in map[string]string) map[string]*string {
+	if len(in) == 0 {
+		return nil
+	}
+	out := make(map[string]*string, len(in))
+	for k, v := range in {
+		vv := v
+		out[k] = &vv
+	}
+	return out
+}
+
+// assembleCopyParams prepares headers, metadata and tags for copy operations.
+//
+// It starts from the source properties, optionally overlays mapped metadata
+// from rclone's metadata options, ensures mtime presence when mapping is
+// enabled, and returns whether mapping was actually requested (hadMapping).
+//
+// If includeBaseMeta is true, start user metadata from the source's metadata
+// and overlay mapped values. This matches multipart copy commit behavior.
+// If false, only include mapped user metadata (no source baseline) which
+// matches previous singlepart StartCopyFromURL semantics.
+func assembleCopyParams(ctx context.Context, f *Fs, src fs.Object, srcProps *blob.GetPropertiesResponse, includeBaseMeta bool) (headers blob.HTTPHeaders, meta map[string]*string, tags map[string]string, hadMapping bool, err error) {
+	// Start from source properties
+	headers = blob.HTTPHeaders{
+		BlobCacheControl:       srcProps.CacheControl,
+		BlobContentDisposition: srcProps.ContentDisposition,
+		BlobContentEncoding:    srcProps.ContentEncoding,
+		BlobContentLanguage:    srcProps.ContentLanguage,
+		BlobContentMD5:         srcProps.ContentMD5,
+		BlobContentType:        srcProps.ContentType,
+	}
+	// Optionally deep copy user metadata pointers from source. Normalise keys to
+	// lower-case to avoid duplicate x-ms-meta headers when we later inject/overlay
+	// metadata (Azure treats keys case-insensitively but Go's http.Header will
+	// join duplicate keys into a comma separated list, which breaks shared-key
+	// signing).
+	if includeBaseMeta && len(srcProps.Metadata) > 0 {
+		meta = make(map[string]*string, len(srcProps.Metadata))
+		for k, v := range srcProps.Metadata {
+			if v != nil {
+				vv := *v
+				meta[strings.ToLower(k)] = &vv
+			}
+		}
+	}
+
+	// Only consider mapping if metadata pipeline is enabled
+	if fs.GetConfig(ctx).Metadata {
+		mapped, mapErr := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
+		if mapErr != nil {
+			return headers, meta, nil, false, fmt.Errorf("failed to map metadata: %w", mapErr)
+		}
+		if mapped != nil {
+			// Map rclone metadata to Azure shapes
+			mappedHeaders, userMeta, mappedTags, mappedModTime, herr := mapMetadataToAzure(mapped, func(format string, args ...any) { fs.Debugf(f, format, args...) })
+			if herr != nil {
+				return headers, meta, nil, false, fmt.Errorf("metadata mapping: %w", herr)
+			}
+			hadMapping = true
+			// Overlay headers (only non-nil)
+			if mappedHeaders.BlobCacheControl != nil {
+				headers.BlobCacheControl = mappedHeaders.BlobCacheControl
+			}
+			if mappedHeaders.BlobContentDisposition != nil {
+				headers.BlobContentDisposition = mappedHeaders.BlobContentDisposition
+			}
+			if mappedHeaders.BlobContentEncoding != nil {
+				headers.BlobContentEncoding = mappedHeaders.BlobContentEncoding
+			}
+			if mappedHeaders.BlobContentLanguage != nil {
+				headers.BlobContentLanguage = mappedHeaders.BlobContentLanguage
+			}
+			if mappedHeaders.BlobContentType != nil {
+				headers.BlobContentType = mappedHeaders.BlobContentType
+			}
+			// Overlay user metadata
+			if len(userMeta) > 0 {
+				if meta == nil {
+					meta = make(map[string]*string, len(userMeta))
+				}
+				for k, v := range userMeta {
+					meta[k] = v
+				}
+			}
+			// Apply tags if any
+			if len(mappedTags) > 0 {
+				tags = mappedTags
+			}
+			// Ensure mtime present using mapped or source time
+			if _, ok := meta[modTimeKey]; !ok {
+				when := src.ModTime(ctx)
+				if mappedModTime != nil {
+					when = *mappedModTime
+				}
+				val := when.Format(time.RFC3339Nano)
+				if meta == nil {
+					meta = make(map[string]*string, 1)
+				}
+				meta[modTimeKey] = &val
+			}
+			// Ensure content-type fallback to source if not set by mapper
+			if headers.BlobContentType == nil {
+				headers.BlobContentType = srcProps.ContentType
+			}
+		} else {
+			// Mapping enabled but not provided: ensure mtime present based on source ModTime
+			if _, ok := meta[modTimeKey]; !ok {
+				when := src.ModTime(ctx)
+				val := when.Format(time.RFC3339Nano)
+				if meta == nil {
+					meta = make(map[string]*string, 1)
+				}
+				meta[modTimeKey] = &val
+			}
+		}
+	}
+
+	return headers, meta, tags, hadMapping, nil
+}
+
+// applyMappedMetadata applies mapped metadata and headers to the object state for uploads.
+//
+// It reads `--metadata`, `--metadata-set`, and `--metadata-mapper` outputs via fs.GetMetadataOptions
+// and updates o.meta, o.tags and ui.httpHeaders accordingly.
+func (o *Object) applyMappedMetadata(ctx context.Context, src fs.ObjectInfo, ui *uploadInfo, options []fs.OpenOption) (modTime time.Time, err error) {
+	// Start from the source modtime; may be overridden by metadata
+	modTime = src.ModTime(ctx)
+
+	// Fetch mapped metadata if --metadata is enabled
+	meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
+	if err != nil {
+		return modTime, err
+	}
+	if meta == nil {
+		// No metadata processing requested
+		return modTime, nil
+	}
+
+	// Map metadata using common helper
+	headers, userMeta, tags, mappedModTime, err := mapMetadataToAzure(meta, func(format string, args ...any) { fs.Debugf(o, format, args...) })
+	if err != nil {
+		return modTime, err
+	}
+	// Merge headers into ui
+	if headers.BlobCacheControl != nil {
+		ui.httpHeaders.BlobCacheControl = headers.BlobCacheControl
+	}
+	if headers.BlobContentDisposition != nil {
+		ui.httpHeaders.BlobContentDisposition = headers.BlobContentDisposition
+	}
+	if headers.BlobContentEncoding != nil {
+		ui.httpHeaders.BlobContentEncoding = headers.BlobContentEncoding
+	}
+	if headers.BlobContentLanguage != nil {
+		ui.httpHeaders.BlobContentLanguage = headers.BlobContentLanguage
+	}
+	if headers.BlobContentType != nil {
+		ui.httpHeaders.BlobContentType = headers.BlobContentType
+	}
+
+	// Apply user metadata to o.meta with a single critical section
+	if len(userMeta) > 0 {
+		metadataMu.Lock()
+		if o.meta == nil {
+			o.meta = make(map[string]string, len(userMeta))
+		}
+		for k, v := range userMeta {
+			if v != nil {
+				o.meta[k] = *v
+			}
+		}
+		metadataMu.Unlock()
+	}
+
+	// Apply tags
+	if len(tags) > 0 {
+		if o.tags == nil {
+			o.tags = make(map[string]string, len(tags))
+		}
+		for k, v := range tags {
+			o.tags[k] = v
+		}
+	}
+
+	if mappedModTime != nil {
+		modTime = *mappedModTime
+	}
+
+	return modTime, nil
+}
+
 // Returns whether file is a directory marker or not
 func isDirectoryMarker(size int64, metadata map[string]*string, remote string) bool {
 	// Directory markers are 0 length
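To make the mapping contract above concrete, here is a hedged sketch of how one rclone metadata map would be split by mapMetadataToAzure according to its doc comments: header-shaped keys become blob HTTP headers, x-ms-tags becomes blob tags, mtime becomes the modTime override, other x-ms-* keys are filtered out, and everything else becomes lower-cased user metadata. The keys and values below are invented for illustration, not captured output:

package main

import "fmt"

func main() {
	// Input as produced by rclone's metadata pipeline (--metadata-set etc.)
	meta := map[string]string{
		"content-type":   "text/plain",           // -> HTTPHeaders.BlobContentType
		"cache-control":  "no-cache",             // -> HTTPHeaders.BlobCacheControl
		"x-ms-tags":      "env=prod, team=infra", // -> blob tags {env: prod, team: infra}
		"mtime":          "2024-01-02T03:04:05Z", // -> modTime override
		"x-ms-something": "ignored",              // reserved key, filtered out
		"Owner":          "alice",                // -> user metadata {"owner": "alice"}
	}
	for k, v := range meta {
		fmt.Printf("%s=%s\n", k, v)
	}
}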
@@ -1338,9 +1668,9 @@ func (f *Fs) containerOK(container string) bool {
 }
 
 // listDir lists a single directory
-func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
+func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix string, addContainer bool, callback func(fs.DirEntry) error) (err error) {
 	if !f.containerOK(containerName) {
-		return nil, fs.ErrorDirNotFound
+		return fs.ErrorDirNotFound
 	}
 	err = f.list(ctx, containerName, directory, prefix, addContainer, false, int32(f.opt.ListChunkSize), func(remote string, object *container.BlobItem, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
@@ -1348,16 +1678,16 @@ func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix strin
 			return err
 		}
 		if entry != nil {
-			entries = append(entries, entry)
+			return callback(entry)
 		}
 		return nil
 	})
 	if err != nil {
-		return nil, err
+		return err
 	}
 	// container must be present if listing succeeded
 	f.cache.MarkOK(containerName)
-	return entries, nil
+	return nil
 }
 
 // listContainers returns all the containers to out
@@ -1393,14 +1723,47 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	return list.WithListP(ctx, dir, f)
+}
+
+// ListP lists the objects and directories of the Fs starting
+// from dir non recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
+	list := list.NewHelper(callback)
 	container, directory := f.split(dir)
 	if container == "" {
 		if directory != "" {
-			return nil, fs.ErrorListBucketRequired
+			return fs.ErrorListBucketRequired
 		}
-		return f.listContainers(ctx)
+		entries, err := f.listContainers(ctx)
+		if err != nil {
+			return err
+		}
+		for _, entry := range entries {
+			err = list.Add(entry)
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		err := f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "", list.Add)
+		if err != nil {
+			return err
+		}
+
 	}
-	return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
+	return list.Flush()
 }
 
 // ListR lists the objects and directories of the Fs starting
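The List/ListP change above moves the backend from building a complete fs.DirEntries slice to streaming entries through a callback helper, so large directories can be delivered in tranches without holding everything in memory and the caller can abort early. A generic sketch of the same pattern (the entry/listAll/listP names are illustrative, not rclone APIs):

package main

import "fmt"

// entry stands in for fs.DirEntry in this sketch.
type entry string

// listAll is the old style: collect everything, then return it.
func listAll(n int) []entry {
	out := make([]entry, 0, n)
	for i := 0; i < n; i++ {
		out = append(out, entry(fmt.Sprintf("file-%d", i)))
	}
	return out
}

// listP is the new style: push each entry through a callback and stop
// early if the callback reports an error.
func listP(n int, callback func(entry) error) error {
	for i := 0; i < n; i++ {
		if err := callback(entry(fmt.Sprintf("file-%d", i))); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	fmt.Println(len(listAll(3)))
	_ = listP(3, func(e entry) error {
		fmt.Println(e)
		return nil
	})
}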
@@ -1918,18 +2281,19 @@ func (f *Fs) copyMultipart(ctx context.Context, remote, dstContainer, dstPath st
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Convert metadata from source object
|
// Prepare metadata/headers/tags for destination
|
||||||
|
// For multipart commit, include base metadata from source then overlay mapped
|
||||||
|
commitHeaders, commitMeta, commitTags, _, err := assembleCopyParams(ctx, f, src, srcProperties, true)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("multipart copy: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert metadata from source or mapper
|
||||||
options := blockblob.CommitBlockListOptions{
|
options := blockblob.CommitBlockListOptions{
|
||||||
Metadata: srcProperties.Metadata,
|
Metadata: commitMeta,
|
||||||
Tier: parseTier(f.opt.AccessTier),
|
Tags: commitTags,
|
||||||
HTTPHeaders: &blob.HTTPHeaders{
|
Tier: parseTier(f.opt.AccessTier),
|
||||||
BlobCacheControl: srcProperties.CacheControl,
|
HTTPHeaders: &commitHeaders,
|
||||||
BlobContentDisposition: srcProperties.ContentDisposition,
|
|
||||||
BlobContentEncoding: srcProperties.ContentEncoding,
|
|
||||||
BlobContentLanguage: srcProperties.ContentLanguage,
|
|
||||||
BlobContentMD5: srcProperties.ContentMD5,
|
|
||||||
BlobContentType: srcProperties.ContentType,
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Finalise the upload session
|
// Finalise the upload session
|
||||||
@@ -1960,10 +2324,36 @@ func (f *Fs) copySinglepart(ctx context.Context, remote, dstContainer, dstPath s
 		return nil, fmt.Errorf("single part copy: source auth: %w", err)
 	}

-	// Start the copy
+	// Prepare mapped metadata/tags/headers if requested
 	options := blob.StartCopyFromURLOptions{
 		Tier: parseTier(f.opt.AccessTier),
 	}
+	var postHeaders *blob.HTTPHeaders
+	// Read source properties and assemble params; this also handles the case when mapping is disabled
+	srcProps, err := src.readMetaDataAlways(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("single part copy: read source properties: %w", err)
+	}
+	// For singlepart copy, do not include base metadata from source in StartCopyFromURL
+	headers, meta, tags, hadMapping, aerr := assembleCopyParams(ctx, f, src, srcProps, false)
+	if aerr != nil {
+		return nil, fmt.Errorf("single part copy: %w", aerr)
+	}
+	// Apply tags and post-copy headers only when mapping requested changes
+	if len(tags) > 0 {
+		options.BlobTags = make(map[string]string, len(tags))
+		for k, v := range tags {
+			options.BlobTags[k] = v
+		}
+	}
+	if hadMapping {
+		// Only set metadata explicitly when mapping was requested; otherwise
+		// let the service copy source metadata (including mtime) automatically.
+		if len(meta) > 0 {
+			options.Metadata = meta
+		}
+		postHeaders = &headers
+	}
 	var startCopy blob.StartCopyFromURLResponse
 	err = f.pacer.Call(func() (bool, error) {
 		startCopy, err = dstBlobSVC.StartCopyFromURL(ctx, srcURL, &options)
@@ -1993,6 +2383,16 @@ func (f *Fs) copySinglepart(ctx context.Context, remote, dstContainer, dstPath s
 		pollTime = min(2*pollTime, time.Second)
 	}

+	// If mapper requested header changes, set them post-copy
+	if postHeaders != nil {
+		blb := f.getBlobSVC(dstContainer, dstPath)
+		_, setErr := blb.SetHTTPHeaders(ctx, *postHeaders, nil)
+		if setErr != nil {
+			return nil, fmt.Errorf("single part copy: failed to set headers: %w", setErr)
+		}
+	}
+	// Metadata (when requested) is set via StartCopyFromURL options.Metadata
+
 	return f.NewObject(ctx, remote)
 }

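Both copy paths take their tag set from the mapped metadata: the "x-ms-tags" value is a comma-separated list of key=value pairs (the tests later in this diff use values such as "env=dev,team=sync", and a pair without an equals sign is rejected with an "invalid tag" error). The parser below is a hypothetical stand-in for that conversion, showing how such a value could become the map copied into blob.StartCopyFromURLOptions.BlobTags; the backend's real helper sits inside assembleCopyParams and is not shown in this diff.

package sketch

import (
	"fmt"
	"strings"
)

// parseXMsTags splits an "x-ms-tags" metadata value such as
// "env=dev,team=sync" into a key/value map, rejecting pairs without "=".
func parseXMsTags(value string) (map[string]string, error) {
	tags := make(map[string]string)
	for _, pair := range strings.Split(value, ",") {
		pair = strings.TrimSpace(pair)
		if pair == "" {
			continue
		}
		k, v, ok := strings.Cut(pair, "=")
		if !ok || k == "" {
			return nil, fmt.Errorf("invalid tag %q: expected key=value", pair)
		}
		tags[k] = v
	}
	return tags, nil
}

With the value used in the tests, parseXMsTags("env=dev,team=sync") yields {env: dev, team: sync}, while "badpair-without-equals" returns an invalid tag error, matching the behaviour the tests assert.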
@@ -2119,12 +2519,40 @@ func (o *Object) getMetadata() (metadata map[string]*string) {
 	}
 	metadata = make(map[string]*string, len(o.meta))
 	for k, v := range o.meta {
-		v := v
 		metadata[k] = &v
 	}
 	return metadata
 }

+// Metadata returns metadata for an object
+//
+// It returns a combined view of system and user metadata.
+func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
+	// Ensure metadata is loaded
+	if err := o.readMetaData(ctx); err != nil {
+		return nil, err
+	}
+
+	m := fs.Metadata{}
+
+	// System metadata we expose
+	if !o.modTime.IsZero() {
+		m["mtime"] = o.modTime.Format(time.RFC3339Nano)
+	}
+	if o.accessTier != "" {
+		m["tier"] = string(o.accessTier)
+	}
+
+	// Merge user metadata (already lower-cased keys)
+	metadataMu.Lock()
+	for k, v := range o.meta {
+		m[k] = v
+	}
+	metadataMu.Unlock()
+
+	return m, nil
+}
+
 // decodeMetaDataFromPropertiesResponse sets the metadata from the data passed in
 //
 // Sets
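The new Metadata method gives callers a single map mixing the system keys set above ("mtime", "tier") with the user metadata stored on the blob. A small sketch of reading it through a type assertion follows; the fs.Metadataer interface name is this example's assumption about how the method is surfaced to callers, not something stated in the diff.

package sketch

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs"
)

// printMetadata reads the combined system/user metadata of an object.
func printMetadata(ctx context.Context, o fs.Object) error {
	do, ok := o.(fs.Metadataer)
	if !ok {
		return fmt.Errorf("%v: metadata not supported", o)
	}
	m, err := do.Metadata(ctx)
	if err != nil {
		return err
	}
	fmt.Println("mtime:", m["mtime"]) // RFC3339Nano modification time, when known
	fmt.Println("tier:", m["tier"])   // access tier, when set
	for k, v := range m {
		fmt.Printf("%s=%s\n", k, v)
	}
	return nil
}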
@@ -2765,8 +3193,6 @@ func (o *Object) clearUncommittedBlocks(ctx context.Context) (err error) {
 		blockList  blockblob.GetBlockListResponse
 		properties *blob.GetPropertiesResponse
 		options    *blockblob.CommitBlockListOptions
-		// Use temporary pacer as this can be called recursively which can cause a deadlock with --max-connections
-		pacer = fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
 	)

 	properties, err = o.readMetaDataAlways(ctx)
@@ -2778,7 +3204,7 @@ func (o *Object) clearUncommittedBlocks(ctx context.Context) (err error) {

 	if objectExists {
 		// Get the committed block list
-		err = pacer.Call(func() (bool, error) {
+		err = o.fs.pacer.Call(func() (bool, error) {
 			blockList, err = blockBlobSVC.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
 			return o.fs.shouldRetry(ctx, err)
 		})
@@ -2820,7 +3246,7 @@ func (o *Object) clearUncommittedBlocks(ctx context.Context) (err error) {

 	// Commit only the committed blocks
 	fs.Debugf(o, "Committing %d blocks to remove uncommitted blocks", len(blockIDs))
-	err = pacer.Call(func() (bool, error) {
+	err = o.fs.pacer.Call(func() (bool, error) {
 		_, err := blockBlobSVC.CommitBlockList(ctx, blockIDs, options)
 		return o.fs.shouldRetry(ctx, err)
 	})
@@ -2965,17 +3391,19 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
 	// 	containerPath = containerPath[:len(containerPath)-1]
 	// }

-	// Update Mod time
-	o.updateMetadataWithModTime(src.ModTime(ctx))
-	if err != nil {
-		return ui, err
-	}
-
-	// Create the HTTP headers for the upload
+	// Start with default content-type based on source
 	ui.httpHeaders = blob.HTTPHeaders{
 		BlobContentType: pString(fs.MimeType(ctx, src)),
 	}

+	// Apply mapped metadata/headers/tags if requested
+	modTime, err := o.applyMappedMetadata(ctx, src, &ui, options)
+	if err != nil {
+		return ui, err
+	}
+	// Ensure mtime is set in metadata based on possibly overridden modTime
+	o.updateMetadataWithModTime(modTime)
+
 	// Compute the Content-MD5 of the file. As we stream all uploads it
 	// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
 	if !o.fs.opt.DisableCheckSum {
@@ -3156,6 +3584,7 @@ var (
 	_ fs.PutStreamer     = &Fs{}
 	_ fs.Purger          = &Fs{}
 	_ fs.ListRer         = &Fs{}
+	_ fs.ListPer         = &Fs{}
 	_ fs.OpenChunkWriter = &Fs{}
 	_ fs.Object          = &Object{}
 	_ fs.MimeTyper       = &Object{}
@@ -1,15 +1,20 @@
-//go:build !plan9 && !solaris && !js && !wasm
+//go:build !plan9 && !solaris && !js

 package azureblob

 import (
 	"context"
 	"encoding/base64"
+	"fmt"
+	"net/http"
 	"strings"
 	"testing"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
 	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
 	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/object"
 	"github.com/rclone/rclone/fstest"
 	"github.com/rclone/rclone/fstest/fstests"
 	"github.com/rclone/rclone/lib/random"
@@ -148,4 +153,417 @@ func (f *Fs) testWriteUncommittedBlocks(t *testing.T) {
 func (f *Fs) InternalTest(t *testing.T) {
 	t.Run("Features", f.testFeatures)
 	t.Run("WriteUncommittedBlocks", f.testWriteUncommittedBlocks)
+	t.Run("Metadata", f.testMetadataPaths)
+}
+
+// helper to read blob properties for an object
+func getProps(ctx context.Context, t *testing.T, o fs.Object) *blob.GetPropertiesResponse {
+	ao := o.(*Object)
+	props, err := ao.readMetaDataAlways(ctx)
+	require.NoError(t, err)
+	return props
+}
+
+// helper to assert select headers and user metadata
+func assertHeadersAndMetadata(t *testing.T, props *blob.GetPropertiesResponse, want map[string]string, wantUserMeta map[string]string) {
+	// Headers
+	get := func(p *string) string {
+		if p == nil {
+			return ""
+		}
+		return *p
+	}
+	if v, ok := want["content-type"]; ok {
+		assert.Equal(t, v, get(props.ContentType), "content-type")
+	}
+	if v, ok := want["cache-control"]; ok {
+		assert.Equal(t, v, get(props.CacheControl), "cache-control")
+	}
+	if v, ok := want["content-disposition"]; ok {
+		assert.Equal(t, v, get(props.ContentDisposition), "content-disposition")
+	}
+	if v, ok := want["content-encoding"]; ok {
+		assert.Equal(t, v, get(props.ContentEncoding), "content-encoding")
+	}
+	if v, ok := want["content-language"]; ok {
+		assert.Equal(t, v, get(props.ContentLanguage), "content-language")
+	}
+	// User metadata (case-insensitive keys from service)
+	norm := make(map[string]*string, len(props.Metadata))
+	for kk, vv := range props.Metadata {
+		norm[strings.ToLower(kk)] = vv
+	}
+	for k, v := range wantUserMeta {
+		pv, ok := norm[strings.ToLower(k)]
+		if assert.True(t, ok, fmt.Sprintf("missing user metadata key %q", k)) {
+			if pv == nil {
+				assert.Equal(t, v, "", k)
+			} else {
+				assert.Equal(t, v, *pv, k)
+			}
+		} else {
+			// Log available keys for diagnostics
+			keys := make([]string, 0, len(props.Metadata))
+			for kk := range props.Metadata {
+				keys = append(keys, kk)
+			}
+			t.Logf("available user metadata keys: %v", keys)
+		}
+	}
+}
+
+// helper to read blob tags for an object
+func getTagsMap(ctx context.Context, t *testing.T, o fs.Object) map[string]string {
+	ao := o.(*Object)
+	blb := ao.getBlobSVC()
+	resp, err := blb.GetTags(ctx, nil)
+	require.NoError(t, err)
+	out := make(map[string]string)
+	for _, tag := range resp.BlobTagSet {
+		if tag.Key != nil {
+			k := *tag.Key
+			v := ""
+			if tag.Value != nil {
+				v = *tag.Value
+			}
+			out[k] = v
+		}
+	}
+	return out
+}
+
+// Test metadata across different write paths
+func (f *Fs) testMetadataPaths(t *testing.T) {
+	ctx := context.Background()
+	if testing.Short() {
+		t.Skip("skipping in short mode")
+	}
+
+	// Common expected metadata and headers
+	baseMeta := fs.Metadata{
+		"cache-control":       "no-cache",
+		"content-disposition": "inline",
+		"content-language":    "en-US",
+		// Note: Don't set content-encoding here to avoid download decoding differences
+		// We will set a custom user metadata key
+		"potato": "royal",
+		// and modtime
+		"mtime": fstest.Time("2009-05-06T04:05:06.499999999Z").Format(time.RFC3339Nano),
+	}
+
+	// Singlepart upload
+	t.Run("PutSinglepart", func(t *testing.T) {
+		// size less than chunk size
+		contents := random.String(int(f.opt.ChunkSize / 2))
+		item := fstest.NewItem("meta-single.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
+		// override content-type via metadata mapping
+		meta := fs.Metadata{}
+		meta.Merge(baseMeta)
+		meta["content-type"] = "text/plain"
+		obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/html", meta)
+		defer func() { _ = obj.Remove(ctx) }()
+
+		props := getProps(ctx, t, obj)
+		assertHeadersAndMetadata(t, props, map[string]string{
+			"content-type":        "text/plain",
+			"cache-control":       "no-cache",
+			"content-disposition": "inline",
+			"content-language":    "en-US",
+		}, map[string]string{
+			"potato": "royal",
+		})
+		_ = http.StatusOK // keep import for parity but don't inspect RawResponse
+	})
+
+	// Multipart upload
+	t.Run("PutMultipart", func(t *testing.T) {
+		// size greater than chunk size to force multipart
+		contents := random.String(int(f.opt.ChunkSize + 1024))
+		item := fstest.NewItem("meta-multipart.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
+		meta := fs.Metadata{}
+		meta.Merge(baseMeta)
+		meta["content-type"] = "application/json"
+		obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/html", meta)
+		defer func() { _ = obj.Remove(ctx) }()
+
+		props := getProps(ctx, t, obj)
+		assertHeadersAndMetadata(t, props, map[string]string{
+			"content-type":        "application/json",
+			"cache-control":       "no-cache",
+			"content-disposition": "inline",
+			"content-language":    "en-US",
+		}, map[string]string{
+			"potato": "royal",
+		})

+		// Tags: Singlepart upload
+		t.Run("PutSinglepartTags", func(t *testing.T) {
+			contents := random.String(int(f.opt.ChunkSize / 2))
+			item := fstest.NewItem("tags-single.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
+			meta := fs.Metadata{
+				"x-ms-tags": "env=dev,team=sync",
+			}
+			obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/plain", meta)
+			defer func() { _ = obj.Remove(ctx) }()
+
+			tags := getTagsMap(ctx, t, obj)
+			assert.Equal(t, "dev", tags["env"])
+			assert.Equal(t, "sync", tags["team"])
+		})
+
+		// Tags: Multipart upload
+		t.Run("PutMultipartTags", func(t *testing.T) {
+			contents := random.String(int(f.opt.ChunkSize + 2048))
+			item := fstest.NewItem("tags-multipart.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
+			meta := fs.Metadata{
+				"x-ms-tags": "project=alpha,release=2025-08",
+			}
+			obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "application/octet-stream", meta)
+			defer func() { _ = obj.Remove(ctx) }()
+
+			tags := getTagsMap(ctx, t, obj)
+			assert.Equal(t, "alpha", tags["project"])
+			assert.Equal(t, "2025-08", tags["release"])
+		})
+	})
+
+	// Singlepart copy with metadata-set mapping; omit content-type to exercise fallback
+	t.Run("CopySinglepart", func(t *testing.T) {
+		// create small source
+		contents := random.String(int(f.opt.ChunkSize / 2))
+		srcItem := fstest.NewItem("meta-copy-single-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
+		srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "text/plain", nil)
+		defer func() { _ = srcObj.Remove(ctx) }()
+
+		// set mapping via MetadataSet
+		ctx2, ci := fs.AddConfig(ctx)
+		ci.Metadata = true
+		ci.MetadataSet = fs.Metadata{
+			"cache-control":       "private, max-age=60",
+			"content-disposition": "attachment; filename=foo.txt",
+			"content-language":    "fr",
+			// no content-type: should fallback to source
+			"potato": "maris",
+		}
+
+		// do copy
+		dstName := "meta-copy-single-dst.txt"
+		dst, err := f.Copy(ctx2, srcObj, dstName)
+		require.NoError(t, err)
+		defer func() { _ = dst.Remove(ctx2) }()
+
+		props := getProps(ctx2, t, dst)
+		// content-type should fallback to source (text/plain)
+		assertHeadersAndMetadata(t, props, map[string]string{
+			"content-type":        "text/plain",
+			"cache-control":       "private, max-age=60",
+			"content-disposition": "attachment; filename=foo.txt",
+			"content-language":    "fr",
+		}, map[string]string{
+			"potato": "maris",
+		})
+		// mtime should be populated on copy when --metadata is used
+		// and should equal the source ModTime (RFC3339Nano)
+		// Read user metadata (case-insensitive)
+		m := props.Metadata
+		var gotMtime string
+		for k, v := range m {
+			if strings.EqualFold(k, "mtime") && v != nil {
+				gotMtime = *v
+				break
+			}
+		}
+		if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") {
+			// parse and compare times ignoring formatting differences
+			parsed, err := time.Parse(time.RFC3339Nano, gotMtime)
+			require.NoError(t, err)
+			assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime")
+		}
+	})
+
+	// CopySinglepart with only --metadata (no MetadataSet) must inject mtime and preserve src content-type
+	t.Run("CopySinglepart_MetadataOnly", func(t *testing.T) {
+		contents := random.String(int(f.opt.ChunkSize / 2))
+		srcItem := fstest.NewItem("meta-copy-single-only-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
+		srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "text/plain", nil)
+		defer func() { _ = srcObj.Remove(ctx) }()
+
+		ctx2, ci := fs.AddConfig(ctx)
+		ci.Metadata = true
+
+		dstName := "meta-copy-single-only-dst.txt"
+		dst, err := f.Copy(ctx2, srcObj, dstName)
+		require.NoError(t, err)
+		defer func() { _ = dst.Remove(ctx2) }()
+
+		props := getProps(ctx2, t, dst)
+		assertHeadersAndMetadata(t, props, map[string]string{
+			"content-type": "text/plain",
+		}, map[string]string{})
+		// Assert mtime injected
+		m := props.Metadata
+		var gotMtime string
+		for k, v := range m {
+			if strings.EqualFold(k, "mtime") && v != nil {
+				gotMtime = *v
+				break
+			}
+		}
+		if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") {
+			parsed, err := time.Parse(time.RFC3339Nano, gotMtime)
+			require.NoError(t, err)
+			assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime")
+		}
+	})
+
+	// Multipart copy with metadata-set mapping; omit content-type to exercise fallback
+	t.Run("CopyMultipart", func(t *testing.T) {
+		// create large source to force multipart
+		contents := random.String(int(f.opt.CopyCutoff + 1024))
+		srcItem := fstest.NewItem("meta-copy-multi-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
+		srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "application/octet-stream", nil)
+		defer func() { _ = srcObj.Remove(ctx) }()
+
+		// set mapping via MetadataSet
+		ctx2, ci := fs.AddConfig(ctx)
+		ci.Metadata = true
+		ci.MetadataSet = fs.Metadata{
+			"cache-control": "max-age=0, no-cache",
+			// omit content-type to trigger fallback
+			"content-language": "de",
+			"potato":           "desiree",
+		}
+
+		dstName := "meta-copy-multi-dst.txt"
+		dst, err := f.Copy(ctx2, srcObj, dstName)
+		require.NoError(t, err)
+		defer func() { _ = dst.Remove(ctx2) }()
+
+		props := getProps(ctx2, t, dst)
+		// content-type should fallback to source (application/octet-stream)
+		assertHeadersAndMetadata(t, props, map[string]string{
+			"content-type":     "application/octet-stream",
+			"cache-control":    "max-age=0, no-cache",
+			"content-language": "de",
+		}, map[string]string{
+			"potato": "desiree",
+		})
+		// mtime should be populated on copy when --metadata is used
+		m := props.Metadata
+		var gotMtime string
+		for k, v := range m {
+			if strings.EqualFold(k, "mtime") && v != nil {
+				gotMtime = *v
+				break
+			}
+		}
+		if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") {
+			parsed, err := time.Parse(time.RFC3339Nano, gotMtime)
+			require.NoError(t, err)
+			assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime")
+		}
+	})
+
+	// CopyMultipart with only --metadata must inject mtime and preserve src content-type
+	t.Run("CopyMultipart_MetadataOnly", func(t *testing.T) {
+		contents := random.String(int(f.opt.CopyCutoff + 2048))
+		srcItem := fstest.NewItem("meta-copy-multi-only-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
+		srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "application/octet-stream", nil)
+		defer func() { _ = srcObj.Remove(ctx) }()
+
+		ctx2, ci := fs.AddConfig(ctx)
+		ci.Metadata = true
+
+		dstName := "meta-copy-multi-only-dst.txt"
+		dst, err := f.Copy(ctx2, srcObj, dstName)
+		require.NoError(t, err)
+		defer func() { _ = dst.Remove(ctx2) }()
+
+		props := getProps(ctx2, t, dst)
+		assertHeadersAndMetadata(t, props, map[string]string{
+			"content-type": "application/octet-stream",
+		}, map[string]string{})
+		m := props.Metadata
+		var gotMtime string
+		for k, v := range m {
+			if strings.EqualFold(k, "mtime") && v != nil {
+				gotMtime = *v
+				break
+			}
+		}
+		if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") {
+			parsed, err := time.Parse(time.RFC3339Nano, gotMtime)
+			require.NoError(t, err)
+			assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime")
+		}
+	})
+
+	// Tags: Singlepart copy
+	t.Run("CopySinglepartTags", func(t *testing.T) {
+		// create small source
+		contents := random.String(int(f.opt.ChunkSize / 2))
+		srcItem := fstest.NewItem("tags-copy-single-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
+		srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "text/plain", nil)
+		defer func() { _ = srcObj.Remove(ctx) }()
+
+		// set mapping via MetadataSet including tags
+		ctx2, ci := fs.AddConfig(ctx)
+		ci.Metadata = true
+		ci.MetadataSet = fs.Metadata{
+			"x-ms-tags": "copy=single,mode=test",
+		}
+
+		dstName := "tags-copy-single-dst.txt"
+		dst, err := f.Copy(ctx2, srcObj, dstName)
+		require.NoError(t, err)
+		defer func() { _ = dst.Remove(ctx2) }()
+
+		tags := getTagsMap(ctx2, t, dst)
+		assert.Equal(t, "single", tags["copy"])
+		assert.Equal(t, "test", tags["mode"])
+	})
+
+	// Tags: Multipart copy
+	t.Run("CopyMultipartTags", func(t *testing.T) {
+		// create large source to force multipart
+		contents := random.String(int(f.opt.CopyCutoff + 4096))
+		srcItem := fstest.NewItem("tags-copy-multi-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
+		srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "application/octet-stream", nil)
+		defer func() { _ = srcObj.Remove(ctx) }()
+
+		ctx2, ci := fs.AddConfig(ctx)
+		ci.Metadata = true
+		ci.MetadataSet = fs.Metadata{
+			"x-ms-tags": "copy=multi,mode=test",
+		}
+
+		dstName := "tags-copy-multi-dst.txt"
+		dst, err := f.Copy(ctx2, srcObj, dstName)
+		require.NoError(t, err)
+		defer func() { _ = dst.Remove(ctx2) }()
+
+		tags := getTagsMap(ctx2, t, dst)
+		assert.Equal(t, "multi", tags["copy"])
+		assert.Equal(t, "test", tags["mode"])
+	})
+
+	// Negative: invalid x-ms-tags must error
+	t.Run("InvalidXMsTags", func(t *testing.T) {
+		contents := random.String(32)
+		item := fstest.NewItem("tags-invalid.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
+		// construct ObjectInfo with invalid x-ms-tags
+		buf := strings.NewReader(contents)
+		// Build obj info with metadata
+		meta := fs.Metadata{
+			"x-ms-tags": "badpair-without-equals",
+		}
+		// force metadata on
+		ctx2, ci := fs.AddConfig(ctx)
+		ci.Metadata = true
+		obji := object.NewStaticObjectInfo(item.Path, item.ModTime, int64(len(contents)), true, nil, nil)
+		obji = obji.WithMetadata(meta).WithMimeType("text/plain")
+		_, err := f.Put(ctx2, buf, obji)
+		require.Error(t, err)
+		assert.Contains(t, err.Error(), "invalid tag")
+	})
 }
@@ -1,6 +1,6 @@
 // Test AzureBlob filesystem interface

-//go:build !plan9 && !solaris && !js && !wasm
+//go:build !plan9 && !solaris && !js

 package azureblob

@@ -1,7 +1,7 @@
 // Build for azureblob for unsupported platforms to stop go complaining
 // about "no buildable Go source files "

-//go:build plan9 || solaris || js || wasm
+//go:build plan9 || solaris || js

 // Package azureblob provides an interface to the Microsoft Azure blob object storage system
 package azureblob
@@ -1,4 +1,4 @@
-//go:build !plan9 && !js && !wasm
+//go:build !plan9 && !js

 // Package azurefiles provides an interface to Microsoft Azure Files
 package azurefiles
@@ -56,6 +56,7 @@ import (
 	"github.com/rclone/rclone/fs/config/obscure"
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/list"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/env"
 	"github.com/rclone/rclone/lib/readers"
@@ -843,15 +844,32 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
 //
 // This should return ErrDirNotFound if the directory isn't found.
 func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
-	var entries fs.DirEntries
+	return list.WithListP(ctx, dir, f)
+}
+
+// ListP lists the objects and directories of the Fs starting
+// from dir non recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
+	list := list.NewHelper(callback)
 	subDirClient := f.dirClient(dir)

 	// Checking whether directory exists
 	_, err := subDirClient.GetProperties(ctx, nil)
 	if fileerror.HasCode(err, fileerror.ParentNotFound, fileerror.ResourceNotFound) {
-		return entries, fs.ErrorDirNotFound
+		return fs.ErrorDirNotFound
 	} else if err != nil {
-		return entries, err
+		return err
 	}

 	opt := &directory.ListFilesAndDirectoriesOptions{
@@ -863,7 +881,7 @@ func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
 	for pager.More() {
 		resp, err := pager.NextPage(ctx)
 		if err != nil {
-			return entries, err
+			return err
 		}
 		for _, directory := range resp.Segment.Directories {
 			// Name *string `xml:"Name"`
@@ -889,7 +907,10 @@ func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
 			if directory.Properties.ContentLength != nil {
 				entry.SetSize(*directory.Properties.ContentLength)
 			}
-			entries = append(entries, entry)
+			err = list.Add(entry)
+			if err != nil {
+				return err
+			}
 		}
 		for _, file := range resp.Segment.Files {
 			leaf := f.opt.Enc.ToStandardPath(*file.Name)
@@ -903,10 +924,13 @@ func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
 			if file.Properties.LastWriteTime != nil {
 				entry.modTime = *file.Properties.LastWriteTime
 			}
-			entries = append(entries, entry)
+			err = list.Add(entry)
+			if err != nil {
+				return err
+			}
 		}
 	}
-	return entries, nil
+	return list.Flush()
 }

 // ------------------------------------------------------------
@@ -1313,10 +1337,29 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}
 	srcURL := srcObj.fileClient().URL()
 	fc := f.fileClient(remote)
-	_, err = fc.StartCopyFromURL(ctx, srcURL, &opt)
+	startCopy, err := fc.StartCopyFromURL(ctx, srcURL, &opt)
 	if err != nil {
 		return nil, fmt.Errorf("Copy failed: %w", err)
 	}
+
+	// Poll for completion if necessary
+	//
+	// The for loop is never executed for same storage account copies.
+	copyStatus := startCopy.CopyStatus
+	var properties file.GetPropertiesResponse
+	pollTime := 100 * time.Millisecond
+
+	for copyStatus != nil && string(*copyStatus) == string(file.CopyStatusTypePending) {
+		time.Sleep(pollTime)
+
+		properties, err = fc.GetProperties(ctx, &file.GetPropertiesOptions{})
+		if err != nil {
+			return nil, err
+		}
+		copyStatus = properties.CopyStatus
+		pollTime = min(2*pollTime, time.Second)
+	}
+
 	dstObj, err := f.NewObject(ctx, remote)
 	if err != nil {
 		return nil, fmt.Errorf("Copy: NewObject failed: %w", err)
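The copy change above waits for cross-account copies by polling the destination while the copy status is still pending, doubling the wait each round and capping it at one second. The generic sketch below isolates that backoff pattern; pollPending and its check callback are illustrative names, not azurefiles APIs.

package sketch

import (
	"context"
	"time"
)

// pollPending waits for an asynchronous operation to leave its pending state,
// doubling the delay between checks up to a one second cap, as the copy code
// above does.
func pollPending(ctx context.Context, check func(context.Context) (pending bool, err error)) error {
	pollTime := 100 * time.Millisecond
	for {
		pending, err := check(ctx)
		if err != nil || !pending {
			return err
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(pollTime):
		}
		pollTime = min(2*pollTime, time.Second)
	}
}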
@@ -1431,6 +1474,7 @@ var (
 	_ fs.DirMover       = &Fs{}
 	_ fs.Copier         = &Fs{}
 	_ fs.OpenWriterAter = &Fs{}
+	_ fs.ListPer        = &Fs{}
 	_ fs.Object         = &Object{}
 	_ fs.MimeTyper      = &Object{}
 )
@@ -1,4 +1,4 @@
-//go:build !plan9 && !js && !wasm
+//go:build !plan9 && !js

 package azurefiles

@@ -1,4 +1,4 @@
-//go:build !plan9 && !js && !wasm
+//go:build !plan9 && !js

 package azurefiles

@@ -1,7 +1,7 @@
 // Build for azurefiles for unsupported platforms to stop go complaining
 // about "no buildable Go source files "

-//go:build plan9 || js || wasm
+//go:build plan9 || js

 // Package azurefiles provides an interface to Microsoft Azure Files
 package azurefiles
@@ -48,6 +48,14 @@ type LifecycleRule struct {
 	FileNamePrefix string `json:"fileNamePrefix"`
 }

+// ServerSideEncryption is a configuration object for B2 Server-Side Encryption
+type ServerSideEncryption struct {
+	Mode           string `json:"mode"`
+	Algorithm      string `json:"algorithm"`      // Encryption algorithm to use
+	CustomerKey    string `json:"customerKey"`    // User provided Base64 encoded key that is used by the server to encrypt files
+	CustomerKeyMd5 string `json:"customerKeyMd5"` // An MD5 hash of the decoded key
+}
+
 // Timestamp is a UTC time when this file was uploaded. It is a base
 // 10 number of milliseconds since midnight, January 1, 1970 UTC. This
 // fits in a 64 bit integer such as the type "long" in the programming
@@ -125,23 +133,32 @@ type File struct {
 	Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
 }

-// AuthorizeAccountResponse is as returned from the b2_authorize_account call
-type AuthorizeAccountResponse struct {
+// StorageAPI is as returned from the b2_authorize_account call
+type StorageAPI struct {
 	AbsoluteMinimumPartSize int `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
-	AccountID               string `json:"accountId"` // The identifier for the account.
 	Allowed                 struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
-		BucketID     string   `json:"bucketId"`     // When present, access is restricted to one bucket.
-		BucketName   string   `json:"bucketName"`   // When present, name of bucket - may be empty
-		Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
+		Buckets []struct { // When present, access is restricted to one or more buckets.
+			ID   string `json:"id"`   // ID of bucket
+			Name string `json:"name"` // When present, name of bucket - may be empty
+		} `json:"buckets"`
+		Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has for every bucket.
 		NamePrefix   any      `json:"namePrefix"`   // When present, access is restricted to files whose names start with the prefix
 	} `json:"allowed"`
 	APIURL              string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
-	AuthorizationToken  string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
 	DownloadURL         string `json:"downloadUrl"`         // The base URL to use for downloading files.
 	MinimumPartSize     int    `json:"minimumPartSize"`     // DEPRECATED: This field will always have the same value as recommendedPartSize. Use recommendedPartSize instead.
 	RecommendedPartSize int    `json:"recommendedPartSize"` // The recommended size for each part of a large file. We recommend using this part size for optimal upload performance.
 }

+// AuthorizeAccountResponse is as returned from the b2_authorize_account call
+type AuthorizeAccountResponse struct {
+	AccountID          string `json:"accountId"`          // The identifier for the account.
+	AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
+	APIs               struct { // Supported APIs for this account / key. These are API-dependent JSON objects.
+		Storage StorageAPI `json:"storageApi"`
+	} `json:"apiInfo"`
+}
+
 // ListBucketsRequest is parameters for b2_list_buckets call
 type ListBucketsRequest struct {
 	AccountID string `json:"accountId"` // The identifier for the account.
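With the restructuring above, the fields that used to sit at the top level of the authorization response (API URL, download URL, allowances) now live under apiInfo.storageApi, and a key can be restricted to several buckets instead of one. The sketch below decodes a response body into trimmed-down copies of the structs above to show where the values end up; it is illustrative and not part of the backend.

package sketch

import "encoding/json"

// storageAPI and authorizeAccountResponse mirror (a subset of) the new
// StorageAPI / AuthorizeAccountResponse shapes defined above.
type storageAPI struct {
	APIURL      string `json:"apiUrl"`
	DownloadURL string `json:"downloadUrl"`
	Allowed     struct {
		Buckets []struct {
			ID   string `json:"id"`
			Name string `json:"name"`
		} `json:"buckets"`
		Capabilities []string `json:"capabilities"`
	} `json:"allowed"`
}

type authorizeAccountResponse struct {
	AccountID          string `json:"accountId"`
	AuthorizationToken string `json:"authorizationToken"`
	APIs               struct {
		Storage storageAPI `json:"storageApi"`
	} `json:"apiInfo"`
}

// decode shows that the storage endpoints and bucket restrictions now sit
// under apiInfo.storageApi rather than at the top level.
func decode(data []byte) (apiURL string, restrictedBuckets int, err error) {
	var r authorizeAccountResponse
	if err = json.Unmarshal(data, &r); err != nil {
		return "", 0, err
	}
	return r.APIs.Storage.APIURL, len(r.APIs.Storage.Allowed.Buckets), nil
}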
@@ -261,21 +278,22 @@ type GetFileInfoRequest struct {
 //
 // Example: { "src_last_modified_millis" : "1452802803026", "large_file_sha1" : "a3195dc1e7b46a2ff5da4b3c179175b75671e80d", "color": "blue" }
 type StartLargeFileRequest struct {
-	BucketID    string            `json:"bucketId"`    //The ID of the bucket that the file will go in.
+	BucketID    string            `json:"bucketId"`    // The ID of the bucket that the file will go in.
 	Name        string            `json:"fileName"`    // The name of the file. See Files for requirements on file names.
 	ContentType string            `json:"contentType"` // The MIME type of the content of the file, which will be returned in the Content-Type header when downloading the file. Use the Content-Type b2/x-auto to automatically set the stored Content-Type post upload. In the case where a file extension is absent or the lookup fails, the Content-Type is set to application/octet-stream.
 	Info        map[string]string `json:"fileInfo"`    // A JSON object holding the name/value pairs for the custom file info.
+	ServerSideEncryption *ServerSideEncryption `json:"serverSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption
 }

 // StartLargeFileResponse is the response to StartLargeFileRequest
 type StartLargeFileResponse struct {
 	ID              string            `json:"fileId"`      // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
 	Name            string            `json:"fileName"`    // The name of this file, which can be used with b2_download_file_by_name.
 	AccountID       string            `json:"accountId"`   // The identifier for the account.
 	BucketID        string            `json:"bucketId"`    // The unique ID of the bucket.
 	ContentType     string            `json:"contentType"` // The MIME type of the file.
 	Info            map[string]string `json:"fileInfo"`    // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
-	UploadTimestamp Timestamp         `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
+	UploadTimestamp Timestamp         `json:"uploadTimestamp,omitempty"` // This is a UTC time when this file was uploaded.
 }

 // GetUploadPartURLRequest is passed to b2_get_upload_part_url
@@ -325,21 +343,25 @@ type CancelLargeFileResponse struct {

 // CopyFileRequest is as passed to b2_copy_file
 type CopyFileRequest struct {
 	SourceID          string            `json:"sourceFileId"`                  // The ID of the source file being copied.
 	Name              string            `json:"fileName"`                      // The name of the new file being created.
 	Range             string            `json:"range,omitempty"`               // The range of bytes to copy. If not provided, the whole source file will be copied.
 	MetadataDirective string            `json:"metadataDirective,omitempty"`   // The strategy for how to populate metadata for the new file: COPY or REPLACE
 	ContentType       string            `json:"contentType,omitempty"`         // The MIME type of the content of the file (REPLACE only)
 	Info              map[string]string `json:"fileInfo,omitempty"`            // This field stores the metadata that will be stored with the file. (REPLACE only)
 	DestBucketID      string            `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used
+	SourceServerSideEncryption      *ServerSideEncryption `json:"sourceServerSideEncryption,omitempty"`      // A JSON object holding values related to Server-Side Encryption for the source file
+	DestinationServerSideEncryption *ServerSideEncryption `json:"destinationServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the destination file
 }

 // CopyPartRequest is the request for b2_copy_part - the response is UploadPartResponse
 type CopyPartRequest struct {
 	SourceID    string `json:"sourceFileId"`    // The ID of the source file being copied.
 	LargeFileID string `json:"largeFileId"`     // The ID of the large file the part will belong to, as returned by b2_start_large_file.
 	PartNumber  int64  `json:"partNumber"`      // Which part this is (starting from 1)
 	Range       string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
+	SourceServerSideEncryption      *ServerSideEncryption `json:"sourceServerSideEncryption,omitempty"`      // A JSON object holding values related to Server-Side Encryption for the source file
+	DestinationServerSideEncryption *ServerSideEncryption `json:"destinationServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the destination file
 }

 // UpdateBucketRequest describes a request to modify a B2 bucket
backend/b2/b2.go (359 changed lines)

@@ -8,7 +9,9 @@ import (
 	"bufio"
 	"bytes"
 	"context"
+	"crypto/md5"
 	"crypto/sha1"
+	"encoding/base64"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -53,6 +55,9 @@ const (
 	nameHeader       = "X-Bz-File-Name"
 	timestampHeader  = "X-Bz-Upload-Timestamp"
 	retryAfterHeader = "Retry-After"
+	sseAlgorithmHeader = "X-Bz-Server-Side-Encryption-Customer-Algorithm"
+	sseKeyHeader       = "X-Bz-Server-Side-Encryption-Customer-Key"
+	sseMd5Header       = "X-Bz-Server-Side-Encryption-Customer-Key-Md5"
 	minSleep         = 10 * time.Millisecond
 	maxSleep         = 5 * time.Minute
 	decayConstant    = 1 // bigger for slower decay, exponential
@@ -67,7 +72,7 @@ const (

 // Globals
 var (
-	errNotWithVersions  = errors.New("can't modify or delete files in --b2-versions mode")
+	errNotWithVersions  = errors.New("can't modify files in --b2-versions mode")
 	errNotWithVersionAt = errors.New("can't modify or delete files in --b2-version-at mode")
 )

@@ -252,6 +257,51 @@ See: [rclone backend lifecycle](#lifecycle) for setting lifecycles after bucket
 			Default: (encoder.Display |
 				encoder.EncodeBackSlash |
 				encoder.EncodeInvalidUtf8),
+		}, {
+			Name:     "sse_customer_algorithm",
+			Help:     "If using SSE-C, the server-side encryption algorithm used when storing this object in B2.",
+			Advanced: true,
+			Examples: []fs.OptionExample{{
+				Value: "",
+				Help:  "None",
+			}, {
+				Value: "AES256",
+				Help:  "Advanced Encryption Standard (256 bits key length)",
+			}},
+		}, {
+			Name: "sse_customer_key",
+			Help: `To use SSE-C, you may provide the secret encryption key encoded in a UTF-8 compatible string to encrypt/decrypt your data
+
+Alternatively you can provide --sse-customer-key-base64.`,
+			Advanced: true,
+			Examples: []fs.OptionExample{{
+				Value: "",
+				Help:  "None",
+			}},
+			Sensitive: true,
+		}, {
+			Name: "sse_customer_key_base64",
+			Help: `To use SSE-C, you may provide the secret encryption key encoded in Base64 format to encrypt/decrypt your data
+
+Alternatively you can provide --sse-customer-key.`,
+			Advanced: true,
+			Examples: []fs.OptionExample{{
+				Value: "",
+				Help:  "None",
+			}},
+			Sensitive: true,
+		}, {
+			Name: "sse_customer_key_md5",
+			Help: `If using SSE-C you may provide the secret encryption key MD5 checksum (optional).
+
+If you leave it blank, this is calculated automatically from the sse_customer_key provided.
+`,
+			Advanced: true,
+			Examples: []fs.OptionExample{{
+				Value: "",
+				Help:  "None",
+			}},
+			Sensitive: true,
 		}},
 	})
 }
@@ -274,6 +324,10 @@ type Options struct {
 	DownloadAuthorizationDuration fs.Duration          `config:"download_auth_duration"`
 	Lifecycle                     int                  `config:"lifecycle"`
 	Enc                           encoder.MultiEncoder `config:"encoding"`
+	SSECustomerAlgorithm          string               `config:"sse_customer_algorithm"`
+	SSECustomerKey                string               `config:"sse_customer_key"`
+	SSECustomerKeyBase64          string               `config:"sse_customer_key_base64"`
+	SSECustomerKeyMD5             string               `config:"sse_customer_key_md5"`
 }

 // Fs represents a remote b2 server
@@ -504,6 +558,24 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if opt.Endpoint == "" {
 		opt.Endpoint = defaultEndpoint
 	}
+	if opt.SSECustomerKey != "" && opt.SSECustomerKeyBase64 != "" {
+		return nil, errors.New("b2: can't use both sse_customer_key and sse_customer_key_base64 at the same time")
+	} else if opt.SSECustomerKeyBase64 != "" {
+		// Decode the Base64-encoded key and store it in the SSECustomerKey field
+		decoded, err := base64.StdEncoding.DecodeString(opt.SSECustomerKeyBase64)
+		if err != nil {
+			return nil, fmt.Errorf("b2: Could not decode sse_customer_key_base64: %w", err)
+		}
+		opt.SSECustomerKey = string(decoded)
+	} else {
+		// Encode the raw key as Base64
+		opt.SSECustomerKeyBase64 = base64.StdEncoding.EncodeToString([]byte(opt.SSECustomerKey))
+	}
+	if opt.SSECustomerKey != "" && opt.SSECustomerKeyMD5 == "" {
+		// Calculate CustomerKeyMd5 if not supplied
+		md5sumBinary := md5.Sum([]byte(opt.SSECustomerKey))
+		opt.SSECustomerKeyMD5 = base64.StdEncoding.EncodeToString(md5sumBinary[:])
+	}
 	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name: name,
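The SSE-C handling above normalises the key configuration before the Fs is built: the raw and Base64 forms are mutually exclusive, the missing form is derived from the other, and the key MD5 is computed when not supplied. The standalone sketch below mirrors that derivation so it can be read in isolation; the function name and signature are illustrative, not backend API.

package sketch

import (
	"crypto/md5"
	"encoding/base64"
	"errors"
	"fmt"
)

// normaliseSSEKey mirrors the NewFs logic: exactly one of rawKey/base64Key may
// be supplied; the MD5 is calculated from the decoded key when not provided.
func normaliseSSEKey(rawKey, base64Key, keyMD5 string) (key, keyB64, md5sum string, err error) {
	switch {
	case rawKey != "" && base64Key != "":
		return "", "", "", errors.New("can't use both the raw and base64 key at the same time")
	case base64Key != "":
		decoded, err := base64.StdEncoding.DecodeString(base64Key)
		if err != nil {
			return "", "", "", fmt.Errorf("could not decode base64 key: %w", err)
		}
		key, keyB64 = string(decoded), base64Key
	default:
		key, keyB64 = rawKey, base64.StdEncoding.EncodeToString([]byte(rawKey))
	}
	md5sum = keyMD5
	if key != "" && md5sum == "" {
		sum := md5.Sum([]byte(key))
		md5sum = base64.StdEncoding.EncodeToString(sum[:])
	}
	return key, keyB64, md5sum, nil
}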
@@ -535,17 +607,29 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if err != nil {
 		return nil, fmt.Errorf("failed to authorize account: %w", err)
 	}
-	// If this is a key limited to a single bucket, it must exist already
-	if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
-		allowedBucket := f.opt.Enc.ToStandardName(f.info.Allowed.BucketName)
-		if allowedBucket == "" {
-			return nil, errors.New("bucket that application key is restricted to no longer exists")
+	// If this is a key limited to one or more buckets, one of them must exist
+	// and be ours.
+	if f.rootBucket != "" && len(f.info.APIs.Storage.Allowed.Buckets) != 0 {
+		buckets := f.info.APIs.Storage.Allowed.Buckets
+		var rootFound = false
+		var rootID string
+		for _, b := range buckets {
+			allowedBucket := f.opt.Enc.ToStandardName(b.Name)
+			if allowedBucket == "" {
+				fs.Debugf(f, "bucket %q that application key is restricted to no longer exists", b.ID)
+				continue
+			}
+
+			if allowedBucket == f.rootBucket {
+				rootFound = true
+				rootID = b.ID
+			}
 		}
-		if allowedBucket != f.rootBucket {
-			return nil, fmt.Errorf("you must use bucket %q with this application key", allowedBucket)
+		if !rootFound {
+			return nil, fmt.Errorf("you must use bucket(s) %q with this application key", buckets)
 		}
 		f.cache.MarkOK(f.rootBucket)
-		f.setBucketID(f.rootBucket, f.info.Allowed.BucketID)
+		f.setBucketID(f.rootBucket, rootID)
 	}
 	if f.rootBucket != "" && f.rootDirectory != "" {
 		// Check to see if the (bucket,directory) is actually an existing file
@@ -571,7 +655,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
 	defer f.authMu.Unlock()
 	opts := rest.Opts{
 		Method:   "GET",
-		Path:     "/b2api/v1/b2_authorize_account",
+		Path:     "/b2api/v4/b2_authorize_account",
 		RootURL:  f.opt.Endpoint,
 		UserName: f.opt.Account,
 		Password: f.opt.Key,
@@ -584,13 +668,13 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
 	if err != nil {
 		return fmt.Errorf("failed to authenticate: %w", err)
 	}
-	f.srv.SetRoot(f.info.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
+	f.srv.SetRoot(f.info.APIs.Storage.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
 	return nil
 }

 // hasPermission returns if the current AuthorizationToken has the selected permission
 func (f *Fs) hasPermission(permission string) bool {
-	return slices.Contains(f.info.Allowed.Capabilities, permission)
+	return slices.Contains(f.info.APIs.Storage.Allowed.Capabilities, permission)
 }

 // getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
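The hunk above moves the native API call to /b2api/v4/b2_authorize_account, still authenticated with the account ID and application key. A rough standalone sketch of the request shape, assuming plain HTTP basic auth as the rest client's UserName/Password implies; this is an illustration, not the backend's rest.Opts-based code:

```go
package main

import (
	"fmt"
	"net/http"
)

// newAuthorizeRequest sketches the authorize call implied by the diff above:
// GET <endpoint>/b2api/v4/b2_authorize_account with the account ID and
// application key supplied via HTTP basic auth.
func newAuthorizeRequest(endpoint, account, key string) (*http.Request, error) {
	req, err := http.NewRequest("GET", endpoint+"/b2api/v4/b2_authorize_account", nil)
	if err != nil {
		return nil, err
	}
	req.SetBasicAuth(account, key)
	return req, nil
}

func main() {
	// Placeholder credentials for illustration only.
	req, _ := newAuthorizeRequest("https://api.backblazeb2.com", "ACCOUNT_ID", "APP_KEY")
	fmt.Println(req.Method, req.URL)
}
```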
@@ -847,7 +931,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.File
|
|||||||
}
|
}
|
||||||
|
|
||||||
// listDir lists a single directory
|
// listDir lists a single directory
|
||||||
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
|
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
|
||||||
last := ""
|
last := ""
|
||||||
err = f.list(ctx, bucket, directory, prefix, f.rootBucket == "", false, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
|
err = f.list(ctx, bucket, directory, prefix, f.rootBucket == "", false, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
|
||||||
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
|
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
|
||||||
@@ -855,16 +939,16 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if entry != nil {
|
if entry != nil {
|
||||||
entries = append(entries, entry)
|
return callback(entry)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
// bucket must be present if listing succeeded
|
// bucket must be present if listing succeeded
|
||||||
f.cache.MarkOK(bucket)
|
f.cache.MarkOK(bucket)
|
||||||
return entries, nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// listBuckets returns all the buckets to out
|
// listBuckets returns all the buckets to out
|
||||||
@@ -890,14 +974,46 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	return list.WithListP(ctx, dir, f)
+}
+
+// ListP lists the objects and directories of the Fs starting
+// from dir non recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
+	list := list.NewHelper(callback)
 	bucket, directory := f.split(dir)
 	if bucket == "" {
 		if directory != "" {
-			return nil, fs.ErrorListBucketRequired
+			return fs.ErrorListBucketRequired
+		}
+		entries, err := f.listBuckets(ctx)
+		if err != nil {
+			return err
+		}
+		for _, entry := range entries {
+			err = list.Add(entry)
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
+		if err != nil {
+			return err
 		}
-		return f.listBuckets(ctx)
 	}
-	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
+	return list.Flush()
 }

 // ListR lists the objects and directories of the Fs starting
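List now delegates to ListP, which streams entries to the callback through a list helper instead of accumulating one big slice. A small sketch of that Add/Flush batching pattern, using an invented batcher type rather than rclone's actual fs/list helper:

```go
package main

import "fmt"

// Entry stands in for fs.DirEntry in this standalone sketch.
type Entry string

// batcher imitates the helper used by the ListP implementations above:
// Add buffers entries and delivers a tranche once the batch is full,
// Flush delivers whatever is left. Names and batch size are illustrative.
type batcher struct {
	batch    []Entry
	size     int
	callback func([]Entry) error
}

func newBatcher(size int, cb func([]Entry) error) *batcher {
	return &batcher{size: size, callback: cb}
}

func (b *batcher) Add(e Entry) error {
	b.batch = append(b.batch, e)
	if len(b.batch) >= b.size {
		return b.Flush()
	}
	return nil
}

func (b *batcher) Flush() error {
	if len(b.batch) == 0 {
		return nil
	}
	err := b.callback(b.batch)
	b.batch = nil
	return err
}

func main() {
	b := newBatcher(2, func(batch []Entry) error {
		fmt.Println("tranche:", batch)
		return nil
	})
	for _, e := range []Entry{"a", "b", "c"} {
		_ = b.Add(e)
	}
	_ = b.Flush() // deliver the final partial tranche
}
```

The design point is that a callback error stops the listing immediately, so very large buckets never have to be held in memory before the caller sees any entries.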
@@ -963,44 +1079,83 @@ type listBucketFn func(*api.Bucket) error
|
|||||||
|
|
||||||
// listBucketsToFn lists the buckets to the function supplied
|
// listBucketsToFn lists the buckets to the function supplied
|
||||||
func (f *Fs) listBucketsToFn(ctx context.Context, bucketName string, fn listBucketFn) error {
|
func (f *Fs) listBucketsToFn(ctx context.Context, bucketName string, fn listBucketFn) error {
|
||||||
var account = api.ListBucketsRequest{
|
responses := make([]api.ListBucketsResponse, len(f.info.APIs.Storage.Allowed.Buckets))[:0]
|
||||||
AccountID: f.info.AccountID,
|
|
||||||
BucketID: f.info.Allowed.BucketID,
|
call := func(id string) error {
|
||||||
}
|
var account = api.ListBucketsRequest{
|
||||||
if bucketName != "" && account.BucketID == "" {
|
AccountID: f.info.AccountID,
|
||||||
account.BucketName = f.opt.Enc.FromStandardName(bucketName)
|
BucketID: id,
|
||||||
|
}
|
||||||
|
if bucketName != "" && account.BucketID == "" {
|
||||||
|
account.BucketName = f.opt.Enc.FromStandardName(bucketName)
|
||||||
|
}
|
||||||
|
|
||||||
|
var response api.ListBucketsResponse
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/b2_list_buckets",
|
||||||
|
}
|
||||||
|
err := f.pacer.Call(func() (bool, error) {
|
||||||
|
resp, err := f.srv.CallJSON(ctx, &opts, &account, &response)
|
||||||
|
return f.shouldRetry(ctx, resp, err)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
responses = append(responses, response)
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var response api.ListBucketsResponse
|
for i := range f.info.APIs.Storage.Allowed.Buckets {
|
||||||
opts := rest.Opts{
|
b := &f.info.APIs.Storage.Allowed.Buckets[i]
|
||||||
Method: "POST",
|
// Empty names indicate a bucket that no longer exists, this is non-fatal
|
||||||
Path: "/b2_list_buckets",
|
// for multi-bucket API keys.
|
||||||
|
if b.Name == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// When requesting a specific bucket skip over non-matching names
|
||||||
|
if bucketName != "" && b.Name != bucketName {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
err := call(b.ID)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
err := f.pacer.Call(func() (bool, error) {
|
|
||||||
resp, err := f.srv.CallJSON(ctx, &opts, &account, &response)
|
if len(f.info.APIs.Storage.Allowed.Buckets) == 0 {
|
||||||
return f.shouldRetry(ctx, resp, err)
|
err := call("")
|
||||||
})
|
if err != nil {
|
||||||
if err != nil {
|
return err
|
||||||
return err
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
f.bucketIDMutex.Lock()
|
f.bucketIDMutex.Lock()
|
||||||
f.bucketTypeMutex.Lock()
|
f.bucketTypeMutex.Lock()
|
||||||
f._bucketID = make(map[string]string, 1)
|
f._bucketID = make(map[string]string, 1)
|
||||||
f._bucketType = make(map[string]string, 1)
|
f._bucketType = make(map[string]string, 1)
|
||||||
for i := range response.Buckets {
|
|
||||||
bucket := &response.Buckets[i]
|
for ri := range responses {
|
||||||
bucket.Name = f.opt.Enc.ToStandardName(bucket.Name)
|
response := &responses[ri]
|
||||||
f.cache.MarkOK(bucket.Name)
|
for i := range response.Buckets {
|
||||||
f._bucketID[bucket.Name] = bucket.ID
|
bucket := &response.Buckets[i]
|
||||||
f._bucketType[bucket.Name] = bucket.Type
|
bucket.Name = f.opt.Enc.ToStandardName(bucket.Name)
|
||||||
|
f.cache.MarkOK(bucket.Name)
|
||||||
|
f._bucketID[bucket.Name] = bucket.ID
|
||||||
|
f._bucketType[bucket.Name] = bucket.Type
|
||||||
|
}
|
||||||
}
|
}
|
||||||
f.bucketTypeMutex.Unlock()
|
f.bucketTypeMutex.Unlock()
|
||||||
f.bucketIDMutex.Unlock()
|
f.bucketIDMutex.Unlock()
|
||||||
for i := range response.Buckets {
|
for ri := range responses {
|
||||||
bucket := &response.Buckets[i]
|
response := &responses[ri]
|
||||||
err = fn(bucket)
|
for i := range response.Buckets {
|
||||||
if err != nil {
|
bucket := &response.Buckets[i]
|
||||||
return err
|
err := fn(bucket)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
@@ -1403,6 +1558,16 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
|
|||||||
Name: f.opt.Enc.FromStandardPath(dstPath),
|
Name: f.opt.Enc.FromStandardPath(dstPath),
|
||||||
DestBucketID: destBucketID,
|
DestBucketID: destBucketID,
|
||||||
}
|
}
|
||||||
|
if f.opt.SSECustomerKey != "" && f.opt.SSECustomerKeyMD5 != "" {
|
||||||
|
serverSideEncryptionConfig := api.ServerSideEncryption{
|
||||||
|
Mode: "SSE-C",
|
||||||
|
Algorithm: f.opt.SSECustomerAlgorithm,
|
||||||
|
CustomerKey: f.opt.SSECustomerKeyBase64,
|
||||||
|
CustomerKeyMd5: f.opt.SSECustomerKeyMD5,
|
||||||
|
}
|
||||||
|
request.SourceServerSideEncryption = &serverSideEncryptionConfig
|
||||||
|
request.DestinationServerSideEncryption = &serverSideEncryptionConfig
|
||||||
|
}
|
||||||
if newInfo == nil {
|
if newInfo == nil {
|
||||||
request.MetadataDirective = "COPY"
|
request.MetadataDirective = "COPY"
|
||||||
} else {
|
} else {
|
||||||
@@ -1492,7 +1657,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
|||||||
bucket, bucketPath := f.split(remote)
|
bucket, bucketPath := f.split(remote)
|
||||||
var RootURL string
|
var RootURL string
|
||||||
if f.opt.DownloadURL == "" {
|
if f.opt.DownloadURL == "" {
|
||||||
RootURL = f.info.DownloadURL
|
RootURL = f.info.APIs.Storage.DownloadURL
|
||||||
} else {
|
} else {
|
||||||
RootURL = f.opt.DownloadURL
|
RootURL = f.opt.DownloadURL
|
||||||
}
|
}
|
||||||
@@ -1834,15 +1999,16 @@ var _ io.ReadCloser = &openFile{}

 func (o *Object) getOrHead(ctx context.Context, method string, options []fs.OpenOption) (resp *http.Response, info *api.File, err error) {
 	opts := rest.Opts{
 		Method:       method,
 		Options:      options,
 		NoResponse:   method == "HEAD",
+		ExtraHeaders: map[string]string{},
 	}

 	// Use downloadUrl from backblaze if downloadUrl is not set
 	// otherwise use the custom downloadUrl
 	if o.fs.opt.DownloadURL == "" {
-		opts.RootURL = o.fs.info.DownloadURL
+		opts.RootURL = o.fs.info.APIs.Storage.DownloadURL
 	} else {
 		opts.RootURL = o.fs.opt.DownloadURL
 	}
@@ -1854,6 +2020,11 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
 		bucket, bucketPath := o.split()
 		opts.Path += "/file/" + urlEncode(o.fs.opt.Enc.FromStandardName(bucket)) + "/" + urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath))
 	}
+	if o.fs.opt.SSECustomerKey != "" && o.fs.opt.SSECustomerKeyMD5 != "" {
+		opts.ExtraHeaders[sseAlgorithmHeader] = o.fs.opt.SSECustomerAlgorithm
+		opts.ExtraHeaders[sseKeyHeader] = o.fs.opt.SSECustomerKeyBase64
+		opts.ExtraHeaders[sseMd5Header] = o.fs.opt.SSECustomerKeyMD5
+	}
 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.Call(ctx, &opts)
 		return o.fs.shouldRetry(ctx, resp, err)
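On downloads the same three SSE-C headers are attached whenever a customer key is configured, mirroring what the upload and copy paths do. A standalone sketch of that pattern; the header names below are placeholders, since the real sseAlgorithmHeader/sseKeyHeader/sseMd5Header constants are defined elsewhere in the backend and are not shown in this diff:

```go
package main

import (
	"fmt"
	"net/http"
)

// Placeholder header names: the actual values live in the b2 backend's
// constants and are intentionally not reproduced here.
const (
	sseAlgorithmHeader = "X-Example-SSE-Customer-Algorithm"
	sseKeyHeader       = "X-Example-SSE-Customer-Key"
	sseMd5Header       = "X-Example-SSE-Customer-Key-Md5"
)

// addSSEHeaders mirrors the pattern in getOrHead above: the three headers are
// only attached when both the key and its MD5 checksum are configured.
func addSSEHeaders(req *http.Request, algorithm, keyB64, keyMD5 string) {
	if keyB64 == "" || keyMD5 == "" {
		return // no SSE-C configured, send a plain request
	}
	req.Header.Set(sseAlgorithmHeader, algorithm)
	req.Header.Set(sseKeyHeader, keyB64)
	req.Header.Set(sseMd5Header, keyMD5)
}

func main() {
	req, _ := http.NewRequest("GET", "https://example.com/file/bucket/object", nil)
	addSSEHeaders(req, "AES256", "base64-key", "base64-md5")
	fmt.Println(req.Header)
}
```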
@@ -2118,6 +2289,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
},
|
},
|
||||||
ContentLength: &size,
|
ContentLength: &size,
|
||||||
}
|
}
|
||||||
|
if o.fs.opt.SSECustomerKey != "" && o.fs.opt.SSECustomerKeyMD5 != "" {
|
||||||
|
opts.ExtraHeaders[sseAlgorithmHeader] = o.fs.opt.SSECustomerAlgorithm
|
||||||
|
opts.ExtraHeaders[sseKeyHeader] = o.fs.opt.SSECustomerKeyBase64
|
||||||
|
opts.ExtraHeaders[sseMd5Header] = o.fs.opt.SSECustomerKeyMD5
|
||||||
|
}
|
||||||
var response api.FileInfo
|
var response api.FileInfo
|
||||||
// Don't retry, return a retry error instead
|
// Don't retry, return a retry error instead
|
||||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||||
@@ -2192,20 +2368,27 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
|
|||||||
return info, nil, err
|
return info, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
|
||||||
|
if err != nil {
|
||||||
|
return info, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
info = fs.ChunkWriterInfo{
|
info = fs.ChunkWriterInfo{
|
||||||
ChunkSize: int64(f.opt.ChunkSize),
|
ChunkSize: up.chunkSize,
|
||||||
Concurrency: o.fs.opt.UploadConcurrency,
|
Concurrency: o.fs.opt.UploadConcurrency,
|
||||||
//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
|
//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
|
||||||
}
|
}
|
||||||
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
|
return info, up, nil
|
||||||
return info, up, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Remove an object
|
// Remove an object
|
||||||
func (o *Object) Remove(ctx context.Context) error {
|
func (o *Object) Remove(ctx context.Context) error {
|
||||||
bucket, bucketPath := o.split()
|
bucket, bucketPath := o.split()
|
||||||
if o.fs.opt.Versions {
|
if o.fs.opt.Versions {
|
||||||
return errNotWithVersions
|
t, path := api.RemoveVersion(bucketPath)
|
||||||
|
if !t.IsZero() {
|
||||||
|
return o.fs.deleteByID(ctx, o.id, path)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if o.fs.opt.VersionAt.IsSet() {
|
if o.fs.opt.VersionAt.IsSet() {
|
||||||
return errNotWithVersionAt
|
return errNotWithVersionAt
|
||||||
@@ -2228,32 +2411,36 @@ func (o *Object) ID() string {
|
|||||||
|
|
||||||
var lifecycleHelp = fs.CommandHelp{
|
var lifecycleHelp = fs.CommandHelp{
|
||||||
Name: "lifecycle",
|
Name: "lifecycle",
|
||||||
Short: "Read or set the lifecycle for a bucket",
|
Short: "Read or set the lifecycle for a bucket.",
|
||||||
Long: `This command can be used to read or set the lifecycle for a bucket.
|
Long: `This command can be used to read or set the lifecycle for a bucket.
|
||||||
|
|
||||||
Usage Examples:
|
|
||||||
|
|
||||||
To show the current lifecycle rules:
|
To show the current lifecycle rules:
|
||||||
|
|
||||||
rclone backend lifecycle b2:bucket
|
` + "```console" + `
|
||||||
|
rclone backend lifecycle b2:bucket
|
||||||
|
` + "```" + `
|
||||||
|
|
||||||
This will dump something like this showing the lifecycle rules.
|
This will dump something like this showing the lifecycle rules.
|
||||||
|
|
||||||
[
|
` + "```json" + `
|
||||||
{
|
[
|
||||||
"daysFromHidingToDeleting": 1,
|
{
|
||||||
"daysFromUploadingToHiding": null,
|
"daysFromHidingToDeleting": 1,
|
||||||
"daysFromStartingToCancelingUnfinishedLargeFiles": null,
|
"daysFromUploadingToHiding": null,
|
||||||
"fileNamePrefix": ""
|
"daysFromStartingToCancelingUnfinishedLargeFiles": null,
|
||||||
}
|
"fileNamePrefix": ""
|
||||||
]
|
}
|
||||||
|
]
|
||||||
|
` + "```" + `
|
||||||
|
|
||||||
If there are no lifecycle rules (the default) then it will just return [].
|
If there are no lifecycle rules (the default) then it will just return ` + "`[]`" + `.
|
||||||
|
|
||||||
To reset the current lifecycle rules:
|
To reset the current lifecycle rules:
|
||||||
|
|
||||||
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30
|
` + "```console" + `
|
||||||
rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1
|
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30
|
||||||
|
rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1
|
||||||
|
` + "```" + `
|
||||||
|
|
||||||
This will run and then print the new lifecycle rules as above.
|
This will run and then print the new lifecycle rules as above.
|
||||||
|
|
||||||
@@ -2265,14 +2452,17 @@ the daysFromHidingToDeleting to 1 day. You can enable hard_delete in
|
|||||||
the config also which will mean deletions won't cause versions but
|
the config also which will mean deletions won't cause versions but
|
||||||
overwrites will still cause versions to be made.
|
overwrites will still cause versions to be made.
|
||||||
|
|
||||||
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1
|
` + "```console" + `
|
||||||
|
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1
|
||||||
|
` + "```" + `
|
||||||
|
|
||||||
See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
|
See: <https://www.backblaze.com/docs/cloud-storage-lifecycle-rules>`,
|
||||||
`,
|
|
||||||
Opts: map[string]string{
|
Opts: map[string]string{
|
||||||
"daysFromHidingToDeleting": "After a file has been hidden for this many days it is deleted. 0 is off.",
|
"daysFromHidingToDeleting": `After a file has been hidden for this many days
|
||||||
"daysFromUploadingToHiding": "This many days after uploading a file is hidden",
|
it is deleted. 0 is off.`,
|
||||||
"daysFromStartingToCancelingUnfinishedLargeFiles": "Cancels any unfinished large file versions after this many days",
|
"daysFromUploadingToHiding": `This many days after uploading a file is hidden.`,
|
||||||
|
"daysFromStartingToCancelingUnfinishedLargeFiles": `Cancels any unfinished
|
||||||
|
large file versions after this many days.`,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -2355,13 +2545,14 @@ max-age, which defaults to 24 hours.
|
|||||||
Note that you can use --interactive/-i or --dry-run with this command to see what
|
Note that you can use --interactive/-i or --dry-run with this command to see what
|
||||||
it would do.
|
it would do.
|
||||||
|
|
||||||
rclone backend cleanup b2:bucket/path/to/object
|
` + "```console" + `
|
||||||
rclone backend cleanup -o max-age=7w b2:bucket/path/to/object
|
rclone backend cleanup b2:bucket/path/to/object
|
||||||
|
rclone backend cleanup -o max-age=7w b2:bucket/path/to/object
|
||||||
|
` + "```" + `
|
||||||
|
|
||||||
Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
|
Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.`,
|
||||||
`,
|
|
||||||
Opts: map[string]string{
|
Opts: map[string]string{
|
||||||
"max-age": "Max age of upload to delete",
|
"max-age": "Max age of upload to delete.",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -2384,8 +2575,9 @@ var cleanupHiddenHelp = fs.CommandHelp{
|
|||||||
Note that you can use --interactive/-i or --dry-run with this command to see what
|
Note that you can use --interactive/-i or --dry-run with this command to see what
|
||||||
it would do.
|
it would do.
|
||||||
|
|
||||||
rclone backend cleanup-hidden b2:bucket/path/to/dir
|
` + "```console" + `
|
||||||
`,
|
rclone backend cleanup-hidden b2:bucket/path/to/dir
|
||||||
|
` + "```",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
|
func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
|
||||||
@@ -2428,6 +2620,7 @@ var (
|
|||||||
_ fs.PutStreamer = &Fs{}
|
_ fs.PutStreamer = &Fs{}
|
||||||
_ fs.CleanUpper = &Fs{}
|
_ fs.CleanUpper = &Fs{}
|
||||||
_ fs.ListRer = &Fs{}
|
_ fs.ListRer = &Fs{}
|
||||||
|
_ fs.ListPer = &Fs{}
|
||||||
_ fs.PublicLinker = &Fs{}
|
_ fs.PublicLinker = &Fs{}
|
||||||
_ fs.OpenChunkWriter = &Fs{}
|
_ fs.OpenChunkWriter = &Fs{}
|
||||||
_ fs.Commander = &Fs{}
|
_ fs.Commander = &Fs{}
|
||||||
|
|||||||
@@ -144,6 +144,14 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
|
|||||||
request.ContentType = newInfo.ContentType
|
request.ContentType = newInfo.ContentType
|
||||||
request.Info = newInfo.Info
|
request.Info = newInfo.Info
|
||||||
}
|
}
|
||||||
|
if o.fs.opt.SSECustomerKey != "" && o.fs.opt.SSECustomerKeyMD5 != "" {
|
||||||
|
request.ServerSideEncryption = &api.ServerSideEncryption{
|
||||||
|
Mode: "SSE-C",
|
||||||
|
Algorithm: o.fs.opt.SSECustomerAlgorithm,
|
||||||
|
CustomerKey: o.fs.opt.SSECustomerKeyBase64,
|
||||||
|
CustomerKeyMd5: o.fs.opt.SSECustomerKeyMD5,
|
||||||
|
}
|
||||||
|
}
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "POST",
|
Method: "POST",
|
||||||
Path: "/b2_start_large_file",
|
Path: "/b2_start_large_file",
|
||||||
@@ -295,6 +303,12 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
|
|||||||
ContentLength: &sizeWithHash,
|
ContentLength: &sizeWithHash,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if up.o.fs.opt.SSECustomerKey != "" && up.o.fs.opt.SSECustomerKeyMD5 != "" {
|
||||||
|
opts.ExtraHeaders[sseAlgorithmHeader] = up.o.fs.opt.SSECustomerAlgorithm
|
||||||
|
opts.ExtraHeaders[sseKeyHeader] = up.o.fs.opt.SSECustomerKeyBase64
|
||||||
|
opts.ExtraHeaders[sseMd5Header] = up.o.fs.opt.SSECustomerKeyMD5
|
||||||
|
}
|
||||||
|
|
||||||
var response api.UploadPartResponse
|
var response api.UploadPartResponse
|
||||||
|
|
||||||
resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
|
resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
|
||||||
@@ -334,6 +348,17 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
|
|||||||
PartNumber: int64(part + 1),
|
PartNumber: int64(part + 1),
|
||||||
Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
|
Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if up.o.fs.opt.SSECustomerKey != "" && up.o.fs.opt.SSECustomerKeyMD5 != "" {
|
||||||
|
serverSideEncryptionConfig := api.ServerSideEncryption{
|
||||||
|
Mode: "SSE-C",
|
||||||
|
Algorithm: up.o.fs.opt.SSECustomerAlgorithm,
|
||||||
|
CustomerKey: up.o.fs.opt.SSECustomerKeyBase64,
|
||||||
|
CustomerKeyMd5: up.o.fs.opt.SSECustomerKeyMD5,
|
||||||
|
}
|
||||||
|
request.SourceServerSideEncryption = &serverSideEncryptionConfig
|
||||||
|
request.DestinationServerSideEncryption = &serverSideEncryptionConfig
|
||||||
|
}
|
||||||
var response api.UploadPartResponse
|
var response api.UploadPartResponse
|
||||||
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
|
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
|
||||||
retry, err := up.f.shouldRetry(ctx, resp, err)
|
retry, err := up.f.shouldRetry(ctx, resp, err)
|
||||||
|
|||||||
@@ -125,10 +125,21 @@ type FolderItems struct {
 	Offset     int     `json:"offset"`
 	Limit      int     `json:"limit"`
 	NextMarker *string `json:"next_marker,omitempty"`
-	Order      []struct {
-		By        string `json:"by"`
-		Direction string `json:"direction"`
-	} `json:"order"`
+	// There is some confusion about how this is actually
+	// returned. The []struct has worked for many years, but in
+	// https://github.com/rclone/rclone/issues/8776 box was
+	// returning it not as a list. We don't actually use
+	// this so comment it out.
+	//
+	// Order struct {
+	// 	By        string `json:"by"`
+	// 	Direction string `json:"direction"`
+	// } `json:"order"`
+	//
+	// Order []struct {
+	// 	By        string `json:"by"`
+	// 	Direction string `json:"direction"`
+	// } `json:"order"`
 }

 // Parent defined the ID of the parent directory
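The comment above records that box sometimes returns order as a single object rather than a list, so the unused field is simply dropped. For illustration only, one tolerant way to accept either shape is to capture the field as json.RawMessage and try both decodings; the change itself does not do this:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type order struct {
	By        string `json:"by"`
	Direction string `json:"direction"`
}

// folderItems is a cut-down, hypothetical variant of the struct above that
// keeps "order" as raw JSON so it can be decoded as a list or a single object.
type folderItems struct {
	Offset int             `json:"offset"`
	Limit  int             `json:"limit"`
	Order  json.RawMessage `json:"order"`
}

// orders decodes the raw field, accepting either shape the API might return.
func (f *folderItems) orders() ([]order, error) {
	if len(f.Order) == 0 {
		return nil, nil
	}
	var list []order
	if err := json.Unmarshal(f.Order, &list); err == nil {
		return list, nil
	}
	var single order
	if err := json.Unmarshal(f.Order, &single); err != nil {
		return nil, err
	}
	return []order{single}, nil
}

func main() {
	var fi folderItems
	_ = json.Unmarshal([]byte(`{"offset":0,"limit":100,"order":{"by":"name","direction":"ASC"}}`), &fi)
	o, _ := fi.orders()
	fmt.Println(o)
}
```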
|
|||||||
@@ -37,6 +37,7 @@ import (
|
|||||||
"github.com/rclone/rclone/fs/fserrors"
|
"github.com/rclone/rclone/fs/fserrors"
|
||||||
"github.com/rclone/rclone/fs/fshttp"
|
"github.com/rclone/rclone/fs/fshttp"
|
||||||
"github.com/rclone/rclone/fs/hash"
|
"github.com/rclone/rclone/fs/hash"
|
||||||
|
"github.com/rclone/rclone/fs/list"
|
||||||
"github.com/rclone/rclone/lib/dircache"
|
"github.com/rclone/rclone/lib/dircache"
|
||||||
"github.com/rclone/rclone/lib/encoder"
|
"github.com/rclone/rclone/lib/encoder"
|
||||||
"github.com/rclone/rclone/lib/env"
|
"github.com/rclone/rclone/lib/env"
|
||||||
@@ -86,13 +87,11 @@ func init() {
|
|||||||
Description: "Box",
|
Description: "Box",
|
||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||||
jsonFile, ok := m.Get("box_config_file")
|
|
||||||
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
|
|
||||||
boxAccessToken, boxAccessTokenOk := m.Get("access_token")
|
boxAccessToken, boxAccessTokenOk := m.Get("access_token")
|
||||||
var err error
|
var err error
|
||||||
// If using box config.json, use JWT auth
|
// If using box config.json, use JWT auth
|
||||||
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
|
if usesJWTAuth(m) {
|
||||||
err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
|
err = refreshJWTToken(ctx, name, m)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to configure token with jwt authentication: %w", err)
|
return nil, fmt.Errorf("failed to configure token with jwt authentication: %w", err)
|
||||||
}
|
}
|
||||||
@@ -113,6 +112,11 @@ func init() {
|
|||||||
}, {
|
}, {
|
||||||
Name: "box_config_file",
|
Name: "box_config_file",
|
||||||
Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
|
Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
|
||||||
|
}, {
|
||||||
|
Name: "config_credentials",
|
||||||
|
Help: "Box App config.json contents.\n\nLeave blank normally.",
|
||||||
|
Hide: fs.OptionHideBoth,
|
||||||
|
Sensitive: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "access_token",
|
Name: "access_token",
|
||||||
Help: "Box App Primary Access Token\n\nLeave blank normally.",
|
Help: "Box App Primary Access Token\n\nLeave blank normally.",
|
||||||
@@ -183,9 +187,17 @@ See: https://developer.box.com/guides/authentication/jwt/as-user/
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
|
func usesJWTAuth(m configmap.Mapper) bool {
|
||||||
jsonFile = env.ShellExpand(jsonFile)
|
jsonFile, okFile := m.Get("box_config_file")
|
||||||
boxConfig, err := getBoxConfig(jsonFile)
|
jsonFileCredentials, okCredentials := m.Get("config_credentials")
|
||||||
|
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
|
||||||
|
return (okFile || okCredentials) && boxSubTypeOk && (jsonFile != "" || jsonFileCredentials != "") && boxSubType != ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func refreshJWTToken(ctx context.Context, name string, m configmap.Mapper) error {
|
||||||
|
boxSubType, _ := m.Get("box_sub_type")
|
||||||
|
|
||||||
|
boxConfig, err := getBoxConfig(m)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("get box config: %w", err)
|
return fmt.Errorf("get box config: %w", err)
|
||||||
}
|
}
|
||||||
@@ -204,12 +216,19 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
 		return err
 	}

-func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
-	file, err := os.ReadFile(configFile)
-	if err != nil {
-		return nil, fmt.Errorf("box: failed to read Box config: %w", err)
+func getBoxConfig(m configmap.Mapper) (boxConfig *api.ConfigJSON, err error) {
+	configFileCredentials, _ := m.Get("config_credentials")
+	configFileBytes := []byte(configFileCredentials)
+
+	if configFileCredentials == "" {
+		configFile, _ := m.Get("box_config_file")
+		configFileBytes, err = os.ReadFile(configFile)
+		if err != nil {
+			return nil, fmt.Errorf("box: failed to read Box config: %w", err)
+		}
 	}
-	err = json.Unmarshal(file, &boxConfig)
+
+	err = json.Unmarshal(configFileBytes, &boxConfig)
 	if err != nil {
 		return nil, fmt.Errorf("box: failed to parse Box config: %w", err)
 	}
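getBoxConfig now prefers the inline config_credentials value and only falls back to reading box_config_file from disk. A minimal sketch of that precedence; the function name and error text are illustrative, not part of the rclone codebase:

```go
package main

import (
	"fmt"
	"os"
)

// readAppConfig mirrors the precedence in getBoxConfig above: inline
// credentials win, otherwise the config file path is read from disk.
func readAppConfig(inline, path string) ([]byte, error) {
	if inline != "" {
		return []byte(inline), nil
	}
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("failed to read config file: %w", err)
	}
	return data, nil
}

func main() {
	data, err := readAppConfig(`{"boxAppSettings":{}}`, "")
	fmt.Println(string(data), err)
}
```

Keeping the whole JSON in the config (rather than only on disk) is what lets headless setups use JWT auth without shipping a separate config.json file alongside rclone.conf.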
@@ -484,15 +503,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
f.srv.SetHeader("as-user", f.opt.Impersonate)
|
f.srv.SetHeader("as-user", f.opt.Impersonate)
|
||||||
}
|
}
|
||||||
|
|
||||||
jsonFile, ok := m.Get("box_config_file")
|
|
||||||
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
|
|
||||||
|
|
||||||
if ts != nil {
|
if ts != nil {
|
||||||
// If using box config.json and JWT, renewing should just refresh the token and
|
// If using box config.json and JWT, renewing should just refresh the token and
|
||||||
// should do so whether there are uploads pending or not.
|
// should do so whether there are uploads pending or not.
|
||||||
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
|
if usesJWTAuth(m) {
|
||||||
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
|
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
|
||||||
err := refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
|
err := refreshJWTToken(ctx, name, m)
|
||||||
return err
|
return err
|
||||||
})
|
})
|
||||||
f.tokenRenewer.Start()
|
f.tokenRenewer.Start()
|
||||||
@@ -705,9 +721,27 @@ OUTER:
|
|||||||
// This should return ErrDirNotFound if the directory isn't
|
// This should return ErrDirNotFound if the directory isn't
|
||||||
// found.
|
// found.
|
||||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
|
return list.WithListP(ctx, dir, f)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListP lists the objects and directories of the Fs starting
|
||||||
|
// from dir non recursively into out.
|
||||||
|
//
|
||||||
|
// dir should be "" to start from the root, and should not
|
||||||
|
// have trailing slashes.
|
||||||
|
//
|
||||||
|
// This should return ErrDirNotFound if the directory isn't
|
||||||
|
// found.
|
||||||
|
//
|
||||||
|
// It should call callback for each tranche of entries read.
|
||||||
|
// These need not be returned in any particular order. If
|
||||||
|
// callback returns an error then the listing will stop
|
||||||
|
// immediately.
|
||||||
|
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
|
||||||
|
list := list.NewHelper(callback)
|
||||||
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
|
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
var iErr error
|
var iErr error
|
||||||
_, err = f.listAll(ctx, directoryID, false, false, true, func(info *api.Item) bool {
|
_, err = f.listAll(ctx, directoryID, false, false, true, func(info *api.Item) bool {
|
||||||
@@ -717,14 +751,22 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
f.dirCache.Put(remote, info.ID)
|
f.dirCache.Put(remote, info.ID)
|
||||||
d := fs.NewDir(remote, info.ModTime()).SetID(info.ID)
|
d := fs.NewDir(remote, info.ModTime()).SetID(info.ID)
|
||||||
// FIXME more info from dir?
|
// FIXME more info from dir?
|
||||||
entries = append(entries, d)
|
err = list.Add(d)
|
||||||
|
if err != nil {
|
||||||
|
iErr = err
|
||||||
|
return true
|
||||||
|
}
|
||||||
} else if info.Type == api.ItemTypeFile {
|
} else if info.Type == api.ItemTypeFile {
|
||||||
o, err := f.newObjectWithInfo(ctx, remote, info)
|
o, err := f.newObjectWithInfo(ctx, remote, info)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
iErr = err
|
iErr = err
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
entries = append(entries, o)
|
err = list.Add(o)
|
||||||
|
if err != nil {
|
||||||
|
iErr = err
|
||||||
|
return true
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Cache some metadata for this Item to help us process events later
|
// Cache some metadata for this Item to help us process events later
|
||||||
@@ -740,12 +782,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
return false
|
return false
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
if iErr != nil {
|
if iErr != nil {
|
||||||
return nil, iErr
|
return iErr
|
||||||
}
|
}
|
||||||
return entries, nil
|
return list.Flush()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Creates from the parameters passed in a half finished Object which
|
// Creates from the parameters passed in a half finished Object which
|
||||||
@@ -1741,6 +1783,7 @@ var (
|
|||||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||||
_ fs.PublicLinker = (*Fs)(nil)
|
_ fs.PublicLinker = (*Fs)(nil)
|
||||||
_ fs.CleanUpper = (*Fs)(nil)
|
_ fs.CleanUpper = (*Fs)(nil)
|
||||||
|
_ fs.ListPer = (*Fs)(nil)
|
||||||
_ fs.Shutdowner = (*Fs)(nil)
|
_ fs.Shutdowner = (*Fs)(nil)
|
||||||
_ fs.Object = (*Object)(nil)
|
_ fs.Object = (*Object)(nil)
|
||||||
_ fs.IDer = (*Object)(nil)
|
_ fs.IDer = (*Object)(nil)
|
||||||
|
|||||||
4
backend/cache/cache.go
vendored
4
backend/cache/cache.go
vendored
@@ -1,4 +1,4 @@
|
|||||||
//go:build !plan9 && !js && !wasm
|
//go:build !plan9 && !js
|
||||||
|
|
||||||
// Package cache implements a virtual provider to cache existing remotes.
|
// Package cache implements a virtual provider to cache existing remotes.
|
||||||
package cache
|
package cache
|
||||||
@@ -684,7 +684,7 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
|
|||||||
start, end int64
|
start, end int64
|
||||||
}
|
}
|
||||||
parseChunks := func(ranges string) (crs []chunkRange, err error) {
|
parseChunks := func(ranges string) (crs []chunkRange, err error) {
|
||||||
for _, part := range strings.Split(ranges, ",") {
|
for part := range strings.SplitSeq(ranges, ",") {
|
||||||
var start, end int64 = 0, math.MaxInt64
|
var start, end int64 = 0, math.MaxInt64
|
||||||
switch ints := strings.Split(part, ":"); len(ints) {
|
switch ints := strings.Split(part, ":"); len(ints) {
|
||||||
case 1:
|
case 1:
|
||||||
|
|||||||
2
backend/cache/cache_internal_test.go
vendored
2
backend/cache/cache_internal_test.go
vendored
@@ -1,4 +1,4 @@
|
|||||||
//go:build !plan9 && !js && !wasm && !race
|
//go:build !plan9 && !js && !race
|
||||||
|
|
||||||
package cache_test
|
package cache_test
|
||||||
|
|
||||||
|
|||||||
2
backend/cache/cache_test.go
vendored
2
backend/cache/cache_test.go
vendored
@@ -1,6 +1,6 @@
|
|||||||
// Test Cache filesystem interface
|
// Test Cache filesystem interface
|
||||||
|
|
||||||
//go:build !plan9 && !js && !wasm && !race
|
//go:build !plan9 && !js && !race
|
||||||
|
|
||||||
package cache_test
|
package cache_test
|
||||||
|
|
||||||
|
|||||||
2
backend/cache/cache_unsupported.go
vendored
2
backend/cache/cache_unsupported.go
vendored
@@ -1,7 +1,7 @@
|
|||||||
// Build for cache for unsupported platforms to stop go complaining
|
// Build for cache for unsupported platforms to stop go complaining
|
||||||
// about "no buildable Go source files "
|
// about "no buildable Go source files "
|
||||||
|
|
||||||
//go:build plan9 || js || wasm
|
//go:build plan9 || js
|
||||||
|
|
||||||
// Package cache implements a virtual provider to cache existing remotes.
|
// Package cache implements a virtual provider to cache existing remotes.
|
||||||
package cache
|
package cache
|
||||||
|
|||||||
2
backend/cache/cache_upload_test.go
vendored
2
backend/cache/cache_upload_test.go
vendored
@@ -1,4 +1,4 @@
|
|||||||
//go:build !plan9 && !js && !wasm && !race
|
//go:build !plan9 && !js && !race
|
||||||
|
|
||||||
package cache_test
|
package cache_test
|
||||||
|
|
||||||
|
|||||||
2
backend/cache/directory.go
vendored
2
backend/cache/directory.go
vendored
@@ -1,4 +1,4 @@
|
|||||||
//go:build !plan9 && !js && !wasm
|
//go:build !plan9 && !js
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
|
|||||||
2
backend/cache/handle.go
vendored
2
backend/cache/handle.go
vendored
@@ -1,4 +1,4 @@
|
|||||||
//go:build !plan9 && !js && !wasm
|
//go:build !plan9 && !js
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
|
|||||||
2
backend/cache/object.go
vendored
2
backend/cache/object.go
vendored
@@ -1,4 +1,4 @@
|
|||||||
//go:build !plan9 && !js && !wasm
|
//go:build !plan9 && !js
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
|
|||||||
2
backend/cache/plex.go
vendored
2
backend/cache/plex.go
vendored
@@ -1,4 +1,4 @@
|
|||||||
//go:build !plan9 && !js && !wasm
|
//go:build !plan9 && !js
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
|
|||||||
2
backend/cache/storage_memory.go
vendored
2
backend/cache/storage_memory.go
vendored
@@ -1,4 +1,4 @@
|
|||||||
//go:build !plan9 && !js && !wasm
|
//go:build !plan9 && !js
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
|
|||||||
2
backend/cache/storage_persistent.go
vendored
2
backend/cache/storage_persistent.go
vendored
@@ -1,4 +1,4 @@
|
|||||||
//go:build !plan9 && !js && !wasm
|
//go:build !plan9 && !js
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
|
|||||||
2
backend/cache/utils_test.go
vendored
2
backend/cache/utils_test.go
vendored
@@ -1,4 +1,4 @@
|
|||||||
//go:build !plan9 && !js && !wasm
|
//go:build !plan9 && !js
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
|
|||||||
@@ -187,7 +187,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
|
|||||||
g, gCtx := errgroup.WithContext(ctx)
|
g, gCtx := errgroup.WithContext(ctx)
|
||||||
var mu sync.Mutex
|
var mu sync.Mutex
|
||||||
for _, upstream := range opt.Upstreams {
|
for _, upstream := range opt.Upstreams {
|
||||||
upstream := upstream
|
|
||||||
g.Go(func() (err error) {
|
g.Go(func() (err error) {
|
||||||
equal := strings.IndexRune(upstream, '=')
|
equal := strings.IndexRune(upstream, '=')
|
||||||
if equal < 0 {
|
if equal < 0 {
|
||||||
@@ -241,18 +240,22 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
|
|||||||
DirModTimeUpdatesOnWrite: true,
|
DirModTimeUpdatesOnWrite: true,
|
||||||
PartialUploads: true,
|
PartialUploads: true,
|
||||||
}).Fill(ctx, f)
|
}).Fill(ctx, f)
|
||||||
canMove := true
|
canMove, slowHash := true, false
|
||||||
for _, u := range f.upstreams {
|
for _, u := range f.upstreams {
|
||||||
features = features.Mask(ctx, u.f) // Mask all upstream fs
|
features = features.Mask(ctx, u.f) // Mask all upstream fs
|
||||||
if !operations.CanServerSideMove(u.f) {
|
if !operations.CanServerSideMove(u.f) {
|
||||||
canMove = false
|
canMove = false
|
||||||
}
|
}
|
||||||
|
slowHash = slowHash || u.f.Features().SlowHash
|
||||||
}
|
}
|
||||||
// We can move if all remotes support Move or Copy
|
// We can move if all remotes support Move or Copy
|
||||||
if canMove {
|
if canMove {
|
||||||
features.Move = f.Move
|
features.Move = f.Move
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// If any of upstreams are SlowHash, propagate it
|
||||||
|
features.SlowHash = slowHash
|
||||||
|
|
||||||
// Enable ListR when upstreams either support ListR or is local
|
// Enable ListR when upstreams either support ListR or is local
|
||||||
// But not when all upstreams are local
|
// But not when all upstreams are local
|
||||||
if features.ListR == nil {
|
if features.ListR == nil {
|
||||||
@@ -366,7 +369,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
|
|||||||
func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream) error) error {
|
func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream) error) error {
|
||||||
g, gCtx := errgroup.WithContext(ctx)
|
g, gCtx := errgroup.WithContext(ctx)
|
||||||
for _, u := range f.upstreams {
|
for _, u := range f.upstreams {
|
||||||
u := u
|
|
||||||
g.Go(func() (err error) {
|
g.Go(func() (err error) {
|
||||||
return fn(gCtx, u)
|
return fn(gCtx, u)
|
||||||
})
|
})
|
||||||
@@ -633,7 +635,6 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
|
|||||||
var uChans []chan time.Duration
|
var uChans []chan time.Duration
|
||||||
|
|
||||||
for _, u := range f.upstreams {
|
for _, u := range f.upstreams {
|
||||||
u := u
|
|
||||||
if do := u.f.Features().ChangeNotify; do != nil {
|
if do := u.f.Features().ChangeNotify; do != nil {
|
||||||
ch := make(chan time.Duration)
|
ch := make(chan time.Duration)
|
||||||
uChans = append(uChans, ch)
|
uChans = append(uChans, ch)
|
||||||
|
|||||||
@@ -2,10 +2,8 @@
|
|||||||
package compress
|
package compress
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"crypto/md5"
|
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
@@ -46,6 +44,7 @@ const (
|
|||||||
minCompressionRatio = 1.1
|
minCompressionRatio = 1.1
|
||||||
|
|
||||||
gzFileExt = ".gz"
|
gzFileExt = ".gz"
|
||||||
|
zstdFileExt = ".zst"
|
||||||
metaFileExt = ".json"
|
metaFileExt = ".json"
|
||||||
uncompressedFileExt = ".bin"
|
uncompressedFileExt = ".bin"
|
||||||
)
|
)
|
||||||
@@ -54,6 +53,7 @@ const (
|
|||||||
const (
|
const (
|
||||||
Uncompressed = 0
|
Uncompressed = 0
|
||||||
Gzip = 2
|
Gzip = 2
|
||||||
|
Zstd = 4
|
||||||
)
|
)
|
||||||
|
|
||||||
var nameRegexp = regexp.MustCompile(`^(.+?)\.([A-Za-z0-9-_]{11})$`)
|
var nameRegexp = regexp.MustCompile(`^(.+?)\.([A-Za-z0-9-_]{11})$`)
|
||||||
@@ -66,6 +66,10 @@ func init() {
|
|||||||
Value: "gzip",
|
Value: "gzip",
|
||||||
Help: "Standard gzip compression with fastest parameters.",
|
Help: "Standard gzip compression with fastest parameters.",
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
Value: "zstd",
|
||||||
|
Help: "Zstandard compression — fast modern algorithm offering adjustable speed-to-compression tradeoffs.",
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
// Register our remote
|
// Register our remote
|
||||||
@@ -87,17 +91,23 @@ func init() {
|
|||||||
Examples: compressionModeOptions,
|
Examples: compressionModeOptions,
|
||||||
}, {
|
}, {
|
||||||
Name: "level",
|
Name: "level",
|
||||||
Help: `GZIP compression level (-2 to 9).
|
Help: `GZIP (levels -2 to 9):
|
||||||
|
- -2 — Huffman encoding only. Only use if you know what you're doing.
|
||||||
Generally -1 (default, equivalent to 5) is recommended.
|
- -1 (default) — recommended; equivalent to level 5.
|
||||||
Levels 1 to 9 increase compression at the cost of speed. Going past 6
|
- 0 — turns off compression.
|
||||||
generally offers very little return.
|
- 1–9 — increase compression at the cost of speed. Going past 6 generally offers very little return.
|
||||||
|
|
||||||
Level -2 uses Huffman encoding only. Only use if you know what you
|
ZSTD (levels 0 to 4):
|
||||||
are doing.
|
- 0 — turns off compression entirely.
|
||||||
Level 0 turns off compression.`,
|
- 1 — fastest compression with the lowest ratio.
|
||||||
Default: sgzip.DefaultCompression,
|
- 2 (default) — good balance of speed and compression.
|
||||||
Advanced: true,
|
- 3 — better compression, but uses about 2–3x more CPU than the default.
|
||||||
|
- 4 — best possible compression ratio (highest CPU cost).
|
||||||
|
|
||||||
|
Notes:
|
||||||
|
- Choose GZIP for wide compatibility; ZSTD for better speed/ratio tradeoffs.
|
||||||
|
- Negative gzip levels: -2 = Huffman-only, -1 = default (≈ level 5).`,
|
||||||
|
Required: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "ram_cache_limit",
|
Name: "ram_cache_limit",
|
||||||
Help: `Some remotes don't allow the upload of files with unknown size.
|
Help: `Some remotes don't allow the upload of files with unknown size.
|
||||||
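The reworked level help above documents two ranges: -2 to 9 for gzip and 0 to 4 for zstd. A small sketch of validating a configured level against those documented ranges; this helper is not part of the backend:

```go
package main

import "fmt"

// Compression mode IDs as registered in the diff above.
const (
	Uncompressed = 0
	Gzip         = 2
	Zstd         = 4
)

// validateLevel checks a level against the ranges described in the help text.
func validateLevel(mode, level int) error {
	switch mode {
	case Gzip:
		if level < -2 || level > 9 {
			return fmt.Errorf("gzip level %d out of range -2..9", level)
		}
	case Zstd:
		if level < 0 || level > 4 {
			return fmt.Errorf("zstd level %d out of range 0..4", level)
		}
	}
	return nil
}

func main() {
	fmt.Println(validateLevel(Zstd, 2), validateLevel(Gzip, 11))
}
```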
@@ -112,6 +122,47 @@ this limit will be cached on disk.`,
 	})
 }

+// compressionModeHandler defines the interface for handling different compression modes
+type compressionModeHandler interface {
+	// processFileNameGetFileExtension returns the file extension for the given compression mode
+	processFileNameGetFileExtension(compressionMode int) string
+
+	// newObjectGetOriginalSize returns the original file size from the metadata
+	newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error)
+
+	// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
+	// the configured threshold
+	isCompressible(r io.Reader, compressionMode int) (bool, error)
+
+	// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
+	putCompress(
+		ctx context.Context,
+		f *Fs,
+		in io.Reader,
+		src fs.ObjectInfo,
+		options []fs.OpenOption,
+		mimeType string,
+	) (fs.Object, *ObjectMetadata, error)
+
+	// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
+	openGetReadCloser(
+		ctx context.Context,
+		o *Object,
+		offset int64,
+		limit int64,
+		cr chunkedreader.ChunkedReader,
+		closer io.Closer,
+		options ...fs.OpenOption,
+	) (rc io.ReadCloser, err error)
+
+	// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
+	putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error)
+
+	// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
+	// Warning: This function panics if cmeta is not of the expected type.
+	newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata
+}
+
 // Options defines the configuration for this backend
 type Options struct {
 	Remote string `config:"remote"`
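The new compressionModeHandler interface lets NewFs pick a handler per compression mode instead of branching on the mode throughout the backend. A reduced sketch of that dispatch, with only a file-extension method and invented handler names; the real interface has many more methods, as shown above:

```go
package main

import "fmt"

// modeHandler is a cut-down stand-in for compressionModeHandler.
type modeHandler interface {
	fileExtension() string
}

type gzipHandler struct{}

func (gzipHandler) fileExtension() string { return ".gz" }

type zstdHandler struct{}

func (zstdHandler) fileExtension() string { return ".zst" }

type uncompressedHandler struct{}

func (uncompressedHandler) fileExtension() string { return ".bin" }

// handlerForMode mirrors the switch added to NewFs in this diff:
// one handler value per registered compression mode.
func handlerForMode(mode int) modeHandler {
	switch mode {
	case 2: // Gzip
		return gzipHandler{}
	case 4: // Zstd
		return zstdHandler{}
	default:
		return uncompressedHandler{}
	}
}

func main() {
	fmt.Println(handlerForMode(4).fileExtension())
}
```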
@@ -125,12 +176,13 @@ type Options struct {
|
|||||||
// Fs represents a wrapped fs.Fs
|
// Fs represents a wrapped fs.Fs
|
||||||
type Fs struct {
|
type Fs struct {
|
||||||
fs.Fs
|
fs.Fs
|
||||||
wrapper fs.Fs
|
wrapper fs.Fs
|
||||||
name string
|
name string
|
||||||
root string
|
root string
|
||||||
opt Options
|
opt Options
|
||||||
mode int // compression mode id
|
mode int // compression mode id
|
||||||
features *fs.Features // optional features
|
features *fs.Features // optional features
|
||||||
|
modeHandler compressionModeHandler // compression mode handler
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewFs constructs an Fs from the path, container:path
|
// NewFs constructs an Fs from the path, container:path
|
||||||
@@ -167,13 +219,28 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
|||||||
return nil, fmt.Errorf("failed to make remote %s:%q to wrap: %w", wName, remotePath, err)
|
return nil, fmt.Errorf("failed to make remote %s:%q to wrap: %w", wName, remotePath, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
compressionMode := compressionModeFromName(opt.CompressionMode)
|
||||||
|
var modeHandler compressionModeHandler
|
||||||
|
|
||||||
|
switch compressionMode {
|
||||||
|
case Gzip:
|
||||||
|
modeHandler = &gzipModeHandler{}
|
||||||
|
case Zstd:
|
||||||
|
modeHandler = &zstdModeHandler{}
|
||||||
|
case Uncompressed:
|
||||||
|
modeHandler = &uncompressedModeHandler{}
|
||||||
|
default:
|
||||||
|
modeHandler = &unknownModeHandler{}
|
||||||
|
}
|
||||||
|
|
||||||
// Create the wrapping fs
|
// Create the wrapping fs
|
||||||
f := &Fs{
|
f := &Fs{
|
||||||
Fs: wrappedFs,
|
Fs: wrappedFs,
|
||||||
name: name,
|
name: name,
|
||||||
root: rpath,
|
root: rpath,
|
||||||
opt: *opt,
|
opt: *opt,
|
||||||
mode: compressionModeFromName(opt.CompressionMode),
|
mode: compressionMode,
|
||||||
|
modeHandler: modeHandler,
|
||||||
}
|
}
|
||||||
// Correct root if definitely pointing to a file
|
// Correct root if definitely pointing to a file
|
||||||
if err == fs.ErrorIsFile {
|
if err == fs.ErrorIsFile {
|
||||||
@@ -215,10 +282,13 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
|||||||
return f, err
|
return f, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// compressionModeFromName converts a compression mode name to its int representation.
|
||||||
func compressionModeFromName(name string) int {
|
func compressionModeFromName(name string) int {
|
||||||
switch name {
|
switch name {
|
||||||
case "gzip":
|
case "gzip":
|
||||||
return Gzip
|
return Gzip
|
||||||
|
case "zstd":
|
||||||
|
return Zstd
|
||||||
default:
|
default:
|
||||||
return Uncompressed
|
return Uncompressed
|
||||||
}
|
}
|
||||||
@@ -242,7 +312,7 @@ func base64ToInt64(str string) (int64, error) {
|
|||||||
|
|
||||||
// Processes a file name for a compressed file. Returns the original file name, the extension, and the size of the original file.
|
 // Processes a file name for a compressed file. Returns the original file name, the extension, and the size of the original file.
 // Returns -2 for the original size if the file is uncompressed.
-func processFileName(compressedFileName string) (origFileName string, extension string, origSize int64, err error) {
+func processFileName(compressedFileName string, modeHandler compressionModeHandler) (origFileName string, extension string, origSize int64, err error) {
 	// Separate the filename and size from the extension
 	extensionPos := strings.LastIndex(compressedFileName, ".")
 	if extensionPos == -1 {
@@ -261,7 +331,8 @@ func processFileName(compressedFileName string) (origFileName string, extension
 	if err != nil {
 		return "", "", 0, errors.New("could not decode size")
 	}
-	return match[1], gzFileExt, size, nil
+	ext := modeHandler.processFileNameGetFileExtension(compressionModeFromName(compressedFileName[extensionPos+1:]))
+	return match[1], ext, size, nil
 }
 
 // Generates the file name for a metadata file
@@ -286,11 +357,15 @@ func unwrapMetadataFile(filename string) (string, bool) {
 
 // makeDataName generates the file name for a data file with specified compression mode
 func makeDataName(remote string, size int64, mode int) (newRemote string) {
-	if mode != Uncompressed {
+	switch mode {
+	case Gzip:
 		newRemote = remote + "." + int64ToBase64(size) + gzFileExt
-	} else {
+	case Zstd:
+		newRemote = remote + "." + int64ToBase64(size) + zstdFileExt
+	default:
 		newRemote = remote + uncompressedFileExt
 	}
 
 	return newRemote
 }
 
@@ -304,7 +379,7 @@ func (f *Fs) dataName(remote string, size int64, compressed bool) (name string)
 
 // addData parses an object and adds it to the DirEntries
 func (f *Fs) addData(entries *fs.DirEntries, o fs.Object) {
-	origFileName, _, size, err := processFileName(o.Remote())
+	origFileName, _, size, err := processFileName(o.Remote(), f.modeHandler)
 	if err != nil {
 		fs.Errorf(o, "Error on parsing file name: %v", err)
 		return
@@ -427,8 +502,12 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 	if err != nil {
 		return nil, fmt.Errorf("error decoding metadata: %w", err)
 	}
+	size, err := f.modeHandler.newObjectGetOriginalSize(meta)
+	if err != nil {
+		return nil, fmt.Errorf("error reading metadata: %w", err)
+	}
 	// Create our Object
-	o, err := f.Fs.NewObject(ctx, makeDataName(remote, meta.CompressionMetadata.Size, meta.Mode))
+	o, err := f.Fs.NewObject(ctx, makeDataName(remote, size, meta.Mode))
 	if err != nil {
 		return nil, err
 	}
@@ -437,7 +516,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 
 // checkCompressAndType checks if an object is compressible and determines it's mime type
 // returns a multireader with the bytes that were read to determine mime type
-func checkCompressAndType(in io.Reader) (newReader io.Reader, compressible bool, mimeType string, err error) {
+func checkCompressAndType(in io.Reader, compressionMode int, modeHandler compressionModeHandler) (newReader io.Reader, compressible bool, mimeType string, err error) {
 	in, wrap := accounting.UnWrap(in)
 	buf := make([]byte, heuristicBytes)
 	n, err := in.Read(buf)
@@ -446,7 +525,7 @@ func checkCompressAndType(in io.Reader) (newReader io.Reader, compressible bool,
 		return nil, false, "", err
 	}
 	mime := mimetype.Detect(buf)
-	compressible, err = isCompressible(bytes.NewReader(buf))
+	compressible, err = modeHandler.isCompressible(bytes.NewReader(buf), compressionMode)
 	if err != nil {
 		return nil, false, "", err
 	}
@@ -454,26 +533,6 @@ func checkCompressAndType(in io.Reader) (newReader io.Reader, compressible bool,
 	return wrap(in), compressible, mime.String(), nil
 }
 
-// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
-// the configured threshold
-func isCompressible(r io.Reader) (bool, error) {
-	var b bytes.Buffer
-	w, err := sgzip.NewWriterLevel(&b, sgzip.DefaultCompression)
-	if err != nil {
-		return false, err
-	}
-	n, err := io.Copy(w, r)
-	if err != nil {
-		return false, err
-	}
-	err = w.Close()
-	if err != nil {
-		return false, err
-	}
-	ratio := float64(n) / float64(b.Len())
-	return ratio > minCompressionRatio, nil
-}
-
 // verifyObjectHash verifies the Objects hash
 func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.MultiHasher, ht hash.Type) error {
 	srcHash := hasher.Sums()[ht]
@@ -494,9 +553,9 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
 
 type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
 
-type compressionResult struct {
+type compressionResult[T sgzip.GzipMetadata | SzstdMetadata] struct {
 	err  error
-	meta sgzip.GzipMetadata
+	meta T
 }
 
 // replicating some of operations.Rcat functionality because we want to support remotes without streaming
@@ -537,106 +596,18 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
 		return nil, fmt.Errorf("failed to write temporary local file: %w", err)
 	}
 	if _, err = tempFile.Seek(0, 0); err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to seek temporary local file: %w", err)
 	}
 	finfo, err := tempFile.Stat()
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to stat temporary local file: %w", err)
 	}
 	return f.Fs.Put(ctx, tempFile, object.NewStaticObjectInfo(dstFileName, modTime, finfo.Size(), false, nil, f.Fs))
 }
 
 // Put a compressed version of a file. Returns a wrappable object and metadata.
 func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, mimeType string) (fs.Object, *ObjectMetadata, error) {
-	// Unwrap reader accounting
-	in, wrap := accounting.UnWrap(in)
-
-	// Add the metadata hasher
-	metaHasher := md5.New()
-	in = io.TeeReader(in, metaHasher)
-
-	// Compress the file
-	pipeReader, pipeWriter := io.Pipe()
-	results := make(chan compressionResult)
-	go func() {
-		gz, err := sgzip.NewWriterLevel(pipeWriter, f.opt.CompressionLevel)
-		if err != nil {
-			results <- compressionResult{err: err, meta: sgzip.GzipMetadata{}}
-			return
-		}
-		_, err = io.Copy(gz, in)
-		gzErr := gz.Close()
-		if gzErr != nil {
-			fs.Errorf(nil, "Failed to close compress: %v", gzErr)
-			if err == nil {
-				err = gzErr
-			}
-		}
-		closeErr := pipeWriter.Close()
-		if closeErr != nil {
-			fs.Errorf(nil, "Failed to close pipe: %v", closeErr)
-			if err == nil {
-				err = closeErr
-			}
-		}
-		results <- compressionResult{err: err, meta: gz.MetaData()}
-	}()
-	wrappedIn := wrap(bufio.NewReaderSize(pipeReader, bufferSize)) // Probably no longer needed as sgzip has it's own buffering
-
-	// Find a hash the destination supports to compute a hash of
-	// the compressed data.
-	ht := f.Fs.Hashes().GetOne()
-	var hasher *hash.MultiHasher
-	var err error
-	if ht != hash.None {
-		// unwrap the accounting again
-		wrappedIn, wrap = accounting.UnWrap(wrappedIn)
-		hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
-		if err != nil {
-			return nil, nil, err
-		}
-		// add the hasher and re-wrap the accounting
-		wrappedIn = io.TeeReader(wrappedIn, hasher)
-		wrappedIn = wrap(wrappedIn)
-	}
-
-	// Transfer the data
-	o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
-	//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx))
-	if err != nil {
-		if o != nil {
-			removeErr := o.Remove(ctx)
-			if removeErr != nil {
-				fs.Errorf(o, "Failed to remove partially transferred object: %v", err)
-			}
-		}
-		return nil, nil, err
-	}
-	// Check whether we got an error during compression
-	result := <-results
-	err = result.err
-	if err != nil {
-		if o != nil {
-			removeErr := o.Remove(ctx)
-			if removeErr != nil {
-				fs.Errorf(o, "Failed to remove partially compressed object: %v", err)
-			}
-		}
-		return nil, nil, err
-	}
-
-	// Generate metadata
-	meta := newMetadata(result.meta.Size, f.mode, result.meta, hex.EncodeToString(metaHasher.Sum(nil)), mimeType)
-
-	// Check the hashes of the compressed data if we were comparing them
-	if ht != hash.None && hasher != nil {
-		err = f.verifyObjectHash(ctx, o, hasher, ht)
-		if err != nil {
-			return nil, nil, err
-		}
-	}
-
-	return o, meta, nil
+	return f.modeHandler.putCompress(ctx, f, in, src, options, mimeType)
 }
 
 // Put an uncompressed version of a file. Returns a wrappable object and metadata.
@@ -680,7 +651,8 @@ func (f *Fs) putUncompress(ctx context.Context, in io.Reader, src fs.ObjectInfo,
 	if err != nil {
 		return nil, nil, err
 	}
-	return o, newMetadata(o.Size(), Uncompressed, sgzip.GzipMetadata{}, hex.EncodeToString(sum), mimeType), nil
+
+	return f.modeHandler.putUncompressGetNewMetadata(o, Uncompressed, hex.EncodeToString(sum), mimeType, sum)
 }
 
 // This function will write a metadata struct to a metadata Object for an src. Returns a wrappable metadata object.
@@ -751,7 +723,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 	o, err := f.NewObject(ctx, src.Remote())
 	if err == fs.ErrorObjectNotFound {
 		// Get our file compressibility
-		in, compressible, mimeType, err := checkCompressAndType(in)
+		in, compressible, mimeType, err := checkCompressAndType(in, f.mode, f.modeHandler)
 		if err != nil {
 			return nil, err
 		}
@@ -771,7 +743,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
 	}
 	found := err == nil
 
-	in, compressible, mimeType, err := checkCompressAndType(in)
+	in, compressible, mimeType, err := checkCompressAndType(in, f.mode, f.modeHandler)
 	if err != nil {
 		return nil, err
 	}
@@ -1090,11 +1062,12 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, duration fs.Duration
 
 // ObjectMetadata describes the metadata for an Object.
 type ObjectMetadata struct {
 	Mode     int    // Compression mode of the file.
 	Size     int64  // Size of the object.
 	MD5      string // MD5 hash of the file.
 	MimeType string // Mime type of the file
-	CompressionMetadata sgzip.GzipMetadata
+	CompressionMetadataGzip *sgzip.GzipMetadata // Metadata for Gzip compression
+	CompressionMetadataZstd *SzstdMetadata      // Metadata for Zstd compression
 }
 
 // Object with external metadata
@@ -1107,17 +1080,6 @@ type Object struct {
 	meta *ObjectMetadata // Metadata struct for this object (nil if not loaded)
 }
 
-// This function generates a metadata object
-func newMetadata(size int64, mode int, cmeta sgzip.GzipMetadata, md5 string, mimeType string) *ObjectMetadata {
-	meta := new(ObjectMetadata)
-	meta.Size = size
-	meta.Mode = mode
-	meta.CompressionMetadata = cmeta
-	meta.MD5 = md5
-	meta.MimeType = mimeType
-	return meta
-}
-
 // This function will read the metadata from a metadata object.
 func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata, err error) {
 	// Open our metadata object
@@ -1165,7 +1127,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return o.mo, o.mo.Update(ctx, in, src, options...)
 	}
 
-	in, compressible, mimeType, err := checkCompressAndType(in)
+	in, compressible, mimeType, err := checkCompressAndType(in, o.meta.Mode, o.f.modeHandler)
 	if err != nil {
 		return err
 	}
@@ -1278,7 +1240,7 @@ func (o *Object) String() string {
 
 // Remote returns the remote path
 func (o *Object) Remote() string {
-	origFileName, _, _, err := processFileName(o.Object.Remote())
+	origFileName, _, _, err := processFileName(o.Object.Remote(), o.f.modeHandler)
 	if err != nil {
 		fs.Errorf(o.f, "Could not get remote path for: %s", o.Object.Remote())
 		return o.Object.Remote()
@@ -1381,7 +1343,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
 		return o.Object.Open(ctx, options...)
 	}
 	// Get offset and limit from OpenOptions, pass the rest to the underlying remote
-	var openOptions = []fs.OpenOption{&fs.SeekOption{Offset: 0}}
 	var offset, limit int64 = 0, -1
 	for _, option := range options {
 		switch x := option.(type) {
@@ -1389,31 +1350,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
 			offset = x.Offset
 		case *fs.RangeOption:
 			offset, limit = x.Decode(o.Size())
-		default:
-			openOptions = append(openOptions, option)
 		}
 	}
 	// Get a chunkedreader for the wrapped object
 	chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize, chunkStreams)
-	// Get file handle
-	var file io.Reader
-	if offset != 0 {
-		file, err = sgzip.NewReaderAt(chunkedReader, &o.meta.CompressionMetadata, offset)
-	} else {
-		file, err = sgzip.NewReader(chunkedReader)
-	}
-	if err != nil {
-		return nil, err
-	}
-
-	var fileReader io.Reader
-	if limit != -1 {
-		fileReader = io.LimitReader(file, limit)
-	} else {
-		fileReader = file
-	}
-	// Return a ReadCloser
-	return ReadCloserWrapper{Reader: fileReader, Closer: chunkedReader}, nil
+	var retCloser io.Closer = chunkedReader
+	return o.f.modeHandler.openGetReadCloser(ctx, o, offset, limit, chunkedReader, retCloser, options...)
 }
 
 // ObjectInfo describes a wrapped fs.ObjectInfo for being the source
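The wrapped methods above now dispatch everything mode-specific through `f.modeHandler`. The `compressionModeHandler` interface itself is not visible in this part of the diff; the sketch below is inferred from the call sites above and from the per-mode handlers added later in this change, so treat the names and signatures as an approximation rather than the authoritative definition.

```go
// Sketch only: inferred interface for the per-mode handlers used above.
type compressionModeHandler interface {
	isCompressible(r io.Reader, compressionMode int) (bool, error)
	newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error)
	openGetReadCloser(ctx context.Context, o *Object, offset, limit int64,
		cr chunkedreader.ChunkedReader, closer io.Closer, options ...fs.OpenOption) (io.ReadCloser, error)
	processFileNameGetFileExtension(compressionMode int) string
	putCompress(ctx context.Context, f *Fs, in io.Reader, src fs.ObjectInfo,
		options []fs.OpenOption, mimeType string) (fs.Object, *ObjectMetadata, error)
	putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error)
	newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata
}
```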
@@ -48,7 +48,27 @@ func TestRemoteGzip(t *testing.T) {
 	opt.ExtraConfig = []fstests.ExtraConfigItem{
 		{Name: name, Key: "type", Value: "compress"},
 		{Name: name, Key: "remote", Value: tempdir},
-		{Name: name, Key: "compression_mode", Value: "gzip"},
+		{Name: name, Key: "mode", Value: "gzip"},
+		{Name: name, Key: "level", Value: "-1"},
+	}
+	opt.QuickTestOK = true
+	fstests.Run(t, &opt)
+}
+
+// TestRemoteZstd tests ZSTD compression
+func TestRemoteZstd(t *testing.T) {
+	if *fstest.RemoteName != "" {
+		t.Skip("Skipping as -remote set")
+	}
+	tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-zstd")
+	name := "TestCompressZstd"
+	opt := defaultOpt
+	opt.RemoteName = name + ":"
+	opt.ExtraConfig = []fstests.ExtraConfigItem{
+		{Name: name, Key: "type", Value: "compress"},
+		{Name: name, Key: "remote", Value: tempdir},
+		{Name: name, Key: "mode", Value: "zstd"},
+		{Name: name, Key: "level", Value: "2"},
 	}
 	opt.QuickTestOK = true
 	fstests.Run(t, &opt)
backend/compress/gzip_handler.go (new file, 207 lines)
@@ -0,0 +1,207 @@
package compress

import (
	"bufio"
	"bytes"
	"context"
	"crypto/md5"
	"encoding/hex"
	"errors"
	"io"

	"github.com/buengese/sgzip"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/chunkedreader"
	"github.com/rclone/rclone/fs/hash"
)

// gzipModeHandler implements compressionModeHandler for gzip
type gzipModeHandler struct{}

// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
// the configured threshold
func (g *gzipModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
	var b bytes.Buffer
	var n int64
	w, err := sgzip.NewWriterLevel(&b, sgzip.DefaultCompression)
	if err != nil {
		return false, err
	}
	n, err = io.Copy(w, r)
	if err != nil {
		return false, err
	}
	err = w.Close()
	if err != nil {
		return false, err
	}
	ratio := float64(n) / float64(b.Len())
	return ratio > minCompressionRatio, nil
}

// newObjectGetOriginalSize returns the original file size from the metadata
func (g *gzipModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
	if meta.CompressionMetadataGzip == nil {
		return 0, errors.New("missing gzip metadata")
	}
	return meta.CompressionMetadataGzip.Size, nil
}

// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
func (g *gzipModeHandler) openGetReadCloser(
	ctx context.Context,
	o *Object,
	offset int64,
	limit int64,
	cr chunkedreader.ChunkedReader,
	closer io.Closer,
	options ...fs.OpenOption,
) (rc io.ReadCloser, err error) {
	var file io.Reader

	if offset != 0 {
		file, err = sgzip.NewReaderAt(cr, o.meta.CompressionMetadataGzip, offset)
	} else {
		file, err = sgzip.NewReader(cr)
	}
	if err != nil {
		return nil, err
	}

	var fileReader io.Reader
	if limit != -1 {
		fileReader = io.LimitReader(file, limit)
	} else {
		fileReader = file
	}
	// Return a ReadCloser
	return ReadCloserWrapper{Reader: fileReader, Closer: closer}, nil
}

// processFileNameGetFileExtension returns the file extension for the given compression mode
func (g *gzipModeHandler) processFileNameGetFileExtension(compressionMode int) string {
	if compressionMode == Gzip {
		return gzFileExt
	}

	return ""
}

// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
func (g *gzipModeHandler) putCompress(
	ctx context.Context,
	f *Fs,
	in io.Reader,
	src fs.ObjectInfo,
	options []fs.OpenOption,
	mimeType string,
) (fs.Object, *ObjectMetadata, error) {
	// Unwrap reader accounting
	in, wrap := accounting.UnWrap(in)

	// Add the metadata hasher
	metaHasher := md5.New()
	in = io.TeeReader(in, metaHasher)

	// Compress the file
	pipeReader, pipeWriter := io.Pipe()

	resultsGzip := make(chan compressionResult[sgzip.GzipMetadata])
	go func() {
		gz, err := sgzip.NewWriterLevel(pipeWriter, f.opt.CompressionLevel)
		if err != nil {
			resultsGzip <- compressionResult[sgzip.GzipMetadata]{err: err, meta: sgzip.GzipMetadata{}}
			close(resultsGzip)
			return
		}
		_, err = io.Copy(gz, in)
		gzErr := gz.Close()
		if gzErr != nil && err == nil {
			err = gzErr
		}
		closeErr := pipeWriter.Close()
		if closeErr != nil && err == nil {
			err = closeErr
		}
		resultsGzip <- compressionResult[sgzip.GzipMetadata]{err: err, meta: gz.MetaData()}
		close(resultsGzip)
	}()

	wrappedIn := wrap(bufio.NewReaderSize(pipeReader, bufferSize)) // Probably no longer needed as sgzip has it's own buffering

	// Find a hash the destination supports to compute a hash of
	// the compressed data.
	ht := f.Fs.Hashes().GetOne()
	var hasher *hash.MultiHasher
	var err error
	if ht != hash.None {
		// unwrap the accounting again
		wrappedIn, wrap = accounting.UnWrap(wrappedIn)
		hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
		if err != nil {
			return nil, nil, err
		}
		// add the hasher and re-wrap the accounting
		wrappedIn = io.TeeReader(wrappedIn, hasher)
		wrappedIn = wrap(wrappedIn)
	}

	// Transfer the data
	o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
	if err != nil {
		if o != nil {
			if removeErr := o.Remove(ctx); removeErr != nil {
				fs.Errorf(o, "Failed to remove partially transferred object: %v", removeErr)
			}
		}
		return nil, nil, err
	}
	// Check whether we got an error during compression
	result := <-resultsGzip
	if result.err != nil {
		if o != nil {
			if removeErr := o.Remove(ctx); removeErr != nil {
				fs.Errorf(o, "Failed to remove partially compressed object: %v", removeErr)
			}
		}
		return nil, nil, result.err
	}

	// Generate metadata
	meta := g.newMetadata(result.meta.Size, f.mode, result.meta, hex.EncodeToString(metaHasher.Sum(nil)), mimeType)

	// Check the hashes of the compressed data if we were comparing them
	if ht != hash.None && hasher != nil {
		err = f.verifyObjectHash(ctx, o, hasher, ht)
		if err != nil {
			return nil, nil, err
		}
	}
	return o, meta, nil
}

// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
func (g *gzipModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
	return o, g.newMetadata(o.Size(), mode, sgzip.GzipMetadata{}, hex.EncodeToString(sum), mimeType), nil
}

// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
// Warning: This function panics if cmeta is not of the expected type.
func (g *gzipModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
	meta, ok := cmeta.(sgzip.GzipMetadata)
	if !ok {
		panic("invalid cmeta type: expected sgzip.GzipMetadata")
	}

	objMeta := new(ObjectMetadata)
	objMeta.Size = size
	objMeta.Mode = mode
	objMeta.CompressionMetadataGzip = &meta
	objMeta.CompressionMetadataZstd = nil
	objMeta.MD5 = md5
	objMeta.MimeType = mimeType

	return objMeta
}
backend/compress/szstd_helper.go (new file, 327 lines)
@@ -0,0 +1,327 @@
package compress

import (
	"context"
	"errors"
	"io"
	"runtime"
	"sync"

	szstd "github.com/a1ex3/zstd-seekable-format-go/pkg"
	"github.com/klauspost/compress/zstd"
)

const szstdChunkSize int = 1 << 20 // 1 MiB chunk size

// SzstdMetadata holds metadata for szstd compressed files.
type SzstdMetadata struct {
	BlockSize int      // BlockSize is the size of the blocks in the zstd file
	Size      int64    // Size is the uncompressed size of the file
	BlockData []uint32 // BlockData is the block data for the zstd file, used for seeking
}

// SzstdWriter is a writer that compresses data in szstd format.
type SzstdWriter struct {
	enc      *zstd.Encoder
	w        szstd.ConcurrentWriter
	metadata SzstdMetadata
	mu       sync.Mutex
}

// NewWriterSzstd creates a new szstd writer with the specified options.
// It initializes the szstd writer with a zstd encoder and returns a pointer to the SzstdWriter.
// The writer can be used to write data in chunks, and it will automatically handle block sizes and metadata.
func NewWriterSzstd(w io.Writer, opts ...zstd.EOption) (*SzstdWriter, error) {
	encoder, err := zstd.NewWriter(nil, opts...)
	if err != nil {
		return nil, err
	}

	sw, err := szstd.NewWriter(w, encoder)
	if err != nil {
		if err := encoder.Close(); err != nil {
			return nil, err
		}
		return nil, err
	}

	return &SzstdWriter{
		enc: encoder,
		w:   sw,
		metadata: SzstdMetadata{
			BlockSize: szstdChunkSize,
			Size:      0,
		},
	}, nil
}

// Write writes data to the szstd writer in chunks of szstdChunkSize.
// It handles the block size and metadata updates automatically.
func (w *SzstdWriter) Write(p []byte) (int, error) {
	if len(p) == 0 {
		return 0, nil
	}

	if w.metadata.BlockData == nil {
		numBlocks := (len(p) + w.metadata.BlockSize - 1) / w.metadata.BlockSize
		w.metadata.BlockData = make([]uint32, 1, numBlocks+1)
		w.metadata.BlockData[0] = 0
	}

	start := 0
	total := len(p)

	var writerFunc szstd.FrameSource = func() ([]byte, error) {
		if start >= total {
			return nil, nil
		}

		end := min(start+w.metadata.BlockSize, total)
		chunk := p[start:end]
		size := end - start

		w.mu.Lock()
		w.metadata.Size += int64(size)
		w.mu.Unlock()

		start = end
		return chunk, nil
	}

	// write sizes of compressed blocks in the callback
	err := w.w.WriteMany(context.Background(), writerFunc,
		szstd.WithWriteCallback(func(size uint32) {
			w.mu.Lock()
			lastOffset := w.metadata.BlockData[len(w.metadata.BlockData)-1]
			w.metadata.BlockData = append(w.metadata.BlockData, lastOffset+size)
			w.mu.Unlock()
		}),
	)
	if err != nil {
		return 0, err
	}

	return total, nil
}

// Close closes the SzstdWriter and its underlying encoder.
func (w *SzstdWriter) Close() error {
	if err := w.w.Close(); err != nil {
		return err
	}
	if err := w.enc.Close(); err != nil {
		return err
	}

	return nil
}

// GetMetadata returns the metadata of the szstd writer.
func (w *SzstdWriter) GetMetadata() SzstdMetadata {
	return w.metadata
}

// SzstdReaderAt is a reader that allows random access in szstd compressed data.
type SzstdReaderAt struct {
	r        szstd.Reader
	decoder  *zstd.Decoder
	metadata *SzstdMetadata
	pos      int64
	mu       sync.Mutex
}

// NewReaderAtSzstd creates a new SzstdReaderAt at the specified io.ReadSeeker.
func NewReaderAtSzstd(rs io.ReadSeeker, meta *SzstdMetadata, offset int64, opts ...zstd.DOption) (*SzstdReaderAt, error) {
	decoder, err := zstd.NewReader(nil, opts...)
	if err != nil {
		return nil, err
	}

	r, err := szstd.NewReader(rs, decoder)
	if err != nil {
		decoder.Close()
		return nil, err
	}

	sr := &SzstdReaderAt{
		r:        r,
		decoder:  decoder,
		metadata: meta,
		pos:      0,
	}

	// Set initial position to the provided offset
	if _, err := sr.Seek(offset, io.SeekStart); err != nil {
		if err := sr.Close(); err != nil {
			return nil, err
		}
		return nil, err
	}

	return sr, nil
}

// Seek sets the offset for the next Read.
func (s *SzstdReaderAt) Seek(offset int64, whence int) (int64, error) {
	s.mu.Lock()
	defer s.mu.Unlock()

	pos, err := s.r.Seek(offset, whence)
	if err == nil {
		s.pos = pos
	}
	return pos, err
}

func (s *SzstdReaderAt) Read(p []byte) (int, error) {
	s.mu.Lock()
	defer s.mu.Unlock()

	n, err := s.r.Read(p)
	if err == nil {
		s.pos += int64(n)
	}
	return n, err
}

// ReadAt reads data at the specified offset.
func (s *SzstdReaderAt) ReadAt(p []byte, off int64) (int, error) {
	if off < 0 {
		return 0, errors.New("invalid offset")
	}
	if off >= s.metadata.Size {
		return 0, io.EOF
	}

	endOff := min(off+int64(len(p)), s.metadata.Size)

	// Find all blocks covered by the range
	type blockInfo struct {
		index         int   // Block index
		offsetInBlock int64 // Offset within the block for starting reading
		bytesToRead   int64 // How many bytes to read from this block
	}

	var blocks []blockInfo
	uncompressedOffset := int64(0)
	currentOff := off

	for i := 0; i < len(s.metadata.BlockData)-1; i++ {
		blockUncompressedEnd := min(uncompressedOffset+int64(s.metadata.BlockSize), s.metadata.Size)

		if currentOff < blockUncompressedEnd && endOff > uncompressedOffset {
			offsetInBlock := max(0, currentOff-uncompressedOffset)
			bytesToRead := min(blockUncompressedEnd-uncompressedOffset-offsetInBlock, endOff-currentOff)

			blocks = append(blocks, blockInfo{
				index:         i,
				offsetInBlock: offsetInBlock,
				bytesToRead:   bytesToRead,
			})

			currentOff += bytesToRead
			if currentOff >= endOff {
				break
			}
		}
		uncompressedOffset = blockUncompressedEnd
	}

	if len(blocks) == 0 {
		return 0, io.EOF
	}

	// Parallel block decoding
	type decodeResult struct {
		index int
		data  []byte
		err   error
	}

	resultCh := make(chan decodeResult, len(blocks))
	var wg sync.WaitGroup
	sem := make(chan struct{}, runtime.NumCPU())

	for _, block := range blocks {
		wg.Add(1)
		go func(block blockInfo) {
			defer wg.Done()
			sem <- struct{}{}
			defer func() { <-sem }()

			startOffset := int64(s.metadata.BlockData[block.index])
			endOffset := int64(s.metadata.BlockData[block.index+1])
			compressedSize := endOffset - startOffset

			compressed := make([]byte, compressedSize)
			_, err := s.r.ReadAt(compressed, startOffset)
			if err != nil && err != io.EOF {
				resultCh <- decodeResult{index: block.index, err: err}
				return
			}

			decoded, err := s.decoder.DecodeAll(compressed, nil)
			if err != nil {
				resultCh <- decodeResult{index: block.index, err: err}
				return
			}

			resultCh <- decodeResult{index: block.index, data: decoded, err: nil}
		}(block)
	}

	go func() {
		wg.Wait()
		close(resultCh)
	}()

	// Collect results in block index order
	totalRead := 0
	results := make(map[int]decodeResult)
	expected := len(blocks)
	minIndex := blocks[0].index

	for res := range resultCh {
		results[res.index] = res
		for {
			if result, ok := results[minIndex]; ok {
				if result.err != nil {
					return 0, result.err
				}
				// find the corresponding blockInfo
				var blk blockInfo
				for _, b := range blocks {
					if b.index == result.index {
						blk = b
						break
					}
				}

				start := blk.offsetInBlock
				end := start + blk.bytesToRead
				copy(p[totalRead:totalRead+int(blk.bytesToRead)], result.data[start:end])
				totalRead += int(blk.bytesToRead)
				minIndex++
				if minIndex-blocks[0].index >= len(blocks) {
					break
				}
			} else {
				break
			}
		}
		if len(results) == expected && minIndex-blocks[0].index >= len(blocks) {
			break
		}
	}

	return totalRead, nil
}

// Close closes the SzstdReaderAt and underlying decoder.
func (s *SzstdReaderAt) Close() error {
	if err := s.r.Close(); err != nil {
		return err
	}
	s.decoder.Close()
	return nil
}
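For orientation, here is a hypothetical round-trip through these helpers. It is not part of the change; the test name and input data are made up, and it simply exercises the write path, then uses the captured metadata for a random-access read.

```go
package compress

import (
	"bytes"
	"testing"
)

// TestSzstdRoundTripSketch is an illustrative sketch, not a test from this PR:
// compress a buffer with SzstdWriter, then read a range back with NewReaderAtSzstd.
func TestSzstdRoundTripSketch(t *testing.T) {
	data := bytes.Repeat([]byte("rclone compress backend "), 100000) // ~2.4 MiB

	// Compress into a buffer and capture the seek metadata.
	var compressed bytes.Buffer
	w, err := NewWriterSzstd(&compressed)
	if err != nil {
		t.Fatal(err)
	}
	if _, err := w.Write(data); err != nil {
		t.Fatal(err)
	}
	if err := w.Close(); err != nil {
		t.Fatal(err)
	}
	meta := w.GetMetadata()

	// Random access: read 32 bytes starting 2 MiB into the uncompressed stream.
	r, err := NewReaderAtSzstd(bytes.NewReader(compressed.Bytes()), &meta, 0)
	if err != nil {
		t.Fatal(err)
	}
	defer func() { _ = r.Close() }()

	buf := make([]byte, 32)
	if _, err := r.ReadAt(buf, 2<<20); err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(buf, data[2<<20:2<<20+32]) {
		t.Error("unexpected bytes from ReadAt")
	}
}
```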
backend/compress/uncompressed_handler.go (new file, 65 lines)
@@ -0,0 +1,65 @@
package compress

import (
	"context"
	"fmt"
	"io"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/chunkedreader"
)

// uncompressedModeHandler implements compressionModeHandler for uncompressed files
type uncompressedModeHandler struct{}

// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
// the configured threshold
func (u *uncompressedModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
	return false, nil
}

// newObjectGetOriginalSize returns the original file size from the metadata
func (u *uncompressedModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
	return 0, nil
}

// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
func (u *uncompressedModeHandler) openGetReadCloser(
	ctx context.Context,
	o *Object,
	offset int64,
	limit int64,
	cr chunkedreader.ChunkedReader,
	closer io.Closer,
	options ...fs.OpenOption,
) (rc io.ReadCloser, err error) {
	return o.Object.Open(ctx, options...)
}

// processFileNameGetFileExtension returns the file extension for the given compression mode
func (u *uncompressedModeHandler) processFileNameGetFileExtension(compressionMode int) string {
	return ""
}

// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
func (u *uncompressedModeHandler) putCompress(
	ctx context.Context,
	f *Fs,
	in io.Reader,
	src fs.ObjectInfo,
	options []fs.OpenOption,
	mimeType string,
) (fs.Object, *ObjectMetadata, error) {
	return nil, nil, fmt.Errorf("unsupported compression mode %d", f.mode)
}

// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
func (u *uncompressedModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
	return nil, nil, fmt.Errorf("unsupported compression mode %d", Uncompressed)
}

// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
// Warning: This function panics if cmeta is not of the expected type.
func (u *uncompressedModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
	return nil
}
backend/compress/unknown_handler.go (new file, 65 lines)
@@ -0,0 +1,65 @@
package compress

import (
	"context"
	"fmt"
	"io"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/chunkedreader"
)

// unknownModeHandler implements compressionModeHandler for unknown compression types
type unknownModeHandler struct{}

// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
// the configured threshold
func (unk *unknownModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
	return false, fmt.Errorf("unknown compression mode %d", compressionMode)
}

// newObjectGetOriginalSize returns the original file size from the metadata
func (unk *unknownModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
	return 0, nil
}

// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
func (unk *unknownModeHandler) openGetReadCloser(
	ctx context.Context,
	o *Object,
	offset int64,
	limit int64,
	cr chunkedreader.ChunkedReader,
	closer io.Closer,
	options ...fs.OpenOption,
) (rc io.ReadCloser, err error) {
	return nil, fmt.Errorf("unknown compression mode %d", o.meta.Mode)
}

// processFileNameGetFileExtension returns the file extension for the given compression mode
func (unk *unknownModeHandler) processFileNameGetFileExtension(compressionMode int) string {
	return ""
}

// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
func (unk *unknownModeHandler) putCompress(
	ctx context.Context,
	f *Fs,
	in io.Reader,
	src fs.ObjectInfo,
	options []fs.OpenOption,
	mimeType string,
) (fs.Object, *ObjectMetadata, error) {
	return nil, nil, fmt.Errorf("unknown compression mode %d", f.mode)
}

// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
func (unk *unknownModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
	return nil, nil, fmt.Errorf("unknown compression mode")
}

// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
// Warning: This function panics if cmeta is not of the expected type.
func (unk *unknownModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
	return nil
}
backend/compress/zstd_handler.go (new file, 192 lines)
@@ -0,0 +1,192 @@
package compress

import (
	"bufio"
	"bytes"
	"context"
	"crypto/md5"
	"encoding/hex"
	"errors"
	"io"

	"github.com/klauspost/compress/zstd"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/chunkedreader"
	"github.com/rclone/rclone/fs/hash"
)

// zstdModeHandler implements compressionModeHandler for zstd
type zstdModeHandler struct{}

// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
// the configured threshold
func (z *zstdModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
	var b bytes.Buffer
	var n int64
	w, err := NewWriterSzstd(&b, zstd.WithEncoderLevel(zstd.SpeedDefault))
	if err != nil {
		return false, err
	}
	n, err = io.Copy(w, r)
	if err != nil {
		return false, err
	}
	err = w.Close()
	if err != nil {
		return false, err
	}
	ratio := float64(n) / float64(b.Len())
	return ratio > minCompressionRatio, nil
}

// newObjectGetOriginalSize returns the original file size from the metadata
func (z *zstdModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
	if meta.CompressionMetadataZstd == nil {
		return 0, errors.New("missing zstd metadata")
	}
	return meta.CompressionMetadataZstd.Size, nil
}

// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
func (z *zstdModeHandler) openGetReadCloser(
	ctx context.Context,
	o *Object,
	offset int64,
	limit int64,
	cr chunkedreader.ChunkedReader,
	closer io.Closer,
	options ...fs.OpenOption,
) (rc io.ReadCloser, err error) {
	var file io.Reader

	if offset != 0 {
		file, err = NewReaderAtSzstd(cr, o.meta.CompressionMetadataZstd, offset)
	} else {
		file, err = zstd.NewReader(cr)
	}
	if err != nil {
		return nil, err
	}

	var fileReader io.Reader
	if limit != -1 {
		fileReader = io.LimitReader(file, limit)
	} else {
		fileReader = file
	}
	// Return a ReadCloser
	return ReadCloserWrapper{Reader: fileReader, Closer: closer}, nil
}

// processFileNameGetFileExtension returns the file extension for the given compression mode
func (z *zstdModeHandler) processFileNameGetFileExtension(compressionMode int) string {
	if compressionMode == Zstd {
		return zstdFileExt
	}

	return ""
}

// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
func (z *zstdModeHandler) putCompress(
	ctx context.Context,
	f *Fs,
	in io.Reader,
	src fs.ObjectInfo,
	options []fs.OpenOption,
	mimeType string,
) (fs.Object, *ObjectMetadata, error) {
	// Unwrap reader accounting
	in, wrap := accounting.UnWrap(in)

	// Add the metadata hasher
	metaHasher := md5.New()
	in = io.TeeReader(in, metaHasher)

	// Compress the file
	pipeReader, pipeWriter := io.Pipe()

	resultsZstd := make(chan compressionResult[SzstdMetadata])
	go func() {
		writer, err := NewWriterSzstd(pipeWriter, zstd.WithEncoderLevel(zstd.EncoderLevel(f.opt.CompressionLevel)))
		if err != nil {
			resultsZstd <- compressionResult[SzstdMetadata]{err: err}
			close(resultsZstd)
			return
		}
		_, err = io.Copy(writer, in)
		if wErr := writer.Close(); wErr != nil && err == nil {
			err = wErr
		}
		if cErr := pipeWriter.Close(); cErr != nil && err == nil {
			err = cErr
		}

		resultsZstd <- compressionResult[SzstdMetadata]{err: err, meta: writer.GetMetadata()}
		close(resultsZstd)
	}()

	wrappedIn := wrap(bufio.NewReaderSize(pipeReader, bufferSize))

	ht := f.Fs.Hashes().GetOne()
	var hasher *hash.MultiHasher
	var err error
	if ht != hash.None {
		wrappedIn, wrap = accounting.UnWrap(wrappedIn)
		hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
		if err != nil {
			return nil, nil, err
		}
		wrappedIn = io.TeeReader(wrappedIn, hasher)
		wrappedIn = wrap(wrappedIn)
	}

	o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
	if err != nil {
		return nil, nil, err
	}

	result := <-resultsZstd
	if result.err != nil {
		if o != nil {
			_ = o.Remove(ctx)
		}
		return nil, nil, result.err
	}

	// Build metadata using uncompressed size for filename
	meta := z.newMetadata(result.meta.Size, f.mode, result.meta, hex.EncodeToString(metaHasher.Sum(nil)), mimeType)
	if ht != hash.None && hasher != nil {
		err = f.verifyObjectHash(ctx, o, hasher, ht)
		if err != nil {
			return nil, nil, err
		}
	}
	return o, meta, nil
}

// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
func (z *zstdModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
	return o, z.newMetadata(o.Size(), mode, SzstdMetadata{}, hex.EncodeToString(sum), mimeType), nil
}

// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
// Warning: This function panics if cmeta is not of the expected type.
func (z *zstdModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
	meta, ok := cmeta.(SzstdMetadata)
	if !ok {
		panic("invalid cmeta type: expected SzstdMetadata")
	}

	objMeta := new(ObjectMetadata)
	objMeta.Size = size
	objMeta.Mode = mode
	objMeta.CompressionMetadataGzip = nil
	objMeta.CompressionMetadataZstd = &meta
	objMeta.MD5 = md5
	objMeta.MimeType = mimeType

	return objMeta
}
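Nothing in this part of the diff shows how a Fs picks between these four handlers; presumably NewFs maps the configured compression mode to one of them when the remote is created. A sketch of what that selection might look like (the function name is an assumption, not code from this change):

```go
// Hypothetical sketch: map the configured compression mode to a handler.
// The real wiring lives in NewFs and may differ.
func newModeHandler(mode int) compressionModeHandler {
	switch mode {
	case Gzip:
		return &gzipModeHandler{}
	case Zstd:
		return &zstdModeHandler{}
	case Uncompressed:
		return &uncompressedModeHandler{}
	default:
		return &unknownModeHandler{}
	}
}
```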
@@ -403,14 +403,14 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
 	if ciphertext == "" {
 		return "", nil
 	}
-	pos := strings.Index(ciphertext, ".")
-	if pos == -1 {
+	before, after, ok := strings.Cut(ciphertext, ".")
+	if !ok {
 		return "", ErrorNotAnEncryptedFile
 	} // No .
-	num := ciphertext[:pos]
+	num := before
 	if num == "!" {
 		// No rotation; probably original was not valid unicode
-		return ciphertext[pos+1:], nil
+		return after, nil
 	}
 	dir, err := strconv.Atoi(num)
 	if err != nil {
@@ -425,7 +425,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
 	var result bytes.Buffer
 
 	inQuote := false
-	for _, runeValue := range ciphertext[pos+1:] {
+	for _, runeValue := range after {
 		switch {
 		case inQuote:
 			_, _ = result.WriteRune(runeValue)
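For reference, strings.Cut splits on the first occurrence of the separator and reports whether it was found, which is what lets the rewrite above drop the manual index arithmetic. A small standalone illustration with made-up input values:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Equivalent of the obfuscated-segment parsing: "<rotation>.<rest>".
	before, after, ok := strings.Cut("53.encryptedname", ".")
	fmt.Println(before, after, ok) // 53 encryptedname true

	// No separator: ok is false, mirroring the old pos == -1 check.
	_, _, ok = strings.Cut("noseparator", ".")
	fmt.Println(ok) // false
}
```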
@@ -923,28 +923,30 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
 var commandHelp = []fs.CommandHelp{
 	{
 		Name:  "encode",
-		Short: "Encode the given filename(s)",
+		Short: "Encode the given filename(s).",
 		Long: `This encodes the filenames given as arguments returning a list of
 strings of the encoded results.
 
-Usage Example:
+Usage examples:
 
-rclone backend encode crypt: file1 [file2...]
-rclone rc backend/command command=encode fs=crypt: file1 [file2...]
-`,
+` + "```console" + `
+rclone backend encode crypt: file1 [file2...]
+rclone rc backend/command command=encode fs=crypt: file1 [file2...]
+` + "```",
 	},
 	{
 		Name:  "decode",
-		Short: "Decode the given filename(s)",
+		Short: "Decode the given filename(s).",
 		Long: `This decodes the filenames given as arguments returning a list of
 strings of the decoded results. It will return an error if any of the
 inputs are invalid.
 
-Usage Example:
+Usage examples:
 
-rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
-rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
-`,
+` + "```console" + `
+rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
+rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
+` + "```",
 	},
 }
 
@@ -563,21 +563,26 @@ var commandHelp = []fs.CommandHelp{{
|
|||||||
Short: "Show metadata about the DOI.",
|
Short: "Show metadata about the DOI.",
|
||||||
Long: `This command returns a JSON object with some information about the DOI.
|
Long: `This command returns a JSON object with some information about the DOI.
|
||||||
|
|
||||||
rclone backend medatadata doi:
|
Usage example:
|
||||||
|
|
||||||
It returns a JSON object representing metadata about the DOI.
|
` + "```console" + `
|
||||||
`,
|
rclone backend metadata doi:
|
||||||
|
` + "```" + `
|
||||||
|
|
||||||
|
It returns a JSON object representing metadata about the DOI.`,
|
||||||
}, {
|
}, {
|
||||||
Name: "set",
|
Name: "set",
|
||||||
Short: "Set command for updating the config parameters.",
|
Short: "Set command for updating the config parameters.",
|
||||||
Long: `This set command can be used to update the config parameters
|
Long: `This set command can be used to update the config parameters
|
||||||
for a running doi backend.
|
for a running doi backend.
|
||||||
|
|
||||||
Usage Examples:
|
Usage examples:
|
||||||
|
|
||||||
rclone backend set doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
|
` + "```console" + `
|
||||||
rclone rc backend/command command=set fs=doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
|
rclone backend set doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
|
||||||
rclone rc backend/command command=set fs=doi: -o doi=NEW_DOI
|
rclone rc backend/command command=set fs=doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
|
||||||
|
rclone rc backend/command command=set fs=doi: -o doi=NEW_DOI
|
||||||
|
` + "```" + `
|
||||||
|
|
||||||
The option keys are named as they are in the config file.
|
The option keys are named as they are in the config file.
|
||||||
|
|
||||||
@@ -585,8 +590,7 @@ This rebuilds the connection to the doi backend when it is called with
|
|||||||
the new parameters. Only new parameters need be passed as the values
|
the new parameters. Only new parameters need be passed as the values
|
||||||
will default to those currently in use.
|
will default to those currently in use.
|
||||||
|
|
||||||
It doesn't return anything.
|
It doesn't return anything.`,
|
||||||
`,
|
|
||||||
}}
|
}}
|
||||||
|
|
||||||
// Command the backend to run a named command
|
// Command the backend to run a named command
|
||||||
@@ -598,7 +602,7 @@ It doesn't return anything.
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
 	switch name {
 	case "metadata":
 		return f.ShowMetadata(ctx)
@@ -625,7 +629,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
 }
 
 // ShowMetadata returns some metadata about the corresponding DOI
-func (f *Fs) ShowMetadata(ctx context.Context) (metadata interface{}, err error) {
+func (f *Fs) ShowMetadata(ctx context.Context) (metadata any, err error) {
 	doiURL, err := url.Parse("https://doi.org/" + f.opt.Doi)
 	if err != nil {
 		return nil, err
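The `interface{}` to `any` change above is purely cosmetic: `any` has been a predeclared alias for `interface{}` since Go 1.18, so the two signatures are identical. A minimal sketch (hypothetical names, not from the rclone codebase) showing that values flow between the two spellings freely:

```go
package main

import "fmt"

// describe accepts any value; the parameter type is an alias for interface{}.
func describe(v any) string {
	return fmt.Sprintf("%T: %v", v, v)
}

func main() {
	var old interface{} = map[string]string{"doi": "10.1000/example"}
	// No conversion is needed: any and interface{} are the same type.
	fmt.Println(describe(old))
}
```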
@@ -18,7 +18,7 @@ type headerLink struct {
 }
 
 func parseLinkHeader(header string) (links []headerLink) {
-	for _, link := range strings.Split(header, ",") {
+	for link := range strings.SplitSeq(header, ",") {
 		link = strings.TrimSpace(link)
 		parsed := parseLink(link)
 		if parsed != nil {
@@ -30,7 +30,7 @@ func parseLinkHeader(header string) (links []headerLink) {
 
 func parseLink(link string) (parsedLink *headerLink) {
 	var parts []string
-	for _, part := range strings.Split(link, ";") {
+	for part := range strings.SplitSeq(link, ";") {
 		parts = append(parts, strings.TrimSpace(part))
 	}
@@ -191,7 +191,7 @@ func driveScopes(scopesString string) (scopes []string) {
 	if scopesString == "" {
 		scopesString = defaultScope
 	}
-	for _, scope := range strings.Split(scopesString, ",") {
+	for scope := range strings.SplitSeq(scopesString, ",") {
 		scope = strings.TrimSpace(scope)
 		scopes = append(scopes, scopePrefix+scope)
 	}
@@ -1220,7 +1220,7 @@ func isLinkMimeType(mimeType string) bool {
 // into a list of unique extensions with leading "." and a list of associated MIME types
 func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, err error) {
 	for _, extensionText := range extensionsIn {
-		for _, extension := range strings.Split(extensionText, ",") {
+		for extension := range strings.SplitSeq(extensionText, ",") {
 			extension = strings.ToLower(strings.TrimSpace(extension))
 			if extension == "" {
 				continue
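The `strings.Split` to `strings.SplitSeq` rewrites above keep the per-element logic unchanged; `SplitSeq` (available since Go 1.24) returns an iterator rather than allocating the whole `[]string`, which is why the index variable disappears from the `range` clause. A small standalone sketch of the same pattern:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// SplitSeq yields each field lazily; no intermediate slice is built.
	for scope := range strings.SplitSeq("drive, drive.readonly , drive.file", ",") {
		scope = strings.TrimSpace(scope)
		fmt.Println("https://www.googleapis.com/auth/" + scope)
	}
}
```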
@@ -1965,9 +1965,28 @@ func (f *Fs) findImportFormat(ctx context.Context, mimeType string) string {
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	return list.WithListP(ctx, dir, f)
+}
+
+// ListP lists the objects and directories of the Fs starting
+// from dir non recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
+	list := list.NewHelper(callback)
+	entriesAdded := 0
 	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
 	if err != nil {
-		return nil, err
+		return err
 	}
 	directoryID = actualID(directoryID)
 
@@ -1979,25 +1998,30 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 			return true
 		}
 		if entry != nil {
-			entries = append(entries, entry)
+			err = list.Add(entry)
+			if err != nil {
+				iErr = err
+				return true
+			}
+			entriesAdded++
 		}
 		return false
 	})
 	if err != nil {
-		return nil, err
+		return err
 	}
 	if iErr != nil {
-		return nil, iErr
+		return iErr
 	}
 	// If listing the root of a teamdrive and got no entries,
 	// double check we have access
-	if f.isTeamDrive && len(entries) == 0 && f.root == "" && dir == "" {
+	if f.isTeamDrive && entriesAdded == 0 && f.root == "" && dir == "" {
 		err = f.teamDriveOK(ctx)
 		if err != nil {
-			return nil, err
+			return err
 		}
 	}
-	return entries, nil
+	return list.Flush()
 }
 
 // listREntry is a task to be executed by a litRRunner
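For context, `list.WithListP` lets a backend keep a single paged listing implementation: `List` simply delegates to `ListP`, and `list.NewHelper` batches entries and invokes the callback in tranches. A minimal sketch of the shape this hunk follows, where `fetchPage` and `toEntry` are hypothetical stand-ins for the backend's real paging and conversion code:

```go
// Sketch only: fetchPage and toEntry are hypothetical placeholders.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
	helper := list.NewHelper(callback)
	for token := ""; ; {
		page, next, err := fetchPage(ctx, dir, token) // hypothetical paged fetch
		if err != nil {
			return err
		}
		for _, item := range page {
			// Add buffers the entry and flushes a tranche to callback when full.
			if err := helper.Add(toEntry(item)); err != nil {
				return err
			}
		}
		if next == "" {
			break
		}
		token = next
	}
	// Flush emits any remaining buffered entries.
	return helper.Flush()
}
```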
@@ -3640,41 +3664,47 @@ func (f *Fs) rescue(ctx context.Context, dirID string, delete bool) (err error)
|
|||||||
|
|
||||||
var commandHelp = []fs.CommandHelp{{
|
var commandHelp = []fs.CommandHelp{{
|
||||||
Name: "get",
|
Name: "get",
|
||||||
Short: "Get command for fetching the drive config parameters",
|
Short: "Get command for fetching the drive config parameters.",
|
||||||
Long: `This is a get command which will be used to fetch the various drive config parameters
|
Long: `This is a get command which will be used to fetch the various drive config
|
||||||
|
parameters.
|
||||||
|
|
||||||
Usage Examples:
|
Usage examples:
|
||||||
|
|
||||||
rclone backend get drive: [-o service_account_file] [-o chunk_size]
|
` + "```console" + `
|
||||||
rclone rc backend/command command=get fs=drive: [-o service_account_file] [-o chunk_size]
|
rclone backend get drive: [-o service_account_file] [-o chunk_size]
|
||||||
`,
|
rclone rc backend/command command=get fs=drive: [-o service_account_file] [-o chunk_size]
|
||||||
|
` + "```",
|
||||||
Opts: map[string]string{
|
Opts: map[string]string{
|
||||||
"chunk_size": "show the current upload chunk size",
|
"chunk_size": "Show the current upload chunk size.",
|
||||||
"service_account_file": "show the current service account file",
|
"service_account_file": "Show the current service account file.",
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
Name: "set",
|
Name: "set",
|
||||||
Short: "Set command for updating the drive config parameters",
|
Short: "Set command for updating the drive config parameters.",
|
||||||
Long: `This is a set command which will be used to update the various drive config parameters
|
Long: `This is a set command which will be used to update the various drive config
|
||||||
|
parameters.
|
||||||
|
|
||||||
Usage Examples:
|
Usage examples:
|
||||||
|
|
||||||
rclone backend set drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
|
` + "```console" + `
|
||||||
rclone rc backend/command command=set fs=drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
|
rclone backend set drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
|
||||||
`,
|
rclone rc backend/command command=set fs=drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
|
||||||
|
` + "```",
|
||||||
Opts: map[string]string{
|
Opts: map[string]string{
|
||||||
"chunk_size": "update the current upload chunk size",
|
"chunk_size": "Update the current upload chunk size.",
|
||||||
"service_account_file": "update the current service account file",
|
"service_account_file": "Update the current service account file.",
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
Name: "shortcut",
|
Name: "shortcut",
|
||||||
Short: "Create shortcuts from files or directories",
|
Short: "Create shortcuts from files or directories.",
|
||||||
Long: `This command creates shortcuts from files or directories.
|
Long: `This command creates shortcuts from files or directories.
|
||||||
|
|
||||||
Usage:
|
Usage examples:
|
||||||
|
|
||||||
rclone backend shortcut drive: source_item destination_shortcut
|
` + "```console" + `
|
||||||
rclone backend shortcut drive: source_item -o target=drive2: destination_shortcut
|
rclone backend shortcut drive: source_item destination_shortcut
|
||||||
|
rclone backend shortcut drive: source_item -o target=drive2: destination_shortcut
|
||||||
|
` + "```" + `
|
||||||
|
|
||||||
In the first example this creates a shortcut from the "source_item"
|
In the first example this creates a shortcut from the "source_item"
|
||||||
which can be a file or a directory to the "destination_shortcut". The
|
which can be a file or a directory to the "destination_shortcut". The
|
||||||
@@ -3684,90 +3714,100 @@ from "drive:"
|
|||||||
In the second example this creates a shortcut from the "source_item"
|
In the second example this creates a shortcut from the "source_item"
|
||||||
relative to "drive:" to the "destination_shortcut" relative to
|
relative to "drive:" to the "destination_shortcut" relative to
|
||||||
"drive2:". This may fail with a permission error if the user
|
"drive2:". This may fail with a permission error if the user
|
||||||
authenticated with "drive2:" can't read files from "drive:".
|
authenticated with "drive2:" can't read files from "drive:".`,
|
||||||
`,
|
|
||||||
Opts: map[string]string{
|
Opts: map[string]string{
|
||||||
"target": "optional target remote for the shortcut destination",
|
"target": "Optional target remote for the shortcut destination.",
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
Name: "drives",
|
Name: "drives",
|
||||||
Short: "List the Shared Drives available to this account",
|
Short: "List the Shared Drives available to this account.",
|
||||||
Long: `This command lists the Shared Drives (Team Drives) available to this
|
Long: `This command lists the Shared Drives (Team Drives) available to this
|
||||||
account.
|
account.
|
||||||
|
|
||||||
Usage:
|
Usage example:
|
||||||
|
|
||||||
rclone backend [-o config] drives drive:
|
` + "```console" + `
|
||||||
|
rclone backend [-o config] drives drive:
|
||||||
|
` + "```" + `
|
||||||
|
|
||||||
This will return a JSON list of objects like this
|
This will return a JSON list of objects like this:
|
||||||
|
|
||||||
[
|
` + "```json" + `
|
||||||
{
|
[
|
||||||
"id": "0ABCDEF-01234567890",
|
{
|
||||||
"kind": "drive#teamDrive",
|
"id": "0ABCDEF-01234567890",
|
||||||
"name": "My Drive"
|
"kind": "drive#teamDrive",
|
||||||
},
|
"name": "My Drive"
|
||||||
{
|
},
|
||||||
"id": "0ABCDEFabcdefghijkl",
|
{
|
||||||
"kind": "drive#teamDrive",
|
"id": "0ABCDEFabcdefghijkl",
|
||||||
"name": "Test Drive"
|
"kind": "drive#teamDrive",
|
||||||
}
|
"name": "Test Drive"
|
||||||
]
|
}
|
||||||
|
]
|
||||||
|
` + "```" + `
|
||||||
|
|
||||||
With the -o config parameter it will output the list in a format
|
With the -o config parameter it will output the list in a format
|
||||||
suitable for adding to a config file to make aliases for all the
|
suitable for adding to a config file to make aliases for all the
|
||||||
drives found and a combined drive.
|
drives found and a combined drive.
|
||||||
|
|
||||||
[My Drive]
|
` + "```ini" + `
|
||||||
type = alias
|
[My Drive]
|
||||||
remote = drive,team_drive=0ABCDEF-01234567890,root_folder_id=:
|
type = alias
|
||||||
|
remote = drive,team_drive=0ABCDEF-01234567890,root_folder_id=:
|
||||||
|
|
||||||
[Test Drive]
|
[Test Drive]
|
||||||
type = alias
|
type = alias
|
||||||
remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:
|
remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:
|
||||||
|
|
||||||
[AllDrives]
|
[AllDrives]
|
||||||
type = combine
|
type = combine
|
||||||
upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
|
upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
|
||||||
|
` + "```" + `
|
||||||
|
|
||||||
Adding this to the rclone config file will cause those team drives to
|
Adding this to the rclone config file will cause those team drives to
|
||||||
be accessible with the aliases shown. Any illegal characters will be
|
be accessible with the aliases shown. Any illegal characters will be
|
||||||
substituted with "_" and duplicate names will have numbers suffixed.
|
substituted with "_" and duplicate names will have numbers suffixed.
|
||||||
It will also add a remote called AllDrives which shows all the shared
|
It will also add a remote called AllDrives which shows all the shared
|
||||||
drives combined into one directory tree.
|
drives combined into one directory tree.`,
|
||||||
`,
|
|
||||||
}, {
|
}, {
|
||||||
Name: "untrash",
|
Name: "untrash",
|
||||||
Short: "Untrash files and directories",
|
Short: "Untrash files and directories.",
|
||||||
Long: `This command untrashes all the files and directories in the directory
|
Long: `This command untrashes all the files and directories in the directory
|
||||||
passed in recursively.
|
passed in recursively.
|
||||||
|
|
||||||
Usage:
|
Usage example:
|
||||||
|
|
||||||
|
` + "```console" + `
|
||||||
|
rclone backend untrash drive:directory
|
||||||
|
rclone backend --interactive untrash drive:directory subdir
|
||||||
|
` + "```" + `
|
||||||
|
|
||||||
This takes an optional directory to trash which make this easier to
|
This takes an optional directory to trash which make this easier to
|
||||||
use via the API.
|
use via the API.
|
||||||
|
|
||||||
rclone backend untrash drive:directory
|
Use the --interactive/-i or --dry-run flag to see what would be restored before
|
||||||
rclone backend --interactive untrash drive:directory subdir
|
restoring it.
|
||||||
|
|
||||||
Use the --interactive/-i or --dry-run flag to see what would be restored before restoring it.
|
|
||||||
|
|
||||||
Result:
|
Result:
|
||||||
|
|
||||||
{
|
` + "```json" + `
|
||||||
"Untrashed": 17,
|
{
|
||||||
"Errors": 0
|
"Untrashed": 17,
|
||||||
}
|
"Errors": 0
|
||||||
`,
|
}
|
||||||
|
` + "```",
|
||||||
}, {
|
}, {
|
||||||
Name: "copyid",
|
Name: "copyid",
|
||||||
Short: "Copy files by ID",
|
Short: "Copy files by ID.",
|
||||||
Long: `This command copies files by ID
|
Long: `This command copies files by ID.
|
||||||
|
|
||||||
Usage:
|
Usage examples:
|
||||||
|
|
||||||
rclone backend copyid drive: ID path
|
` + "```console" + `
|
||||||
rclone backend copyid drive: ID1 path1 ID2 path2
|
rclone backend copyid drive: ID path
|
||||||
|
rclone backend copyid drive: ID1 path1 ID2 path2
|
||||||
|
` + "```" + `
|
||||||
|
|
||||||
It copies the drive file with ID given to the path (an rclone path which
|
It copies the drive file with ID given to the path (an rclone path which
|
||||||
will be passed internally to rclone copyto). The ID and path pairs can be
|
will be passed internally to rclone copyto). The ID and path pairs can be
|
||||||
@@ -3780,17 +3820,19 @@ component will be used as the file name.
|
|||||||
If the destination is a drive backend then server-side copying will be
|
If the destination is a drive backend then server-side copying will be
|
||||||
attempted if possible.
|
attempted if possible.
|
||||||
|
|
||||||
Use the --interactive/-i or --dry-run flag to see what would be copied before copying.
|
Use the --interactive/-i or --dry-run flag to see what would be copied before
|
||||||
`,
|
copying.`,
|
||||||
}, {
|
}, {
|
||||||
Name: "moveid",
|
Name: "moveid",
|
||||||
Short: "Move files by ID",
|
Short: "Move files by ID.",
|
||||||
Long: `This command moves files by ID
|
Long: `This command moves files by ID.
|
||||||
|
|
||||||
Usage:
|
Usage examples:
|
||||||
|
|
||||||
rclone backend moveid drive: ID path
|
` + "```console" + `
|
||||||
rclone backend moveid drive: ID1 path1 ID2 path2
|
rclone backend moveid drive: ID path
|
||||||
|
rclone backend moveid drive: ID1 path1 ID2 path2
|
||||||
|
` + "```" + `
|
||||||
|
|
||||||
It moves the drive file with ID given to the path (an rclone path which
|
It moves the drive file with ID given to the path (an rclone path which
|
||||||
will be passed internally to rclone moveto).
|
will be passed internally to rclone moveto).
|
||||||
@@ -3802,58 +3844,65 @@ component will be used as the file name.
|
|||||||
If the destination is a drive backend then server-side moving will be
|
If the destination is a drive backend then server-side moving will be
|
||||||
attempted if possible.
|
attempted if possible.
|
||||||
|
|
||||||
Use the --interactive/-i or --dry-run flag to see what would be moved beforehand.
|
Use the --interactive/-i or --dry-run flag to see what would be moved beforehand.`,
|
||||||
`,
|
|
||||||
}, {
|
}, {
|
||||||
Name: "exportformats",
|
Name: "exportformats",
|
||||||
Short: "Dump the export formats for debug purposes",
|
Short: "Dump the export formats for debug purposes.",
|
||||||
}, {
|
}, {
|
||||||
Name: "importformats",
|
Name: "importformats",
|
||||||
Short: "Dump the import formats for debug purposes",
|
Short: "Dump the import formats for debug purposes.",
|
||||||
}, {
|
}, {
|
||||||
Name: "query",
|
Name: "query",
|
||||||
Short: "List files using Google Drive query language",
|
Short: "List files using Google Drive query language.",
|
||||||
Long: `This command lists files based on a query
|
Long: `This command lists files based on a query.
|
||||||
|
|
||||||
Usage:
|
Usage example:
|
||||||
|
|
||||||
|
` + "```console" + `
|
||||||
|
rclone backend query drive: query
|
||||||
|
` + "```" + `
|
||||||
|
|
||||||
rclone backend query drive: query
|
|
||||||
|
|
||||||
The query syntax is documented at [Google Drive Search query terms and
|
The query syntax is documented at [Google Drive Search query terms and
|
||||||
operators](https://developers.google.com/drive/api/guides/ref-search-terms).
|
operators](https://developers.google.com/drive/api/guides/ref-search-terms).
|
||||||
|
|
||||||
For example:
|
For example:
|
||||||
|
|
||||||
rclone backend query drive: "'0ABc9DEFGHIJKLMNop0QRatUVW3X' in parents and name contains 'foo'"
|
` + "```console" + `
|
||||||
|
rclone backend query drive: "'0ABc9DEFGHIJKLMNop0QRatUVW3X' in parents and name contains 'foo'"
|
||||||
|
` + "```" + `
|
||||||
|
|
||||||
If the query contains literal ' or \ characters, these need to be escaped with
|
If the query contains literal ' or \ characters, these need to be escaped with
|
||||||
\ characters. "'" becomes "\'" and "\" becomes "\\\", for example to match a
|
\ characters. "'" becomes "\'" and "\" becomes "\\\", for example to match a
|
||||||
file named "foo ' \.txt":
|
file named "foo ' \.txt":
|
||||||
|
|
||||||
rclone backend query drive: "name = 'foo \' \\\.txt'"
|
` + "```console" + `
|
||||||
|
rclone backend query drive: "name = 'foo \' \\\.txt'"
|
||||||
|
` + "```" + `
|
||||||
|
|
||||||
The result is a JSON array of matches, for example:
|
The result is a JSON array of matches, for example:
|
||||||
|
|
||||||
[
|
` + "```json" + `
|
||||||
{
|
[
|
||||||
"createdTime": "2017-06-29T19:58:28.537Z",
|
{
|
||||||
"id": "0AxBe_CDEF4zkGHI4d0FjYko2QkD",
|
"createdTime": "2017-06-29T19:58:28.537Z",
|
||||||
"md5Checksum": "68518d16be0c6fbfab918be61d658032",
|
"id": "0AxBe_CDEF4zkGHI4d0FjYko2QkD",
|
||||||
"mimeType": "text/plain",
|
"md5Checksum": "68518d16be0c6fbfab918be61d658032",
|
||||||
"modifiedTime": "2024-02-02T10:40:02.874Z",
|
"mimeType": "text/plain",
|
||||||
"name": "foo ' \\.txt",
|
"modifiedTime": "2024-02-02T10:40:02.874Z",
|
||||||
"parents": [
|
"name": "foo ' \\.txt",
|
||||||
"0BxAe_BCDE4zkFGZpcWJGek0xbzC"
|
"parents": [
|
||||||
],
|
"0BxAe_BCDE4zkFGZpcWJGek0xbzC"
|
||||||
"resourceKey": "0-ABCDEFGHIXJQpIGqBJq3MC",
|
],
|
||||||
"sha1Checksum": "8f284fa768bfb4e45d076a579ab3905ab6bfa893",
|
"resourceKey": "0-ABCDEFGHIXJQpIGqBJq3MC",
|
||||||
"size": "311",
|
"sha1Checksum": "8f284fa768bfb4e45d076a579ab3905ab6bfa893",
|
||||||
"webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC"
|
"size": "311",
|
||||||
}
|
"webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC"
|
||||||
]`,
|
}
|
||||||
|
]
|
||||||
|
` + "```console",
|
||||||
}, {
|
}, {
|
||||||
Name: "rescue",
|
Name: "rescue",
|
||||||
Short: "Rescue or delete any orphaned files",
|
Short: "Rescue or delete any orphaned files.",
|
||||||
Long: `This command rescues or deletes any orphaned files or directories.
|
Long: `This command rescues or deletes any orphaned files or directories.
|
||||||
|
|
||||||
Sometimes files can get orphaned in Google Drive. This means that they
|
Sometimes files can get orphaned in Google Drive. This means that they
|
||||||
@@ -3862,26 +3911,31 @@ are no longer in any folder in Google Drive.
|
|||||||
This command finds those files and either rescues them to a directory
|
This command finds those files and either rescues them to a directory
|
||||||
you specify or deletes them.
|
you specify or deletes them.
|
||||||
|
|
||||||
Usage:
|
|
||||||
|
|
||||||
This can be used in 3 ways.
|
This can be used in 3 ways.
|
||||||
|
|
||||||
First, list all orphaned files
|
First, list all orphaned files:
|
||||||
|
|
||||||
rclone backend rescue drive:
|
` + "```console" + `
|
||||||
|
rclone backend rescue drive:
|
||||||
|
` + "```" + `
|
||||||
|
|
||||||
Second rescue all orphaned files to the directory indicated
|
Second rescue all orphaned files to the directory indicated:
|
||||||
|
|
||||||
rclone backend rescue drive: "relative/path/to/rescue/directory"
|
` + "```console" + `
|
||||||
|
rclone backend rescue drive: "relative/path/to/rescue/directory"
|
||||||
|
` + "```" + `
|
||||||
|
|
||||||
e.g. To rescue all orphans to a directory called "Orphans" in the top level
|
E.g. to rescue all orphans to a directory called "Orphans" in the top level:
|
||||||
|
|
||||||
rclone backend rescue drive: Orphans
|
` + "```console" + `
|
||||||
|
rclone backend rescue drive: Orphans
|
||||||
|
` + "```" + `
|
||||||
|
|
||||||
Third delete all orphaned files to the trash
|
Third delete all orphaned files to the trash:
|
||||||
|
|
||||||
rclone backend rescue drive: -o delete
|
` + "```console" + `
|
||||||
`,
|
rclone backend rescue drive: -o delete
|
||||||
|
` + "```",
|
||||||
}}
|
}}
|
||||||
|
|
||||||
// Command the backend to run a named command
|
// Command the backend to run a named command
|
||||||
@@ -4617,6 +4671,7 @@ var (
 	_ fs.PutUncheckeder  = (*Fs)(nil)
 	_ fs.PublicLinker    = (*Fs)(nil)
 	_ fs.ListRer         = (*Fs)(nil)
+	_ fs.ListPer         = (*Fs)(nil)
 	_ fs.MergeDirser     = (*Fs)(nil)
 	_ fs.DirSetModTimer  = (*Fs)(nil)
 	_ fs.MkdirMetadataer = (*Fs)(nil)
@@ -386,7 +386,6 @@ func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err e
 	g.SetLimit(o.fs.ci.Checkers)
 	var mu sync.Mutex // protect the info.Permissions from concurrent writes
 	for _, permissionID := range info.PermissionIds {
-		permissionID := permissionID
 		g.Go(func() error {
 			// must fetch the team drive ones individually to check the inherited flag
 			perm, inherited, err := o.fs.getPermission(gCtx, actualID(info.Id), permissionID, !o.fs.isTeamDrive)
@@ -520,7 +519,6 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
 	}
 	// merge metadata into request and user metadata
 	for k, v := range meta {
-		k, v := k, v
 		// parse a boolean from v and write into out
 		parseBool := func(out *bool) error {
 			b, err := strconv.ParseBool(v)
@@ -47,6 +47,7 @@ import (
 	"github.com/rclone/rclone/fs/config/obscure"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/list"
 	"github.com/rclone/rclone/fs/operations"
 	"github.com/rclone/rclone/lib/batcher"
 	"github.com/rclone/rclone/lib/encoder"
@@ -834,7 +835,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
|||||||
|
|
||||||
// listSharedFolders lists all available shared folders mounted and not mounted
|
// listSharedFolders lists all available shared folders mounted and not mounted
|
||||||
// we'll need the id later so we have to return them in original format
|
// we'll need the id later so we have to return them in original format
|
||||||
func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err error) {
|
func (f *Fs) listSharedFolders(ctx context.Context, callback func(fs.DirEntry) error) (err error) {
|
||||||
started := false
|
started := false
|
||||||
var res *sharing.ListFoldersResult
|
var res *sharing.ListFoldersResult
|
||||||
for {
|
for {
|
||||||
@@ -847,7 +848,7 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
|
|||||||
return shouldRetry(ctx, err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
started = true
|
started = true
|
||||||
} else {
|
} else {
|
||||||
@@ -859,15 +860,15 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
|
|||||||
return shouldRetry(ctx, err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("list continue: %w", err)
|
return fmt.Errorf("list continue: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for _, entry := range res.Entries {
|
for _, entry := range res.Entries {
|
||||||
leaf := f.opt.Enc.ToStandardName(entry.Name)
|
leaf := f.opt.Enc.ToStandardName(entry.Name)
|
||||||
d := fs.NewDir(leaf, time.Time{}).SetID(entry.SharedFolderId)
|
d := fs.NewDir(leaf, time.Time{}).SetID(entry.SharedFolderId)
|
||||||
entries = append(entries, d)
|
err = callback(d)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if res.Cursor == "" {
|
if res.Cursor == "" {
|
||||||
@@ -875,21 +876,25 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
 		}
 	}
 
-	return entries, nil
+	return nil
 }
 
 // findSharedFolder find the id for a given shared folder name
 // somewhat annoyingly there is no endpoint to query a shared folder by it's name
 // so our only option is to iterate over all shared folders
 func (f *Fs) findSharedFolder(ctx context.Context, name string) (id string, err error) {
-	entries, err := f.listSharedFolders(ctx)
-	if err != nil {
-		return "", err
-	}
-	for _, entry := range entries {
+	errFoundFile := errors.New("found file")
+	err = f.listSharedFolders(ctx, func(entry fs.DirEntry) error {
 		if entry.(*fs.Dir).Remote() == name {
-			return entry.(*fs.Dir).ID(), nil
+			id = entry.(*fs.Dir).ID()
+			return errFoundFile
 		}
+		return nil
+	})
+	if errors.Is(err, errFoundFile) {
+		return id, nil
+	} else if err != nil {
+		return "", err
 	}
 	return "", fs.ErrorDirNotFound
 }
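The rewrite above trades a slice-returning lister for a callback-driven one, so the linear search needs a way to stop early: it returns a private sentinel error from the callback and translates it back into success at the call site. A generic, self-contained sketch of that pattern (names are illustrative, not from the codebase):

```go
package main

import (
	"errors"
	"fmt"
)

var errStop = errors.New("stop iteration")

// forEach stands in for a callback-driven lister such as listSharedFolders.
func forEach(items []string, fn func(string) error) error {
	for _, it := range items {
		if err := fn(it); err != nil {
			return err
		}
	}
	return nil
}

func findFirst(items []string, want string) (string, error) {
	var found string
	err := forEach(items, func(it string) error {
		if it == want {
			found = it
			return errStop // aborts the walk without signalling a real failure
		}
		return nil
	})
	if errors.Is(err, errStop) {
		return found, nil
	} else if err != nil {
		return "", err
	}
	return "", errors.New("not found")
}

func main() {
	fmt.Println(findFirst([]string{"Docs", "Photos"}, "Photos"))
}
```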
@@ -908,7 +913,7 @@ func (f *Fs) mountSharedFolder(ctx context.Context, id string) error {
|
|||||||
|
|
||||||
// listReceivedFiles lists shared the user as access to (note this means individual
|
// listReceivedFiles lists shared the user as access to (note this means individual
|
||||||
// files not files contained in shared folders)
|
// files not files contained in shared folders)
|
||||||
func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err error) {
|
func (f *Fs) listReceivedFiles(ctx context.Context, callback func(fs.DirEntry) error) (err error) {
|
||||||
started := false
|
started := false
|
||||||
var res *sharing.ListFilesResult
|
var res *sharing.ListFilesResult
|
||||||
for {
|
for {
|
||||||
@@ -921,7 +926,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
|
|||||||
return shouldRetry(ctx, err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
started = true
|
started = true
|
||||||
} else {
|
} else {
|
||||||
@@ -933,7 +938,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
|
|||||||
return shouldRetry(ctx, err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("list continue: %w", err)
|
return fmt.Errorf("list continue: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for _, entry := range res.Entries {
|
for _, entry := range res.Entries {
|
||||||
@@ -946,26 +951,33 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
|
|||||||
modTime: *entry.TimeInvited,
|
modTime: *entry.TimeInvited,
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
|
}
|
||||||
|
err = callback(o)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
entries = append(entries, o)
|
|
||||||
}
|
}
|
||||||
if res.Cursor == "" {
|
if res.Cursor == "" {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return entries, nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Fs) findSharedFile(ctx context.Context, name string) (o *Object, err error) {
|
func (f *Fs) findSharedFile(ctx context.Context, name string) (o *Object, err error) {
|
||||||
files, err := f.listReceivedFiles(ctx)
|
errFoundFile := errors.New("found file")
|
||||||
if err != nil {
|
err = f.listReceivedFiles(ctx, func(entry fs.DirEntry) error {
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
for _, entry := range files {
|
|
||||||
if entry.(*Object).remote == name {
|
if entry.(*Object).remote == name {
|
||||||
return entry.(*Object), nil
|
o = entry.(*Object)
|
||||||
|
return errFoundFile
|
||||||
}
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if errors.Is(err, errFoundFile) {
|
||||||
|
return o, nil
|
||||||
|
} else if err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
return nil, fs.ErrorObjectNotFound
|
return nil, fs.ErrorObjectNotFound
|
||||||
}
|
}
|
||||||
@@ -980,11 +992,37 @@ func (f *Fs) findSharedFile(ctx context.Context, name string) (o *Object, err er
|
|||||||
// This should return ErrDirNotFound if the directory isn't
|
// This should return ErrDirNotFound if the directory isn't
|
||||||
// found.
|
// found.
|
||||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
|
return list.WithListP(ctx, dir, f)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListP lists the objects and directories of the Fs starting
|
||||||
|
// from dir non recursively into out.
|
||||||
|
//
|
||||||
|
// dir should be "" to start from the root, and should not
|
||||||
|
// have trailing slashes.
|
||||||
|
//
|
||||||
|
// This should return ErrDirNotFound if the directory isn't
|
||||||
|
// found.
|
||||||
|
//
|
||||||
|
// It should call callback for each tranche of entries read.
|
||||||
|
// These need not be returned in any particular order. If
|
||||||
|
// callback returns an error then the listing will stop
|
||||||
|
// immediately.
|
||||||
|
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
|
||||||
|
list := list.NewHelper(callback)
|
||||||
if f.opt.SharedFiles {
|
if f.opt.SharedFiles {
|
||||||
return f.listReceivedFiles(ctx)
|
err := f.listReceivedFiles(ctx, list.Add)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return list.Flush()
|
||||||
}
|
}
|
||||||
if f.opt.SharedFolders {
|
if f.opt.SharedFolders {
|
||||||
return f.listSharedFolders(ctx)
|
err := f.listSharedFolders(ctx, list.Add)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return list.Flush()
|
||||||
}
|
}
|
||||||
|
|
||||||
root := f.slashRoot
|
root := f.slashRoot
|
||||||
@@ -1014,7 +1052,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
err = fs.ErrorDirNotFound
|
err = fs.ErrorDirNotFound
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
started = true
|
started = true
|
||||||
} else {
|
} else {
|
||||||
@@ -1026,7 +1064,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
return shouldRetry(ctx, err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("list continue: %w", err)
|
return fmt.Errorf("list continue: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for _, entry := range res.Entries {
|
for _, entry := range res.Entries {
|
||||||
@@ -1051,14 +1089,20 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
remote := path.Join(dir, leaf)
|
remote := path.Join(dir, leaf)
|
||||||
if folderInfo != nil {
|
if folderInfo != nil {
|
||||||
d := fs.NewDir(remote, time.Time{}).SetID(folderInfo.Id)
|
d := fs.NewDir(remote, time.Time{}).SetID(folderInfo.Id)
|
||||||
entries = append(entries, d)
|
err = list.Add(d)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
} else if fileInfo != nil {
|
} else if fileInfo != nil {
|
||||||
o, err := f.newObjectWithInfo(ctx, remote, fileInfo)
|
o, err := f.newObjectWithInfo(ctx, remote, fileInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
if o.(*Object).exportType.listable() {
|
if o.(*Object).exportType.listable() {
|
||||||
entries = append(entries, o)
|
err = list.Add(o)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1066,7 +1110,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return entries, nil
|
return list.Flush()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Put the object
|
// Put the object
|
||||||
@@ -1286,6 +1330,16 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	var result *files.RelocationResult
 	err = f.pacer.Call(func() (bool, error) {
 		result, err = f.srv.MoveV2(&arg)
+		switch e := err.(type) {
+		case files.MoveV2APIError:
+			// There seems to be a bit of eventual consistency here which causes this to
+			// fail on just created objects
+			// See: https://github.com/rclone/rclone/issues/8881
+			if e.EndpointError != nil && e.EndpointError.FromLookup != nil && e.EndpointError.FromLookup.Tag == files.LookupErrorNotFound {
+				fs.Debugf(srcObj, "Retrying move on %v error", err)
+				return true, err
+			}
+		}
 		return shouldRetry(ctx, err)
 	})
 	if err != nil {
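The new `switch` above whitelists one specific Dropbox failure (a not-found lookup shortly after a file was created) as retryable inside the pacer, while every other error still goes through the generic `shouldRetry`. A simplified sketch of that kind of targeted classification, with a hypothetical error type standing in for the SDK's `files.MoveV2APIError`:

```go
package main

import (
	"errors"
	"fmt"
)

// moveAPIError is a hypothetical stand-in for the SDK's typed move error.
type moveAPIError struct{ Tag string }

func (e moveAPIError) Error() string { return "move failed: " + e.Tag }

// retryableMove reports whether the error is the transient not-found case
// that a later attempt is expected to resolve.
func retryableMove(err error) bool {
	var e moveAPIError
	return errors.As(err, &e) && e.Tag == "from_lookup/not_found"
}

func main() {
	err := moveAPIError{Tag: "from_lookup/not_found"}
	fmt.Println(retryableMove(err)) // true: retry instead of failing immediately
}
```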
@@ -2087,6 +2141,7 @@ var (
 	_ fs.Mover        = (*Fs)(nil)
 	_ fs.PublicLinker = (*Fs)(nil)
 	_ fs.DirMover     = (*Fs)(nil)
+	_ fs.ListPer      = (*Fs)(nil)
 	_ fs.Abouter      = (*Fs)(nil)
 	_ fs.Shutdowner   = &Fs{}
 	_ fs.Object       = (*Object)(nil)
@@ -8,7 +8,7 @@ type CreateFolderResponse struct {
 	Status int    `json:"status"`
 	Msg    string `json:"msg"`
 	Result struct {
-		FldID interface{} `json:"fld_id"`
+		FldID any `json:"fld_id"`
 	} `json:"result"`
 }
 
@@ -14,7 +14,7 @@ import (
 )
 
 // errFileNotFound represent file not found error
-var errFileNotFound error = errors.New("file not found")
+var errFileNotFound = errors.New("file not found")
 
 // getFileCode retrieves the file code for a given file path
 func (f *Fs) getFileCode(ctx context.Context, filePath string) (string, error) {
@@ -283,6 +283,7 @@ type Fs struct {
 	user     string
 	pass     string
 	dialAddr string
+	tlsConf  *tls.Config // default TLS client config
 	poolMu   sync.Mutex
 	pool     []*ftp.ServerConn
 	drain    *time.Timer // used to drain the pool when we stop using the connections
@@ -408,9 +409,14 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
 func (f *Fs) tlsConfig() *tls.Config {
 	var tlsConfig *tls.Config
 	if f.opt.TLS || f.opt.ExplicitTLS {
-		tlsConfig = &tls.Config{
-			ServerName:         f.opt.Host,
-			InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
+		if f.tlsConf != nil {
+			tlsConfig = f.tlsConf.Clone()
+		} else {
+			tlsConfig = new(tls.Config)
+		}
+		tlsConfig.ServerName = f.opt.Host
+		if f.opt.SkipVerifyTLSCert {
+			tlsConfig.InsecureSkipVerify = true
 		}
 		if f.opt.TLSCacheSize > 0 {
 			tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(f.opt.TLSCacheSize)
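Cloning the transport-level `*tls.Config` rather than building a fresh one means the FTP connection inherits whatever was configured globally (custom root CAs, minimum TLS version, and so on) before the backend applies its own overrides. A minimal sketch of that clone-then-override approach, assuming `base` carries the shared defaults:

```go
package main

import (
	"crypto/tls"
	"fmt"
)

// clientTLSConfig copies the shared defaults and then applies per-backend
// settings, mirroring the logic in tlsConfig above.
func clientTLSConfig(base *tls.Config, host string, insecure bool) *tls.Config {
	var cfg *tls.Config
	if base != nil {
		cfg = base.Clone() // keep root CAs, MinVersion, etc.
	} else {
		cfg = new(tls.Config)
	}
	cfg.ServerName = host
	if insecure {
		cfg.InsecureSkipVerify = true
	}
	return cfg
}

func main() {
	base := &tls.Config{MinVersion: tls.VersionTLS12}
	cfg := clientTLSConfig(base, "ftp.example.com", false)
	fmt.Println(cfg.ServerName, cfg.MinVersion == tls.VersionTLS12)
}
```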
@@ -450,9 +456,7 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
 		}
 	}()
 	baseDialer := fshttp.NewDialer(ctx)
-	if f.opt.SocksProxy != "" {
-		conn, err = proxy.SOCKS5Dial(network, address, f.opt.SocksProxy, baseDialer)
-	} else if f.proxyURL != nil {
+	if f.opt.SocksProxy != "" || f.proxyURL != nil {
 		// We need to make the onward connection to f.opt.Host. However the FTP
 		// library sets the host to the proxy IP after using EPSV or PASV so we need
 		// to correct that here.
@@ -462,7 +466,11 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
 			return nil, err
 		}
 		dialAddress := net.JoinHostPort(f.opt.Host, dialPort)
-		conn, err = proxy.HTTPConnectDial(network, dialAddress, f.proxyURL, baseDialer)
+		if f.opt.SocksProxy != "" {
+			conn, err = proxy.SOCKS5Dial(network, dialAddress, f.opt.SocksProxy, baseDialer)
+		} else {
+			conn, err = proxy.HTTPConnectDial(network, dialAddress, f.proxyURL, baseDialer)
+		}
 	} else {
 		conn, err = baseDialer.Dial(network, address)
 	}
@@ -671,6 +679,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
 		dialAddr: dialAddr,
 		tokens:   pacer.NewTokenDispenser(opt.Concurrency),
 		pacer:    fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		tlsConf:  fshttp.NewTransport(ctx).TLSClientConfig,
 	}
 	f.features = (&fs.Features{
 		CanHaveEmptyDirectories: true,
@@ -1283,7 +1292,7 @@ func (f *ftpReadCloser) Close() error {
 	// See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
 	if errX := textprotoError(err); errX != nil {
 		switch errX.Code {
-		case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend:
+		case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend, ftp.StatusRequestedFileActionOK:
 			err = nil
 		}
 	}
@@ -252,6 +252,9 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
 		}, {
 			Value: "us-east4",
 			Help:  "Northern Virginia",
+		}, {
+			Value: "us-east5",
+			Help:  "Ohio",
 		}, {
 			Value: "us-west1",
 			Help:  "Oregon",
@@ -343,9 +346,26 @@ can't check the size and hash but the file contents will be decompressed.
|
|||||||
Advanced: true,
|
Advanced: true,
|
||||||
Default: false,
|
Default: false,
|
||||||
}, {
|
}, {
|
||||||
Name: "endpoint",
|
Name: "endpoint",
|
||||||
Help: "Endpoint for the service.\n\nLeave blank normally.",
|
Help: `Custom endpoint for the storage API. Leave blank to use the provider default.
|
||||||
|
|
||||||
|
When using a custom endpoint that includes a subpath (e.g. example.org/custom/endpoint),
|
||||||
|
the subpath will be ignored during upload operations due to a limitation in the
|
||||||
|
underlying Google API Go client library.
|
||||||
|
Download and listing operations will work correctly with the full endpoint path.
|
||||||
|
If you require subpath support for uploads, avoid using subpaths in your custom
|
||||||
|
endpoint configuration.`,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
|
Examples: []fs.OptionExample{{
|
||||||
|
Value: "storage.example.org",
|
||||||
|
Help: "Specify a custom endpoint",
|
||||||
|
}, {
|
||||||
|
Value: "storage.example.org:4443",
|
||||||
|
Help: "Specifying a custom endpoint with port",
|
||||||
|
}, {
|
||||||
|
Value: "storage.example.org:4443/gcs/api",
|
||||||
|
Help: "Specifying a subpath, see the note, uploads won't use the custom path!",
|
||||||
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: config.ConfigEncoding,
|
Name: config.ConfigEncoding,
|
||||||
Help: config.ConfigEncodingHelp,
|
Help: config.ConfigEncodingHelp,
|
||||||
@@ -760,7 +780,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *storage.
|
|||||||
}
|
}
|
||||||
|
|
||||||
// listDir lists a single directory
|
// listDir lists a single directory
|
||||||
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
|
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
|
||||||
// List the objects
|
// List the objects
|
||||||
err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error {
|
err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error {
|
||||||
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
|
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
|
||||||
@@ -768,16 +788,16 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if entry != nil {
|
if entry != nil {
|
||||||
entries = append(entries, entry)
|
return callback(entry)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
// bucket must be present if listing succeeded
|
// bucket must be present if listing succeeded
|
||||||
f.cache.MarkOK(bucket)
|
f.cache.MarkOK(bucket)
|
||||||
return entries, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// listBuckets lists the buckets
|
// listBuckets lists the buckets
|
||||||
@@ -820,14 +840,46 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
|
|||||||
// This should return ErrDirNotFound if the directory isn't
|
// This should return ErrDirNotFound if the directory isn't
|
||||||
// found.
|
// found.
|
||||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
|
return list.WithListP(ctx, dir, f)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListP lists the objects and directories of the Fs starting
|
||||||
|
// from dir non recursively into out.
|
||||||
|
//
|
||||||
|
// dir should be "" to start from the root, and should not
|
||||||
|
// have trailing slashes.
|
||||||
|
//
|
||||||
|
// This should return ErrDirNotFound if the directory isn't
|
||||||
|
// found.
|
||||||
|
//
|
||||||
|
// It should call callback for each tranche of entries read.
|
||||||
|
// These need not be returned in any particular order. If
|
||||||
|
// callback returns an error then the listing will stop
|
||||||
|
// immediately.
|
||||||
|
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
|
||||||
|
list := list.NewHelper(callback)
|
||||||
bucket, directory := f.split(dir)
|
bucket, directory := f.split(dir)
|
||||||
if bucket == "" {
|
if bucket == "" {
|
||||||
if directory != "" {
|
if directory != "" {
|
||||||
return nil, fs.ErrorListBucketRequired
|
return fs.ErrorListBucketRequired
|
||||||
|
}
|
||||||
|
entries, err := f.listBuckets(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, entry := range entries {
|
||||||
|
err = list.Add(entry)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
return f.listBuckets(ctx)
|
|
||||||
}
|
}
|
||||||
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
|
return list.Flush()
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListR lists the objects and directories of the Fs starting
|
// ListR lists the objects and directories of the Fs starting
|
||||||
@@ -1099,7 +1151,15 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		remote: remote,
 	}
 
-	rewriteRequest := f.svc.Objects.Rewrite(srcBucket, srcPath, dstBucket, dstPath, nil)
+	// Set the storage class for the destination object if configured
+	var dstObject *storage.Object
+	if f.opt.StorageClass != "" {
+		dstObject = &storage.Object{
+			StorageClass: f.opt.StorageClass,
+		}
+	}
+
+	rewriteRequest := f.svc.Objects.Rewrite(srcBucket, srcPath, dstBucket, dstPath, dstObject)
 	if !f.opt.BucketPolicyOnly {
 		rewriteRequest.DestinationPredefinedAcl(f.opt.ObjectACL)
 	}
@@ -1387,6 +1447,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		ContentType: fs.MimeType(ctx, src),
 		Metadata:    metadataFromModTime(modTime),
 	}
+	// Set the storage class from config if configured
+	if o.fs.opt.StorageClass != "" {
+		object.StorageClass = o.fs.opt.StorageClass
+	}
 	// Apply upload options
 	for _, option := range options {
 		key, value := option.Header()
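Both hunks above thread the configured storage class into the request object only when it is set, so an empty value keeps falling back to the bucket's default class (valid GCS classes include STANDARD, NEARLINE, COLDLINE and ARCHIVE). A small hypothetical helper showing the same guard, assuming the surrounding file's `storage` package import:

```go
// applyStorageClass is a hypothetical helper mirroring the two call sites
// above: it leaves obj untouched when no class is configured.
func applyStorageClass(obj *storage.Object, class string) *storage.Object {
	if class == "" {
		return obj // keep the bucket default
	}
	if obj == nil {
		obj = &storage.Object{}
	}
	obj.StorageClass = class
	return obj
}
```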
@@ -1462,6 +1526,7 @@ var (
 	_ fs.Copier      = &Fs{}
 	_ fs.PutStreamer = &Fs{}
 	_ fs.ListRer     = &Fs{}
+	_ fs.ListPer     = &Fs{}
 	_ fs.Object      = &Object{}
 	_ fs.MimeTyper   = &Object{}
 )
@@ -43,33 +43,42 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
 
 var commandHelp = []fs.CommandHelp{{
 	Name:  "drop",
-	Short: "Drop cache",
+	Short: "Drop cache.",
 	Long: `Completely drop checksum cache.
-Usage Example:
-    rclone backend drop hasher:
-`,
+
+Usage example:
+
+` + "```console" + `
+rclone backend drop hasher:
+` + "```",
 }, {
 	Name:  "dump",
-	Short: "Dump the database",
-	Long:  "Dump cache records covered by the current remote",
+	Short: "Dump the database.",
+	Long:  "Dump cache records covered by the current remote.",
 }, {
 	Name:  "fulldump",
-	Short: "Full dump of the database",
-	Long:  "Dump all cache records in the database",
+	Short: "Full dump of the database.",
+	Long:  "Dump all cache records in the database.",
 }, {
 	Name:  "import",
-	Short: "Import a SUM file",
+	Short: "Import a SUM file.",
 	Long: `Amend hash cache from a SUM file and bind checksums to files by size/time.
-Usage Example:
-    rclone backend import hasher:subdir md5 /path/to/sum.md5
-`,
+
+Usage example:
+
+` + "```console" + `
+rclone backend import hasher:subdir md5 /path/to/sum.md5
+` + "```",
 }, {
 	Name:  "stickyimport",
-	Short: "Perform fast import of a SUM file",
+	Short: "Perform fast import of a SUM file.",
 	Long: `Fill hash cache from a SUM file without verifying file fingerprints.
-Usage Example:
-    rclone backend stickyimport hasher:subdir md5 remote:path/to/sum.md5
-`,
+
+Usage example:
+
+` + "```console" + `
+rclone backend stickyimport hasher:subdir md5 remote:path/to/sum.md5
+` + "```",
 }}
 
 func (f *Fs) dbDump(ctx context.Context, full bool, root string) error {
@@ -11,6 +11,7 @@ import (
 	"io"
 	"mime"
 	"net/http"
+	"net/textproto"
 	"net/url"
 	"path"
 	"strings"
@@ -37,6 +38,10 @@ func init() {
 		Description: "HTTP",
 		NewFs:       NewFs,
 		CommandHelp: commandHelp,
+		MetadataInfo: &fs.MetadataInfo{
+			System: systemMetadataInfo,
+			Help:   `HTTP metadata keys are case insensitive and are always returned in lower case.`,
+		},
 		Options: []fs.Option{{
 			Name: "url",
 			Help: "URL of HTTP host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",
@@ -98,6 +103,40 @@ sizes of any files, and some files that don't exist may be in the listing.`,
 	fs.Register(fsi)
 }

+// system metadata keys which this backend owns
+var systemMetadataInfo = map[string]fs.MetadataHelp{
+	"cache-control": {
+		Help:    "Cache-Control header",
+		Type:    "string",
+		Example: "no-cache",
+	},
+	"content-disposition": {
+		Help:    "Content-Disposition header",
+		Type:    "string",
+		Example: "inline",
+	},
+	"content-disposition-filename": {
+		Help:    "Filename retrieved from Content-Disposition header",
+		Type:    "string",
+		Example: "file.txt",
+	},
+	"content-encoding": {
+		Help:    "Content-Encoding header",
+		Type:    "string",
+		Example: "gzip",
+	},
+	"content-language": {
+		Help:    "Content-Language header",
+		Type:    "string",
+		Example: "en-US",
+	},
+	"content-type": {
+		Help:    "Content-Type header",
+		Type:    "string",
+		Example: "text/plain",
+	},
+}

 // Options defines the configuration for this backend
 type Options struct {
 	Endpoint string `config:"url"`
@@ -126,6 +165,13 @@ type Object struct {
 	size        int64
 	modTime     time.Time
 	contentType string
+
+	// Metadata as pointers to strings as they often won't be present
+	contentDisposition         *string // Content-Disposition: header
+	contentDispositionFilename *string // Filename retrieved from Content-Disposition: header
+	cacheControl               *string // Cache-Control: header
+	contentEncoding            *string // Content-Encoding: header
+	contentLanguage            *string // Content-Language: header
 }

 // statusError returns an error if the res contained an error
@@ -277,6 +323,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		ci:          ci,
 	}
 	f.features = (&fs.Features{
+		ReadMetadata:            true,
 		CanHaveEmptyDirectories: true,
 	}).Fill(ctx, f)

@@ -429,6 +476,29 @@ func parse(base *url.URL, in io.Reader) (names []string, err error) {
 	return names, nil
 }

+// parseFilename extracts the filename from a Content-Disposition header
+func parseFilename(contentDisposition string) (string, error) {
+	// Normalize the contentDisposition to canonical MIME format
+	mediaType, params, err := mime.ParseMediaType(contentDisposition)
+	if err != nil {
+		return "", fmt.Errorf("failed to parse contentDisposition: %v", err)
+	}
+
+	// Check if the contentDisposition is an attachment
+	if strings.ToLower(mediaType) != "attachment" {
+		return "", fmt.Errorf("not an attachment: %s", mediaType)
+	}
+
+	// Extract the filename from the parameters
+	filename, ok := params["filename"]
+	if !ok {
+		return "", fmt.Errorf("filename not found in contentDisposition")
+	}
+
+	// Decode filename if it contains special encoding
+	return textproto.TrimString(filename), nil
+}
+
 // Adds the configured headers to the request if any
 func addHeaders(req *http.Request, opt *Options) {
 	for i := 0; i < len(opt.Headers); i += 2 {
@@ -577,6 +647,9 @@ func (o *Object) String() string {

 // Remote the name of the remote HTTP file, relative to the fs root
 func (o *Object) Remote() string {
+	if o.contentDispositionFilename != nil {
+		return *o.contentDispositionFilename
+	}
 	return o.remote
 }

@@ -634,6 +707,29 @@ func (o *Object) decodeMetadata(ctx context.Context, res *http.Response) error {
 	o.modTime = t
 	o.contentType = res.Header.Get("Content-Type")
 	o.size = rest.ParseSizeFromHeaders(res.Header)
+	contentDisposition := res.Header.Get("Content-Disposition")
+	if contentDisposition != "" {
+		o.contentDisposition = &contentDisposition
+	}
+	if o.contentDisposition != nil {
+		var filename string
+		filename, err = parseFilename(*o.contentDisposition)
+		if err == nil && filename != "" {
+			o.contentDispositionFilename = &filename
+		}
+	}
+	cacheControl := res.Header.Get("Cache-Control")
+	if cacheControl != "" {
+		o.cacheControl = &cacheControl
+	}
+	contentEncoding := res.Header.Get("Content-Encoding")
+	if contentEncoding != "" {
+		o.contentEncoding = &contentEncoding
+	}
+	contentLanguage := res.Header.Get("Content-Language")
+	if contentLanguage != "" {
+		o.contentLanguage = &contentLanguage
+	}

 	// If NoSlash is set then check ContentType to see if it is a directory
 	if o.fs.opt.NoSlash {
@@ -722,11 +818,13 @@ var commandHelp = []fs.CommandHelp{{
 	Long: `This set command can be used to update the config parameters
 for a running http backend.

-Usage Examples:
+Usage examples:

-    rclone backend set remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
-    rclone rc backend/command command=set fs=remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
-    rclone rc backend/command command=set fs=remote: -o url=https://example.com
+` + "```console" + `
+rclone backend set remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
+rclone rc backend/command command=set fs=remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
+rclone rc backend/command command=set fs=remote: -o url=https://example.com
+` + "```" + `

 The option keys are named as they are in the config file.
@@ -734,8 +832,7 @@ This rebuilds the connection to the http backend when it is called with
 the new parameters. Only new parameters need be passed as the values
 will default to those currently in use.

-It doesn't return anything.
-`,
+It doesn't return anything.`,
 }}

 // Command the backend to run a named command
@@ -771,6 +868,30 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
 	}
 }

+// Metadata returns metadata for an object
+//
+// It should return nil if there is no Metadata
+func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
+	metadata = make(fs.Metadata, 6)
+	if o.contentType != "" {
+		metadata["content-type"] = o.contentType
+	}
+
+	// Set system metadata
+	setMetadata := func(k string, v *string) {
+		if v == nil || *v == "" {
+			return
+		}
+		metadata[k] = *v
+	}
+	setMetadata("content-disposition", o.contentDisposition)
+	setMetadata("content-disposition-filename", o.contentDispositionFilename)
+	setMetadata("cache-control", o.cacheControl)
+	setMetadata("content-language", o.contentLanguage)
+	setMetadata("content-encoding", o.contentEncoding)
+	return metadata, nil
+}
+
 // Check the interfaces are satisfied
 var (
 	_ fs.Fs = &Fs{}
@@ -778,4 +899,5 @@ var (
 	_ fs.Object     = &Object{}
 	_ fs.MimeTyper  = &Object{}
 	_ fs.Commander  = &Fs{}
+	_ fs.Metadataer = &Object{}
 )
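For reference, a minimal standalone sketch of the same Content-Disposition parsing approach the new parseFilename helper uses, built only on the standard library (the function name here is illustrative, not rclone's):

```go
package main

import (
	"fmt"
	"mime"
	"strings"
)

// extractFilename parses a Content-Disposition header with
// mime.ParseMediaType, accepts only "attachment", and returns the
// filename parameter.
func extractFilename(contentDisposition string) (string, error) {
	mediaType, params, err := mime.ParseMediaType(contentDisposition)
	if err != nil {
		return "", err
	}
	if strings.ToLower(mediaType) != "attachment" {
		return "", fmt.Errorf("not an attachment: %s", mediaType)
	}
	name, ok := params["filename"]
	if !ok {
		return "", fmt.Errorf("no filename parameter")
	}
	return name, nil
}

func main() {
	name, err := extractFilename(`attachment; filename="five.txt.gz"`)
	fmt.Println(name, err) // five.txt.gz <nil>
}
```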
@@ -60,6 +60,17 @@ func prepareServer(t *testing.T) configmap.Simple {
 		what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
 		assert.Equal(t, headers[1], r.Header.Get(headers[0]), what+headers[0])
 		assert.Equal(t, headers[3], r.Header.Get(headers[2]), what+headers[2])

+		// Set the content disposition header for the fifth file
+		// later we will check if it is set using the metadata method
+		if r.URL.Path == "/five.txt.gz" {
+			w.Header().Set("Content-Disposition", "attachment; filename=\"five.txt.gz\"")
+			w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+			w.Header().Set("Cache-Control", "no-cache")
+			w.Header().Set("Content-Language", "en-US")
+			w.Header().Set("Content-Encoding", "gzip")
+		}
+
 		fileServer.ServeHTTP(w, r)
 	})

@@ -102,27 +113,33 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {

 	sort.Sort(entries)

-	require.Equal(t, 4, len(entries))
+	require.Equal(t, 5, len(entries))

 	e := entries[0]
-	assert.Equal(t, "four", e.Remote())
+	assert.Equal(t, "five.txt.gz", e.Remote())
 	assert.Equal(t, int64(-1), e.Size())
-	_, ok := e.(fs.Directory)
+	_, ok := e.(fs.Object)
 	assert.True(t, ok)

 	e = entries[1]
+	assert.Equal(t, "four", e.Remote())
+	assert.Equal(t, int64(-1), e.Size())
+	_, ok = e.(fs.Directory)
+	assert.True(t, ok)
+
+	e = entries[2]
 	assert.Equal(t, "one%.txt", e.Remote())
 	assert.Equal(t, int64(5+lineEndSize), e.Size())
 	_, ok = e.(*Object)
 	assert.True(t, ok)

-	e = entries[2]
+	e = entries[3]
 	assert.Equal(t, "three", e.Remote())
 	assert.Equal(t, int64(-1), e.Size())
 	_, ok = e.(fs.Directory)
 	assert.True(t, ok)

-	e = entries[3]
+	e = entries[4]
 	assert.Equal(t, "two.html", e.Remote())
 	if noSlash {
 		assert.Equal(t, int64(-1), e.Size())
@@ -218,6 +235,23 @@ func TestNewObjectWithLeadingSlash(t *testing.T) {
 	assert.Equal(t, fs.ErrorObjectNotFound, err)
 }

+func TestNewObjectWithMetadata(t *testing.T) {
+	f := prepare(t)
+	o, err := f.NewObject(context.Background(), "/five.txt.gz")
+	require.NoError(t, err)
+	assert.Equal(t, "five.txt.gz", o.Remote())
+	ho, ok := o.(*Object)
+	assert.True(t, ok)
+	metadata, err := ho.Metadata(context.Background())
+	require.NoError(t, err)
+	assert.Equal(t, "text/plain; charset=utf-8", metadata["content-type"])
+	assert.Equal(t, "attachment; filename=\"five.txt.gz\"", metadata["content-disposition"])
+	assert.Equal(t, "five.txt.gz", metadata["content-disposition-filename"])
+	assert.Equal(t, "no-cache", metadata["cache-control"])
+	assert.Equal(t, "en-US", metadata["content-language"])
+	assert.Equal(t, "gzip", metadata["content-encoding"])
+}
+
 func TestOpen(t *testing.T) {
 	m := prepareServer(t)
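The test above wraps a file server so that one path gets extra response headers before serving. A hedged, self-contained sketch of that wrapping pattern using only net/http and net/http/httptest (paths and header values mirror the test, everything else is illustrative):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	base := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "hello")
	})
	// Inject headers for selected paths, then delegate to the base handler.
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path == "/five.txt.gz" {
			w.Header().Set("Content-Disposition", `attachment; filename="five.txt.gz"`)
			w.Header().Set("Cache-Control", "no-cache")
		}
		base.ServeHTTP(w, r)
	}))
	defer ts.Close()

	res, err := http.Get(ts.URL + "/five.txt.gz")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer res.Body.Close()
	fmt.Println(res.Header.Get("Content-Disposition")) // attachment; filename="five.txt.gz"
}
```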
BIN backend/http/test/files/five.txt.gz (new binary file, not shown)
@@ -590,7 +590,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 		return "", err
 	}
 	bucket, bucketPath := f.split(remote)
-	return path.Join(f.opt.FrontEndpoint, "/download/", bucket, quotePath(bucketPath)), nil
+	return path.Join(f.opt.FrontEndpoint, "/download/", bucket, rest.URLPathEscapeAll(bucketPath)), nil
 }

 // Copy src to this remote using server-side copy operations.
@@ -622,7 +622,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (_ fs.Objec
 		"x-archive-auto-make-bucket": "1",
 		"x-archive-queue-derive":     "0",
 		"x-archive-keep-old-version": "0",
-		"x-amz-copy-source":          quotePath(path.Join("/", srcBucket, srcPath)),
+		"x-amz-copy-source":          rest.URLPathEscapeAll(path.Join("/", srcBucket, srcPath)),
 		"x-amz-metadata-directive":   "COPY",
 		"x-archive-filemeta-sha1":    srcObj.sha1,
 		"x-archive-filemeta-md5":     srcObj.md5,
@@ -778,7 +778,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	// make a GET request to (frontend)/download/:item/:path
 	opts := rest.Opts{
 		Method:  "GET",
-		Path:    path.Join("/download/", o.fs.root, quotePath(o.fs.opt.Enc.FromStandardPath(o.remote))),
+		Path:    path.Join("/download/", o.fs.root, rest.URLPathEscapeAll(o.fs.opt.Enc.FromStandardPath(o.remote))),
 		Options: optionsFixed,
 	}
 	err = o.fs.pacer.Call(func() (bool, error) {
@@ -1334,16 +1334,6 @@ func trimPathPrefix(s, prefix string, enc encoder.MultiEncoder) string {
 	return enc.ToStandardPath(strings.TrimPrefix(s, prefix+"/"))
 }

-// mimics urllib.parse.quote() on Python; exclude / from url.PathEscape
-func quotePath(s string) string {
-	seg := strings.Split(s, "/")
-	newValues := []string{}
-	for _, v := range seg {
-		newValues = append(newValues, url.QueryEscape(v))
-	}
-	return strings.Join(newValues, "/")
-}
-
 var (
 	_ fs.Fs     = &Fs{}
 	_ fs.Copier = &Fs{}
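The removed quotePath helper escaped each path segment with url.QueryEscape, which is query-string escaping rather than path escaping. rest.URLPathEscapeAll is rclone's replacement helper from this change; the sketch below only demonstrates the standard-library difference that motivates it:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	const segment = "my file+name.txt"
	// QueryEscape is meant for query strings: spaces become '+', '+' becomes %2B.
	fmt.Println(url.QueryEscape(segment)) // my+file%2Bname.txt
	// PathEscape is meant for path segments: spaces become %20, '+' is kept.
	fmt.Println(url.PathEscape(segment)) // my%20file+name.txt
}
```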
@@ -17,6 +17,7 @@ import (
 	"net/url"
 	"os"
 	"path"
+	"slices"
 	"strconv"
 	"strings"
 	"time"
@@ -59,31 +60,43 @@ const (
 	configVersion = 1

 	defaultTokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
-	defaultClientID = "jottacli"
+	defaultClientID = "jottacli" // Identified as "Jottacloud CLI" in "My logged in devices"

 	legacyTokenURL              = "https://api.jottacloud.com/auth/v1/token"
 	legacyRegisterURL           = "https://api.jottacloud.com/auth/v1/register"
 	legacyClientID              = "nibfk8biu12ju7hpqomr8b1e40"
 	legacyEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
 	legacyConfigVersion         = 0

-	teliaseCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
-	teliaseCloudAuthURL  = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
-	teliaseCloudClientID = "desktop"
-
-	telianoCloudTokenURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/token"
-	telianoCloudAuthURL  = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/auth"
-	telianoCloudClientID = "desktop"
-
-	tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
-	tele2CloudAuthURL  = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
-	tele2CloudClientID = "desktop"
-
-	onlimeCloudTokenURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/token"
-	onlimeCloudAuthURL  = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/auth"
-	onlimeCloudClientID = "desktop"
 )

+type service struct {
+	key      string
+	name     string
+	domain   string
+	realm    string
+	clientID string
+	scopes   []string
+}
+
+// The list of services and their settings for supporting traditional OAuth.
+// Please keep these in alphabetical order, but with jottacloud first.
+func getServices() []service {
+	return []service{
+		{"jottacloud", "Jottacloud", "id.jottacloud.com", "jottacloud", "desktop", []string{"openid", "jotta-default", "offline_access"}}, // Chose client id "desktop" here, will be identified as "Jottacloud for Desktop" in "My logged in devices", but could have used "jottacli" here as well.
+		{"elgiganten_dk", "Elgiganten Cloud (Denmark)", "cloud.elgiganten.dk", "elgiganten", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"elgiganten_se", "Elgiganten Cloud (Sweden)", "cloud.elgiganten.se", "elgiganten", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"elkjop", "Elkjøp Cloud (Norway)", "cloud.elkjop.no", "elkjop", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"elko", "ELKO Cloud (Iceland)", "cloud.elko.is", "elko", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"gigantti", "Gigantti Cloud (Finland)", "cloud.gigantti.fi", "gigantti", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"letsgo", "Let's Go Cloud (Germany)", "letsgo.jotta.cloud", "letsgo", "desktop-win", []string{"openid", "offline_access"}},
+		{"mediamarkt", "MediaMarkt Cloud (Multiregional)", "mediamarkt.jottacloud.com", "mediamarkt", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"onlime", "Onlime (Denmark)", "cloud-auth.onlime.dk", "onlime_wl", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"tele2", "Tele2 Cloud (Sweden)", "mittcloud-auth.tele2.se", "comhem", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"telia_no", "Telia Sky (Norway)", "sky-auth.telia.no", "get", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"telia_se", "Telia Cloud (Sweden)", "cloud-auth.telia.se", "telia_se", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+	}
+}
+
 // Register with Fs
 func init() {
 	// needs to be done early so we can use oauth during config
@@ -159,36 +172,44 @@ func init() {
 }

 // Config runs the backend configuration protocol
-func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
-	switch config.State {
+func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.ConfigIn) (*fs.ConfigOut, error) {
+	switch conf.State {
 	case "":
-		return fs.ConfigChooseExclusiveFixed("auth_type_done", "config_type", `Select authentication type.`, []fs.OptionExample{{
+		if isAuthorize, _ := m.Get(config.ConfigAuthorize); isAuthorize == "true" {
+			return nil, errors.New("not supported by this backend")
+		}
+		return fs.ConfigChooseExclusiveFixed("auth_type_done", "config_type", `Type of authentication.`, []fs.OptionExample{{
 			Value: "standard",
-			Help:  "Standard authentication.\nUse this if you're a normal Jottacloud user.",
+			Help: `Standard authentication.
+This is primarily supported by the official service, but may also be
+supported by some white-label services. It is designed for command-line
+applications, and you will be asked to enter a single-use personal login
+token which you must manually generate from the account security settings
+in the web interface of your service.`,
+		}, {
+			Value: "traditional",
+			Help: `Traditional authentication.
+This is supported by the official service and all white-label services
+that rclone knows about. You will be asked which service to connect to.
+It has a limitation of only a single active authentication at a time. You
+need to be on, or have access to, a machine with an internet-connected
+web browser.`,
 		}, {
 			Value: "legacy",
-			Help:  "Legacy authentication.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
-		}, {
-			Value: "telia_se",
-			Help:  "Telia Cloud authentication.\nUse this if you are using Telia Cloud (Sweden).",
-		}, {
-			Value: "telia_no",
-			Help:  "Telia Sky authentication.\nUse this if you are using Telia Sky (Norway).",
-		}, {
-			Value: "tele2",
-			Help:  "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
-		}, {
-			Value: "onlime",
-			Help:  "Onlime Cloud authentication.\nUse this if you are using Onlime Cloud.",
+			Help: `Legacy authentication.
+This is no longer supported by any known services and not recommended
+used. You will be asked for your account's username and password.`,
 		}})
 	case "auth_type_done":
 		// Jump to next state according to config chosen
-		return fs.ConfigGoto(config.Result)
+		return fs.ConfigGoto(conf.Result)
 	case "standard": // configure a jottacloud backend using the modern JottaCli token based authentication
 		m.Set("configVersion", fmt.Sprint(configVersion))
-		return fs.ConfigInput("standard_token", "config_login_token", "Personal login token.\nGenerate here: https://www.jottacloud.com/web/secure")
+		return fs.ConfigInput("standard_token", "config_login_token", `Personal login token.
+Generate it from the account security settings in the web interface of your
+service, for the official service on https://www.jottacloud.com/web/secure.`)
 	case "standard_token":
-		loginToken := config.Result
+		loginToken := conf.Result
 		m.Set(configClientID, defaultClientID)
 		m.Set(configClientSecret, "")

@@ -203,10 +224,50 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
 			return nil, fmt.Errorf("error while saving token: %w", err)
 		}
 		return fs.ConfigGoto("choose_device")
+	case "traditional":
+		services := getServices()
+		options := make([]fs.OptionExample, 0, len(services))
+		for _, service := range services {
+			options = append(options, fs.OptionExample{
+				Value: service.key,
+				Help:  service.name,
+			})
+		}
+		return fs.ConfigChooseExclusiveFixed("traditional_type", "config_traditional",
+			"White-label service. This decides the domain name to connect to and\nthe authentication configuration to use.",
+			options)
+	case "traditional_type":
+		services := getServices()
+		i := slices.IndexFunc(services, func(s service) bool { return s.key == conf.Result })
+		if i == -1 {
+			return nil, fmt.Errorf("unexpected service %q", conf.Result)
+		}
+		service := services[i]
+		opts := rest.Opts{
+			Method:  "GET",
+			RootURL: "https://" + service.domain + "/auth/realms/" + service.realm + "/.well-known/openid-configuration",
+		}
+		var wellKnown api.WellKnown
+		srv := rest.NewClient(fshttp.NewClient(ctx))
+		_, err := srv.CallJSON(ctx, &opts, nil, &wellKnown)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get authentication provider configuration: %w", err)
+		}
+		m.Set("configVersion", fmt.Sprint(configVersion))
+		m.Set(configClientID, service.clientID)
+		m.Set(configTokenURL, wellKnown.TokenEndpoint)
+		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
+			OAuth2Config: &oauthutil.Config{
+				AuthURL:     wellKnown.AuthorizationEndpoint,
+				TokenURL:    wellKnown.TokenEndpoint,
+				ClientID:    service.clientID,
+				Scopes:      service.scopes,
+				RedirectURL: oauthutil.RedirectLocalhostURL,
+			},
+		})
 	case "legacy": // configure a jottacloud backend using legacy authentication
 		m.Set("configVersion", fmt.Sprint(legacyConfigVersion))
 		return fs.ConfigConfirm("legacy_api", false, "config_machine_specific", `Do you want to create a machine specific API key?

Rclone has it's own Jottacloud API KEY which works fine as long as one
only uses rclone on a single machine. When you want to use rclone with
this account on more than one machine it's recommended to create a
@@ -214,7 +275,7 @@ machine specific API key. These keys can NOT be shared between
machines.`)
 	case "legacy_api":
 		srv := rest.NewClient(fshttp.NewClient(ctx))
-		if config.Result == "true" {
+		if conf.Result == "true" {
 			deviceRegistration, err := registerDevice(ctx, srv)
 			if err != nil {
 				return nil, fmt.Errorf("failed to register device: %w", err)
@@ -223,16 +284,16 @@ machines.`)
 			m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
 			fs.Debugf(nil, "Got clientID %q and clientSecret %q", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
 		}
-		return fs.ConfigInput("legacy_username", "config_username", "Username (e-mail address)")
+		return fs.ConfigInput("legacy_username", "config_username", "Username (e-mail address) of your account.")
 	case "legacy_username":
-		m.Set(configUsername, config.Result)
-		return fs.ConfigPassword("legacy_password", "config_password", "Password (only used in setup, will not be stored)")
+		m.Set(configUsername, conf.Result)
+		return fs.ConfigPassword("legacy_password", "config_password", "Password of your account. This is only used in setup, it will not be stored.")
 	case "legacy_password":
-		m.Set("password", config.Result)
+		m.Set("password", conf.Result)
 		m.Set("auth_code", "")
 		return fs.ConfigGoto("legacy_do_auth")
 	case "legacy_auth_code":
-		authCode := strings.ReplaceAll(config.Result, "-", "") // remove any "-" contained in the code so we have a 6 digit number
+		authCode := strings.ReplaceAll(conf.Result, "-", "") // remove any "-" contained in the code so we have a 6 digit number
 		m.Set("auth_code", authCode)
 		return fs.ConfigGoto("legacy_do_auth")
 	case "legacy_do_auth":
@@ -242,12 +303,12 @@ machines.`)
 		authCode, _ := m.Get("auth_code")

 		srv := rest.NewClient(fshttp.NewClient(ctx))
-		clientID, ok := m.Get(configClientID)
-		if !ok {
+		clientID, _ := m.Get(configClientID)
+		if clientID == "" {
 			clientID = legacyClientID
 		}
-		clientSecret, ok := m.Get(configClientSecret)
-		if !ok {
+		clientSecret, _ := m.Get(configClientSecret)
+		if clientSecret == "" {
 			clientSecret = legacyEncryptedClientSecret
 		}

@@ -260,7 +321,7 @@ machines.`)
 		}
 		token, err := doLegacyAuth(ctx, srv, oauthConfig, username, password, authCode)
 		if err == errAuthCodeRequired {
-			return fs.ConfigInput("legacy_auth_code", "config_auth_code", "Verification Code\nThis account uses 2 factor authentication you will receive a verification code via SMS.")
+			return fs.ConfigInput("legacy_auth_code", "config_auth_code", "Verification code.\nThis account uses 2 factor authentication you will receive a verification code via SMS.")
 		}
 		m.Set("password", "")
 		m.Set("auth_code", "")
@@ -272,58 +333,6 @@ machines.`)
 			return nil, fmt.Errorf("error while saving token: %w", err)
 		}
 		return fs.ConfigGoto("choose_device")
-	case "telia_se": // telia_se cloud config
-		m.Set("configVersion", fmt.Sprint(configVersion))
-		m.Set(configClientID, teliaseCloudClientID)
-		m.Set(configTokenURL, teliaseCloudTokenURL)
-		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-			OAuth2Config: &oauthutil.Config{
-				AuthURL:     teliaseCloudAuthURL,
-				TokenURL:    teliaseCloudTokenURL,
-				ClientID:    teliaseCloudClientID,
-				Scopes:      []string{"openid", "jotta-default", "offline_access"},
-				RedirectURL: oauthutil.RedirectLocalhostURL,
-			},
-		})
-	case "telia_no": // telia_no cloud config
-		m.Set("configVersion", fmt.Sprint(configVersion))
-		m.Set(configClientID, telianoCloudClientID)
-		m.Set(configTokenURL, telianoCloudTokenURL)
-		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-			OAuth2Config: &oauthutil.Config{
-				AuthURL:     telianoCloudAuthURL,
-				TokenURL:    telianoCloudTokenURL,
-				ClientID:    telianoCloudClientID,
-				Scopes:      []string{"openid", "jotta-default", "offline_access"},
-				RedirectURL: oauthutil.RedirectLocalhostURL,
-			},
-		})
-	case "tele2": // tele2 cloud config
-		m.Set("configVersion", fmt.Sprint(configVersion))
-		m.Set(configClientID, tele2CloudClientID)
-		m.Set(configTokenURL, tele2CloudTokenURL)
-		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-			OAuth2Config: &oauthutil.Config{
-				AuthURL:     tele2CloudAuthURL,
-				TokenURL:    tele2CloudTokenURL,
-				ClientID:    tele2CloudClientID,
-				Scopes:      []string{"openid", "jotta-default", "offline_access"},
-				RedirectURL: oauthutil.RedirectLocalhostURL,
-			},
-		})
-	case "onlime": // onlime cloud config
-		m.Set("configVersion", fmt.Sprint(configVersion))
-		m.Set(configClientID, onlimeCloudClientID)
-		m.Set(configTokenURL, onlimeCloudTokenURL)
-		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-			OAuth2Config: &oauthutil.Config{
-				AuthURL:     onlimeCloudAuthURL,
-				TokenURL:    onlimeCloudTokenURL,
-				ClientID:    onlimeCloudClientID,
-				Scopes:      []string{"openid", "jotta-default", "offline_access"},
-				RedirectURL: oauthutil.RedirectLocalhostURL,
-			},
-		})
 	case "choose_device":
 		return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", `Use a non-standard device/mountpoint?
Choosing no, the default, will let you access the storage used for the archive
@@ -331,7 +340,7 @@ section of the official Jottacloud client. If you instead want to access the
sync or the backup section, for example, you must choose yes.`)

 	case "choose_device_query":
-		if config.Result != "true" {
+		if conf.Result != "true" {
 			m.Set(configDevice, "")
 			m.Set(configMountpoint, "")
 			return fs.ConfigGoto("end")
@@ -372,7 +381,7 @@ a new by entering a unique name.`, defaultDevice)
 			return deviceNames[i], ""
 		})
 	case "choose_device_result":
-		device := config.Result
+		device := conf.Result

 		oAuthClient, _, err := getOAuthClient(ctx, name, m)
 		if err != nil {
@@ -432,7 +441,7 @@ You may create a new by entering a unique name.`, device)
 			return dev.MountPoints[i].Name, ""
 		})
 	case "choose_device_mountpoint":
-		mountpoint := config.Result
+		mountpoint := conf.Result

 		oAuthClient, _, err := getOAuthClient(ctx, name, m)
 		if err != nil {
@@ -463,7 +472,7 @@ You may create a new by entering a unique name.`, device)

 		if isNew {
 			if device == defaultDevice {
-				return nil, fmt.Errorf("custom mountpoints not supported on built-in %s device: %w", defaultDevice, err)
+				return nil, fmt.Errorf("custom mountpoints not supported on built-in %s device", defaultDevice)
 			}
 			fs.Debugf(nil, "Creating new mountpoint: %s", mountpoint)
 			_, err := createMountPoint(ctx, jfsSrv, path.Join(cust.Username, device, mountpoint))
@@ -478,7 +487,7 @@ You may create a new by entering a unique name.`, device)
 		// All the config flows end up here in case we need to carry on with something
 		return nil, nil
 	}
-	return nil, fmt.Errorf("unknown state %q", config.State)
+	return nil, fmt.Errorf("unknown state %q", conf.State)
 }

 // Options defines the configuration for this backend
@@ -929,12 +938,12 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
 			oauthConfig.AuthURL = tokenURL
 		}
 	} else if ver == legacyConfigVersion {
-		clientID, ok := m.Get(configClientID)
-		if !ok {
+		clientID, _ := m.Get(configClientID)
+		if clientID == "" {
 			clientID = legacyClientID
 		}
-		clientSecret, ok := m.Get(configClientSecret)
-		if !ok {
+		clientSecret, _ := m.Get(configClientSecret)
+		if clientSecret == "" {
 			clientSecret = legacyEncryptedClientSecret
 		}
 		oauthConfig.ClientID = clientID
@@ -1000,6 +1009,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		f.features.ListR = nil
 	}

+	cust, err := getCustomerInfo(ctx, f.apiSrv)
+	if err != nil {
+		return nil, err
+	}
+	f.user = cust.Username
+	f.setEndpoints()
+
 	// Renew the token in the background
 	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
 		_, err := f.readMetaDataForPath(ctx, "")
@@ -1009,13 +1025,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return err
 	})

-	cust, err := getCustomerInfo(ctx, f.apiSrv)
-	if err != nil {
-		return nil, err
-	}
-	f.user = cust.Username
-	f.setEndpoints()
-
 	if root != "" && !rootIsDir {
 		// Check to see if the root actually an existing file
 		remote := path.Base(root)
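The new "traditional" flow above derives its OAuth endpoints from the provider's OpenID Connect discovery document. A hedged, stdlib-only sketch of that discovery request (the URL pattern and example domain/realm come from the diff; the struct and function names here are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// wellKnown holds the two endpoints the flow needs from the standard
// OpenID Connect discovery document.
type wellKnown struct {
	AuthorizationEndpoint string `json:"authorization_endpoint"`
	TokenEndpoint         string `json:"token_endpoint"`
}

func discover(domain, realm string) (*wellKnown, error) {
	endpoint := "https://" + domain + "/auth/realms/" + realm + "/.well-known/openid-configuration"
	resp, err := http.Get(endpoint)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	var wk wellKnown
	if err := json.NewDecoder(resp.Body).Decode(&wk); err != nil {
		return nil, err
	}
	return &wk, nil
}

func main() {
	wk, err := discover("id.jottacloud.com", "jottacloud")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(wk.AuthorizationEndpoint, wk.TokenEndpoint)
}
```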
@@ -497,9 +497,6 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 	}

 	f.dirCache.FlushDir(dir)
-	if err != nil {
-		return err
-	}
 	return nil
 }

@@ -1,4 +1,4 @@
-//go:build windows || plan9 || js || wasm || linux
+//go:build windows || plan9 || js || linux

 package local

@@ -1,4 +1,4 @@
-//go:build !windows && !plan9 && !js && !wasm && !linux
+//go:build !windows && !plan9 && !js && !linux

 package local

@@ -1,4 +1,4 @@
-//go:build plan9 || js || wasm
+//go:build plan9 || js

 package local

@@ -1,4 +1,4 @@
-//go:build !windows && !plan9 && !js && !wasm
+//go:build !windows && !plan9 && !js

 package local

@@ -7,6 +7,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	iofs "io/fs"
 	"os"
 	"path"
 	"path/filepath"
@@ -114,6 +115,17 @@ points, as you explicitly acknowledge that they should be skipped.`,
 		NoPrefix: true,
 		Advanced: true,
 	},
+	{
+		Name: "skip_specials",
+		Help: `Don't warn about skipped pipes, sockets and device objects.
+
+This flag disables warning messages on skipped pipes, sockets and
+device objects, as you explicitly acknowledge that they should be
+skipped.`,
+		Default:  false,
+		NoPrefix: true,
+		Advanced: true,
+	},
 	{
 		Name: "zero_size_links",
 		Help: `Assume the Stat size of links is zero (and read them instead) (deprecated).
@@ -327,6 +339,7 @@ type Options struct {
 	FollowSymlinks    bool `config:"copy_links"`
 	TranslateSymlinks bool `config:"links"`
 	SkipSymlinks      bool `config:"skip_links"`
+	SkipSpecials      bool `config:"skip_specials"`
 	UTFNorm           bool `config:"unicode_normalization"`
 	NoCheckUpdated    bool `config:"no_check_updated"`
 	NoUNC             bool `config:"nounc"`
@@ -841,7 +854,13 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	} else if !fi.IsDir() {
 		return fs.ErrorIsFile
 	}
-	return os.Remove(localPath)
+	err := os.Remove(localPath)
+	if runtime.GOOS == "windows" && errors.Is(err, iofs.ErrPermission) { // https://github.com/golang/go/issues/26295
+		if os.Chmod(localPath, 0o600) == nil {
+			err = os.Remove(localPath)
+		}
+	}
+	return err
 }

 // Precision of the file system
@@ -1051,12 +1070,11 @@ func (f *Fs) Hashes() hash.Set {
 var commandHelp = []fs.CommandHelp{
 	{
 		Name:  "noop",
-		Short: "A null operation for testing backend commands",
-		Long: `This is a test command which has some options
-you can try to change the output.`,
+		Short: "A null operation for testing backend commands.",
+		Long:  `This is a test command which has some options you can try to change the output.`,
 		Opts: map[string]string{
-			"echo":  "echo the input arguments",
-			"error": "return an error based on option value",
+			"echo":  "Echo the input arguments.",
+			"error": "Return an error based on option value.",
 		},
 	},
 }
@@ -1239,7 +1257,9 @@ func (o *Object) Storable() bool {
 		}
 		return false
 	} else if mode&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
-		fs.Logf(o, "Can't transfer non file/directory")
+		if !o.fs.opt.SkipSpecials {
+			fs.Logf(o, "Can't transfer non file/directory")
+		}
 		return false
 	} else if mode&os.ModeDir != 0 {
 		// fs.Debugf(o, "Skipping directory")
@@ -334,7 +334,7 @@ func TestMetadata(t *testing.T) {

 func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
 	ctx := context.Background()
-	whenRFC := when.Format(time.RFC3339Nano)
+	whenRFC := when.Local().Format(time.RFC3339Nano)
 	const dayLength = len("2001-01-01")

 	f := r.Flocal.(*Fs)
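The Rmdir change retries the removal after clearing the read-only attribute, because Windows can report a permission error for directories carrying FILE_ATTRIBUTE_READONLY. A hedged standalone sketch of that retry pattern (function name is illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
	"runtime"
)

// removeWithReadonlyRetry removes a path, retrying once after clearing the
// read-only attribute when Windows reports a permission error.
// See https://github.com/golang/go/issues/26295 for the underlying issue.
func removeWithReadonlyRetry(path string) error {
	err := os.Remove(path)
	if runtime.GOOS == "windows" && errors.Is(err, fs.ErrPermission) {
		if os.Chmod(path, 0o600) == nil {
			err = os.Remove(path)
		}
	}
	return err
}

func main() {
	fmt.Println(removeWithReadonlyRetry("testdir"))
}
```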
backend/local/local_internal_windows_test.go (new file, 40 lines)
@@ -0,0 +1,40 @@
+//go:build windows
+
+package local
+
+import (
+	"context"
+	"path/filepath"
+	"runtime"
+	"syscall"
+	"testing"
+
+	"github.com/rclone/rclone/fs/operations"
+	"github.com/rclone/rclone/fstest"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// TestRmdirWindows tests that FILE_ATTRIBUTE_READONLY does not block Rmdir on windows.
+// Microsoft docs indicate that "This attribute is not honored on directories."
+// See https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants#file_attribute_readonly
+// and https://github.com/golang/go/issues/26295
+func TestRmdirWindows(t *testing.T) {
+	if runtime.GOOS != "windows" {
+		t.Skipf("windows only")
+	}
+	r := fstest.NewRun(t)
+	defer r.Finalise()
+
+	err := operations.Mkdir(context.Background(), r.Flocal, "testdir")
+	require.NoError(t, err)
+
+	ptr, err := syscall.UTF16PtrFromString(filepath.Join(r.Flocal.Root(), "testdir"))
+	require.NoError(t, err)
+
+	err = syscall.SetFileAttributes(ptr, uint32(syscall.FILE_ATTRIBUTE_DIRECTORY+syscall.FILE_ATTRIBUTE_READONLY))
+	require.NoError(t, err)
+
+	err = operations.Rmdir(context.Background(), r.Flocal, "testdir")
+	assert.NoError(t, err)
+}
@@ -1,4 +1,4 @@
-//go:build dragonfly || plan9 || js || wasm
+//go:build dragonfly || plan9 || js || aix

 package local

@@ -1,4 +1,4 @@
-//go:build !windows && !plan9 && !js && !wasm
+//go:build !windows && !plan9 && !js

 package local

@@ -1,4 +1,4 @@
-//go:build windows || plan9 || js || wasm
+//go:build windows || plan9 || js

 package local

@@ -400,7 +400,7 @@ type quirks struct {
 }

 func (q *quirks) parseQuirks(option string) {
-	for _, flag := range strings.Split(option, ",") {
+	for flag := range strings.SplitSeq(option, ",") {
 		switch strings.ToLower(strings.TrimSpace(flag)) {
 		case "binlist":
 			// The official client sometimes uses a so called "bin" protocol,
@@ -1770,7 +1770,7 @@ func (f *Fs) parseSpeedupPatterns(patternString string) (err error) {
 	f.speedupAny = false
 	uniqueValidPatterns := make(map[string]any)

-	for _, pattern := range strings.Split(patternString, ",") {
+	for pattern := range strings.SplitSeq(patternString, ",") {
 		pattern = strings.ToLower(strings.TrimSpace(pattern))
 		if pattern == "" {
 			continue
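strings.SplitSeq (added in Go 1.24) yields the substrings through an iterator instead of allocating a slice up front, which is why the loop shape changes from `for _, x := range` to `for x := range`. A small comparison (the option string is just sample input):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	const opts = "binlist, quirk-a ,quirk-b"

	// Old shape: Split allocates a []string before iteration.
	for _, flag := range strings.Split(opts, ",") {
		fmt.Println("split:", strings.TrimSpace(flag))
	}

	// New shape: SplitSeq returns an iterator, so no intermediate slice.
	for flag := range strings.SplitSeq(opts, ",") {
		fmt.Println("seq:  ", strings.TrimSpace(flag))
	}
}
```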
@@ -18,6 +18,7 @@ Improvements:
 import (
 	"context"
 	"crypto/tls"
+	"encoding/base64"
 	"errors"
 	"fmt"
 	"io"
@@ -47,6 +48,9 @@ const (
 	maxSleep      = 2 * time.Second
 	eventWaitTime = 500 * time.Millisecond
 	decayConstant = 2 // bigger for slower decay, exponential
+
+	sessionIDConfigKey = "session_id"
+	masterKeyConfigKey = "master_key"
 )

 var (
@@ -70,6 +74,24 @@ func init() {
 			Help:       "Password.",
 			Required:   true,
 			IsPassword: true,
+		}, {
+			Name:     "2fa",
+			Help:     `The 2FA code of your MEGA account if the account is set up with one`,
+			Required: false,
+		}, {
+			Name:      sessionIDConfigKey,
+			Help:      "Session (internal use only)",
+			Required:  false,
+			Advanced:  true,
+			Sensitive: true,
+			Hide:      fs.OptionHideBoth,
+		}, {
+			Name:      masterKeyConfigKey,
+			Help:      "Master key (internal use only)",
+			Required:  false,
+			Advanced:  true,
+			Sensitive: true,
+			Hide:      fs.OptionHideBoth,
 		}, {
 			Name: "debug",
 			Help: `Output more debug from Mega.
@@ -113,6 +135,9 @@ Enabling it will increase CPU usage and add network overhead.`,
 type Options struct {
 	User       string `config:"user"`
 	Pass       string `config:"pass"`
+	TwoFA      string `config:"2fa"`
+	SessionID  string `config:"session_id"`
+	MasterKey  string `config:"master_key"`
 	Debug      bool   `config:"debug"`
 	HardDelete bool   `config:"hard_delete"`
 	UseHTTPS   bool   `config:"use_https"`
@@ -209,6 +234,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}
 	ci := fs.GetConfig(ctx)

+	// Create Fs
+	root = parsePath(root)
+	f := &Fs{
+		name:  name,
+		root:  root,
+		opt:   *opt,
+		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+	}
+	f.features = (&fs.Features{
+		DuplicateFiles:          true,
+		CanHaveEmptyDirectories: true,
+	}).Fill(ctx, f)
+
 	// cache *mega.Mega on username so we can reuse and share
 	// them between remotes. They are expensive to make as they
 	// contain all the objects and sharing the objects makes the
@@ -248,25 +286,29 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 			})
 		}

-		err := srv.Login(opt.User, opt.Pass)
-		if err != nil {
-			return nil, fmt.Errorf("couldn't login: %w", err)
+		if opt.SessionID == "" {
+			fs.Debugf(f, "Using username and password to initialize the Mega API")
+			err := srv.MultiFactorLogin(opt.User, opt.Pass, opt.TwoFA)
+			if err != nil {
+				return nil, fmt.Errorf("couldn't login: %w", err)
+			}
+			megaCache[opt.User] = srv
+			m.Set(sessionIDConfigKey, srv.GetSessionID())
+			encodedMasterKey := base64.StdEncoding.EncodeToString(srv.GetMasterKey())
+			m.Set(masterKeyConfigKey, encodedMasterKey)
+		} else {
+			fs.Debugf(f, "Using previously stored session ID and master key to initialize the Mega API")
+			decodedMasterKey, err := base64.StdEncoding.DecodeString(opt.MasterKey)
+			if err != nil {
+				return nil, fmt.Errorf("couldn't decode master key: %w", err)
+			}
+			err = srv.LoginWithKeys(opt.SessionID, decodedMasterKey)
+			if err != nil {
+				fs.Debugf(f, "login with previous auth keys failed: %v", err)
+			}
 		}
-		megaCache[opt.User] = srv
 	}
+	f.srv = srv

-	root = parsePath(root)
-	f := &Fs{
-		name:  name,
-		root:  root,
-		opt:   *opt,
-		srv:   srv,
-		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
-	}
-	f.features = (&fs.Features{
-		DuplicateFiles:          true,
-		CanHaveEmptyDirectories: true,
-	}).Fill(ctx, f)

 	// Find the root node and check if it is a file or not
 	_, err = f.findRoot(ctx, false)
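The master key is raw bytes, while rclone config values are strings, which is why the change base64-encodes it before storing and decodes it again before the login-with-keys path. A tiny round-trip sketch of that encoding step (key bytes are sample values):

```go
package main

import (
	"bytes"
	"encoding/base64"
	"fmt"
)

func main() {
	masterKey := []byte{0x01, 0x02, 0xfe, 0xff} // raw key bytes (illustrative)

	// Encode for storage in the config file, which only holds strings.
	encoded := base64.StdEncoding.EncodeToString(masterKey)
	fmt.Println("stored as:", encoded)

	// Decode on the next run before reusing the saved session.
	decoded, err := base64.StdEncoding.DecodeString(encoded)
	fmt.Println(bytes.Equal(masterKey, decoded), err) // true <nil>
}
```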
@@ -325,13 +325,12 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
|
|||||||
}
|
}
|
||||||
|
|
||||||
// listDir lists the bucket to the entries
|
// listDir lists the bucket to the entries
|
||||||
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
|
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
|
||||||
// List the objects and directories
|
// List the objects and directories
|
||||||
err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, entry fs.DirEntry, isDirectory bool) error {
|
err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, entry fs.DirEntry, isDirectory bool) error {
|
||||||
entries = append(entries, entry)
|
return callback(entry)
|
||||||
return nil
|
|
||||||
})
|
})
|
||||||
return entries, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// listBuckets lists the buckets to entries
|
// listBuckets lists the buckets to entries
|
||||||
@@ -354,15 +353,46 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	// defer fslog.Trace(dir, "")("entries = %q, err = %v", &entries, &err)
+	return list.WithListP(ctx, dir, f)
+}
+
+// ListP lists the objects and directories of the Fs starting
+// from dir non recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
+	list := list.NewHelper(callback)
 	bucket, directory := f.split(dir)
 	if bucket == "" {
 		if directory != "" {
-			return nil, fs.ErrorListBucketRequired
+			return fs.ErrorListBucketRequired
+		}
+		entries, err := f.listBuckets(ctx)
+		if err != nil {
+			return err
+		}
+		for _, entry := range entries {
+			err = list.Add(entry)
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
+		if err != nil {
+			return err
 		}
-		return f.listBuckets(ctx)
 	}
-	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
+	return list.Flush()
 }

 // ListR lists the objects and directories of the Fs starting
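A note on the new method's contract, which the doc comment above spells out: entries arrive in tranches through an fs.ListRCallback, and returning an error from the callback stops the listing immediately. Below is a minimal sketch of a caller, assuming only the signatures visible in the hunk; the counting helper is illustrative and not part of rclone.

```go
package example

import (
	"context"

	"github.com/rclone/rclone/fs"
)

// countEntries lists one directory level via ListP and counts what comes back.
// fs.ListRCallback is func(entries fs.DirEntries) error, so each call may
// deliver a batch ("tranche") of entries rather than a single one.
func countEntries(ctx context.Context, f fs.ListPer, dir string) (int, error) {
	total := 0
	err := f.ListP(ctx, dir, func(entries fs.DirEntries) error {
		total += len(entries)
		return nil // a non-nil error here would abort the listing immediately
	})
	return total, err
}
```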
@@ -629,6 +659,7 @@ var (
 	_ fs.Copier      = &Fs{}
 	_ fs.PutStreamer = &Fs{}
 	_ fs.ListRer     = &Fs{}
+	_ fs.ListPer     = &Fs{}
 	_ fs.Object      = &Object{}
 	_ fs.MimeTyper   = &Object{}
 )
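The added `_ fs.ListPer = &Fs{}` line is the usual Go compile-time interface assertion: it costs nothing at runtime and fails the build if *Fs stops implementing the interface. The same idiom with standard-library types, for readers who have not seen it before:

```go
package main

import (
	"bytes"
	"io"
)

// These declarations compile to nothing at runtime; they only fail the build
// if the concrete type stops satisfying the interface.
var (
	_ io.Reader = (*bytes.Buffer)(nil)
	_ io.Writer = (*bytes.Buffer)(nil)
)

func main() {}
```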
@@ -87,7 +87,7 @@ Please choose the 'y' option to set your own password then enter your secret.`,

 var commandHelp = []fs.CommandHelp{{
 	Name:  "du",
-	Short: "Return disk usage information for a specified directory",
+	Short: "Return disk usage information for a specified directory.",
 	Long: `The usage information returned, includes the targeted directory as well as all
 files stored in any sub-directories that may exist.`,
 }, {
@@ -96,7 +96,12 @@ files stored in any sub-directories that may exist.`,
 	Long: `The desired path location (including applicable sub-directories) ending in
 the object that will be the target of the symlink (for example, /links/mylink).
 Include the file extension for the object, if applicable.
-` + "`rclone backend symlink <src> <path>`",
+
+Usage example:
+
+` + "```console" + `
+rclone backend symlink <src> <path>
+` + "```",
 },
 }

@@ -243,7 +243,6 @@ func (m *Metadata) Get(ctx context.Context) (metadata fs.Metadata, err error) {
 func (m *Metadata) Set(ctx context.Context, metadata fs.Metadata) (numSet int, err error) {
 	numSet = 0
 	for k, v := range metadata {
-		k, v := k, v
 		switch k {
 		case "mtime":
 			t, err := time.Parse(timeFormatIn, v)
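The deleted `k, v := k, v` line is the classic pre-Go 1.22 per-iteration copy. Since Go 1.22, range variables are scoped to each iteration, so the copy is redundant (assuming the module targets go1.22 or newer, which the hunk itself does not show). A small self-contained illustration:

```go
package main

import "fmt"

func main() {
	funcs := make([]func() string, 0, 3)
	for _, v := range []string{"a", "b", "c"} {
		// No `v := v` needed: with Go 1.22+ semantics each iteration
		// has its own v, so the closures capture distinct values.
		funcs = append(funcs, func() string { return v })
	}
	for _, fn := range funcs {
		fmt.Print(fn()) // prints "abc"
	}
	fmt.Println()
}
```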
@@ -422,12 +421,7 @@ func (m *Metadata) orderPermissions(xs []*api.PermissionsType) {
 		if hasUserIdentity(p.GetGrantedTo(m.fs.driveType)) {
 			return true
 		}
-		for _, identity := range p.GetGrantedToIdentities(m.fs.driveType) {
-			if hasUserIdentity(identity) {
-				return true
-			}
-		}
-		return false
+		return slices.ContainsFunc(p.GetGrantedToIdentities(m.fs.driveType), hasUserIdentity)
 	}
 	// Put Permissions with a user first, leaving unsorted otherwise
 	slices.SortStableFunc(xs, func(a, b *api.PermissionsType) int {
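For context on the helper this hunk switches to: slices.ContainsFunc from the standard library (Go 1.21+) reports whether any element satisfies a predicate, which is exactly what the removed loop did. A generic illustration, unrelated to the OneDrive permission types:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	words := []string{"alpha", "beta", "gamma"}
	// True if the predicate returns true for at least one element.
	hasLong := slices.ContainsFunc(words, func(w string) bool { return len(w) > 4 })
	fmt.Println(hasLong) // true ("alpha" and "gamma" have more than 4 letters)
}
```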
@@ -1377,9 +1377,27 @@ func (f *Fs) itemToDirEntry(ctx context.Context, dir string, info *api.Item) (en
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	return list.WithListP(ctx, dir, f)
+}
+
+// ListP lists the objects and directories of the Fs starting
+// from dir non recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
+	list := list.NewHelper(callback)
 	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
 	if err != nil {
-		return nil, err
+		return err
 	}
 	err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) error {
 		entry, err := f.itemToDirEntry(ctx, dir, info)
@@ -1389,13 +1407,16 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		if entry == nil {
 			return nil
 		}
-		entries = append(entries, entry)
+		err = list.Add(entry)
+		if err != nil {
+			return err
+		}
 		return nil
 	})
 	if err != nil {
-		return nil, err
+		return err
 	}
-	return entries, nil
+	return list.Flush()
 }

 // ListR lists the objects and directories of the Fs starting
@@ -3023,6 +3044,7 @@ var (
 	_ fs.PublicLinker = (*Fs)(nil)
 	_ fs.CleanUpper   = (*Fs)(nil)
 	_ fs.ListRer      = (*Fs)(nil)
+	_ fs.ListPer      = (*Fs)(nil)
 	_ fs.Shutdowner   = (*Fs)(nil)
 	_ fs.Object       = (*Object)(nil)
 	_ fs.MimeTyper    = &Object{}
@@ -172,8 +172,8 @@ func BenchmarkQuickXorHash(b *testing.B) {
 	require.NoError(b, err)
 	require.Equal(b, len(buf), n)
 	h := New()
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+
+	for b.Loop() {
 		h.Reset()
 		h.Write(buf)
 		h.Sum(nil)
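The benchmark hunk replaces the b.ResetTimer() / for i := 0; i < b.N; i++ pattern with b.Loop(), introduced in Go 1.24. Setup performed before the loop is excluded from the measurement, so the explicit timer reset is no longer needed. A standalone sketch with a stand-in workload:

```go
package example

import (
	"crypto/sha256"
	"testing"
)

func BenchmarkSum256(b *testing.B) {
	data := make([]byte, 1024) // setup outside b.Loop is not timed
	for b.Loop() {
		_ = sha256.Sum256(data)
	}
}
```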
@@ -1,4 +1,4 @@
-//go:build !plan9 && !solaris && !js && !wasm
+//go:build !plan9 && !solaris && !js

 package oracleobjectstorage

@@ -1,4 +1,4 @@
-//go:build !plan9 && !solaris && !js && !wasm
+//go:build !plan9 && !solaris && !js

 package oracleobjectstorage

@@ -1,4 +1,4 @@
-//go:build !plan9 && !solaris && !js && !wasm
+//go:build !plan9 && !solaris && !js

 package oracleobjectstorage

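All of the //go:build hunks in this package drop the !wasm term. Read as a boolean expression over the target platform, the old constraint additionally excluded GOARCH=wasm; the new one only rules out plan9, solaris and js (presumably to allow wasm-based targets, though the diff does not state the motivation). For reference, the constraint sits above the package clause like this:

```go
//go:build !plan9 && !solaris && !js

// The expression above means: compile this file unless GOOS is plan9,
// solaris or js. The removed !wasm term had additionally excluded
// GOARCH=wasm builds.
package oracleobjectstorage
```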
@@ -30,20 +30,25 @@ const (

 var commandHelp = []fs.CommandHelp{{
 	Name:  operationRename,
-	Short: "change the name of an object",
+	Short: "change the name of an object.",
 	Long: `This command can be used to rename a object.

-Usage Examples:
+Usage example:

-rclone backend rename oos:bucket relative-object-path-under-bucket object-new-name
-`,
+` + "```console" + `
+rclone backend rename oos:bucket relative-object-path-under-bucket object-new-name
+` + "```",
 	Opts: nil,
 }, {
 	Name:  operationListMultiPart,
-	Short: "List the unfinished multipart uploads",
+	Short: "List the unfinished multipart uploads.",
 	Long: `This command lists the unfinished multipart uploads in JSON format.

-rclone backend list-multipart-uploads oos:bucket/path/to/object
+Usage example:
+
+` + "```console" + `
+rclone backend list-multipart-uploads oos:bucket/path/to/object
+` + "```" + `

 It returns a dictionary of buckets with values as lists of unfinished
 multipart uploads.
@@ -51,70 +56,82 @@ multipart uploads.
 You can call it with no bucket in which case it lists all bucket, with
 a bucket or with a bucket and path.

-{
-  "test-bucket": [
-    {
-      "namespace": "test-namespace",
-      "bucket": "test-bucket",
-      "object": "600m.bin",
-      "uploadId": "51dd8114-52a4-b2f2-c42f-5291f05eb3c8",
-      "timeCreated": "2022-07-29T06:21:16.595Z",
-      "storageTier": "Standard"
-    }
-  ]
-`,
+` + "```json" + `
+{
+  "test-bucket": [
+    {
+      "namespace": "test-namespace",
+      "bucket": "test-bucket",
+      "object": "600m.bin",
+      "uploadId": "51dd8114-52a4-b2f2-c42f-5291f05eb3c8",
+      "timeCreated": "2022-07-29T06:21:16.595Z",
+      "storageTier": "Standard"
+    }
+  ]
+}`,
 }, {
 	Name:  operationCleanup,
 	Short: "Remove unfinished multipart uploads.",
 	Long: `This command removes unfinished multipart uploads of age greater than
 max-age which defaults to 24 hours.

-Note that you can use --interactive/-i or --dry-run with this command to see what
-it would do.
+Note that you can use --interactive/-i or --dry-run with this command to see
+what it would do.

-rclone backend cleanup oos:bucket/path/to/object
-rclone backend cleanup -o max-age=7w oos:bucket/path/to/object
+Usage examples:

-Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
-`,
+` + "```console" + `
+rclone backend cleanup oos:bucket/path/to/object
+rclone backend cleanup -o max-age=7w oos:bucket/path/to/object
+` + "```" + `
+
+Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.`,
 	Opts: map[string]string{
-		"max-age": "Max age of upload to delete",
+		"max-age": "Max age of upload to delete.",
 	},
 }, {
 	Name:  operationRestore,
-	Short: "Restore objects from Archive to Standard storage",
-	Long: `This command can be used to restore one or more objects from Archive to Standard storage.
+	Short: "Restore objects from Archive to Standard storage.",
+	Long: `This command can be used to restore one or more objects from Archive to
+Standard storage.

-Usage Examples:
+Usage examples:

-rclone backend restore oos:bucket/path/to/directory -o hours=HOURS
-rclone backend restore oos:bucket -o hours=HOURS
+` + "```console" + `
+rclone backend restore oos:bucket/path/to/directory -o hours=HOURS
+rclone backend restore oos:bucket -o hours=HOURS
+` + "```" + `

 This flag also obeys the filters. Test first with --interactive/-i or --dry-run flags

-rclone --interactive backend restore --include "*.txt" oos:bucket/path -o hours=72
+` + "```console" + `
+rclone --interactive backend restore --include "*.txt" oos:bucket/path -o hours=72
+` + "```" + `

-All the objects shown will be marked for restore, then
+All the objects shown will be marked for restore, then:

-rclone backend restore --include "*.txt" oos:bucket/path -o hours=72
+` + "```console" + `
+rclone backend restore --include "*.txt" oos:bucket/path -o hours=72
+` + "```" + `

-It returns a list of status dictionaries with Object Name and Status
-keys. The Status will be "RESTORED"" if it was successful or an error message
-if not.
+It returns a list of status dictionaries with Object Name and Status keys.
+The Status will be "RESTORED"" if it was successful or an error message if not.

-[
-  {
-    "Object": "test.txt"
-    "Status": "RESTORED",
-  },
-  {
-    "Object": "test/file4.txt"
-    "Status": "RESTORED",
-  }
-]
-`,
+` + "```json" + `
+[
+  {
+    "Object": "test.txt"
+    "Status": "RESTORED",
+  },
+  {
+    "Object": "test/file4.txt"
+    "Status": "RESTORED",
+  }
+]
+` + "```",
 	Opts: map[string]string{
-		"hours": "The number of hours for which this object will be restored. Default is 24 hrs.",
+		"hours": `The number of hours for which this object will be restored.
+Default is 24 hrs.`,
 	},
 },
 }
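A formatting note on the help-text hunks above: the examples are wrapped in console and json code fences, but Go raw string literals cannot contain backticks, so the fence markers are spliced in through string concatenation. A minimal illustration of the same trick (the variable name and the example command arguments are arbitrary):

````go
package main

import "fmt"

// A raw string cannot include a backtick, so fenced code blocks are built
// by concatenating interpreted strings that hold the backticks.
var help = `Usage example:

` + "```console" + `
rclone backend rename oos:bucket old-name new-name
` + "```"

func main() {
	fmt.Println(help)
}
````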
@@ -1,4 +1,4 @@
-//go:build !plan9 && !solaris && !js && !wasm
+//go:build !plan9 && !solaris && !js

 package oracleobjectstorage

@@ -1,4 +1,4 @@
-//go:build !plan9 && !solaris && !js && !wasm
+//go:build !plan9 && !solaris && !js

 package oracleobjectstorage

@@ -1,4 +1,4 @@
-//go:build !plan9 && !solaris && !js && !wasm
+//go:build !plan9 && !solaris && !js

 package oracleobjectstorage

@@ -1,4 +1,4 @@
-//go:build !plan9 && !solaris && !js && !wasm
+//go:build !plan9 && !solaris && !js

 package oracleobjectstorage

@@ -1,4 +1,4 @@
-//go:build !plan9 && !solaris && !js && !wasm
+//go:build !plan9 && !solaris && !js

 // Package oracleobjectstorage provides an interface to the OCI object storage system.
 package oracleobjectstorage
@@ -254,15 +254,47 @@ func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	return list.WithListP(ctx, dir, f)
+}
+
+// ListP lists the objects and directories of the Fs starting
+// from dir non recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
+	list := list.NewHelper(callback)
 	bucketName, directory := f.split(dir)
 	fs.Debugf(f, "listing: bucket : %v, directory: %v", bucketName, dir)
 	if bucketName == "" {
 		if directory != "" {
-			return nil, fs.ErrorListBucketRequired
+			return fs.ErrorListBucketRequired
+		}
+		entries, err := f.listBuckets(ctx)
+		if err != nil {
+			return err
+		}
+		for _, entry := range entries {
+			err = list.Add(entry)
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		err := f.listDir(ctx, bucketName, directory, f.rootDirectory, f.rootBucket == "", list.Add)
+		if err != nil {
+			return err
 		}
-		return f.listBuckets(ctx)
 	}
-	return f.listDir(ctx, bucketName, directory, f.rootDirectory, f.rootBucket == "")
+	return list.Flush()
 }

 // listFn is called from list to handle an object.
@@ -411,24 +443,24 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *objectst
 }

 // listDir lists a single directory
-func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
+func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
 	fn := func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
 		if err != nil {
 			return err
 		}
 		if entry != nil {
-			entries = append(entries, entry)
+			return callback(entry)
 		}
 		return nil
 	}
 	err = f.list(ctx, bucket, directory, prefix, addBucket, false, 0, fn)
 	if err != nil {
-		return nil, err
+		return err
 	}
 	// bucket must be present if listing succeeded
 	f.cache.MarkOK(bucket)
-	return entries, nil
+	return nil
 }

 // listBuckets returns all the buckets to out
@@ -765,6 +797,7 @@ var (
 	_ fs.Copier          = &Fs{}
 	_ fs.PutStreamer     = &Fs{}
 	_ fs.ListRer         = &Fs{}
+	_ fs.ListPer         = &Fs{}
 	_ fs.Commander       = &Fs{}
 	_ fs.CleanUpper      = &Fs{}
 	_ fs.OpenChunkWriter = &Fs{}
Some files were not shown because too many files have changed in this diff.