mirror of
https://github.com/rclone/rclone.git
synced 2025-12-06 00:03:32 +00:00
Compare commits
61 Commits
dependabot
...
v1.52-stab
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
729799af7c | ||
|
|
029f817ebc | ||
|
|
073e996cc2 | ||
|
|
fb2a4edd6f | ||
|
|
f949856170 | ||
|
|
c0e9584403 | ||
|
|
ace56b6e0f | ||
|
|
1d45c6eaaa | ||
|
|
e8222bbc95 | ||
|
|
815643264b | ||
|
|
4f8472664f | ||
|
|
3278b297cf | ||
|
|
5c0af62d0a | ||
|
|
6760ab6bb3 | ||
|
|
a9ffa1178c | ||
|
|
687865f760 | ||
|
|
d745bc1baa | ||
|
|
1f279f0419 | ||
|
|
d805e63f7d | ||
|
|
d8144a7e84 | ||
|
|
21f5b1076f | ||
|
|
a8cee91929 | ||
|
|
14edaefaa3 | ||
|
|
946b73c5fe | ||
|
|
07f7854a25 | ||
|
|
dfc7393e9c | ||
|
|
7f50143805 | ||
|
|
37308832b1 | ||
|
|
d5d124c9ee | ||
|
|
8b1f737271 | ||
|
|
ed013228ea | ||
|
|
a9b7974196 | ||
|
|
38453bd8ff | ||
|
|
51ba73afe4 | ||
|
|
b782a15f17 | ||
|
|
1d2edf0789 | ||
|
|
8367261a93 | ||
|
|
31dc78905e | ||
|
|
f9493ff1db | ||
|
|
a5cf531b94 | ||
|
|
04d59fe6bd | ||
|
|
cbc448039d | ||
|
|
7a8ade4949 | ||
|
|
8ab6e2bd1f | ||
|
|
d5c5811a54 | ||
|
|
43d21b7e64 | ||
|
|
0501b12e17 | ||
|
|
723c1dfdc5 | ||
|
|
d6894408b9 | ||
|
|
ac0e831090 | ||
|
|
b93ed77495 | ||
|
|
d0a3c40f9a | ||
|
|
6a9a571d87 | ||
|
|
6b68d1f79a | ||
|
|
088756ff8a | ||
|
|
4c1ffcc36d | ||
|
|
e5bd3f1c26 | ||
|
|
e03b4e2ce9 | ||
|
|
6054476c9c | ||
|
|
6cd8d3c4a0 | ||
|
|
78b0329928 |
4
.gitattributes
vendored
4
.gitattributes
vendored
@@ -1,7 +1,3 @@
|
|||||||
# Go writes go.mod and go.sum with lf even on windows
|
|
||||||
go.mod text eol=lf
|
|
||||||
go.sum text eol=lf
|
|
||||||
|
|
||||||
# Ignore generated files in GitHub language statistics and diffs
|
# Ignore generated files in GitHub language statistics and diffs
|
||||||
/MANUAL.* linguist-generated=true
|
/MANUAL.* linguist-generated=true
|
||||||
/rclone.1 linguist-generated=true
|
/rclone.1 linguist-generated=true
|
||||||
|
|||||||
4
.github/FUNDING.yml
vendored
Normal file
4
.github/FUNDING.yml
vendored
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
github: [ncw]
|
||||||
|
patreon: njcw
|
||||||
|
liberapay: ncw
|
||||||
|
custom: ["https://rclone.org/donate/"]
|
||||||
46
.github/ISSUE_TEMPLATE/Bug.md
vendored
46
.github/ISSUE_TEMPLATE/Bug.md
vendored
@@ -5,31 +5,19 @@ about: Report a problem with rclone
|
|||||||
|
|
||||||
<!--
|
<!--
|
||||||
|
|
||||||
We understand you are having a problem with rclone; we want to help you with that!
|
Welcome :-) We understand you are having a problem with rclone; we want to help you with that!
|
||||||
|
|
||||||
**STOP and READ**
|
If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:
|
||||||
**YOUR POST WILL BE REMOVED IF IT IS LOW QUALITY**:
|
|
||||||
Please show the effort you've put into solving the problem and please be specific.
|
|
||||||
People are volunteering their time to help! Low effort posts are not likely to get good answers!
|
|
||||||
|
|
||||||
If you think you might have found a bug, try to replicate it with the latest beta (or stable).
|
|
||||||
The update instructions are available at https://rclone.org/commands/rclone_selfupdate/
|
|
||||||
|
|
||||||
If you can still replicate it or just got a question then please use the rclone forum:
|
|
||||||
|
|
||||||
https://forum.rclone.org/
|
https://forum.rclone.org/
|
||||||
|
|
||||||
for a quick response instead of filing an issue on this repo.
|
instead of filing an issue for a quick response.
|
||||||
|
|
||||||
If nothing else helps, then please fill in the info below which helps us help you.
|
If you think you might have found a bug, please can you try to replicate it with the latest beta?
|
||||||
|
|
||||||
**DO NOT REDACT** any information except passwords/keys/personal info.
|
https://beta.rclone.org/
|
||||||
|
|
||||||
You should use 3 backticks to begin and end your paste to make it readable.
|
If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)
|
||||||
|
|
||||||
Make sure to include a log obtained with '-vv'.
|
|
||||||
|
|
||||||
You can also use '-vv --log-file bug.log' and a service such as https://pastebin.com or https://gist.github.com/
|
|
||||||
|
|
||||||
Thank you
|
Thank you
|
||||||
|
|
||||||
@@ -37,10 +25,6 @@ The Rclone Developers
|
|||||||
|
|
||||||
-->
|
-->
|
||||||
|
|
||||||
#### The associated forum post URL from `https://forum.rclone.org`
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
#### What is the problem you are having with rclone?
|
#### What is the problem you are having with rclone?
|
||||||
|
|
||||||
|
|
||||||
@@ -49,26 +33,18 @@ The Rclone Developers
|
|||||||
|
|
||||||
|
|
||||||
|
|
||||||
#### Which OS you are using and how many bits (e.g. Windows 7, 64 bit)
|
#### Which OS you are using and how many bits (eg Windows 7, 64 bit)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
#### Which cloud storage system are you using? (e.g. Google Drive)
|
#### Which cloud storage system are you using? (eg Google Drive)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
#### The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
|
#### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
#### A log from the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
|
#### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
<!--- Please keep the note below for others who read your bug report. -->
|
|
||||||
|
|
||||||
#### How to use GitHub
|
|
||||||
|
|
||||||
* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
|
|
||||||
* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
|
|
||||||
* Subscribe to receive notifications on status change and new comments.
|
|
||||||
|
|||||||
23
.github/ISSUE_TEMPLATE/Feature.md
vendored
23
.github/ISSUE_TEMPLATE/Feature.md
vendored
@@ -7,16 +7,12 @@ about: Suggest a new feature or enhancement for rclone
|
|||||||
|
|
||||||
Welcome :-)
|
Welcome :-)
|
||||||
|
|
||||||
So you've got an idea to improve rclone? We love that!
|
So you've got an idea to improve rclone? We love that! You'll be glad to hear we've incorporated hundreds of ideas from contributors already.
|
||||||
You'll be glad to hear we've incorporated hundreds of ideas from contributors already.
|
|
||||||
|
|
||||||
Probably the latest beta (or stable) release has your feature, so try to update your rclone.
|
Here is a checklist of things to do:
|
||||||
The update instructions are available at https://rclone.org/commands/rclone_selfupdate/
|
|
||||||
|
|
||||||
If it still isn't there, here is a checklist of things to do:
|
1. Please search the old issues first for your idea and +1 or comment on an existing issue if possible.
|
||||||
|
2. Discuss on the forum first: https://forum.rclone.org/
|
||||||
1. Search the old issues for your idea and +1 or comment on an existing issue if possible.
|
|
||||||
2. Discuss on the forum: https://forum.rclone.org/
|
|
||||||
3. Make a feature request issue (this is the right place!).
|
3. Make a feature request issue (this is the right place!).
|
||||||
4. Be prepared to get involved making the feature :-)
|
4. Be prepared to get involved making the feature :-)
|
||||||
|
|
||||||
@@ -26,9 +22,6 @@ The Rclone Developers
|
|||||||
|
|
||||||
-->
|
-->
|
||||||
|
|
||||||
#### The associated forum post URL from `https://forum.rclone.org`
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
#### What is your current rclone version (output from `rclone version`)?
|
#### What is your current rclone version (output from `rclone version`)?
|
||||||
|
|
||||||
@@ -41,11 +34,3 @@ The Rclone Developers
|
|||||||
#### How do you think rclone should be changed to solve that?
|
#### How do you think rclone should be changed to solve that?
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
<!--- Please keep the note below for others who read your feature request. -->
|
|
||||||
|
|
||||||
#### How to use GitHub
|
|
||||||
|
|
||||||
* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
|
|
||||||
* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
|
|
||||||
* Subscribe to receive notifications on status change and new comments.
|
|
||||||
|
|||||||
2
.github/PULL_REQUEST_TEMPLATE.md
vendored
2
.github/PULL_REQUEST_TEMPLATE.md
vendored
@@ -22,7 +22,7 @@ Link issues and relevant forum posts here.
|
|||||||
|
|
||||||
#### Checklist
|
#### Checklist
|
||||||
|
|
||||||
- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-new-feature-or-bug-fix).
|
- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
|
||||||
- [ ] I have added tests for all changes in this PR if appropriate.
|
- [ ] I have added tests for all changes in this PR if appropriate.
|
||||||
- [ ] I have added documentation for the changes if appropriate.
|
- [ ] I have added documentation for the changes if appropriate.
|
||||||
- [ ] All commit messages are in [house style](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#commit-messages).
|
- [ ] All commit messages are in [house style](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#commit-messages).
|
||||||
|
|||||||
6
.github/dependabot.yml
vendored
6
.github/dependabot.yml
vendored
@@ -1,6 +0,0 @@
|
|||||||
version: 2
|
|
||||||
updates:
|
|
||||||
- package-ecosystem: "github-actions"
|
|
||||||
directory: "/"
|
|
||||||
schedule:
|
|
||||||
interval: "daily"
|
|
||||||
359
.github/workflows/build.yml
vendored
359
.github/workflows/build.yml
vendored
@@ -8,141 +8,146 @@ name: build
|
|||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- '**'
|
- '*'
|
||||||
tags:
|
tags:
|
||||||
- '**'
|
- '*'
|
||||||
pull_request:
|
pull_request:
|
||||||
workflow_dispatch:
|
|
||||||
inputs:
|
|
||||||
manual:
|
|
||||||
description: Manual run (bypass default conditions)
|
|
||||||
type: boolean
|
|
||||||
default: true
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
build:
|
||||||
if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
|
|
||||||
timeout-minutes: 60
|
timeout-minutes: 60
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
shell: bash
|
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.24']
|
job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'modules_race', 'go1.11', 'go1.12', 'go1.13']
|
||||||
|
|
||||||
include:
|
include:
|
||||||
- job_name: linux
|
- job_name: linux
|
||||||
os: ubuntu-latest
|
os: ubuntu-latest
|
||||||
go: '>=1.25.0-rc.1'
|
go: '1.14.x'
|
||||||
|
modules: 'off'
|
||||||
gotags: cmount
|
gotags: cmount
|
||||||
build_flags: '-include "^linux/"'
|
build_flags: '-include "^linux/"'
|
||||||
check: true
|
check: true
|
||||||
quicktest: true
|
quicktest: true
|
||||||
racequicktest: true
|
|
||||||
librclonetest: true
|
|
||||||
deploy: true
|
deploy: true
|
||||||
|
|
||||||
- job_name: linux_386
|
- job_name: mac
|
||||||
os: ubuntu-latest
|
os: macOS-latest
|
||||||
go: '>=1.25.0-rc.1'
|
go: '1.14.x'
|
||||||
goarch: 386
|
modules: 'off'
|
||||||
gotags: cmount
|
gotags: '' # cmount doesn't work on osx travis for some reason
|
||||||
quicktest: true
|
|
||||||
|
|
||||||
- job_name: mac_amd64
|
|
||||||
os: macos-latest
|
|
||||||
go: '>=1.25.0-rc.1'
|
|
||||||
gotags: 'cmount'
|
|
||||||
build_flags: '-include "^darwin/amd64" -cgo'
|
build_flags: '-include "^darwin/amd64" -cgo'
|
||||||
quicktest: true
|
quicktest: true
|
||||||
racequicktest: true
|
racequicktest: true
|
||||||
deploy: true
|
deploy: true
|
||||||
|
|
||||||
- job_name: mac_arm64
|
- job_name: windows_amd64
|
||||||
os: macos-latest
|
os: windows-latest
|
||||||
go: '>=1.25.0-rc.1'
|
go: '1.14.x'
|
||||||
gotags: 'cmount'
|
modules: 'off'
|
||||||
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
|
gotags: cmount
|
||||||
|
build_flags: '-include "^windows/amd64" -cgo'
|
||||||
|
quicktest: true
|
||||||
|
racequicktest: true
|
||||||
deploy: true
|
deploy: true
|
||||||
|
|
||||||
- job_name: windows
|
- job_name: windows_386
|
||||||
os: windows-latest
|
os: windows-latest
|
||||||
go: '>=1.25.0-rc.1'
|
go: '1.14.x'
|
||||||
|
modules: 'off'
|
||||||
gotags: cmount
|
gotags: cmount
|
||||||
cgo: '0'
|
goarch: '386'
|
||||||
build_flags: '-include "^windows/"'
|
cgo: '1'
|
||||||
build_args: '-buildmode exe'
|
build_flags: '-include "^windows/386" -cgo'
|
||||||
quicktest: true
|
quicktest: true
|
||||||
deploy: true
|
deploy: true
|
||||||
|
|
||||||
- job_name: other_os
|
- job_name: other_os
|
||||||
os: ubuntu-latest
|
os: ubuntu-latest
|
||||||
go: '>=1.25.0-rc.1'
|
go: '1.14.x'
|
||||||
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
|
modules: 'off'
|
||||||
|
build_flags: '-exclude "^(windows/|darwin/amd64|linux/)"'
|
||||||
compile_all: true
|
compile_all: true
|
||||||
deploy: true
|
deploy: true
|
||||||
|
|
||||||
- job_name: go1.24
|
- job_name: modules_race
|
||||||
os: ubuntu-latest
|
os: ubuntu-latest
|
||||||
go: '1.24'
|
go: '1.14.x'
|
||||||
|
modules: 'on'
|
||||||
quicktest: true
|
quicktest: true
|
||||||
racequicktest: true
|
racequicktest: true
|
||||||
|
|
||||||
|
- job_name: go1.11
|
||||||
|
os: ubuntu-latest
|
||||||
|
go: '1.11.x'
|
||||||
|
modules: 'off'
|
||||||
|
quicktest: true
|
||||||
|
|
||||||
|
- job_name: go1.12
|
||||||
|
os: ubuntu-latest
|
||||||
|
go: '1.12.x'
|
||||||
|
modules: 'off'
|
||||||
|
quicktest: true
|
||||||
|
|
||||||
|
- job_name: go1.13
|
||||||
|
os: ubuntu-latest
|
||||||
|
go: '1.13.x'
|
||||||
|
modules: 'off'
|
||||||
|
quicktest: true
|
||||||
|
|
||||||
name: ${{ matrix.job_name }}
|
name: ${{ matrix.job_name }}
|
||||||
|
|
||||||
runs-on: ${{ matrix.os }}
|
runs-on: ${{ matrix.os }}
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v5
|
uses: actions/checkout@v1
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
# Checkout into a fixed path to avoid import path problems on go < 1.11
|
||||||
|
path: ./src/github.com/rclone/rclone
|
||||||
|
|
||||||
- name: Install Go
|
- name: Install Go
|
||||||
uses: actions/setup-go@v6
|
uses: actions/setup-go@v1
|
||||||
with:
|
with:
|
||||||
go-version: ${{ matrix.go }}
|
go-version: ${{ matrix.go }}
|
||||||
check-latest: true
|
|
||||||
|
|
||||||
- name: Set environment variables
|
- name: Set environment variables
|
||||||
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
|
echo '::set-env name=GOPATH::${{ runner.workspace }}'
|
||||||
echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
|
echo '::add-path::${{ runner.workspace }}/bin'
|
||||||
echo 'BUILD_ARGS=${{ matrix.build_args }}' >> $GITHUB_ENV
|
echo '::set-env name=GO111MODULE::${{ matrix.modules }}'
|
||||||
if [[ "${{ matrix.goarch }}" != "" ]]; then echo 'GOARCH=${{ matrix.goarch }}' >> $GITHUB_ENV ; fi
|
echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
|
||||||
if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi
|
echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
|
||||||
|
if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
|
||||||
|
if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi
|
||||||
|
|
||||||
- name: Install Libraries on Linux
|
- name: Install Libraries on Linux
|
||||||
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
sudo modprobe fuse
|
sudo modprobe fuse
|
||||||
sudo chmod 666 /dev/fuse
|
sudo chmod 666 /dev/fuse
|
||||||
sudo chown root:$USER /etc/fuse.conf
|
sudo chown root:$USER /etc/fuse.conf
|
||||||
sudo apt-get update
|
sudo apt-get install fuse libfuse-dev rpm pkg-config
|
||||||
sudo apt-get install -y fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
|
|
||||||
if: matrix.os == 'ubuntu-latest'
|
if: matrix.os == 'ubuntu-latest'
|
||||||
|
|
||||||
- name: Install Libraries on macOS
|
- name: Install Libraries on macOS
|
||||||
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
# https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788
|
|
||||||
# https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008
|
|
||||||
unset HOMEBREW_NO_INSTALL_FROM_API
|
|
||||||
brew untap --force homebrew/core
|
|
||||||
brew untap --force homebrew/cask
|
|
||||||
brew update
|
brew update
|
||||||
brew install --cask macfuse
|
brew cask install osxfuse
|
||||||
brew install git-annex git-annex-remote-rclone
|
if: matrix.os == 'macOS-latest'
|
||||||
if: matrix.os == 'macos-latest'
|
|
||||||
|
|
||||||
- name: Install Libraries on Windows
|
- name: Install Libraries on Windows
|
||||||
shell: powershell
|
shell: powershell
|
||||||
run: |
|
run: |
|
||||||
$ProgressPreference = 'SilentlyContinue'
|
$ProgressPreference = 'SilentlyContinue'
|
||||||
choco install -y winfsp zip
|
choco install -y winfsp zip
|
||||||
echo "CPATH=C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
|
Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
|
||||||
if ($env:GOARCH -eq "386") {
|
if ($env:GOARCH -eq "386") {
|
||||||
choco install -y mingw --forcex86 --force
|
choco install -y mingw --forcex86 --force
|
||||||
echo "C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
|
Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
|
||||||
}
|
}
|
||||||
# Copy mingw32-make.exe to make.exe so the same command line
|
# Copy mingw32-make.exe to make.exe so the same command line
|
||||||
# can be used on Windows as on macOS and Linux
|
# can be used on Windows as on macOS and Linux
|
||||||
@@ -151,6 +156,7 @@ jobs:
|
|||||||
if: matrix.os == 'windows-latest'
|
if: matrix.os == 'windows-latest'
|
||||||
|
|
||||||
- name: Print Go version and environment
|
- name: Print Go version and environment
|
||||||
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
printf "Using go at: $(which go)\n"
|
printf "Using go at: $(which go)\n"
|
||||||
printf "Go version: $(go version)\n"
|
printf "Go version: $(go version)\n"
|
||||||
@@ -161,233 +167,88 @@ jobs:
|
|||||||
printf "\n\nSystem environment:\n\n"
|
printf "\n\nSystem environment:\n\n"
|
||||||
env
|
env
|
||||||
|
|
||||||
- name: Build rclone
|
- name: Run tests
|
||||||
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
make
|
make
|
||||||
|
|
||||||
- name: Rclone version
|
|
||||||
run: |
|
|
||||||
rclone version
|
|
||||||
|
|
||||||
- name: Run tests
|
|
||||||
run: |
|
|
||||||
make quicktest
|
make quicktest
|
||||||
if: matrix.quicktest
|
if: matrix.quicktest
|
||||||
|
|
||||||
- name: Race test
|
- name: Race test
|
||||||
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
make racequicktest
|
make racequicktest
|
||||||
if: matrix.racequicktest
|
if: matrix.racequicktest
|
||||||
|
|
||||||
- name: Run librclone tests
|
- name: Code quality test
|
||||||
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
make -C librclone/ctest test
|
make build_dep
|
||||||
make -C librclone/ctest clean
|
make check
|
||||||
librclone/python/test_rclone.py
|
if: matrix.check
|
||||||
if: matrix.librclonetest
|
|
||||||
|
|
||||||
- name: Compile all architectures test
|
- name: Compile all architectures test
|
||||||
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
make
|
make
|
||||||
make compile_all
|
make compile_all
|
||||||
if: matrix.compile_all
|
if: matrix.compile_all
|
||||||
|
|
||||||
- name: Deploy built binaries
|
- name: Deploy built binaries
|
||||||
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi
|
if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep ; fi
|
||||||
make ci_beta
|
make travis_beta
|
||||||
env:
|
env:
|
||||||
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
|
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
|
||||||
# working-directory: '$(modulePath)'
|
# working-directory: '$(modulePath)'
|
||||||
# Deploy binaries if enabled in config && not a PR && not a fork
|
# Deploy binaries if enabled in config && not a PR && not a fork
|
||||||
if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
|
if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
|
||||||
|
|
||||||
lint:
|
xgo:
|
||||||
if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
|
timeout-minutes: 60
|
||||||
timeout-minutes: 30
|
name: "xgo cross compile"
|
||||||
name: "lint"
|
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Get runner parameters
|
|
||||||
id: get-runner-parameters
|
|
||||||
run: |
|
|
||||||
echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT
|
|
||||||
echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT
|
|
||||||
|
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v5
|
uses: actions/checkout@v1
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
# Checkout into a fixed path to avoid import path problems on go < 1.11
|
||||||
|
path: ./src/github.com/rclone/rclone
|
||||||
|
|
||||||
- name: Install Go
|
- name: Set environment variables
|
||||||
id: setup-go
|
shell: bash
|
||||||
uses: actions/setup-go@v6
|
|
||||||
with:
|
|
||||||
go-version: '>=1.24.0-rc.1'
|
|
||||||
check-latest: true
|
|
||||||
cache: false
|
|
||||||
|
|
||||||
- name: Cache
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: |
|
|
||||||
~/go/pkg/mod
|
|
||||||
~/.cache/go-build
|
|
||||||
~/.cache/golangci-lint
|
|
||||||
key: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-${{ hashFiles('go.sum') }}
|
|
||||||
restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-
|
|
||||||
|
|
||||||
- name: Code quality test (Linux)
|
|
||||||
uses: golangci/golangci-lint-action@v9
|
|
||||||
with:
|
|
||||||
version: latest
|
|
||||||
skip-cache: true
|
|
||||||
|
|
||||||
- name: Code quality test (Windows)
|
|
||||||
uses: golangci/golangci-lint-action@v9
|
|
||||||
env:
|
|
||||||
GOOS: "windows"
|
|
||||||
with:
|
|
||||||
version: latest
|
|
||||||
skip-cache: true
|
|
||||||
|
|
||||||
- name: Code quality test (macOS)
|
|
||||||
uses: golangci/golangci-lint-action@v9
|
|
||||||
env:
|
|
||||||
GOOS: "darwin"
|
|
||||||
with:
|
|
||||||
version: latest
|
|
||||||
skip-cache: true
|
|
||||||
|
|
||||||
- name: Code quality test (FreeBSD)
|
|
||||||
uses: golangci/golangci-lint-action@v9
|
|
||||||
env:
|
|
||||||
GOOS: "freebsd"
|
|
||||||
with:
|
|
||||||
version: latest
|
|
||||||
skip-cache: true
|
|
||||||
|
|
||||||
- name: Code quality test (OpenBSD)
|
|
||||||
uses: golangci/golangci-lint-action@v9
|
|
||||||
env:
|
|
||||||
GOOS: "openbsd"
|
|
||||||
with:
|
|
||||||
version: latest
|
|
||||||
skip-cache: true
|
|
||||||
|
|
||||||
- name: Install govulncheck
|
|
||||||
run: go install golang.org/x/vuln/cmd/govulncheck@latest
|
|
||||||
|
|
||||||
- name: Scan for vulnerabilities
|
|
||||||
run: govulncheck ./...
|
|
||||||
|
|
||||||
- name: Check Markdown format
|
|
||||||
uses: DavidAnson/markdownlint-cli2-action@v21
|
|
||||||
with:
|
|
||||||
globs: |
|
|
||||||
CONTRIBUTING.md
|
|
||||||
MAINTAINERS.md
|
|
||||||
README.md
|
|
||||||
RELEASE.md
|
|
||||||
CODE_OF_CONDUCT.md
|
|
||||||
librclone\README.md
|
|
||||||
backend\s3\README.md
|
|
||||||
docs/content/{_index,authors,bugs,changelog,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md
|
|
||||||
|
|
||||||
- name: Scan edits of autogenerated files
|
|
||||||
run: bin/check_autogenerated_edits.py 'origin/${{ github.base_ref }}'
|
|
||||||
if: github.event_name == 'pull_request'
|
|
||||||
|
|
||||||
android:
|
|
||||||
if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
|
|
||||||
timeout-minutes: 30
|
|
||||||
name: "android-all"
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v5
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
# Upgrade together with NDK version
|
|
||||||
- name: Set up Go
|
|
||||||
uses: actions/setup-go@v6
|
|
||||||
with:
|
|
||||||
go-version: '>=1.25.0-rc.1'
|
|
||||||
|
|
||||||
- name: Set global environment variables
|
|
||||||
run: |
|
run: |
|
||||||
echo "VERSION=$(make version)" >> $GITHUB_ENV
|
echo '::set-env name=GOPATH::${{ runner.workspace }}'
|
||||||
|
echo '::add-path::${{ runner.workspace }}/bin'
|
||||||
|
|
||||||
- name: build native rclone
|
- name: Cross-compile rclone
|
||||||
run: |
|
run: |
|
||||||
make
|
docker pull billziss/xgo-cgofuse
|
||||||
|
GO111MODULE=off go get -v github.com/karalabe/xgo # don't add to go.mod
|
||||||
|
xgo \
|
||||||
|
-image=billziss/xgo-cgofuse \
|
||||||
|
-targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
|
||||||
|
-tags cmount \
|
||||||
|
-dest build \
|
||||||
|
.
|
||||||
|
xgo \
|
||||||
|
-image=billziss/xgo-cgofuse \
|
||||||
|
-targets=android/*,ios/* \
|
||||||
|
-dest build \
|
||||||
|
.
|
||||||
|
|
||||||
- name: install gomobile
|
- name: Build rclone
|
||||||
run: |
|
run: |
|
||||||
go install golang.org/x/mobile/cmd/gobind@latest
|
docker pull golang
|
||||||
go install golang.org/x/mobile/cmd/gomobile@latest
|
docker run --rm -v "$PWD":/usr/src/rclone -w /usr/src/rclone golang go build -mod=vendor -v
|
||||||
env PATH=$PATH:~/go/bin gomobile init
|
|
||||||
echo "RCLONE_NDK_VERSION=21" >> $GITHUB_ENV
|
|
||||||
|
|
||||||
- name: arm-v7a gomobile build
|
|
||||||
run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
|
|
||||||
|
|
||||||
- name: arm-v7a Set environment variables
|
|
||||||
run: |
|
|
||||||
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
|
|
||||||
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
|
|
||||||
echo 'GOOS=android' >> $GITHUB_ENV
|
|
||||||
echo 'GOARCH=arm' >> $GITHUB_ENV
|
|
||||||
echo 'GOARM=7' >> $GITHUB_ENV
|
|
||||||
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
|
|
||||||
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
|
|
||||||
|
|
||||||
- name: arm-v7a build
|
|
||||||
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a .
|
|
||||||
|
|
||||||
- name: arm64-v8a Set environment variables
|
|
||||||
run: |
|
|
||||||
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
|
|
||||||
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
|
|
||||||
echo 'GOOS=android' >> $GITHUB_ENV
|
|
||||||
echo 'GOARCH=arm64' >> $GITHUB_ENV
|
|
||||||
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
|
|
||||||
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
|
|
||||||
|
|
||||||
- name: arm64-v8a build
|
|
||||||
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .
|
|
||||||
|
|
||||||
- name: x86 Set environment variables
|
|
||||||
run: |
|
|
||||||
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
|
|
||||||
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
|
|
||||||
echo 'GOOS=android' >> $GITHUB_ENV
|
|
||||||
echo 'GOARCH=386' >> $GITHUB_ENV
|
|
||||||
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
|
|
||||||
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
|
|
||||||
|
|
||||||
- name: x86 build
|
|
||||||
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 .
|
|
||||||
|
|
||||||
- name: x64 Set environment variables
|
|
||||||
run: |
|
|
||||||
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
|
|
||||||
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
|
|
||||||
echo 'GOOS=android' >> $GITHUB_ENV
|
|
||||||
echo 'GOARCH=amd64' >> $GITHUB_ENV
|
|
||||||
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
|
|
||||||
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
|
|
||||||
|
|
||||||
- name: x64 build
|
|
||||||
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x64 .
|
|
||||||
|
|
||||||
- name: Upload artifacts
|
- name: Upload artifacts
|
||||||
run: |
|
run: |
|
||||||
make ci_upload
|
make circleci_upload
|
||||||
env:
|
env:
|
||||||
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
|
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
|
||||||
# Upload artifacts if not a PR && not a fork
|
# Upload artifacts if not a PR && not a fork
|
||||||
if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone'
|
if: github.head_ref == '' && github.repository == 'rclone/rclone'
|
||||||
|
|||||||
311
.github/workflows/build_publish_docker_image.yml
vendored
311
.github/workflows/build_publish_docker_image.yml
vendored
@@ -1,294 +1,25 @@
|
|||||||
---
|
name: Docker beta build
|
||||||
# Github Actions release for rclone
|
|
||||||
# -*- compile-command: "yamllint -f parsable build_publish_docker_image.yml" -*-
|
|
||||||
|
|
||||||
name: Build & Push Docker Images
|
|
||||||
|
|
||||||
# Trigger the workflow on push or pull request
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- '**'
|
- master
|
||||||
tags:
|
|
||||||
- '**'
|
|
||||||
workflow_dispatch:
|
|
||||||
inputs:
|
|
||||||
manual:
|
|
||||||
description: Manual run (bypass default conditions)
|
|
||||||
type: boolean
|
|
||||||
default: true
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build-image:
|
build:
|
||||||
if: inputs.manual || (github.repository == 'rclone/rclone' && github.event_name != 'pull_request')
|
runs-on: ubuntu-latest
|
||||||
timeout-minutes: 60
|
name: Build image job
|
||||||
strategy:
|
steps:
|
||||||
fail-fast: false
|
- name: Checkout master
|
||||||
matrix:
|
uses: actions/checkout@v2
|
||||||
include:
|
with:
|
||||||
- platform: linux/amd64
|
fetch-depth: 0
|
||||||
runs-on: ubuntu-24.04
|
- name: Build and publish image
|
||||||
- platform: linux/386
|
uses: ilteoood/docker_buildx@439099796bfc03dd9cedeb72a0c7cb92be5cc92c
|
||||||
runs-on: ubuntu-24.04
|
with:
|
||||||
- platform: linux/arm64
|
tag: beta
|
||||||
runs-on: ubuntu-24.04-arm
|
imageName: rclone/rclone
|
||||||
- platform: linux/arm/v7
|
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
|
||||||
runs-on: ubuntu-24.04-arm
|
publish: true
|
||||||
- platform: linux/arm/v6
|
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
|
||||||
runs-on: ubuntu-24.04-arm
|
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
|
||||||
|
|
||||||
name: Build Docker Image for ${{ matrix.platform }}
|
|
||||||
runs-on: ${{ matrix.runs-on }}
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Free Space
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
df -h .
|
|
||||||
# Remove android SDK
|
|
||||||
sudo rm -rf /usr/local/lib/android || true
|
|
||||||
# Remove .net runtime
|
|
||||||
sudo rm -rf /usr/share/dotnet || true
|
|
||||||
df -h .
|
|
||||||
|
|
||||||
- name: Checkout Repository
|
|
||||||
uses: actions/checkout@v5
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Set REPO_NAME Variable
|
|
||||||
run: |
|
|
||||||
echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}
|
|
||||||
|
|
||||||
- name: Set PLATFORM Variable
|
|
||||||
run: |
|
|
||||||
platform=${{ matrix.platform }}
|
|
||||||
echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV
|
|
||||||
|
|
||||||
- name: Set CACHE_NAME Variable
|
|
||||||
shell: python
|
|
||||||
run: |
|
|
||||||
import os, re
|
|
||||||
|
|
||||||
def slugify(input_string, max_length=63):
|
|
||||||
slug = input_string.lower()
|
|
||||||
slug = re.sub(r'[^a-z0-9 -]', ' ', slug)
|
|
||||||
slug = slug.strip()
|
|
||||||
slug = re.sub(r'\s+', '-', slug)
|
|
||||||
slug = re.sub(r'-+', '-', slug)
|
|
||||||
slug = slug[:max_length]
|
|
||||||
slug = re.sub(r'[-]+$', '', slug)
|
|
||||||
return slug
|
|
||||||
|
|
||||||
ref_name_slug = "cache"
|
|
||||||
|
|
||||||
if os.environ.get("GITHUB_REF_NAME") and os.environ['GITHUB_EVENT_NAME'] == "pull_request":
|
|
||||||
ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME'])
|
|
||||||
|
|
||||||
with open(os.environ['GITHUB_ENV'], 'a') as env:
|
|
||||||
env.write(f"CACHE_NAME={ref_name_slug}\n")
|
|
||||||
|
|
||||||
- name: Get ImageOS
|
|
||||||
# There's no way around this, because "ImageOS" is only available to
|
|
||||||
# processes, but the setup-go action uses it in its key.
|
|
||||||
id: imageos
|
|
||||||
uses: actions/github-script@v8
|
|
||||||
with:
|
|
||||||
result-encoding: string
|
|
||||||
script: |
|
|
||||||
return process.env.ImageOS
|
|
||||||
|
|
||||||
- name: Extract Metadata (tags, labels) for Docker
|
|
||||||
id: meta
|
|
||||||
uses: docker/metadata-action@v5
|
|
||||||
env:
|
|
||||||
DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,manifest-descriptor # Important for digest annotation (used by Github packages)
|
|
||||||
with:
|
|
||||||
images: |
|
|
||||||
ghcr.io/${{ env.REPO_NAME }}
|
|
||||||
labels: |
|
|
||||||
org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
|
|
||||||
org.opencontainers.image.vendor=${{ github.repository_owner }}
|
|
||||||
org.opencontainers.image.authors=rclone <https://github.com/rclone>
|
|
||||||
org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
|
|
||||||
org.opencontainers.image.revision=${{ github.sha }}
|
|
||||||
tags: |
|
|
||||||
type=sha
|
|
||||||
type=ref,event=pr
|
|
||||||
type=ref,event=branch
|
|
||||||
type=semver,pattern={{version}}
|
|
||||||
type=semver,pattern={{major}}
|
|
||||||
type=semver,pattern={{major}}.{{minor}}
|
|
||||||
type=raw,value=beta,enable={{is_default_branch}}
|
|
||||||
|
|
||||||
- name: Setup QEMU
|
|
||||||
uses: docker/setup-qemu-action@v3
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v3
|
|
||||||
|
|
||||||
- name: Load Go Build Cache for Docker
|
|
||||||
id: go-cache
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
|
|
||||||
restore-keys: |
|
|
||||||
${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
|
|
||||||
# Cache only the go builds, the module download is cached via the docker layer caching
|
|
||||||
path: |
|
|
||||||
go-build-cache
|
|
||||||
|
|
||||||
- name: Inject Go Build Cache into Docker
|
|
||||||
uses: reproducible-containers/buildkit-cache-dance@v3
|
|
||||||
with:
|
|
||||||
cache-map: |
|
|
||||||
{
|
|
||||||
"go-build-cache": "/root/.cache/go-build"
|
|
||||||
}
|
|
||||||
skip-extraction: ${{ steps.go-cache.outputs.cache-hit }}
|
|
||||||
|
|
||||||
- name: Login to GitHub Container Registry
|
|
||||||
uses: docker/login-action@v3
|
|
||||||
with:
|
|
||||||
registry: ghcr.io
|
|
||||||
# This is the user that triggered the Workflow. In this case, it will
|
|
||||||
# either be the user whom created the Release or manually triggered
|
|
||||||
# the workflow_dispatch.
|
|
||||||
username: ${{ github.actor }}
|
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Build and Publish Image Digest
|
|
||||||
id: build
|
|
||||||
uses: docker/build-push-action@v6
|
|
||||||
with:
|
|
||||||
file: Dockerfile
|
|
||||||
context: .
|
|
||||||
provenance: false
|
|
||||||
# don't specify 'tags' here (error "get can't push tagged ref by digest")
|
|
||||||
# tags: ${{ steps.meta.outputs.tags }}
|
|
||||||
labels: ${{ steps.meta.outputs.labels }}
|
|
||||||
annotations: ${{ steps.meta.outputs.annotations }}
|
|
||||||
platforms: ${{ matrix.platform }}
|
|
||||||
outputs: |
|
|
||||||
type=image,name=ghcr.io/${{ env.REPO_NAME }},push-by-digest=true,name-canonical=true,push=true
|
|
||||||
cache-from: |
|
|
||||||
type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
|
|
||||||
cache-to: |
|
|
||||||
type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }},image-manifest=true,mode=max,compression=zstd
|
|
||||||
|
|
||||||
- name: Export Image Digest
|
|
||||||
run: |
|
|
||||||
mkdir -p /tmp/digests
|
|
||||||
digest="${{ steps.build.outputs.digest }}"
|
|
||||||
touch "/tmp/digests/${digest#sha256:}"
|
|
||||||
|
|
||||||
- name: Upload Image Digest
|
|
||||||
uses: actions/upload-artifact@v5
|
|
||||||
with:
|
|
||||||
name: digests-${{ env.PLATFORM }}
|
|
||||||
path: /tmp/digests/*
|
|
||||||
retention-days: 1
|
|
||||||
if-no-files-found: error
|
|
||||||
|
|
||||||
merge-image:
|
|
||||||
name: Merge & Push Final Docker Image
|
|
||||||
runs-on: ubuntu-24.04
|
|
||||||
needs:
|
|
||||||
- build-image
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Download Image Digests
|
|
||||||
uses: actions/download-artifact@v6
|
|
||||||
with:
|
|
||||||
path: /tmp/digests
|
|
||||||
pattern: digests-*
|
|
||||||
merge-multiple: true
|
|
||||||
|
|
||||||
- name: Set REPO_NAME Variable
|
|
||||||
run: |
|
|
||||||
echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}
|
|
||||||
|
|
||||||
- name: Extract Metadata (tags, labels) for Docker
|
|
||||||
id: meta
|
|
||||||
uses: docker/metadata-action@v5
|
|
||||||
env:
|
|
||||||
DOCKER_METADATA_ANNOTATIONS_LEVELS: index
|
|
||||||
with:
|
|
||||||
images: |
|
|
||||||
${{ env.REPO_NAME }}
|
|
||||||
ghcr.io/${{ env.REPO_NAME }}
|
|
||||||
labels: |
|
|
||||||
org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
|
|
||||||
org.opencontainers.image.vendor=${{ github.repository_owner }}
|
|
||||||
org.opencontainers.image.authors=rclone <https://github.com/rclone>
|
|
||||||
org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
|
|
||||||
org.opencontainers.image.revision=${{ github.sha }}
|
|
||||||
tags: |
|
|
||||||
type=sha
|
|
||||||
type=ref,event=pr
|
|
||||||
type=ref,event=branch
|
|
||||||
type=semver,pattern={{version}}
|
|
||||||
type=semver,pattern={{major}}
|
|
||||||
type=semver,pattern={{major}}.{{minor}}
|
|
||||||
type=raw,value=beta,enable={{is_default_branch}}
|
|
||||||
|
|
||||||
- name: Extract Tags
|
|
||||||
shell: python
|
|
||||||
run: |
|
|
||||||
import json, os
|
|
||||||
|
|
||||||
metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
|
|
||||||
metadata = json.loads(metadata_json)
|
|
||||||
|
|
||||||
tags = [f"--tag '{tag}'" for tag in metadata["tags"]]
|
|
||||||
tags_string = " ".join(tags)
|
|
||||||
|
|
||||||
with open(os.environ['GITHUB_ENV'], 'a') as env:
|
|
||||||
env.write(f"TAGS={tags_string}\n")
|
|
||||||
|
|
||||||
- name: Extract Annotations
|
|
||||||
shell: python
|
|
||||||
run: |
|
|
||||||
import json, os
|
|
||||||
|
|
||||||
metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
|
|
||||||
metadata = json.loads(metadata_json)
|
|
||||||
|
|
||||||
annotations = [f"--annotation '{annotation}'" for annotation in metadata["annotations"]]
|
|
||||||
annotations_string = " ".join(annotations)
|
|
||||||
|
|
||||||
with open(os.environ['GITHUB_ENV'], 'a') as env:
|
|
||||||
env.write(f"ANNOTATIONS={annotations_string}\n")
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v3
|
|
||||||
|
|
||||||
- name: Login to Docker Hub
|
|
||||||
uses: docker/login-action@v3
|
|
||||||
with:
|
|
||||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
|
||||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Login to GitHub Container Registry
|
|
||||||
uses: docker/login-action@v3
|
|
||||||
with:
|
|
||||||
registry: ghcr.io
|
|
||||||
# This is the user that triggered the Workflow. In this case, it will
|
|
||||||
# either be the user whom created the Release or manually triggered
|
|
||||||
# the workflow_dispatch.
|
|
||||||
username: ${{ github.actor }}
|
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Create & Push Manifest List
|
|
||||||
working-directory: /tmp/digests
|
|
||||||
run: |
|
|
||||||
docker buildx imagetools create \
|
|
||||||
${{ env.TAGS }} \
|
|
||||||
${{ env.ANNOTATIONS }} \
|
|
||||||
$(printf 'ghcr.io/${{ env.REPO_NAME }}@sha256:%s ' *)
|
|
||||||
|
|
||||||
- name: Inspect and Run Multi-Platform Image
|
|
||||||
run: |
|
|
||||||
docker buildx imagetools inspect --raw ${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
|
|
||||||
docker buildx imagetools inspect --raw ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
|
|
||||||
docker run --rm ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }} version
|
|
||||||
|
|||||||
@@ -1,49 +0,0 @@
|
|||||||
---
|
|
||||||
# Github Actions release for rclone
|
|
||||||
# -*- compile-command: "yamllint -f parsable build_publish_docker_plugin.yml" -*-
|
|
||||||
|
|
||||||
name: Release Build for Docker Plugin
|
|
||||||
|
|
||||||
on:
|
|
||||||
release:
|
|
||||||
types: [published]
|
|
||||||
workflow_dispatch:
|
|
||||||
inputs:
|
|
||||||
manual:
|
|
||||||
description: Manual run (bypass default conditions)
|
|
||||||
type: boolean
|
|
||||||
default: true
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
build_docker_volume_plugin:
|
|
||||||
if: inputs.manual || github.repository == 'rclone/rclone'
|
|
||||||
name: Build docker plugin job
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Free some space
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
df -h .
|
|
||||||
# Remove android SDK
|
|
||||||
sudo rm -rf /usr/local/lib/android || true
|
|
||||||
# Remove .net runtime
|
|
||||||
sudo rm -rf /usr/share/dotnet || true
|
|
||||||
df -h .
|
|
||||||
- name: Checkout master
|
|
||||||
uses: actions/checkout@v5
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
- name: Build and publish docker plugin
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
VER=${GITHUB_REF#refs/tags/}
|
|
||||||
PLUGIN_USER=rclone
|
|
||||||
docker login --username ${{ secrets.DOCKER_HUB_USER }} \
|
|
||||||
--password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
|
|
||||||
for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
|
|
||||||
export PLUGIN_USER PLUGIN_ARCH
|
|
||||||
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
|
|
||||||
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
|
|
||||||
done
|
|
||||||
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
|
|
||||||
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}
|
|
||||||
33
.github/workflows/build_publish_release_docker_image.yml
vendored
Normal file
33
.github/workflows/build_publish_release_docker_image.yml
vendored
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
name: Docker release build
|
||||||
|
|
||||||
|
on:
|
||||||
|
release:
|
||||||
|
types: [published]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
name: Build image job
|
||||||
|
steps:
|
||||||
|
- name: Checkout master
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
- name: Get actual patch version
|
||||||
|
id: actual_patch_version
|
||||||
|
run: echo ::set-output name=ACTUAL_PATCH_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g')
|
||||||
|
- name: Get actual minor version
|
||||||
|
id: actual_minor_version
|
||||||
|
run: echo ::set-output name=ACTUAL_MINOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1,2)
|
||||||
|
- name: Get actual major version
|
||||||
|
id: actual_major_version
|
||||||
|
run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
|
||||||
|
- name: Build and publish image
|
||||||
|
uses: ilteoood/docker_buildx@439099796bfc03dd9cedeb72a0c7cb92be5cc92c
|
||||||
|
with:
|
||||||
|
tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
|
||||||
|
imageName: rclone/rclone
|
||||||
|
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
|
||||||
|
publish: true
|
||||||
|
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
|
||||||
|
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
|
||||||
15
.github/workflows/notify.yml
vendored
15
.github/workflows/notify.yml
vendored
@@ -1,15 +0,0 @@
|
|||||||
name: Notify users based on issue labels
|
|
||||||
|
|
||||||
on:
|
|
||||||
issues:
|
|
||||||
types: [labeled]
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
notify:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: jenschelkopf/issue-label-notification-action@1.3
|
|
||||||
with:
|
|
||||||
token: ${{ secrets.NOTIFY_ACTION_TOKEN }}
|
|
||||||
recipients: |
|
|
||||||
Support Contract=@rclone/support
|
|
||||||
14
.github/workflows/winget.yml
vendored
14
.github/workflows/winget.yml
vendored
@@ -1,14 +0,0 @@
|
|||||||
name: Publish to Winget
|
|
||||||
on:
|
|
||||||
release:
|
|
||||||
types: [released]
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
publish:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: vedantmgoyal2009/winget-releaser@v2
|
|
||||||
with:
|
|
||||||
identifier: Rclone.Rclone
|
|
||||||
installers-regex: '-windows-\w+\.zip$'
|
|
||||||
token: ${{ secrets.WINGET_TOKEN }}
|
|
||||||
15
.gitignore
vendored
15
.gitignore
vendored
@@ -1,22 +1,11 @@
|
|||||||
*~
|
*~
|
||||||
_junk/
|
_junk/
|
||||||
rclone
|
rclone
|
||||||
rclone.exe
|
|
||||||
build
|
build
|
||||||
/docs/public/
|
docs/public
|
||||||
/docs/.hugo_build.lock
|
|
||||||
/docs/static/img/logos/
|
|
||||||
rclone.iml
|
rclone.iml
|
||||||
.idea
|
.idea
|
||||||
.history
|
.history
|
||||||
.vscode
|
|
||||||
*.test
|
*.test
|
||||||
|
*.log
|
||||||
*.iml
|
*.iml
|
||||||
fuzz-build.zip
|
|
||||||
*.orig
|
|
||||||
*.rej
|
|
||||||
Thumbs.db
|
|
||||||
__pycache__
|
|
||||||
.DS_Store
|
|
||||||
resource_windows_*.syso
|
|
||||||
.devcontainer
|
|
||||||
|
|||||||
157
.golangci.yml
157
.golangci.yml
@@ -1,151 +1,26 @@
|
|||||||
version: "2"
|
# golangci-lint configuration options
|
||||||
|
|
||||||
linters:
|
linters:
|
||||||
# Configure the linter set. To avoid unexpected results the implicit default
|
|
||||||
# set is ignored and all the ones to use are explicitly enabled.
|
|
||||||
default: none
|
|
||||||
enable:
|
enable:
|
||||||
# Default
|
- deadcode
|
||||||
- errcheck
|
- errcheck
|
||||||
- govet
|
|
||||||
- ineffassign
|
|
||||||
- staticcheck
|
|
||||||
- unused
|
|
||||||
# Additional
|
|
||||||
- gocritic
|
|
||||||
- misspell
|
|
||||||
#- prealloc # TODO
|
|
||||||
- revive
|
|
||||||
- unconvert
|
|
||||||
# Configure checks. Mostly using defaults but with some commented exceptions.
|
|
||||||
settings:
|
|
||||||
govet:
|
|
||||||
enable-all: true
|
|
||||||
disable:
|
|
||||||
- fieldalignment
|
|
||||||
- shadow
|
|
||||||
staticcheck:
|
|
||||||
# With staticcheck there is only one setting, so to extend the implicit
|
|
||||||
# default value it must be explicitly included.
|
|
||||||
checks:
|
|
||||||
# Default
|
|
||||||
- all
|
|
||||||
- -ST1000
|
|
||||||
- -ST1003
|
|
||||||
- -ST1016
|
|
||||||
- -ST1020
|
|
||||||
- -ST1021
|
|
||||||
- -ST1022
|
|
||||||
# Disable quickfix checks
|
|
||||||
- -QF*
|
|
||||||
gocritic:
|
|
||||||
# With gocritic there are different settings, but since enabled-checks
|
|
||||||
# and disabled-checks cannot both be set, for full customization the
|
|
||||||
# alternative is to disable all defaults and explicitly enable the ones
|
|
||||||
# to use.
|
|
||||||
disable-all: true
|
|
||||||
enabled-checks:
|
|
||||||
#- appendAssign # Skip default
|
|
||||||
- argOrder
|
|
||||||
- assignOp
|
|
||||||
- badCall
|
|
||||||
- badCond
|
|
||||||
#- captLocal # Skip default
|
|
||||||
- caseOrder
|
|
||||||
- codegenComment
|
|
||||||
#- commentFormatting # Skip default
|
|
||||||
- defaultCaseOrder
|
|
||||||
- deprecatedComment
|
|
||||||
- dupArg
|
|
||||||
- dupBranchBody
|
|
||||||
- dupCase
|
|
||||||
- dupSubExpr
|
|
||||||
- elseif
|
|
||||||
#- exitAfterDefer # Skip default
|
|
||||||
- flagDeref
|
|
||||||
- flagName
|
|
||||||
#- ifElseChain # Skip default
|
|
||||||
- mapKey
|
|
||||||
- newDeref
|
|
||||||
- offBy1
|
|
||||||
- regexpMust
|
|
||||||
- ruleguard # Enable additional check that are not enabled by default
|
|
||||||
#- singleCaseSwitch # Skip default
|
|
||||||
- sloppyLen
|
|
||||||
- sloppyTypeAssert
|
|
||||||
- switchTrue
|
|
||||||
- typeSwitchVar
|
|
||||||
- underef
|
|
||||||
- unlambda
|
|
||||||
- unslice
|
|
||||||
- valSwap
|
|
||||||
- wrapperFunc
|
|
||||||
settings:
|
|
||||||
ruleguard:
|
|
||||||
rules: ${base-path}/bin/rules.go
|
|
||||||
revive:
|
|
||||||
# With revive there is in reality only one setting, and when at least one
|
|
||||||
# rule are specified then only these rules will be considered, defaults
|
|
||||||
# and all others are then implicitly disabled, so must explicitly enable
|
|
||||||
# all rules to be used.
|
|
||||||
rules:
|
|
||||||
- name: blank-imports
|
|
||||||
disabled: false
|
|
||||||
- name: context-as-argument
|
|
||||||
disabled: false
|
|
||||||
- name: context-keys-type
|
|
||||||
disabled: false
|
|
||||||
- name: dot-imports
|
|
||||||
disabled: false
|
|
||||||
#- name: empty-block # Skip default
|
|
||||||
# disabled: true
|
|
||||||
- name: error-naming
|
|
||||||
disabled: false
|
|
||||||
- name: error-return
|
|
||||||
disabled: false
|
|
||||||
- name: error-strings
|
|
||||||
disabled: false
|
|
||||||
- name: errorf
|
|
||||||
disabled: false
|
|
||||||
- name: exported
|
|
||||||
disabled: false
|
|
||||||
#- name: increment-decrement # Skip default
|
|
||||||
# disabled: true
|
|
||||||
- name: indent-error-flow
|
|
||||||
disabled: false
|
|
||||||
- name: package-comments
|
|
||||||
disabled: false
|
|
||||||
- name: range
|
|
||||||
disabled: false
|
|
||||||
- name: receiver-naming
|
|
||||||
disabled: false
|
|
||||||
#- name: redefines-builtin-id # Skip default
|
|
||||||
# disabled: true
|
|
||||||
#- name: superfluous-else # Skip default
|
|
||||||
# disabled: true
|
|
||||||
- name: time-naming
|
|
||||||
disabled: false
|
|
||||||
- name: unexported-return
|
|
||||||
disabled: false
|
|
||||||
#- name: unreachable-code # Skip default
|
|
||||||
# disabled: true
|
|
||||||
#- name: unused-parameter # Skip default
|
|
||||||
# disabled: true
|
|
||||||
- name: var-declaration
|
|
||||||
disabled: false
|
|
||||||
- name: var-naming
|
|
||||||
disabled: false
|
|
||||||
|
|
||||||
formatters:
|
|
||||||
enable:
|
|
||||||
- goimports
|
- goimports
|
||||||
|
- golint
|
||||||
|
- ineffassign
|
||||||
|
- structcheck
|
||||||
|
- varcheck
|
||||||
|
- govet
|
||||||
|
- unconvert
|
||||||
|
#- prealloc
|
||||||
|
#- maligned
|
||||||
|
disable-all: true
|
||||||
|
|
||||||
issues:
|
issues:
|
||||||
|
# Enable some lints excluded by default
|
||||||
|
exclude-use-default: false
|
||||||
|
|
||||||
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
|
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
|
||||||
max-issues-per-linter: 0
|
max-per-linter: 0
|
||||||
|
|
||||||
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
|
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
|
||||||
max-same-issues: 0
|
max-same-issues: 0
|
||||||
|
|
||||||
run:
|
|
||||||
# Timeout for total work, e.g. 30s, 5m, 5m30s. Default is 0 (disabled).
|
|
||||||
timeout: 10m
|
|
||||||
|
|||||||
@@ -1,72 +0,0 @@
default: true

# Use specific styles, to be consistent across all documents.
# Default is to accept any as long as it is consistent within the same document.
heading-style: # MD003
  style: atx
ul-style: # MD004
  style: dash
hr-style: # MD035
  style: ---
code-block-style: # MD046
  style: fenced
code-fence-style: # MD048
  style: backtick
emphasis-style: # MD049
  style: asterisk
strong-style: # MD050
  style: asterisk

# Allow multiple headers with same text as long as they are not siblings.
no-duplicate-heading: # MD024
  siblings_only: true

# Allow long lines in code blocks and tables.
line-length: # MD013
  code_blocks: false
  tables: false

# The Markdown files used to generate docs with Hugo contain a top level
# header, even though the YAML front matter has a title property (which is
# used for the HTML document title only). Suppress Markdownlint warning:
# Multiple top-level headings in the same document.
single-title: # MD025
  level: 1
  front_matter_title:

# The HTML docs generated by Hugo from Markdown files may have slightly
# different header anchors than GitHub rendered Markdown, e.g. Hugo trims
# leading dashes so "--config string" becomes "#config-string" while it is
# "#--config-string" in GitHub preview. When writing links to headers in the
# Markdown files we must use whatever works in the final HTML generated docs.
# Suppress Markdownlint warning: Link fragments should be valid.
link-fragments: false # MD051

# Restrict the languages and language identifiers to use for code blocks.
# We only want those supported by both Hugo and GitHub. These are documented
# here:
# https://gohugo.io/content-management/syntax-highlighting/#languages
# https://docs.github.com//get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks#syntax-highlighting
# In addition, we only want to allow identifiers (aliases) that correspond to
# the same language in Hugo and GitHub, and preferably also VSCode and other
# commonly used tools, to avoid confusion. An example of this is that "shell"
# is considered by some an identifier for shell scripts, i.e. an alias for
# "sh", while others consider it an identifier for shell sessions, i.e. an
# alias for "console". Although Hugo and GitHub in this case are consistent and
# have chosen the former, using "sh" instead, and not allowing use of "shell",
# avoids the confusion entirely.
fenced-code-language: # MD040
  allowed_languages:
    - text
    - console
    - sh
    - bat
    - ini
    - json
    - yaml
    - go
    - python
    - c++
    - c#
    - java
    - powershell
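A config like the one above is consumed by the markdownlint tooling. The repository's exact lint invocation is not shown in this diff, so treat the following as an illustrative sketch using the standard markdownlint-cli2 runner and a hypothetical glob:

```console
npx markdownlint-cli2 "docs/content/**/*.md"
```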
@@ -1,80 +0,0 @@
# Rclone Code of Conduct

Like the technical community as a whole, the Rclone team and community is made up of a mixture of professionals and volunteers from all over the world, working on every aspect of the mission - including mentorship, teaching, and connecting people.

Diversity is one of our huge strengths, but it can also lead to communication issues and unhappiness. To that end, we have a few ground rules that we ask people to adhere to. This code applies equally to founders, mentors and those seeking help and guidance.

This isn't an exhaustive list of things that you can't do. Rather, take it in the spirit in which it's intended - a guide to make it easier to enrich all of us and the technical communities in which we participate.

This code of conduct applies to all spaces managed by the Rclone project or Rclone Services Ltd. This includes the issue tracker, the forum, the GitHub site, the wiki, any other online services or in-person events. In addition, violations of this code outside these spaces may affect a person's ability to participate within them.

- **Be friendly and patient.**
- **Be welcoming.** We strive to be a community that welcomes and supports people of all backgrounds and identities. This includes, but is not limited to members of any race, ethnicity, culture, national origin, colour, immigration status, social and economic class, educational level, sex, sexual orientation, gender identity and expression, age, size, family status, political belief, religion, and mental and physical ability.
- **Be considerate.** Your work will be used by other people, and you in turn will depend on the work of others. Any decision you take will affect users and colleagues, and you should take those consequences into account when making decisions. Remember that we're a world-wide community, so you might not be communicating in someone else's primary language.
- **Be respectful.** Not all of us will agree all the time, but disagreement is no excuse for poor behavior and poor manners. We might all experience some frustration now and then, but we cannot allow that frustration to turn into a personal attack. It's important to remember that a community where people feel uncomfortable or threatened is not a productive one. Members of the Rclone community should be respectful when dealing with other members as well as with people outside the Rclone community.
- **Be careful in the words that you choose.** We are a community of professionals, and we conduct ourselves professionally. Be kind to others. Do not insult or put down other participants. Harassment and other exclusionary behavior aren't acceptable. This includes, but is not limited to:
  - Violent threats or language directed against another person.
  - Discriminatory jokes and language.
  - Posting sexually explicit or violent material.
  - Posting (or threatening to post) other people's personally identifying information ("doxing").
  - Personal insults, especially those using racist or sexist terms.
  - Unwelcome sexual attention.
  - Advocating for, or encouraging, any of the above behavior.
  - Repeated harassment of others. In general, if someone asks you to stop, then stop.
- **When we disagree, try to understand why.** Disagreements, both social and technical, happen all the time and Rclone is no exception. It is important that we resolve disagreements and differing views constructively. Remember that we're different. The strength of Rclone comes from its varied community, people from a wide range of backgrounds. Different people have different perspectives on issues. Being unable to understand why someone holds a viewpoint doesn't mean that they're wrong. Don't forget that it is human to err and blaming each other doesn't get us anywhere. Instead, focus on helping to resolve issues and learning from mistakes.

If you believe someone is violating the code of conduct, we ask that you report it by emailing [info@rclone.com](mailto:info@rclone.com).

Original text courtesy of the [Speak Up! project](http://web.archive.org/web/20141109123859/http://speakup.io/coc.html).

## Questions?

If you have questions, please feel free to [contact us](mailto:info@rclone.com).
711 CONTRIBUTING.md

@@ -1,8 +1,8 @@

# Contributing to rclone

This is a short guide on how to contribute things to rclone.

## Reporting a bug

If you've just got a question or aren't sure if you've found a bug then please use the [rclone forum](https://forum.rclone.org/) instead

@@ -12,228 +12,87 @@ When filing an issue, please include the following information if

possible as well as a description of the problem. Make sure you test with the [latest beta of rclone](https://beta.rclone.org/):

- Rclone version (e.g. output from `rclone version`)
- Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
- The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
- A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
- if the log contains secrets then edit the file with a text editor first to obscure them
## Submitting a new feature or bug fix

If you find a bug that you'd like to fix, or a new feature that you'd like to implement then please submit a pull request via GitHub.

If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues) first so it can be discussed.

To prepare your pull request first press the fork button on [rclone's GitHub page](https://github.com/rclone/rclone).

Then [install Git](https://git-scm.com/downloads) and set your public contribution [name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git) and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).

Next open your terminal, change directory to your preferred folder and initialise your local rclone project:

```console
git clone https://github.com/rclone/rclone.git
cd rclone
git remote rename origin upstream
# if you have SSH keys setup in your GitHub account:
git remote add origin git@github.com:YOURUSER/rclone.git
# otherwise:
git remote add origin https://github.com/YOURUSER/rclone.git
```

Note that most of the terminal commands in the rest of this guide must be executed from the rclone folder created above.

Now [install Go](https://golang.org/doc/install) and verify your installation:

```console
go version
```

Great, you can now compile and execute your own version of rclone:

```console
go build
./rclone version
```

(Note that you can also replace `go build` with `make`, which will include a more accurate version number in the executable as well as enable you to specify more build options.) Finally make a branch to add your new feature

```console
git checkout -b my-new-feature
```

And get hacking.

You may like one of the [popular editors/IDEs for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins) and a quick view on the rclone [code organisation](#code-organisation).

When ready - test the affected functionality and run the unit tests for the code you changed

```console
cd folder/with/changed/files
go test -v
```

Note that you may need to make a test remote, e.g. `TestSwift` for some of the unit tests.

This is typically enough if you made a simple bug fix, otherwise please read the rclone [testing](#testing) section too.
Make sure you

- Add [unit tests](#testing) for a new feature.
- Add [documentation](#writing-documentation) for a new feature.
- [Commit your changes](#committing-your-changes) using the [commit message guidelines](#commit-messages).

When you are done with that push your changes to GitHub:

```console
git push -u origin my-new-feature
```

and open the GitHub website to [create your pull request](https://help.github.com/articles/creating-a-pull-request/).

Your changes will then get reviewed and you might get asked to fix some stuff. If so, then make the changes in the same branch, commit and push your updates to GitHub.

You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).

## Using Git and GitHub

### Committing your changes

Follow the guideline for [commit messages](#commit-messages) and then:

```console
git checkout my-new-feature # To switch to your branch
git status                  # To see the new and changed files
git add FILENAME            # To select FILENAME for the commit
git status                  # To verify the changes to be committed
git commit                  # To do the commit
git log                     # To verify the commit. Use q to quit the log
```

You can modify the message or changes in the latest commit using:

```console
git commit --amend
```

If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).

### Replacing your previously pushed commits

Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.

Your previously pushed commits are replaced by:

```console
git push --force origin my-new-feature
```

### Basing your changes on the latest master

To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):

```console
git checkout master
git fetch upstream
git merge --ff-only
git push origin --follow-tags # optional update of your fork in GitHub
git checkout my-new-feature
git rebase master
```

If you rebase commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).

### Squashing your commits

To combine your commits into one commit:

```console
git log                 # To count the commits to squash, e.g. the last 2
git reset --soft HEAD~2 # To undo the 2 latest commits
git status              # To check everything is as expected
```

If everything is fine, then make the new combined commit:

```console
git commit # To commit the undone commits as one
```

otherwise, you may roll back using:

```console
git reflog                  # To check that HEAD{1} is your previous state
git reset --soft 'HEAD@{1}' # To roll back to your previous state
```

If you squash commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).

Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.

### GitHub Continuous Integration

rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.

## Testing

### Code quality tests

If you install [golangci-lint](https://github.com/golangci/golangci-lint) then you can run the same tests as get run in the CI which can be very helpful.

You can run them with `make check` or with `golangci-lint run ./...`.

Using these tests ensures that the rclone codebase all uses the same coding standards. These tests also check for easy mistakes to make (like forgetting to check an error return).
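If you don't already have golangci-lint available, one common way to install it is as a standalone Go tool before running the checks above. The version pinned by the CI isn't shown here, so treat this as an illustrative sketch only:

```console
# install the linter (pick the version the CI uses if you know it)
go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
# then run the same checks as the CI
golangci-lint run ./...
```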
### Quick testing

rclone's tests are run from the go testing framework, so at the top level you can run this to run all the tests.

```console
go test -v ./...
```

You can also use `make`, if supported by your platform

```console
make quicktest
```

The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.

### Backend testing

rclone contains a mixture of unit tests and integration tests. Because it is difficult (and in some respects pointless) to test cloud storage systems by mocking all their interfaces, rclone unit tests can

@@ -246,216 +105,114 @@ need to make a remote called `TestDrive`.

You can then run the unit tests in the drive directory. These tests are skipped if `TestDrive:` isn't defined.

```console
cd backend/drive
go test -v
```

You can then run the integration tests which test all of rclone's operations. Normally these get run against the local file system, but they can be run against any of the remotes.

```console
cd fs/sync
go test -v -remote TestDrive:
go test -v -remote TestDrive: -fast-list

cd fs/operations
go test -v -remote TestDrive:
```

If you want to use the integration test framework to run these tests all together with an HTML report and test retries then from the project root:

```console
go run ./fstest/test_all -backends drive
```

### Full integration testing

If you want to run all the integration tests against all the remotes, then change into the project root and run

```console
make check
make test
```

The commands may require some extra go packages which you can install with

```console
make build_dep
```

The full integration tests are run daily on the integration test server. You can find the results at <https://integration.rclone.org>

## Code Organisation

Rclone code is organised into a small number of top level directories with modules beneath.

- backend - the rclone backends for interfacing to cloud providers -
  - all - import this to load all the cloud providers (see the sketch after this list)
  - ...providers
- bin - scripts for use while building or maintaining rclone
- cmd - the rclone commands
  - all - import this to load all the commands
  - ...commands
- cmdtest - end-to-end tests of commands, flags, environment variables,...
- docs - the documentation and website
  - content - adjust these docs only, except those marked autogenerated or portions marked autogenerated where the corresponding .go file must be edited instead, and everything else is autogenerated
  - commands - these are auto-generated, edit the corresponding .go file
- fs - main rclone definitions - minimal amount of code
  - accounting - bandwidth limiting and statistics
  - asyncreader - an io.Reader which reads ahead
  - config - manage the config file and flags
  - driveletter - detect if a name is a drive letter
  - filter - implements include/exclude filtering
  - fserrors - rclone specific error handling
  - fshttp - http handling for rclone
  - fspath - path handling for rclone
  - hash - defines rclone's hash types and functions
  - list - list a remote
  - log - logging facilities
  - march - iterates directories in lock step
  - object - in memory Fs objects
  - operations - primitives for sync, e.g. Copy, Move
  - sync - sync directories
  - walk - walk a directory
- fstest - provides integration test framework
  - fstests - integration tests for the backends
  - mockdir - mocks an fs.Directory
  - mockobject - mocks an fs.Object
  - test_all - Runs integration tests for everything
- graphics - the images used in the website, etc.
- lib - libraries used by the backend
  - atexit - register functions to run when rclone exits
  - dircache - directory ID to name caching
  - oauthutil - helpers for using oauth
  - pacer - retries with backoff and paces operations
  - readers - a selection of useful io.Readers
  - rest - a thin abstraction over net/http for REST
- librclone - in memory interface to rclone's API for embedding rclone
- vfs - Virtual FileSystem layer for implementing rclone mount and similar
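To illustrate the `backend/all` and `cmd/all` pattern mentioned in the list above: backends and commands register themselves in their package `init()` functions, so pulling them all in is just a matter of blank imports. This minimal sketch mirrors what rclone's own `rclone.go` does (the import paths and `cmd.Main` are real; the standalone file is only an illustration):

```go
// A minimal rclone-like entry point: the blank imports run each
// package's init(), which registers every backend and command.
package main

import (
	_ "github.com/rclone/rclone/backend/all" // registers all cloud providers
	_ "github.com/rclone/rclone/cmd/all"     // registers all subcommands

	"github.com/rclone/rclone/cmd"
)

func main() {
	// cmd.Main parses the command line and dispatches to the
	// registered subcommands.
	cmd.Main()
}
```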
## Writing Documentation

If you are adding a new feature then please update the documentation.

The documentation sources are generally in Markdown format, in conformance with the CommonMark specification and compatible with GitHub Flavored Markdown (GFM). The markdown format and style is checked as part of the lint operation that runs automatically on pull requests, to enforce standards and consistency. This is based on the [markdownlint](https://github.com/DavidAnson/markdownlint) tool by David Anson, which can also be integrated into editors so you can perform the same checks while writing. It generally follows Ciro Santilli's [Markdown Style Guide](https://cirosantilli.com/markdown-style-guide), which is a good source if you want to know more.

HTML pages, served as website <rclone.org>, are generated from the Markdown, using [Hugo](https://gohugo.io). Note that when generating the HTML pages, a different algorithm is currently used for generating header anchors than what GitHub uses for its Markdown rendering. For example, in the HTML docs generated by Hugo any leading `-` characters are ignored, which means when linking to a header with text `--config string` we therefore need to use the link `#config-string` in our Markdown source, which will not work in GitHub's preview where `#--config-string` would be the correct link.

Most of the documentation is written directly in text files with extension `.md`, mainly within folder `docs/content`. Note that several such files are autogenerated (e.g. the command documentation, and `docs/content/flags.md`), or contain autogenerated portions (e.g. the backend documentation under `docs/content/commands`). These are marked with an `autogenerated` comment. The sources of the autogenerated text are usually Markdown formatted text embedded as string values in the Go source code, so you need to locate these and edit the `.go` file instead. The `MANUAL.*`, `rclone.1` and other text files in the root of the repository are also autogenerated. The autogeneration of files, and the website, will be done during the release process. See the `make doc` and `make website` targets in the Makefile if you are interested in how. You don't need to run these when adding a feature.

If you add a new general flag (not for a backend), then document it in `docs/content/docs.md` - the flags there are supposed to be in alphabetical order.

If you add a new backend option/flag, then it should be documented in the source file in the `Help:` field (see the sketch after this list):

- Start with the most important information about the option, as a single sentence on a single line.
  - This text will be used for the command-line flag help.
  - It will be combined with other information, such as any default value, and the result will look odd if not written as a single sentence.
  - It should end with a period/full stop character, which will be shown in docs but automatically removed when producing the flag help.
  - Try to keep it below 80 characters, to reduce text wrapping in the terminal.
- More details can be added in a new paragraph, after an empty line (`"\n\n"`).
  - Like with docs generated from Markdown, a single line break is ignored and two line breaks create a new paragraph.
  - This text will be shown to the user in `rclone config` and in the docs (where it will be added by `make backenddocs`, normally run some time before next release).
- To create options of enumeration type use the `Examples:` field.
  - Each example value has its own `Help:` field, but they are treated a bit differently from the main option help text. They will be shown as an unordered list, therefore a single line break is enough to create a new list item. Also, for enumeration texts like names of countries, it looks better without an ending period/full stop character.
- You can run `make backenddocs` to verify the resulting Markdown.
  - This will update the autogenerated sections of the backend docs Markdown files under `docs/content`.
  - It requires you to have [Python](https://www.python.org) installed.
  - The `backenddocs` make target runs the Python script `bin/make_backend_docs.py`, and you can also run this directly, optionally with the name of a backend as argument to only update the docs for a specific backend.
  - **Do not** commit the updated Markdown files. This operation is run as part of the release process. Since any manual changes in the autogenerated sections of the Markdown files will then be lost, we have a pull request check that reports an error for any changes within the autogenerated sections. Should you have done manual changes outside of the autogenerated sections they must be committed, of course.
- You can run `make serve` to verify the resulting website.
  - This will build the website and serve it locally, so you can open it in your web browser and verify that the end result looks OK. Check specifically any added links, also in light of the note above regarding different algorithms for generated header anchors.
  - It requires you to have the [Hugo](https://gohugo.io) tool available.
  - The `serve` make target depends on the `website` target, which runs the `hugo` command from the `docs` directory to build the website, and then it serves the website locally with an embedded web server using a command `hugo server --logLevel info -w --disableFastRender --ignoreCache`, so you can run similar Hugo commands directly as well.
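To make the `Help:` conventions above concrete, here is a rough sketch of how a backend option with an enumerated `Examples:` list is declared. The option name, help text and values are invented for illustration; `fs.Option` and `fs.OptionExample` are the types real backends pass to `fs.Register` via `fs.RegInfo.Options`, but check an existing backend such as `backend/box` for the authoritative shape:

```go
package remote

import "github.com/rclone/rclone/fs"

// A hypothetical backend option illustrating the Help conventions above.
var regionOption = fs.Option{
	Name: "region",
	// First line: one sentence ending in a full stop - used for the flag help.
	// Everything after the blank line is only shown in `rclone config` and the docs.
	Help: "Region to connect to.\n\nThis only matters for multi-region accounts.",
	Examples: []fs.OptionExample{{
		Value: "eu",
		Help:  "Europe", // enumeration help reads better without a trailing full stop
	}, {
		Value: "us",
		Help:  "United States",
	}},
}
```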
When writing documentation for an entirely new backend, see [backend documentation](#backend-documentation).

If you are updating documentation for a command, you must do that in the command source code, e.g. `cmd/ls/ls.go`. Write flag help strings as a single sentence on a single line, without a period/full stop character at the end, as it will be combined unmodified with other information (such as any default value).

Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository) for small changes in the docs which makes it very easy. Just remember the caveat when linking to header anchors, noted above, which means that GitHub's Markdown preview may not be an entirely reliable verification of the results.

After your changes have been merged, you can verify them on [tip.rclone.org](https://tip.rclone.org). This site is updated daily with the current state of the master branch at 07:00 UTC. The changes will be on the main [rclone.org](https://rclone.org) site once they have been included in a release.

## Making a release

There are separate instructions for making a release in the RELEASE.md file.

## Commit messages

Please make the first line of your commit message a summary of the change that a user (not a developer) of rclone would like to read, and

@@ -479,16 +236,16 @@ change will get linked into the issue.

Here is an example of a short commit message:

```text
drive: add team drive support - fixes #885
```

And here is an example of a longer one:

```text
mount: fix hang on errored upload

In certain circumstances, if an upload failed then the mount could hang
indefinitely. This was fixed by closing the read pipe after the Put
completed. This will cause the write side to return a pipe closed
error fixing the hang.

@@ -496,39 +253,51 @@ error fixing the hang.

Fixes #1498
```

## Adding a dependency

rclone uses the [go modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more) support in go1.11 and later to manage its dependencies.

rclone can be built with modules outside of the `GOPATH`.

To add a dependency `github.com/ncw/new_dependency` see the instructions below. These will fetch the dependency and add it to `go.mod` and `go.sum`.

```console
go get github.com/ncw/new_dependency
```

You can add constraints on that package when doing `go get` (see the go docs linked above), but don't unless you really need to.

Please check in the changes generated by `go mod` including `go.mod` and `go.sum` in the same commit as your other changes.

## Updating a dependency

If you need to update a dependency then run

```console
go get golang.org/x/crypto
```

Check in a single commit as above.

## Updating all the dependencies

In order to update all the dependencies then run `make update`. This just uses the go modules to update all the modules to their latest

@@ -537,7 +306,7 @@ stable release. Check in the changes in a single commit as above.

This should be done early in the release cycle to pick up new versions of packages in time for them to get some testing.

## Updating a backend

If you update a backend then please run the unit tests and the integration tests for that backend.

@@ -552,153 +321,105 @@ integration tests.

The next section goes into more detail about the tests.

## Writing a new backend

Choose a name. The docs here will use `remote` as an example.

Note that in rclone terminology a file system backend is called a remote or an fs.

### Research

- Look at the interfaces defined in `fs/types.go`
- Study one or more of the existing remotes

### Getting going

- Create `backend/remote/remote.go` (copy this from a similar remote)
  - box is a good one to start from if you have a directory-based remote (and shows how to use the directory cache)
  - b2 is a good one to start from if you have a bucket-based remote
- Add your remote to the imports in `backend/all/all.go`
- HTTP based remotes are easiest to maintain if they use rclone's [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but if there is a really good Go SDK from the provider then use that instead.
- Try to implement as many optional methods as possible as it makes the remote more usable.
- Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to make sure we can encode any path name and `rclone info` to help determine the encodings needed
  - `rclone purge -v TestRemote:rclone-info`
  - `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
  - `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
  - open `remote.csv` in a spreadsheet and examine

### Guidelines for a speedy merge

- **Do** use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) if you are implementing a REST like backend and parsing XML/JSON in the backend.
- **Do** use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp) if your backend is HTTP based - this adds features like `--dump bodies`, `--tpslimit`, `--user-agent` without you having to code anything!
- **Do** follow your example backend exactly - use the same code order, function names, layout, structure. **Don't** move stuff around and **Don't** delete the comments.
- **Do not** split your backend up into `fs.go` and `object.go` (there are a few backends like that - don't follow them!)
- **Do** put your API type definitions in a separate file - by preference `api/types.go`
- **Remember** we have >50 backends to maintain so keeping them as similar as possible to each other is a high priority!

### Unit tests

- Create a config entry called `TestRemote` for the unit tests to use
- Create a `backend/remote/remote_test.go` - copy and adjust your example remote (a minimal sketch follows this list)
- Make sure all tests pass with `go test -v`
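The `remote_test.go` mentioned above is usually only a thin wrapper that hands your backend to the shared test suite in `fstest/fstests`. A minimal sketch follows; the backend name `remote` and its `Object` type are placeholders, so copy the real thing from the backend you used as your example:

```go
// backend/remote/remote_test.go (hypothetical backend called "remote")
package remote_test

import (
	"testing"

	"github.com/rclone/rclone/backend/remote"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs the standard integration test suite against
// the remote named TestRemote: from your rclone config.
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestRemote:",
		NilObject:  (*remote.Object)(nil),
	})
}
```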
### Integration tests

- Add your backend to `fstest/test_all/config.yaml`
- Once you've done that then you can use the integration test framework from the project root:
  - `go run ./fstest/test_all -backends remote`

Or if you want to run the integration tests manually:

- Make sure integration tests pass with
  - `cd fs/operations`
  - `go test -v -remote TestRemote:`
  - `cd fs/sync`
  - `go test -v -remote TestRemote:`
- If your remote defines `ListR` check with this also
  - `go test -v -remote TestRemote: -fast-list`

See the [testing](#testing) section for more information on integration tests.

### Backend documentation

Add your backend to the docs - you'll need to pick an icon for it from [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in alphabetical order of full name of remote (e.g. `drive` is ordered as `Google Drive`) but with the local file system last.

- `README.md` - main GitHub page
- `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
  - make sure this has the `autogenerated options` comments in (see your reference backend docs)
  - update them in your backend with `bin/make_backend_docs.py remote`
- `docs/content/overview.md` - overview docs - add an entry into the Features table and the Optional Features table.
- `docs/content/docs.md` - list of remotes in config section
- `docs/content/_index.md` - front page of rclone.org
- `docs/layouts/chrome/navbar.html` - add it to the website navigation
- `bin/make_manual.py` - add the page to the `docs` constant

Once you've written the docs, run `make serve` and check they look OK in the web browser and the links (internal and external) all work.

## Adding a new s3 provider

[Please see the guide in the S3 backend directory](backend/s3/README.md).

## Writing a plugin

New features (backends, commands) can also be added "out-of-tree", through Go plugins. Changes will be kept in a dynamically loaded file instead of being compiled into the main binary. This is useful if you can't merge your changes upstream or don't want to maintain a fork of rclone.

### Usage

- Naming
  - Plugin names must have the pattern `librcloneplugin_KIND_NAME.so`.
  - `KIND` should be one of `backend`, `command` or `bundle`.
  - Example: A plugin with backend support for PiFS would be called `librcloneplugin_backend_pifs.so`.
- Loading
  - Supported on macOS & Linux as of now. ([Go issue for Windows support](https://github.com/golang/go/issues/19282))
  - Supported on rclone v1.50 or greater.
  - All plugins in the folder specified by variable `$RCLONE_PLUGIN_PATH` are loaded.
  - If this variable doesn't exist, plugin support is disabled.
  - Plugins must be compiled against the exact version of rclone to work. (The rclone used during building the plugin must be the same as the source of rclone)

### Building

To turn your existing additions into a Go plugin, move them to an external repository and change the top-level package name to `main`.

Check `rclone --version` and make sure that the plugin's rclone dependency and host Go version match.

Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.

[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)
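As a rough illustration of the build step above, a plugin source tree can be as small as a `main` package that blank-imports your out-of-tree backend so its `init()` registration runs. This is only a sketch of one possible layout under the constraints above; the import path is hypothetical and the Go reference linked above remains the authoritative source:

```go
// main.go - built with: go build -buildmode=plugin -o librcloneplugin_backend_pifs.so .
package main

import (
	// Blank import so the backend registers itself with rclone via init().
	_ "github.com/example/rclone-pifs/pifs" // hypothetical backend package
)

// Required so this compiles as a main package; never called for plugins.
func main() {}
```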
## Keeping a backend or command out of tree

Rclone was designed to be modular so it is very easy to keep a backend or a command out of the main rclone source tree.

So for example if you had a backend which accessed your proprietary systems or a command which was specialised for your needs you could add them out of tree.

This may be easier than using a plugin and is supported on all platforms not just macOS and Linux.

This is explained further in <https://github.com/rclone/rclone_out_of_tree_example> which has an example of an out of tree backend `ram` (which is a renamed version of the `memory` backend).
47 Dockerfile

@@ -1,52 +1,21 @@

FROM golang:alpine AS builder

ARG CGO_ENABLED=0

WORKDIR /go/src/github.com/rclone/rclone/

RUN echo "**** Set Go Environment Variables ****" && \
    go env -w GOCACHE=/root/.cache/go-build

RUN echo "**** Install Dependencies ****" && \
    apk add --no-cache \
        make \
        bash \
        gawk \
        git

COPY go.mod .
COPY go.sum .

RUN echo "**** Download Go Dependencies ****" && \
    go mod download -x

RUN echo "**** Verify Go Dependencies ****" && \
    go mod verify

COPY . .

RUN --mount=type=cache,target=/root/.cache/go-build,sharing=locked \
    echo "**** Build Binary ****" && \
    make

RUN echo "**** Print Version Binary ****" && \
    ./rclone version

# Begin final image
FROM alpine:latest

RUN echo "**** Install Dependencies ****" && \
    apk add --no-cache \
        ca-certificates \
        fuse3 \
        tzdata && \
    echo "Enable user_allow_other in fuse" && \
    echo "user_allow_other" >> /etc/fuse.conf

COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/

RUN addgroup -g 1009 rclone && adduser -u 1009 -Ds /bin/sh -G rclone rclone

ENTRYPOINT [ "rclone" ]

WORKDIR /data
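A Dockerfile like the one above is built and smoke-tested with the usual Docker commands; the image tag below is just an illustration, not the project's published tag:

```console
docker build -t rclone:local .
docker run --rm rclone:local version
```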
|
|||||||
129
MAINTAINERS.md
129
MAINTAINERS.md
@@ -1,4 +1,4 @@
|
|||||||
-# Maintainers guide for rclone
+# Maintainers guide for rclone #

 Current active maintainers of rclone are:

@@ -11,121 +11,86 @@ Current active maintainers of rclone are:
 | Fabian Möller | @B4dM4n | |
 | Alex Chen | @Cnly | onedrive backend |
 | Sandeep Ummadi | @sandeepkru | azureblob backend |
-| Sebastian Bünger | @buengese | jottacloud, yandex & compress backends |
+| Sebastian Bünger | @buengese | jottacloud & yandex backends |
 | Ivan Andreev | @ivandeex | chunker & mailru backends |
 | Max Sum | @Max-Sum | union backend |
 | Fred | @creativeprojects | seafile backend |
-| Caleb Case | @calebcase | storj backend |
+| Caleb Case | @calebcase | tardigrade backend |
-| wiserain | @wiserain | pikpak backend |
-| albertony | @albertony | |
-| Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
-| Hideo Aoyama | @boukendesho | snap packaging |
-| nielash | @nielash | bisync |
-| Dan McArdle | @dmcardle | gitannex |
-| Sam Harrison | @childish-sambino | filescom |

-## This is a work in progress draft
+**This is a work in progress Draft**

-This is a guide for how to be an rclone maintainer. This is mostly a write-up
-of what I (@ncw) attempt to do.
+This is a guide for how to be an rclone maintainer. This is mostly a writeup of what I (@ncw) attempt to do.

-## Triaging Tickets
+## Triaging Tickets ##

-When a ticket comes in it should be triaged. This means it should be classified
-by adding labels and placed into a milestone. Quite a lot of tickets need a bit
-of back and forth to determine whether it is a valid ticket so tickets may
-remain without labels or milestone for a while.
+When a ticket comes in it should be triaged. This means it should be classified by adding labels and placed into a milestone. Quite a lot of tickets need a bit of back and forth to determine whether it is a valid ticket so tickets may remain without labels or milestone for a while.

 Rclone uses the labels like this:

-- `bug` - a definitely verified bug
-- `can't reproduce` - a problem which we can't reproduce
-- `doc fix` - a bug in the documentation - if users need help understanding the
-  docs add this label
-- `duplicate` - normally close these and ask the user to subscribe to the original
-- `enhancement: new remote` - a new rclone backend
-- `enhancement` - a new feature
-- `FUSE` - to do with `rclone mount` command
-- `good first issue` - mark these if you find a small self-contained issue -
-  these get shown to new visitors to the project
-- `help` wanted - mark these if you find a self-contained issue - these get
-  shown to new visitors to the project
-- `IMPORTANT` - note to maintainers not to forget to fix this for the release
-- `maintenance` - internal enhancement, code re-organisation, etc.
-- `Needs Go 1.XX` - waiting for that version of Go to be released
-- `question` - not a `bug` or `enhancement` - direct to the forum for next time
-- `Remote: XXX` - which rclone backend this affects
-- `thinking` - not decided on the course of action yet
+* `bug` - a definite verified bug
+* `can't reproduce` - a problem which we can't reproduce
+* `doc fix` - a bug in the documentation - if users need help understanding the docs add this label
+* `duplicate` - normally close these and ask the user to subscribe to the original
+* `enhancement: new remote` - a new rclone backend
+* `enhancement` - a new feature
+* `FUSE` - to do with `rclone mount` command
+* `good first issue` - mark these if you find a small self contained issue - these get shown to new visitors to the project
+* `help` wanted - mark these if you find a self contained issue - these get shown to new visitors to the project
+* `IMPORTANT` - note to maintainers not to forget to fix this for the release
+* `maintenance` - internal enhancement, code re-organisation etc
+* `Needs Go 1.XX` - waiting for that version of Go to be released
+* `question` - not a `bug` or `enhancement` - direct to the forum for next time
+* `Remote: XXX` - which rclone backend this affects
+* `thinking` - not decided on the course of action yet

-If it turns out to be a bug or an enhancement it should be tagged as such, with
-the appropriate other tags. Don't forget the "good first issue" tag to give new
-contributors something easy to do to get going.
+If it turns out to be a bug or an enhancement it should be tagged as such, with the appropriate other tags. Don't forget the "good first issue" tag to give new contributors something easy to do to get going.

-When a ticket is tagged it should be added to a milestone, either the next
-release, the one after, Soon or Help Wanted. Bugs can be added to the
-"Known Bugs" milestone if they aren't planned to be fixed or need to wait for
-something (e.g. the next go release).
+When a ticket is tagged it should be added to a milestone, either the next release, the one after, Soon or Help Wanted. Bugs can be added to the "Known Bugs" milestone if they aren't planned to be fixed or need to wait for something (eg the next go release).

 The milestones have these meanings:

-- v1.XX - stuff we would like to fit into this release
-- v1.XX+1 - stuff we are leaving until the next release
-- Soon - stuff we think is a good idea - waiting to be scheduled for a release
-- Help wanted - blue sky stuff that might get moved up, or someone could help with
-- Known bugs - bugs waiting on external factors or we aren't going to fix for
-  the moment
+* v1.XX - stuff we would like to fit into this release
+* v1.XX+1 - stuff we are leaving until the next release
+* Soon - stuff we think is a good idea - waiting to be scheduled to a release
+* Help wanted - blue sky stuff that might get moved up, or someone could help with
+* Known bugs - bugs waiting on external factors or we aren't going to fix for the moment

-Tickets [with no milestone](https://github.com/rclone/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile)
-are good candidates for ones that have slipped between the gaps and need
-following up.
+Tickets [with no milestone](https://github.com/rclone/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.

-## Closing Tickets
+## Closing Tickets ##

-Close tickets as soon as you can - make sure they are tagged with a release.
-Post a link to a beta in the ticket with the fix in, asking for feedback.
+Close tickets as soon as you can - make sure they are tagged with a release. Post a link to a beta in the ticket with the fix in, asking for feedback.

-## Pull requests
+## Pull requests ##

 Try to process pull requests promptly!

-Merging pull requests on GitHub itself works quite well nowadays so you can
-squash and rebase or rebase pull requests. rclone doesn't use merge commits.
-Use the squash and rebase option if you need to edit the commit message.
+Merging pull requests on GitHub itself works quite well now-a-days so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.

-After merging the commit, in your local master branch, do `git pull` then run
-`bin/update-authors.py` to update the authors file then `git push`.
+After merging the commit, in your local master branch, do `git pull` then run `bin/update-authors.py` to update the authors file then `git push`.
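As a minimal sketch of that sequence, assuming the regenerated authors file is left uncommitted by the script:

```console
git checkout master
git pull                   # pick up the freshly merged pull request
./bin/update-authors.py    # regenerate the authors file
git commit -a -v           # commit the update if anything changed
git push
```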

-Sometimes pull requests need to be left open for a while - this especially true
-of contributions of new backends which take a long time to get right.
+Sometimes pull requests need to be left open for a while - this especially true of contributions of new backends which take a long time to get right.

-## Merges
+## Merges ##

-If you are merging a branch locally then do `git merge --ff-only branch-name` to
-avoid a merge commit. You'll need to rebase the branch if it doesn't merge cleanly.
+If you are merging a branch locally then do `git merge --ff-only branch-name` to avoid a merge commit. You'll need to rebase the branch if it doesn't merge cleanly.
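A sketch of that local merge flow, with `new-backend` standing in for whatever branch is being merged:

```console
git checkout master
git merge --ff-only new-backend   # refuses to create a merge commit
# If the fast-forward is refused, rebase the branch and retry:
git checkout new-backend
git rebase master
git checkout master
git merge --ff-only new-backend
```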

-## Release cycle
+## Release cycle ##

-Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer
-if there is something big to merge that didn't stabilize properly or for personal
-reasons.
+Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer if there is something big to merge that didn't stabilize properly or for personal reasons.

 High impact regressions should be fixed before the next release.

-Near the start of the release cycle, the dependencies should be updated with
-`make update` to give time for bugs to surface.
+Near the start of the release cycle the dependencies should be updated with `make update` to give time for bugs to surface.

-Towards the end of the release cycle try not to merge anything too big so let
-things settle down.
+Towards the end of the release cycle try not to merge anything too big so let things settle down.

-Follow the instructions in RELEASE.md for making the release. Note that the
-testing part is the most time-consuming often needing several rounds of test
-and fix depending on exactly how many new features rclone has gained.
+Follow the instructions in RELEASE.md for making the release. Note that the testing part is the most time consuming often needing several rounds of test and fix depending on exactly how many new features rclone has gained.

-## Mailing list
+## Mailing list ##

-There is now an invite-only mailing list for rclone developers `rclone-dev` on
-google groups.
+There is now an invite only mailing list for rclone developers `rclone-dev` on google groups.

-## TODO
+## TODO ##

-I should probably make a <dev@rclone.org> to register with cloud providers.
+I should probably make a dev@rclone.org to register with cloud providers.
MANUAL.html (generated, 66959 changed lines) - diff too large to show
MANUAL.txt (generated, 55737 changed lines) - diff too large to show
Makefile (218 changed lines)
@@ -1,83 +1,66 @@
SHELL = bash
|
SHELL = bash
|
||||||
# Branch we are working on
|
# Branch we are working on
|
||||||
BRANCH := $(or $(BUILD_SOURCEBRANCHNAME),$(lastword $(subst /, ,$(GITHUB_REF))),$(shell git rev-parse --abbrev-ref HEAD))
|
BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(BUILD_SOURCEBRANCHNAME),$(lastword $(subst /, ,$(GITHUB_REF))),$(shell git rev-parse --abbrev-ref HEAD))
|
||||||
# Tag of the current commit, if any. If this is not "" then we are building a release
|
# Tag of the current commit, if any. If this is not "" then we are building a release
|
||||||
RELEASE_TAG := $(shell git tag -l --points-at HEAD)
|
RELEASE_TAG := $(shell git tag -l --points-at HEAD)
|
||||||
# Version of last release (may not be on this branch)
|
# Version of last release (may not be on this branch)
|
||||||
VERSION := $(shell cat VERSION)
|
VERSION := $(shell cat VERSION)
|
||||||
# Last tag on this branch
|
# Last tag on this branch
|
||||||
LAST_TAG := $(shell git describe --tags --abbrev=0)
|
LAST_TAG := $(shell git describe --tags --abbrev=0)
|
||||||
# Next version
|
|
||||||
NEXT_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2+1,0}')
|
|
||||||
NEXT_PATCH_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2,$$3+1}')
|
|
||||||
# If we are working on a release, override branch to master
|
# If we are working on a release, override branch to master
|
||||||
ifdef RELEASE_TAG
|
ifdef RELEASE_TAG
|
||||||
BRANCH := master
|
BRANCH := master
|
||||||
LAST_TAG := $(shell git describe --abbrev=0 --tags $(VERSION)^)
|
|
||||||
endif
|
endif
|
||||||
TAG_BRANCH := .$(BRANCH)
|
TAG_BRANCH := -$(BRANCH)
|
||||||
BRANCH_PATH := branch/$(BRANCH)/
|
BRANCH_PATH := branch/
|
||||||
# If building HEAD or master then unset TAG_BRANCH and BRANCH_PATH
|
# If building HEAD or master then unset TAG_BRANCH and BRANCH_PATH
|
||||||
ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
|
ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
|
||||||
TAG_BRANCH :=
|
TAG_BRANCH :=
|
||||||
BRANCH_PATH :=
|
BRANCH_PATH :=
|
||||||
endif
|
endif
|
||||||
# Make version suffix -beta.NNNN.CCCCCCCC (N=Commit number, C=Commit)
|
# Make version suffix -DDD-gCCCCCCCC (D=commits since last relase, C=Commit) or blank
|
||||||
VERSION_SUFFIX := -beta.$(shell git rev-list --count HEAD).$(shell git show --no-patch --no-notes --pretty='%h' HEAD)
|
VERSION_SUFFIX := $(shell git describe --abbrev=8 --tags | perl -lpe 's/^v\d+\.\d+\.\d+//; s/^-(\d+)/"-".sprintf("%03d",$$1)/e;')
|
||||||
# TAG is current version + commit number + commit + branch
|
# TAG is current version + number of commits since last release + branch
|
||||||
TAG := $(VERSION)$(VERSION_SUFFIX)$(TAG_BRANCH)
|
TAG := $(VERSION)$(VERSION_SUFFIX)$(TAG_BRANCH)
|
||||||
ifdef RELEASE_TAG
|
NEXT_VERSION := $(shell echo $(VERSION) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
|
||||||
TAG := $(RELEASE_TAG)
|
ifndef RELEASE_TAG
|
||||||
|
TAG := $(TAG)-beta
|
||||||
endif
|
endif
|
||||||
GO_VERSION := $(shell go version)
|
GO_VERSION := $(shell go version)
|
||||||
GO_OS := $(shell go env GOOS)
|
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
|
||||||
ifdef BETA_SUBDIR
|
ifdef BETA_SUBDIR
|
||||||
BETA_SUBDIR := /$(BETA_SUBDIR)
|
BETA_SUBDIR := /$(BETA_SUBDIR)
|
||||||
endif
|
endif
|
||||||
BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR)
|
BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR)
|
||||||
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
|
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
|
||||||
BETA_UPLOAD_ROOT := beta.rclone.org:
|
BETA_UPLOAD_ROOT := memstore:beta-rclone-org
|
||||||
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
|
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
|
||||||
# Pass in GOTAGS=xyz on the make command line to set build tags
|
# Pass in GOTAGS=xyz on the make command line to set build tags
|
||||||
ifdef GOTAGS
|
ifdef GOTAGS
|
||||||
BUILDTAGS=-tags "$(GOTAGS)"
|
BUILDTAGS=-tags "$(GOTAGS)"
|
||||||
LINTTAGS=--build-tags "$(GOTAGS)"
|
LINTTAGS=--build-tags "$(GOTAGS)"
|
||||||
endif
|
endif
|
||||||
LDFLAGS=--ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)"
|
|
||||||
|
|
||||||
.PHONY: rclone test_all vars version
|
.PHONY: rclone test_all vars version
|
||||||
|
|
||||||
rclone:
|
rclone:
|
||||||
ifeq ($(GO_OS),windows)
|
go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
|
||||||
go run bin/resource_windows.go -version $(TAG) -syso resource_windows_`go env GOARCH`.syso
|
|
||||||
endif
|
|
||||||
go build -v $(LDFLAGS) $(BUILDTAGS) $(BUILD_ARGS)
|
|
||||||
ifeq ($(GO_OS),windows)
|
|
||||||
rm resource_windows_`go env GOARCH`.syso
|
|
||||||
endif
|
|
||||||
mkdir -p `go env GOPATH`/bin/
|
mkdir -p `go env GOPATH`/bin/
|
||||||
cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
|
cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
|
||||||
mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
|
mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
|
||||||
|
|
||||||
test_all:
|
test_all:
|
||||||
go install $(LDFLAGS) $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all
|
go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
|
||||||
|
|
||||||
vars:
|
vars:
|
||||||
@echo SHELL="'$(SHELL)'"
|
@echo SHELL="'$(SHELL)'"
|
||||||
@echo BRANCH="'$(BRANCH)'"
|
@echo BRANCH="'$(BRANCH)'"
|
||||||
@echo TAG="'$(TAG)'"
|
@echo TAG="'$(TAG)'"
|
||||||
@echo VERSION="'$(VERSION)'"
|
@echo VERSION="'$(VERSION)'"
|
||||||
|
@echo NEXT_VERSION="'$(NEXT_VERSION)'"
|
||||||
@echo GO_VERSION="'$(GO_VERSION)'"
|
@echo GO_VERSION="'$(GO_VERSION)'"
|
||||||
@echo BETA_URL="'$(BETA_URL)'"
|
@echo BETA_URL="'$(BETA_URL)'"
|
||||||
|
|
||||||
btest:
|
|
||||||
@echo "[$(TAG)]($(BETA_URL)) on branch [$(BRANCH)](https://github.com/rclone/rclone/tree/$(BRANCH)) (uploaded in 15-30 mins)" | xclip -r -sel clip
|
|
||||||
@echo "Copied markdown of beta release to clip board"
|
|
||||||
|
|
||||||
btesth:
|
|
||||||
@echo "<a href="$(BETA_URL)">$(TAG)</a> on branch <a href="https://github.com/rclone/rclone/tree/$(BRANCH)">$(BRANCH)</a> (uploaded in 15-30 mins)" | xclip -r -sel clip -t text/html
|
|
||||||
@echo "Copied beta release in HTML to clip board"
|
|
||||||
|
|
||||||
version:
|
version:
|
||||||
@echo '$(TAG)'
|
@echo '$(TAG)'
|
||||||
|
|
||||||
@@ -88,54 +71,43 @@ test: rclone test_all
|
|||||||
|
|
||||||
# Quick test
|
# Quick test
|
||||||
quicktest:
|
quicktest:
|
||||||
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) ./...
|
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) $(GO_FILES)
|
||||||
|
|
||||||
racequicktest:
|
racequicktest:
|
||||||
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -cpu=2 -race ./...
|
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race $(GO_FILES)
|
||||||
|
|
||||||
compiletest:
|
|
||||||
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -run XXX ./...
|
|
||||||
|
|
||||||
# Do source code quality checks
|
# Do source code quality checks
|
||||||
check: rclone
|
check: rclone
|
||||||
@echo "-- START CODE QUALITY REPORT -------------------------------"
|
@echo "-- START CODE QUALITY REPORT -------------------------------"
|
||||||
@golangci-lint run $(LINTTAGS) ./...
|
@golangci-lint run $(LINTTAGS) ./...
|
||||||
@bin/markdown-lint
|
|
||||||
@echo "-- END CODE QUALITY REPORT ---------------------------------"
|
@echo "-- END CODE QUALITY REPORT ---------------------------------"
|
||||||
|
|
||||||
# Get the build dependencies
|
# Get the build dependencies
|
||||||
build_dep:
|
build_dep:
|
||||||
go run bin/get-github-release.go -use-api -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
|
go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
|
||||||
|
|
||||||
# Get the release dependencies we only install on linux
|
# Get the release dependencies
|
||||||
release_dep_linux:
|
release_dep:
|
||||||
go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest
|
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
|
||||||
|
go run bin/get-github-release.go -extract github-release aktau/github-release 'linux-amd64-github-release.tar.bz2'
|
||||||
|
|
||||||
# Update dependencies
|
# Update dependencies
|
||||||
showupdates:
|
|
||||||
@echo "*** Direct dependencies that could be updated ***"
|
|
||||||
@go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null
|
|
||||||
|
|
||||||
# Update direct dependencies only
|
|
||||||
updatedirect:
|
|
||||||
go get $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
|
|
||||||
go mod tidy
|
|
||||||
|
|
||||||
# Update direct and indirect dependencies and test dependencies
|
|
||||||
update:
|
update:
|
||||||
go get -u -t ./...
|
GO111MODULE=on go get -u ./...
|
||||||
go mod tidy
|
GO111MODULE=on go mod tidy
|
||||||
|
GO111MODULE=on go mod vendor
|
||||||
|
|
||||||
# Tidy the module dependencies
|
# Tidy the module dependencies
|
||||||
tidy:
|
tidy:
|
||||||
go mod tidy
|
GO111MODULE=on go mod tidy
|
||||||
|
GO111MODULE=on go mod vendor
|
||||||
|
|
||||||
doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
|
doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
|
||||||
|
|
||||||
rclone.1: MANUAL.md
|
rclone.1: MANUAL.md
|
||||||
pandoc -s --from markdown-smart --to man MANUAL.md -o rclone.1
|
pandoc -s --from markdown-smart --to man MANUAL.md -o rclone.1
|
||||||
|
|
||||||
MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs rcdocs
|
MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs
|
||||||
./bin/make_manual.py
|
./bin/make_manual.py
|
||||||
|
|
||||||
MANUAL.html: MANUAL.md
|
MANUAL.html: MANUAL.md
|
||||||
@@ -145,23 +117,17 @@ MANUAL.txt: MANUAL.md
|
|||||||
pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt
|
pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt
|
||||||
|
|
||||||
commanddocs: rclone
|
commanddocs: rclone
|
||||||
go generate ./lib/transform
|
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/
|
||||||
-@rmdir -p '$$HOME/.config/rclone'
|
|
||||||
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
|
|
||||||
@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
|
|
||||||
go run bin/make_bisync_docs.go ./docs/content/
|
|
||||||
|
|
||||||
backenddocs: rclone bin/make_backend_docs.py
|
backenddocs: rclone bin/make_backend_docs.py
|
||||||
-@rmdir -p '$$HOME/.config/rclone'
|
|
||||||
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
|
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
|
||||||
@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
|
|
||||||
|
|
||||||
rcdocs: rclone
|
rcdocs: rclone
|
||||||
bin/make_rc_docs.sh
|
bin/make_rc_docs.sh
|
||||||
|
|
||||||
install: rclone
|
install: rclone
|
||||||
install -d ${DESTDIR}/usr/bin
|
install -d ${DESTDIR}/usr/bin
|
||||||
install ${GOPATH}/bin/rclone ${DESTDIR}/usr/bin
|
install -t ${DESTDIR}/usr/bin ${GOPATH}/bin/rclone
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
go clean ./...
|
go clean ./...
|
||||||
@@ -175,7 +141,7 @@ website:
|
|||||||
@if grep -R "raw HTML omitted" docs/public ; then echo "ERROR: found unescaped HTML - fix the markdown source" ; fi
|
@if grep -R "raw HTML omitted" docs/public ; then echo "ERROR: found unescaped HTML - fix the markdown source" ; fi
|
||||||
|
|
||||||
upload_website: website
|
upload_website: website
|
||||||
rclone -v sync docs/public www.rclone.org:
|
rclone -v sync docs/public memstore:www-rclone-org
|
||||||
|
|
||||||
upload_test_website: website
|
upload_test_website: website
|
||||||
rclone -P sync docs/public test-rclone-org:
|
rclone -P sync docs/public test-rclone-org:
|
||||||
@@ -186,11 +152,6 @@ validate_website: website
|
|||||||
tarball:
|
tarball:
|
||||||
git archive -9 --format=tar.gz --prefix=rclone-$(TAG)/ -o build/rclone-$(TAG).tar.gz $(TAG)
|
git archive -9 --format=tar.gz --prefix=rclone-$(TAG)/ -o build/rclone-$(TAG).tar.gz $(TAG)
|
||||||
|
|
||||||
vendorball:
|
|
||||||
go mod vendor
|
|
||||||
tar -zcf build/rclone-$(TAG)-vendor.tar.gz vendor
|
|
||||||
rm -rf vendor
|
|
||||||
|
|
||||||
sign_upload:
|
sign_upload:
|
||||||
cd build && md5sum rclone-v* | gpg --clearsign > MD5SUMS
|
cd build && md5sum rclone-v* | gpg --clearsign > MD5SUMS
|
||||||
cd build && sha1sum rclone-v* | gpg --clearsign > SHA1SUMS
|
cd build && sha1sum rclone-v* | gpg --clearsign > SHA1SUMS
|
||||||
@@ -202,109 +163,82 @@ check_sign:
|
|||||||
cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c
|
cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c
|
||||||
|
|
||||||
upload:
|
upload:
|
||||||
rclone -P copy build/ downloads.rclone.org:/$(TAG)
|
rclone -P copy build/ memstore:downloads-rclone-org/$(TAG)
|
||||||
rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "downloads.rclone.org:/$(TAG)/$$i" "downloads.rclone.org:/$$j"'
|
rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "memstore:downloads-rclone-org/$(TAG)/$$i" "memstore:downloads-rclone-org/$$j"'
|
||||||
|
|
||||||
upload_github:
|
upload_github:
|
||||||
./bin/upload-github $(TAG)
|
./bin/upload-github $(TAG)
|
||||||
|
|
||||||
cross: doc
|
cross: doc
|
||||||
go run bin/cross-compile.go -release current $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
|
go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)
|
||||||
|
|
||||||
beta:
|
beta:
|
||||||
go run bin/cross-compile.go $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
|
go run bin/cross-compile.go $(BUILDTAGS) $(TAG)
|
||||||
rclone -v copy build/ pub.rclone.org:/$(TAG)
|
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
|
||||||
@echo Beta release ready at https://pub.rclone.org/$(TAG)/
|
@echo Beta release ready at https://pub.rclone.org/$(TAG)/
|
||||||
|
|
||||||
log_since_last_release:
|
log_since_last_release:
|
||||||
git log $(LAST_TAG)..
|
git log $(LAST_TAG)..
|
||||||
|
|
||||||
compile_all:
|
compile_all:
|
||||||
go run bin/cross-compile.go -compile-only $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
|
go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG)
|
||||||
|
|
||||||
ci_upload:
|
appveyor_upload:
|
||||||
sudo chown -R $$USER build
|
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
|
||||||
find build -type l -delete
|
ifndef BRANCH_PATH
|
||||||
gzip -r9v build
|
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
|
||||||
./rclone --no-check-dest --config bin/ci.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
|
|
||||||
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
|
|
||||||
./rclone --no-check-dest --config bin/ci.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
|
|
||||||
endif
|
|
||||||
@echo Beta release ready at $(BETA_URL)/testbuilds
|
|
||||||
|
|
||||||
ci_beta:
|
|
||||||
git log $(LAST_TAG).. > /tmp/git-log.txt
|
|
||||||
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
|
|
||||||
rclone --no-check-dest --config bin/ci.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
|
|
||||||
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
|
|
||||||
rclone --no-check-dest --config bin/ci.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
|
|
||||||
endif
|
endif
|
||||||
@echo Beta release ready at $(BETA_URL)
|
@echo Beta release ready at $(BETA_URL)
|
||||||
|
|
||||||
# Fetch the binary builds from GitHub actions
|
circleci_upload:
|
||||||
|
sudo chown -R $$USER build
|
||||||
|
find build -type l -delete
|
||||||
|
gzip -r9v build
|
||||||
|
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
|
||||||
|
ifndef BRANCH_PATH
|
||||||
|
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
|
||||||
|
endif
|
||||||
|
@echo Beta release ready at $(BETA_URL)/testbuilds
|
||||||
|
|
||||||
|
travis_beta:
|
||||||
|
ifeq (linux,$(filter linux,$(subst Linux,linux,$(TRAVIS_OS_NAME) $(AGENT_OS))))
|
||||||
|
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar.gz'
|
||||||
|
endif
|
||||||
|
git log $(LAST_TAG).. > /tmp/git-log.txt
|
||||||
|
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
|
||||||
|
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
|
||||||
|
ifndef BRANCH_PATH
|
||||||
|
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
|
||||||
|
endif
|
||||||
|
@echo Beta release ready at $(BETA_URL)
|
||||||
|
|
||||||
|
# Fetch the binary builds from travis and appveyor
|
||||||
fetch_binaries:
|
fetch_binaries:
|
||||||
rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/
|
rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/
|
||||||
|
|
||||||
serve: website
|
serve: website
|
||||||
cd docs && hugo server --logLevel info -w --disableFastRender --ignoreCache
|
cd docs && hugo server -v -w --disableFastRender
|
||||||
|
|
||||||
tag: retag doc
|
tag: doc
|
||||||
bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new
|
@echo "Old tag is $(VERSION)"
|
||||||
|
@echo "New tag is $(NEXT_VERSION)"
|
||||||
|
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)\"\n" | gofmt > fs/version.go
|
||||||
|
echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
|
||||||
|
echo "$(NEXT_VERSION)" > VERSION
|
||||||
|
git tag -s -m "Version $(NEXT_VERSION)" $(NEXT_VERSION)
|
||||||
|
bin/make_changelog.py $(LAST_TAG) $(NEXT_VERSION) > docs/content/changelog.md.new
|
||||||
mv docs/content/changelog.md.new docs/content/changelog.md
|
mv docs/content/changelog.md.new docs/content/changelog.md
|
||||||
@echo "Edit the new changelog in docs/content/changelog.md"
|
@echo "Edit the new changelog in docs/content/changelog.md"
|
||||||
@echo "Then commit all the changes"
|
@echo "Then commit all the changes"
|
||||||
@echo git commit -m \"Version $(VERSION)\" -a -v
|
@echo git commit -m \"Version $(NEXT_VERSION)\" -a -v
|
||||||
@echo "And finally run make retag before make cross, etc."
|
@echo "And finally run make retag before make cross etc"
|
||||||
|
|
||||||
retag:
|
retag:
|
||||||
@echo "Version is $(VERSION)"
|
|
||||||
git tag -f -s -m "Version $(VERSION)" $(VERSION)
|
git tag -f -s -m "Version $(VERSION)" $(VERSION)
|
||||||
|
|
||||||
startdev:
|
startdev:
|
||||||
@echo "Version is $(VERSION)"
|
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(VERSION)-DEV\"\n" | gofmt > fs/version.go
|
||||||
@echo "Next version is $(NEXT_VERSION)"
|
git commit -m "Start $(VERSION)-DEV development" fs/version.go
|
||||||
echo -e "package fs\n\n// VersionTag of rclone\nvar VersionTag = \"$(NEXT_VERSION)\"\n" | gofmt > fs/versiontag.go
|
|
||||||
echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
|
|
||||||
echo "$(NEXT_VERSION)" > VERSION
|
|
||||||
git commit -m "Start $(NEXT_VERSION)-DEV development" fs/versiontag.go VERSION docs/layouts/partials/version.html
|
|
||||||
|
|
||||||
startstable:
|
|
||||||
@echo "Version is $(VERSION)"
|
|
||||||
@echo "Next stable version is $(NEXT_PATCH_VERSION)"
|
|
||||||
echo -e "package fs\n\n// VersionTag of rclone\nvar VersionTag = \"$(NEXT_PATCH_VERSION)\"\n" | gofmt > fs/versiontag.go
|
|
||||||
echo -n "$(NEXT_PATCH_VERSION)" > docs/layouts/partials/version.html
|
|
||||||
echo "$(NEXT_PATCH_VERSION)" > VERSION
|
|
||||||
git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/versiontag.go VERSION docs/layouts/partials/version.html
|
|
||||||
|
|
||||||
winzip:
|
winzip:
|
||||||
zip -9 rclone-$(TAG).zip rclone.exe
|
zip -9 rclone-$(TAG).zip rclone.exe
|
||||||
|
|
||||||
# docker volume plugin
|
|
||||||
PLUGIN_USER ?= rclone
|
|
||||||
PLUGIN_TAG ?= latest
|
|
||||||
PLUGIN_BASE_TAG ?= latest
|
|
||||||
PLUGIN_ARCH ?= amd64
|
|
||||||
PLUGIN_IMAGE := $(PLUGIN_USER)/docker-volume-rclone:$(PLUGIN_TAG)
|
|
||||||
PLUGIN_BASE := $(PLUGIN_USER)/rclone:$(PLUGIN_BASE_TAG)
|
|
||||||
PLUGIN_BUILD_DIR := ./build/docker-plugin
|
|
||||||
PLUGIN_CONTRIB_DIR := ./contrib/docker-plugin/managed
|
|
||||||
|
|
||||||
docker-plugin-create:
|
|
||||||
docker buildx inspect |grep -q /${PLUGIN_ARCH} || \
|
|
||||||
docker run --rm --privileged tonistiigi/binfmt --install all
|
|
||||||
rm -rf ${PLUGIN_BUILD_DIR}
|
|
||||||
docker buildx build \
|
|
||||||
--no-cache --pull \
|
|
||||||
--build-arg BASE_IMAGE=${PLUGIN_BASE} \
|
|
||||||
--platform linux/${PLUGIN_ARCH} \
|
|
||||||
--output ${PLUGIN_BUILD_DIR}/rootfs \
|
|
||||||
${PLUGIN_CONTRIB_DIR}
|
|
||||||
cp ${PLUGIN_CONTRIB_DIR}/config.json ${PLUGIN_BUILD_DIR}
|
|
||||||
docker plugin rm --force ${PLUGIN_IMAGE} 2>/dev/null || true
|
|
||||||
docker plugin create ${PLUGIN_IMAGE} ${PLUGIN_BUILD_DIR}
|
|
||||||
|
|
||||||
docker-plugin-push:
|
|
||||||
docker plugin push ${PLUGIN_IMAGE}
|
|
||||||
docker plugin rm ${PLUGIN_IMAGE}
|
|
||||||
|
|
||||||
docker-plugin: docker-plugin-create docker-plugin-push
|
|
||||||
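One side of the Makefile diff above computes the beta version suffix from git; a rough sketch of what that evaluates to, with illustrative values only:

```console
$ cat VERSION
v1.53.0
$ git rev-list --count HEAD                            # commit count -> beta number
4567
$ git show --no-patch --no-notes --pretty='%h' HEAD    # short commit hash
abcdef12
# TAG becomes v1.53.0-beta.4567.abcdef12, plus ".branchname" when not on master
```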
README.md (230 changed lines)
@@ -1,11 +1,8 @@
<!-- markdownlint-disable-next-line first-line-heading no-inline-html -->
|
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/)
|
||||||
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
|
|
||||||
<!-- markdownlint-disable-next-line no-inline-html -->
|
|
||||||
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
|
|
||||||
|
|
||||||
[Website](https://rclone.org) |
|
[Website](https://rclone.org) |
|
||||||
[Documentation](https://rclone.org/docs/) |
|
[Documentation](https://rclone.org/docs/) |
|
||||||
[Download](https://rclone.org/downloads/) |
|
[Download](https://rclone.org/downloads/) |
|
||||||
[Contributing](CONTRIBUTING.md) |
|
[Contributing](CONTRIBUTING.md) |
|
||||||
[Changelog](https://rclone.org/changelog/) |
|
[Changelog](https://rclone.org/changelog/) |
|
||||||
[Installation](https://rclone.org/install/) |
|
[Installation](https://rclone.org/install/) |
|
||||||
@@ -13,172 +10,101 @@
|
|||||||
|
|
||||||
[](https://github.com/rclone/rclone/actions?query=workflow%3Abuild)
|
[](https://github.com/rclone/rclone/actions?query=workflow%3Abuild)
|
||||||
[](https://goreportcard.com/report/github.com/rclone/rclone)
|
[](https://goreportcard.com/report/github.com/rclone/rclone)
|
||||||
[](https://godoc.org/github.com/rclone/rclone)
|
[](https://godoc.org/github.com/rclone/rclone)
|
||||||
[](https://hub.docker.com/r/rclone/rclone)
|
[](https://hub.docker.com/r/rclone/rclone)
|
||||||
|
|
||||||
# Rclone
|
# Rclone
|
||||||
|
|
||||||
Rclone *("rsync for cloud storage")* is a command-line program to sync files and
|
Rclone *("rsync for cloud storage")* is a command line program to sync files and directories to and from different cloud storage providers.
|
||||||
directories to and from different cloud storage providers.
|
|
||||||
|
|
||||||
## Storage providers
|
## Storage providers
|
||||||
|
|
||||||
- 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
|
* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
|
||||||
- Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
|
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
|
||||||
- Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
|
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
|
||||||
- Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
|
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
|
||||||
- ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
|
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
|
||||||
- Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
|
* Box [:page_facing_up:](https://rclone.org/box/)
|
||||||
- Box [:page_facing_up:](https://rclone.org/box/)
|
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
|
||||||
- Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
|
* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
|
||||||
- China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
|
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
|
||||||
- Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
|
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
|
||||||
- Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
|
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
|
||||||
- Cubbit DS3 [:page_facing_up:](https://rclone.org/s3/#Cubbit)
|
* FTP [:page_facing_up:](https://rclone.org/ftp/)
|
||||||
- DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
|
* GetSky [:page_facing_up:](https://rclone.org/jottacloud/)
|
||||||
- Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
|
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
|
||||||
- Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
|
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
|
||||||
- Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
|
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
|
||||||
- Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
|
* HTTP [:page_facing_up:](https://rclone.org/http/)
|
||||||
- Exaba [:page_facing_up:](https://rclone.org/s3/#exaba)
|
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
|
||||||
- Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
|
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
|
||||||
- FileLu [:page_facing_up:](https://rclone.org/filelu/)
|
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
|
||||||
- Files.com [:page_facing_up:](https://rclone.org/filescom/)
|
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
|
||||||
- FlashBlade [:page_facing_up:](https://rclone.org/s3/#pure-storage-flashblade)
|
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
|
||||||
- FTP [:page_facing_up:](https://rclone.org/ftp/)
|
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
|
||||||
- GoFile [:page_facing_up:](https://rclone.org/gofile/)
|
* Mega [:page_facing_up:](https://rclone.org/mega/)
|
||||||
- Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
|
* Memory [:page_facing_up:](https://rclone.org/memory/)
|
||||||
- Google Drive [:page_facing_up:](https://rclone.org/drive/)
|
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
|
||||||
- Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
|
* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
|
||||||
- HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
|
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
|
||||||
- Hetzner Object Storage [:page_facing_up:](https://rclone.org/s3/#hetzner)
|
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
|
||||||
- Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
|
* OVH [:page_facing_up:](https://rclone.org/swift/)
|
||||||
- HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
|
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
|
||||||
- HTTP [:page_facing_up:](https://rclone.org/http/)
|
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
|
||||||
- Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
|
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
|
||||||
- iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/)
|
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
|
||||||
- ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
|
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
|
||||||
- Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
|
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
|
||||||
- Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
|
* put.io [:page_facing_up:](https://rclone.org/putio/)
|
||||||
- IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
|
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
|
||||||
- Intercolo Object Storage [:page_facing_up:](https://rclone.org/s3/#intercolo)
|
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
|
||||||
- IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
|
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
|
||||||
- Koofr [:page_facing_up:](https://rclone.org/koofr/)
|
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
|
||||||
- Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
|
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
|
||||||
- Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
|
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
|
||||||
- Linkbox [:page_facing_up:](https://rclone.org/linkbox)
|
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
|
||||||
- Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode)
|
* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
|
||||||
- Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu)
|
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
|
||||||
- Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
|
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
|
||||||
- Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
|
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
|
||||||
- MEGA [:page_facing_up:](https://rclone.org/mega/)
|
* The local filesystem [:page_facing_up:](https://rclone.org/local/)
|
||||||
- MEGA S4 Object Storage [:page_facing_up:](https://rclone.org/s3/#mega)
|
|
||||||
- Memory [:page_facing_up:](https://rclone.org/memory/)
|
|
||||||
- Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
|
|
||||||
- Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/)
|
|
||||||
- Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
|
|
||||||
- Minio [:page_facing_up:](https://rclone.org/s3/#minio)
|
|
||||||
- Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
|
|
||||||
- Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
|
|
||||||
- OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
|
|
||||||
- OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
|
|
||||||
- Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
|
|
||||||
- Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
|
|
||||||
- Outscale [:page_facing_up:](https://rclone.org/s3/#outscale)
|
|
||||||
- OVHcloud Object Storage (Swift) [:page_facing_up:](https://rclone.org/swift/)
|
|
||||||
- OVHcloud Object Storage (S3-compatible) [:page_facing_up:](https://rclone.org/s3/#ovhcloud)
|
|
||||||
- ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
|
|
||||||
- pCloud [:page_facing_up:](https://rclone.org/pcloud/)
|
|
||||||
- Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
|
|
||||||
- PikPak [:page_facing_up:](https://rclone.org/pikpak/)
|
|
||||||
- Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/)
|
|
||||||
- premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
|
|
||||||
- put.io [:page_facing_up:](https://rclone.org/putio/)
|
|
||||||
- Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
|
|
||||||
- QingStor [:page_facing_up:](https://rclone.org/qingstor/)
|
|
||||||
- Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
|
|
||||||
- Rabata Cloud Storage [:page_facing_up:](https://rclone.org/s3/#Rabata)
|
|
||||||
- Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
|
|
||||||
- Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
|
|
||||||
- RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
|
|
||||||
- rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
|
|
||||||
- Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
|
|
||||||
- Seafile [:page_facing_up:](https://rclone.org/seafile/)
|
|
||||||
- Seagate Lyve Cloud [:page_facing_up:](https://rclone.org/s3/#lyve)
|
|
||||||
- SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
|
|
||||||
- Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
|
|
||||||
- Servercore Object Storage [:page_facing_up:](https://rclone.org/s3/#servercore)
|
|
||||||
- SFTP [:page_facing_up:](https://rclone.org/sftp/)
|
|
||||||
- SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
|
|
||||||
- Spectra Logic [:page_facing_up:](https://rclone.org/s3/#spectralogic)
|
|
||||||
- StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
|
|
||||||
- Storj [:page_facing_up:](https://rclone.org/storj/)
|
|
||||||
- SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
|
|
||||||
- Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
|
|
||||||
- Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
|
|
||||||
- Uloz.to [:page_facing_up:](https://rclone.org/ulozto/)
|
|
||||||
- Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
|
|
||||||
- WebDAV [:page_facing_up:](https://rclone.org/webdav/)
|
|
||||||
- Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
|
|
||||||
- Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
|
|
||||||
- Zata.ai [:page_facing_up:](https://rclone.org/s3/#Zata)
|
|
||||||
- The local filesystem [:page_facing_up:](https://rclone.org/local/)
|
|
||||||
|
|
||||||
Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
|
Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
|
||||||
|
|
||||||
### Virtual storage providers
|
|
||||||
|
|
||||||
These backends adapt or modify other storage providers
|
|
||||||
|
|
||||||
- Alias: rename existing remotes [:page_facing_up:](https://rclone.org/alias/)
|
|
||||||
- Archive: read archive files [:page_facing_up:](https://rclone.org/archive/)
|
|
||||||
- Cache: cache remotes (DEPRECATED) [:page_facing_up:](https://rclone.org/cache/)
|
|
||||||
- Chunker: split large files [:page_facing_up:](https://rclone.org/chunker/)
|
|
||||||
- Combine: combine multiple remotes into a directory tree [:page_facing_up:](https://rclone.org/combine/)
|
|
||||||
- Compress: compress files [:page_facing_up:](https://rclone.org/compress/)
|
|
||||||
- Crypt: encrypt files [:page_facing_up:](https://rclone.org/crypt/)
|
|
||||||
- Hasher: hash files [:page_facing_up:](https://rclone.org/hasher/)
|
|
||||||
- Union: join multiple remotes to work together [:page_facing_up:](https://rclone.org/union/)
|
|
||||||
|
|
||||||
## Features
|
## Features
|
||||||
|
|
||||||
- MD5/SHA-1 hashes checked at all times for file integrity
|
* MD5/SHA-1 hashes checked at all times for file integrity
|
||||||
- Timestamps preserved on files
|
* Timestamps preserved on files
|
||||||
- Partial syncs supported on a whole file basis
|
* Partial syncs supported on a whole file basis
|
||||||
- [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed
|
* [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
|
||||||
files
|
* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
|
||||||
- [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory
|
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
|
||||||
identical
|
* Can sync to and from network, e.g. two different cloud accounts
|
||||||
- [Bisync](https://rclone.org/bisync/) (two way) to keep two directories in sync
|
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
|
||||||
bidirectionally
|
* Optional encryption ([Crypt](https://rclone.org/crypt/))
|
||||||
- [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash
|
* Optional cache ([Cache](https://rclone.org/cache/))
|
||||||
equality
|
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
|
||||||
- Can sync to and from network, e.g. two different cloud accounts
|
* Multi-threaded downloads to local disk
|
||||||
- Optional large file chunking ([Chunker](https://rclone.org/chunker/))
|
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna
|
||||||
- Optional transparent compression ([Compress](https://rclone.org/compress/))
|
|
||||||
- Optional encryption ([Crypt](https://rclone.org/crypt/))
|
|
||||||
- Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
|
|
||||||
- Multi-threaded downloads to local disk
|
|
||||||
- Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files
|
|
||||||
over HTTP/WebDAV/FTP/SFTP/DLNA
|
|
||||||
|
|
||||||
## Installation & documentation
|
## Installation & documentation
|
||||||
|
|
||||||
Please see the [rclone website](https://rclone.org/) for:
|
Please see the [rclone website](https://rclone.org/) for:
|
||||||
|
|
||||||
- [Installation](https://rclone.org/install/)
|
* [Installation](https://rclone.org/install/)
|
||||||
- [Documentation & configuration](https://rclone.org/docs/)
|
* [Documentation & configuration](https://rclone.org/docs/)
|
||||||
- [Changelog](https://rclone.org/changelog/)
|
* [Changelog](https://rclone.org/changelog/)
|
||||||
- [FAQ](https://rclone.org/faq/)
|
* [FAQ](https://rclone.org/faq/)
|
||||||
- [Storage providers](https://rclone.org/overview/)
|
* [Storage providers](https://rclone.org/overview/)
|
||||||
- [Forum](https://forum.rclone.org/)
|
* [Forum](https://forum.rclone.org/)
|
||||||
- ...and more
|
* ...and more
|
||||||
|
|
||||||
## Downloads
|
## Downloads
|
||||||
|
|
||||||
- <https://rclone.org/downloads/>
|
* https://rclone.org/downloads/
|
||||||
|
|
||||||
## License
|
License
|
||||||
|
-------
|
||||||
|
|
||||||
This is free software under the terms of the MIT license (check the
|
This is free software under the terms of MIT the license (check the
|
||||||
[COPYING file](/COPYING) included in this package).
|
[COPYING file](/COPYING) included in this package).
|
||||||
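The copy, sync and check modes listed in the README feature list above correspond directly to rclone subcommands; a minimal sketch, assuming a remote named `remote:` has already been set up with `rclone config`:

```console
rclone copy  ~/photos remote:photos    # copy new or changed files only
rclone sync  ~/photos remote:photos    # make the destination identical (one way)
rclone check ~/photos remote:photos    # compare source and destination hashes
```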
RELEASE.md (254 changed lines)
@@ -4,206 +4,108 @@ This file describes how to make the various kinds of releases
|
|
||||||
## Extra required software for making a release
|
## Extra required software for making a release
|
||||||
|
|
||||||
- [gh the github cli](https://github.com/cli/cli) for uploading packages
|
* [github-release](https://github.com/aktau/github-release) for uploading packages
|
||||||
- pandoc for making the html and man pages
|
* pandoc for making the html and man pages
|
||||||
|
|
||||||
## Making a release
|
## Making a release
|
||||||
|
|
||||||
- git checkout master # see below for stable branch
|
* git checkout master
|
||||||
- git pull # IMPORTANT
|
* git pull
|
||||||
- git status - make sure everything is checked in
|
* git status - make sure everything is checked in
|
||||||
- Check GitHub actions build for master is Green
|
* Check GitHub actions build for master is Green
|
||||||
- make test # see integration test server or run locally
|
* make test # see integration test server or run locally
|
||||||
- make tag
|
* make tag
|
||||||
- edit docs/content/changelog.md # make sure to remove duplicate logs from point
|
* edit docs/content/changelog.md # make sure to remove duplicate logs from point releases
|
||||||
releases
|
* make tidy
|
||||||
- make tidy
|
* make doc
|
||||||
- make doc
|
* git status - to check for new man pages - git add them
|
||||||
- git status - to check for new man pages - git add them
|
* git commit -a -v -m "Version v1.XX.0"
|
||||||
- git commit -a -v -m "Version v1.XX.0"
|
* make retag
|
||||||
- make retag
|
* git push --tags origin master
|
||||||
- git push origin # without --follow-tags so it doesn't push the tag if it fails
|
* # Wait for the GitHub builds to complete then...
|
||||||
- git push --follow-tags origin
|
* make fetch_binaries
|
||||||
- \# Wait for the GitHub builds to complete then...
|
* make tarball
|
||||||
- make fetch_binaries
|
* make sign_upload
|
||||||
- make tarball
|
* make check_sign
|
||||||
- make vendorball
|
* make upload
|
||||||
- make sign_upload
|
* make upload_website
|
||||||
- make check_sign
|
* make upload_github
|
||||||
- make upload
|
* make startdev
|
||||||
- make upload_website
|
* # announce with forum post, twitter post, patreon post
|
||||||
- make upload_github
|
|
||||||
- make startdev # make startstable for stable branch
|
|
||||||
- \# announce with forum post, twitter post, patreon post
|
|
||||||
|
|
||||||
## Update dependencies
|
Early in the next release cycle update the vendored dependencies
|
||||||
|
|
||||||
Early in the next release cycle update the dependencies.
|
* Review any pinned packages in go.mod and remove if possible
|
||||||
|
* make update
|
||||||
|
* git status
|
||||||
|
* git add new files
|
||||||
|
* git commit -a -v
|
||||||
|
|
||||||
- Review any pinned packages in go.mod and remove if possible
|
If `make update` fails with errors like this:
|
||||||
- `make updatedirect`
|
|
||||||
- `make GOTAGS=cmount`
|
|
||||||
- `make compiletest`
|
|
||||||
- Fix anything which doesn't compile at this point and commit changes here
|
|
||||||
- `git commit -a -v -m "build: update all dependencies"`
|
|
||||||
|
|
||||||
If the `make updatedirect` upgrades the version of go in the `go.mod`
|
```
|
||||||
|
# github.com/cpuguy83/go-md2man/md2man
|
||||||
```text
|
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:11:16: undefined: blackfriday.EXTENSION_NO_INTRA_EMPHASIS
|
||||||
go 1.22.0
|
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:12:16: undefined: blackfriday.EXTENSION_TABLES
|
||||||
```
|
```
|
||||||
|
|
||||||
then go to manual mode. `go1.22` here is the lowest supported version
|
Can be fixed with
|
||||||
in the `go.mod`.
|
|
||||||
|
|
||||||
If `make updatedirect` added a `toolchain` directive then remove it.
|
* GO111MODULE=on go get -u github.com/russross/blackfriday@v1.5.2
|
||||||
We don't want to force a toolchain on our users. Linux packagers are
|
* GO111MODULE=on go mod tidy
|
||||||
often using a version of Go that is a few versions out of date.
|
* GO111MODULE=on go mod vendor
|
||||||
|
|
||||||
```console
|
|
||||||
go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
|
|
||||||
go get -d $(cat /tmp/potential-upgrades)
|
|
||||||
go mod tidy -go=1.22 -compat=1.22
|
|
||||||
```
|
|
||||||
|
|
||||||
If the `go mod tidy` fails use the output from it to remove the
|
|
||||||
package which can't be upgraded from `/tmp/potential-upgrades` when
|
|
||||||
done
|
|
||||||
|
|
||||||
```console
|
|
||||||
git co go.mod go.sum
|
|
||||||
```
|
|
||||||
|
|
||||||
And try again.
|
|
||||||
|
|
||||||
Optionally upgrade the direct and indirect dependencies. This is very
|
|
||||||
likely to fail if the manual method was used abve - in that case
|
|
||||||
ignore it as it is too time consuming to fix.
|
|
||||||
|
|
||||||
- `make update`
|
|
||||||
- `make GOTAGS=cmount`
|
|
||||||
- `make compiletest`
|
|
||||||
- roll back any updates which didn't compile
|
|
||||||
- `git commit -a -v --amend`
|
|
||||||
- **NB** watch out for this changing the default go version in `go.mod`
|
|
||||||
|
|
||||||
Note that `make update` updates all direct and indirect dependencies
|
|
||||||
and there can occasionally be forwards compatibility problems with
|
|
||||||
doing that so it may be necessary to roll back dependencies to the
|
|
||||||
version specified by `make updatedirect` in order to get rclone to
|
|
||||||
build.
|
|
||||||
|
|
||||||
Once it compiles locally, push it on a test branch and commit fixes
|
|
||||||
until the tests pass.
|
|
||||||
|
|
||||||
### Major versions
|
|
||||||
|
|
||||||
The above procedure will not upgrade major versions, so v2 to v3.
|
|
||||||
However this tool can show which major versions might need to be
|
|
||||||
upgraded:
|
|
||||||
|
|
||||||
```console
|
|
||||||
go run github.com/icholy/gomajor@latest list -major
|
|
||||||
```
|
|
||||||
|
|
||||||
Expect API breakage when updating major versions.
|
|
||||||
|
|
||||||
## Tidy beta

At some point after the release run

```console
bin/tidy-beta v1.55
```

where the version number is that of a couple of releases back, to remove old beta binaries.
## Making a point release

If rclone needs a point release due to some horrendous bug:

Set vars (a consolidated shell sketch of the variable setup and branch creation follows the lists below)

- BASE_TAG=v1.XX # e.g. v1.52
- NEW_TAG=${BASE_TAG}.Y # e.g. v1.52.1
- echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1

First make the release branch. If this is a second point release then
this will be done already.

- git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
- make startstable

Now

- git co ${BASE_TAG}-stable
- git cherry-pick any fixes
- make startstable
- Do the steps as above
- git co master
- `#` cherry pick the changes to the changelog - check the diff to make sure it
  is correct
- git checkout ${BASE_TAG}-stable docs/content/changelog.md
- git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
- git push
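For reference, the variable setup and release branch creation above can be typed as one shell session. This is a minimal sketch assuming bash, with v1.52.1 standing in for the actual point release; `git co` in the lists above is shorthand for `git checkout`:

```console
BASE_TAG=v1.52
NEW_TAG=${BASE_TAG}.1
echo $BASE_TAG $NEW_TAG   # v1.52 v1.52.1

# First point release only: create the stable branch from the .0 release tag
git checkout -b ${BASE_TAG}-stable ${BASE_TAG}.0
make startstable
```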
## Sponsor logos

If updating the website note that the sponsor logos have been moved out of the
main repository.

You will need to checkout `/docs/static/img/logos` from <https://github.com/rclone/third-party-logos>
which is a private repo containing artwork from sponsors.
## Update the website between releases

Create an update website branch based off the last release

```console
git co -b update-website
```

If the branch already exists, double check there are no commits that need saving.

Now reset the branch to the last release

```console
git reset --hard v1.64.0
```

Create the changes, check them in, test with `make serve` then

```console
make upload_test_website
```

Check out <https://test.rclone.org> and when happy

```console
make upload_website
```

Cherry pick any changes back to master and the stable branch if it is active.
## Making a manual build of docker

To do a basic build of rclone's docker image to debug builds locally:

```console
docker buildx build --load -t rclone/rclone:testing --progress=plain .
docker run --rm rclone/rclone:testing version
```

To test the multiplatform build

```console
docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 .
```

To make a full build then set the tags correctly and add `--push`

Note that you can't only build one architecture - you need to build them all.

```console
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
```
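After pushing a multi-architecture build it is worth checking that the manifest really lists every platform. One way to do that (this queries the registry, so it only works for tags which have been pushed):

```console
docker buildx imagetools inspect rclone/rclone:latest
```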
@@ -1,13 +1,10 @@
-// Package alias implements a virtual provider to rename existing remotes.
 package alias
 
 import (
-	"context"
 	"errors"
 	"strings"
 
 	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/cache"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/fspath"
@@ -21,7 +18,7 @@ func init() {
 		NewFs: NewFs,
 		Options: []fs.Option{{
 			Name:     "remote",
-			Help:     "Remote or path to alias.\n\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
+			Help:     "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
 			Required: true,
 		}},
 	}
@@ -36,7 +33,7 @@ type Options struct {
 // NewFs constructs an Fs from the path.
 //
 // The returned Fs is the actual Fs, referenced by remote in the config
-func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
@@ -49,5 +46,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if strings.HasPrefix(opt.Remote, name+":") {
 		return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
 	}
-	return cache.Get(ctx, fspath.JoinRootPath(opt.Remote, root))
+	fsInfo, configName, fsPath, config, err := fs.ConfigFs(opt.Remote)
+	if err != nil {
+		return nil, err
+	}
+	return fsInfo.NewFs(configName, fspath.JoinRootPath(fsPath, root), config)
 }
@@ -11,7 +11,6 @@ import (
 	_ "github.com/rclone/rclone/backend/local" // pull in test backend
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
-	"github.com/rclone/rclone/fs/config/configfile"
 	"github.com/stretchr/testify/require"
 )
 
@@ -20,11 +19,11 @@ var (
 )
 
 func prepare(t *testing.T, root string) {
-	configfile.Install()
+	config.LoadConfig()
 
 	// Configure the remote
-	config.FileSetValue(remoteName, "type", "alias")
-	config.FileSetValue(remoteName, "remote", root)
+	config.FileSet(remoteName, "type", "alias")
+	config.FileSet(remoteName, "remote", root)
 }
 
 func TestNewFS(t *testing.T) {
@@ -55,22 +54,21 @@ func TestNewFS(t *testing.T) {
 			{"four/under four.txt", 9, false},
 		}},
 		{"four", "..", "", true, []testEntry{
-			{"five", -1, true},
-			{"under four.txt", 9, false},
+			{"four", -1, true},
+			{"one%.txt", 6, false},
+			{"three", -1, true},
+			{"two.html", 7, false},
 		}},
-		{"", "../../three", "", true, []testEntry{
+		{"four", "../three", "", true, []testEntry{
 			{"underthree.txt", 9, false},
 		}},
-		{"four", "../../five", "", true, []testEntry{
-			{"underfive.txt", 6, false},
-		}},
 	} {
 		what := fmt.Sprintf("test %d remoteRoot=%q, fsRoot=%q, fsList=%q", testi, test.remoteRoot, test.fsRoot, test.fsList)
 
 		remoteRoot, err := filepath.Abs(filepath.FromSlash(path.Join("test/files", test.remoteRoot)))
 		require.NoError(t, err, what)
 		prepare(t, remoteRoot)
-		f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
+		f, err := fs.NewFs(fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
 		require.NoError(t, err, what)
 		gotEntries, err := f.List(context.Background(), test.fsList)
 		require.NoError(t, err, what)
@@ -81,12 +79,10 @@ func TestNewFS(t *testing.T) {
 		for i, gotEntry := range gotEntries {
 			what := fmt.Sprintf("%s, entry=%d", what, i)
 			wantEntry := test.entries[i]
-			_, isDir := gotEntry.(fs.Directory)
 
 			require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
-			if !isDir {
-				require.Equal(t, wantEntry.size, gotEntry.Size(), what)
-			}
+			require.Equal(t, wantEntry.size, gotEntry.Size(), what)
+			_, isDir := gotEntry.(fs.Directory)
 			require.Equal(t, wantEntry.isDir, isDir, what)
 		}
 	}
@@ -94,7 +90,7 @@ func TestNewFS(t *testing.T) {
 
 func TestNewFSNoRemote(t *testing.T) {
 	prepare(t, "")
-	f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:", remoteName))
+	f, err := fs.NewFs(fmt.Sprintf("%s:", remoteName))
 
 	require.Error(t, err)
 	require.Nil(t, f)
@@ -102,7 +98,7 @@ func TestNewFSNoRemote(t *testing.T) {
 
 func TestNewFSInvalidRemote(t *testing.T) {
 	prepare(t, "not_existing_test_remote:")
-	f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:", remoteName))
+	f, err := fs.NewFs(fmt.Sprintf("%s:", remoteName))
 
 	require.Error(t, err)
 	require.Nil(t, f)
@@ -1,70 +1,43 @@
-// Package all imports all the backends
 package all
 
 import (
 	// Active file systems
 	_ "github.com/rclone/rclone/backend/alias"
-	_ "github.com/rclone/rclone/backend/archive"
+	_ "github.com/rclone/rclone/backend/amazonclouddrive"
 	_ "github.com/rclone/rclone/backend/azureblob"
-	_ "github.com/rclone/rclone/backend/azurefiles"
 	_ "github.com/rclone/rclone/backend/b2"
 	_ "github.com/rclone/rclone/backend/box"
 	_ "github.com/rclone/rclone/backend/cache"
 	_ "github.com/rclone/rclone/backend/chunker"
-	_ "github.com/rclone/rclone/backend/cloudinary"
-	_ "github.com/rclone/rclone/backend/combine"
-	_ "github.com/rclone/rclone/backend/compress"
 	_ "github.com/rclone/rclone/backend/crypt"
-	_ "github.com/rclone/rclone/backend/doi"
 	_ "github.com/rclone/rclone/backend/drive"
 	_ "github.com/rclone/rclone/backend/dropbox"
 	_ "github.com/rclone/rclone/backend/fichier"
-	_ "github.com/rclone/rclone/backend/filefabric"
-	_ "github.com/rclone/rclone/backend/filelu"
-	_ "github.com/rclone/rclone/backend/filescom"
 	_ "github.com/rclone/rclone/backend/ftp"
-	_ "github.com/rclone/rclone/backend/gofile"
 	_ "github.com/rclone/rclone/backend/googlecloudstorage"
 	_ "github.com/rclone/rclone/backend/googlephotos"
-	_ "github.com/rclone/rclone/backend/hasher"
-	_ "github.com/rclone/rclone/backend/hdfs"
-	_ "github.com/rclone/rclone/backend/hidrive"
 	_ "github.com/rclone/rclone/backend/http"
-	_ "github.com/rclone/rclone/backend/iclouddrive"
-	_ "github.com/rclone/rclone/backend/imagekit"
-	_ "github.com/rclone/rclone/backend/internetarchive"
+	_ "github.com/rclone/rclone/backend/hubic"
 	_ "github.com/rclone/rclone/backend/jottacloud"
 	_ "github.com/rclone/rclone/backend/koofr"
-	_ "github.com/rclone/rclone/backend/linkbox"
 	_ "github.com/rclone/rclone/backend/local"
 	_ "github.com/rclone/rclone/backend/mailru"
 	_ "github.com/rclone/rclone/backend/mega"
 	_ "github.com/rclone/rclone/backend/memory"
-	_ "github.com/rclone/rclone/backend/netstorage"
 	_ "github.com/rclone/rclone/backend/onedrive"
 	_ "github.com/rclone/rclone/backend/opendrive"
-	_ "github.com/rclone/rclone/backend/oracleobjectstorage"
 	_ "github.com/rclone/rclone/backend/pcloud"
-	_ "github.com/rclone/rclone/backend/pikpak"
-	_ "github.com/rclone/rclone/backend/pixeldrain"
 	_ "github.com/rclone/rclone/backend/premiumizeme"
-	_ "github.com/rclone/rclone/backend/protondrive"
 	_ "github.com/rclone/rclone/backend/putio"
 	_ "github.com/rclone/rclone/backend/qingstor"
-	_ "github.com/rclone/rclone/backend/quatrix"
 	_ "github.com/rclone/rclone/backend/s3"
 	_ "github.com/rclone/rclone/backend/seafile"
 	_ "github.com/rclone/rclone/backend/sftp"
 	_ "github.com/rclone/rclone/backend/sharefile"
-	_ "github.com/rclone/rclone/backend/sia"
-	_ "github.com/rclone/rclone/backend/smb"
-	_ "github.com/rclone/rclone/backend/storj"
 	_ "github.com/rclone/rclone/backend/sugarsync"
 	_ "github.com/rclone/rclone/backend/swift"
-	_ "github.com/rclone/rclone/backend/ulozto"
+	_ "github.com/rclone/rclone/backend/tardigrade"
 	_ "github.com/rclone/rclone/backend/union"
-	_ "github.com/rclone/rclone/backend/uptobox"
 	_ "github.com/rclone/rclone/backend/webdav"
 	_ "github.com/rclone/rclone/backend/yandex"
-	_ "github.com/rclone/rclone/backend/zoho"
 )
backend/amazonclouddrive/amazonclouddrive.go (1431 lines, new file - diff suppressed because it is too large)

backend/amazonclouddrive/amazonclouddrive_test.go (20 lines, new file)

@@ -0,0 +1,20 @@
// Test AmazonCloudDrive filesystem interface

// +build acd

package amazonclouddrive_test

import (
	"testing"

	"github.com/rclone/rclone/backend/amazonclouddrive"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
	fstests.RemoteName = "TestAmazonCloudDrive:"
	fstests.Run(t)
}
@@ -1,679 +0,0 @@
//go:build !plan9

// Package archive implements a backend to access archive files in a remote
package archive

// FIXME factor common code between backends out - eg VFS initialization

// FIXME can we generalize the VFS handle caching and use it in zip backend

// Factor more stuff out if possible

// Odd stats which are probably coming from the VFS
// * tensorflow.sqfs: 0% /3.074Gi, 204.426Ki/s, 4h22m46s

// FIXME this will perform poorly for unpacking as the VFS Reader is bad
// at multiple streams - need cache mode setting?

import (
	"context"
	"errors"
	"fmt"
	"io"
	"path"
	"strings"
	"sync"
	"time"

	// Import all the required archivers here
	_ "github.com/rclone/rclone/backend/archive/squashfs"
	_ "github.com/rclone/rclone/backend/archive/zip"

	"github.com/rclone/rclone/backend/archive/archiver"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/cache"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fspath"
	"github.com/rclone/rclone/fs/hash"
)

// Register with Fs
func init() {
	fsi := &fs.RegInfo{
		Name:        "archive",
		Description: "Read archives",
		NewFs:       NewFs,
		MetadataInfo: &fs.MetadataInfo{
			Help: `Any metadata supported by the underlying remote is read and written.`,
		},
		Options: []fs.Option{{
			Name: "remote",
			Help: `Remote to wrap to read archives from.

Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
"myremote:bucket" or "myremote:".

If this is left empty, then the archive backend will use the root as
the remote.

This means that you can use :archive:remote:path and it will be
equivalent to setting remote="remote:path".
`,
			Required: false,
		}},
	}
	fs.Register(fsi)
}

// Options defines the configuration for this backend
type Options struct {
	Remote string `config:"remote"`
}

// Fs represents a archive of upstreams
type Fs struct {
	name     string       // name of this remote
	features *fs.Features // optional features
	opt      Options      // options for this Fs
	root     string       // the path we are working on
	f        fs.Fs        // remote we are wrapping
	wrapper  fs.Fs        // fs that wraps us

	mu       sync.Mutex          // protects the below
	archives map[string]*archive // the archives we have, by path
}

// A single open archive
type archive struct {
	archiver archiver.Archiver // archiver responsible
	remote   string            // path to the archive
	prefix   string            // prefix to add on to listings
	root     string            // root of the archive to remove from listings
	mu       sync.Mutex        // protects the following variables
	f        fs.Fs             // the archive Fs, may be nil
}

// If remote is an archive then return it otherwise return nil
func findArchive(remote string) *archive {
	// FIXME use something faster than linear search?
	for _, archiver := range archiver.Archivers {
		if strings.HasSuffix(remote, archiver.Extension) {
			return &archive{
				archiver: archiver,
				remote:   remote,
				prefix:   remote,
				root:     "",
			}
		}
	}
	return nil
}

// Find an archive buried in remote
func subArchive(remote string) *archive {
	archive := findArchive(remote)
	if archive != nil {
		return archive
	}
	parent := path.Dir(remote)
	if parent == "/" || parent == "." {
		return nil
	}
	return subArchive(parent)
}

// If remote is an archive then return it otherwise return nil
func (f *Fs) findArchive(remote string) (archive *archive) {
	archive = findArchive(remote)
	if archive != nil {
		f.mu.Lock()
		f.archives[remote] = archive
		f.mu.Unlock()
	}
	return archive
}

// Instantiate archive if it hasn't been instantiated yet
//
// This is done lazily so that we can list a directory full of
// archives without opening them all.
func (a *archive) init(ctx context.Context, f fs.Fs) (fs.Fs, error) {
	a.mu.Lock()
	defer a.mu.Unlock()
	if a.f != nil {
		return a.f, nil
	}
	newFs, err := a.archiver.New(ctx, f, a.remote, a.prefix, a.root)
	if err != nil && err != fs.ErrorIsFile {
		return nil, fmt.Errorf("failed to create archive %q: %w", a.remote, err)
	}
	a.f = newFs
	return a.f, nil
}

// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs.Fs, err error) {
	// defer log.Trace(nil, "name=%q, root=%q, m=%v", name, root, m)("f=%+v, err=%v", &outFs, &err)
	// Parse config into Options struct
	opt := new(Options)
	err = configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	remote := opt.Remote
	origRoot := root

	// If remote is empty, use the root instead
	if remote == "" {
		remote = root
		root = ""
	}
	isDirectory := strings.HasSuffix(remote, "/")
	remote = strings.TrimRight(remote, "/")
	if remote == "" {
		remote = "/"
	}
	if strings.HasPrefix(remote, name+":") {
		return nil, errors.New("can't point archive remote at itself - check the value of the upstreams setting")
	}

	_ = isDirectory

	foundArchive := subArchive(remote)
	if foundArchive != nil {
		fs.Debugf(nil, "Found archiver for %q remote %q", foundArchive.archiver.Extension, foundArchive.remote)
		// Archive path
		foundArchive.root = strings.Trim(remote[len(foundArchive.remote):], "/")
		// Path to the archive
		archiveRemote := remote[:len(foundArchive.remote)]
		// Remote is archive leaf name
		foundArchive.remote = path.Base(archiveRemote)
		foundArchive.prefix = ""
		// Point remote to archive file
		remote = archiveRemote
	}

	// Make sure to remove trailing . referring to the current dir
	if path.Base(root) == "." {
		root = strings.TrimSuffix(root, ".")
	}
	remotePath := fspath.JoinRootPath(remote, root)
	wrappedFs, err := cache.Get(ctx, remotePath)
	if err != fs.ErrorIsFile && err != nil {
		return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remote, err)
	}

	f := &Fs{
		name: name,
		//root: path.Join(remotePath, root),
		root:     origRoot,
		opt:      *opt,
		f:        wrappedFs,
		archives: make(map[string]*archive),
	}
	cache.PinUntilFinalized(f.f, f)
	// the features here are ones we could support, and they are
	// ANDed with the ones from wrappedFs
	f.features = (&fs.Features{
		CaseInsensitive:         true,
		DuplicateFiles:          false,
		ReadMimeType:            true,
		WriteMimeType:           true,
		CanHaveEmptyDirectories: true,
		BucketBased:             true,
		SetTier:                 true,
		GetTier:                 true,
		ReadMetadata:            true,
		WriteMetadata:           true,
		UserMetadata:            true,
		PartialUploads:          true,
	}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)

	if foundArchive != nil {
		fs.Debugf(f, "Root is an archive")
		if err != fs.ErrorIsFile {
			return nil, fmt.Errorf("expecting to find a file at %q", remote)
		}
		return foundArchive.init(ctx, f.f)
	}
	// Correct root if definitely pointing to a file
	if err == fs.ErrorIsFile {
		f.root = path.Dir(f.root)
		if f.root == "." || f.root == "/" {
			f.root = ""
		}
	}
	return f, err
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	return fmt.Sprintf("archive root '%s'", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return f.f.Rmdir(ctx, dir)
}

// Hashes returns hash.HashNone to indicate remote hashing is unavailable
func (f *Fs) Hashes() hash.Set {
	return f.f.Hashes()
}

// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	return f.f.Mkdir(ctx, dir)
}

// Purge all files in the directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
	do := f.f.Features().Purge
	if do == nil {
		return fs.ErrorCantPurge
	}
	return do(ctx, dir)
}

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	do := f.f.Features().Copy
	if do == nil {
		return nil, fs.ErrorCantCopy
	}
	// FIXME
	// o, ok := src.(*Object)
	// if !ok {
	// 	return nil, fs.ErrorCantCopy
	// }
	return do(ctx, src, remote)
}

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	do := f.f.Features().Move
	if do == nil {
		return nil, fs.ErrorCantMove
	}
	// FIXME
	// o, ok := src.(*Object)
	// if !ok {
	// 	return nil, fs.ErrorCantMove
	// }
	return do(ctx, src, remote)
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
	do := f.f.Features().DirMove
	if do == nil {
		return fs.ErrorCantDirMove
	}
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
	return do(ctx, srcFs.f, srcRemote, dstRemote)
}

// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
// At least one value will be written to the channel,
// specifying the initial value and updated values might
// follow. A 0 Duration should pause the polling.
// The ChangeNotify implementation must empty the channel
// regularly. When the channel gets closed, the implementation
// should stop polling and release resources.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), ch <-chan time.Duration) {
	do := f.f.Features().ChangeNotify
	if do == nil {
		return
	}
	wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
		// fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
		notifyFunc(path, entryType)
	}
	do(ctx, wrappedNotifyFunc, ch)
}

// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
	do := f.f.Features().DirCacheFlush
	if do != nil {
		do()
	}
}

func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) {
	var o fs.Object
	var err error
	if stream {
		o, err = f.f.Features().PutStream(ctx, in, src, options...)
	} else {
		o, err = f.f.Put(ctx, in, src, options...)
	}
	if err != nil {
		return nil, err
	}
	return o, nil
}

// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	o, err := f.NewObject(ctx, src.Remote())
	switch err {
	case nil:
		return o, o.Update(ctx, in, src, options...)
	case fs.ErrorObjectNotFound:
		return f.put(ctx, in, src, false, options...)
	default:
		return nil, err
	}
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	o, err := f.NewObject(ctx, src.Remote())
	switch err {
	case nil:
		return o, o.Update(ctx, in, src, options...)
	case fs.ErrorObjectNotFound:
		return f.put(ctx, in, src, true, options...)
	default:
		return nil, err
	}
}

// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	do := f.f.Features().About
	if do == nil {
		return nil, errors.New("not supported by underlying remote")
	}
	return do(ctx)
}

// Find the Fs for the directory
func (f *Fs) findFs(ctx context.Context, dir string) (subFs fs.Fs, err error) {
	f.mu.Lock()
	defer f.mu.Unlock()

	subFs = f.f

	// FIXME should do this with a better datastructure like a prefix tree
	// FIXME want to find the longest first otherwise nesting won't work
	dirSlash := dir + "/"
	for archiverRemote, archive := range f.archives {
		subRemote := archiverRemote + "/"
		if strings.HasPrefix(dirSlash, subRemote) {
			subFs, err = archive.init(ctx, f.f)
			if err != nil {
				return nil, err
			}
			break
		}
	}

	return subFs, nil
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)

	subFs, err := f.findFs(ctx, dir)
	if err != nil {
		return nil, err
	}

	entries, err = subFs.List(ctx, dir)
	if err != nil {
		return nil, err
	}
	for i, entry := range entries {
		// Can only unarchive files
		if o, ok := entry.(fs.Object); ok {
			remote := o.Remote()
			archive := f.findArchive(remote)
			if archive != nil {
				// Overwrite entry with directory
				entries[i] = fs.NewDir(remote, o.ModTime(ctx))
			}
		}
	}
	return entries, nil
}

// NewObject creates a new remote archive file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	dir := path.Dir(remote)
	if dir == "/" || dir == "." {
		dir = ""
	}

	subFs, err := f.findFs(ctx, dir)
	if err != nil {
		return nil, err
	}

	o, err := subFs.NewObject(ctx, remote)
	if err != nil {
		return nil, err
	}
	return o, nil
}

// Precision is the greatest precision of all the archivers
func (f *Fs) Precision() time.Duration {
	return time.Second
}

// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
	if do := f.f.Features().Shutdown; do != nil {
		return do(ctx)
	}
	return nil
}

// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
	do := f.f.Features().PublicLink
	if do == nil {
		return "", errors.New("PublicLink not supported")
	}
	return do(ctx, remote, expire, unlink)
}

// PutUnchecked in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
//
// May create duplicates or return errors if src already
// exists.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	do := f.f.Features().PutUnchecked
	if do == nil {
		return nil, errors.New("can't PutUnchecked")
	}
	o, err := do(ctx, in, src, options...)
	if err != nil {
		return nil, err
	}
	return o, nil
}

// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
	if len(dirs) == 0 {
		return nil
	}
	do := f.f.Features().MergeDirs
	if do == nil {
		return errors.New("MergeDirs not supported")
	}
	return do(ctx, dirs)
}

// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
func (f *Fs) CleanUp(ctx context.Context) error {
	do := f.f.Features().CleanUp
	if do == nil {
		return errors.New("not supported by underlying remote")
	}
	return do(ctx)
}

// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
	do := f.f.Features().OpenWriterAt
	if do == nil {
		return nil, fs.ErrorNotImplemented
	}
	return do(ctx, remote, size)
}

// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
	return f.f
}

// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
	return f.wrapper
}

// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
	f.wrapper = wrapper
}

// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
	do := f.f.Features().OpenChunkWriter
	if do == nil {
		return info, nil, fs.ErrorNotImplemented
	}
	return do(ctx, remote, src, options...)
}

// UserInfo returns info about the connected user
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
	do := f.f.Features().UserInfo
	if do == nil {
		return nil, fs.ErrorNotImplemented
	}
	return do(ctx)
}

// Disconnect the current user
func (f *Fs) Disconnect(ctx context.Context) error {
	do := f.f.Features().Disconnect
	if do == nil {
		return fs.ErrorNotImplemented
	}
	return do(ctx)
}

// Check the interfaces are satisfied
var (
	_ fs.Fs              = (*Fs)(nil)
	_ fs.Purger          = (*Fs)(nil)
	_ fs.PutStreamer     = (*Fs)(nil)
	_ fs.Copier          = (*Fs)(nil)
	_ fs.Mover           = (*Fs)(nil)
	_ fs.DirMover        = (*Fs)(nil)
	_ fs.DirCacheFlusher = (*Fs)(nil)
	_ fs.ChangeNotifier  = (*Fs)(nil)
	_ fs.Abouter         = (*Fs)(nil)
	_ fs.Shutdowner      = (*Fs)(nil)
	_ fs.PublicLinker    = (*Fs)(nil)
	_ fs.PutUncheckeder  = (*Fs)(nil)
	_ fs.MergeDirser     = (*Fs)(nil)
	_ fs.CleanUpper      = (*Fs)(nil)
	_ fs.OpenWriterAter  = (*Fs)(nil)
	_ fs.OpenChunkWriter = (*Fs)(nil)
	_ fs.UserInfoer      = (*Fs)(nil)
	_ fs.Disconnecter    = (*Fs)(nil)
	// FIXME _ fs.FullObject = (*Object)(nil)
)
@@ -1,221 +0,0 @@
//go:build !plan9

package archive

import (
	"bytes"
	"context"
	"fmt"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"strconv"
	"strings"
	"testing"

	_ "github.com/rclone/rclone/backend/local"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/cache"
	"github.com/rclone/rclone/fs/filter"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// FIXME need to test Open with seek

// run - run a shell command
func run(t *testing.T, args ...string) {
	cmd := exec.Command(args[0], args[1:]...)
	fs.Debugf(nil, "run args = %v", args)
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf(`
----------------------------
Failed to run %v: %v
Command output was:
%s
----------------------------
`, args, err, out)
	}
}

// check the dst and src are identical
func checkTree(ctx context.Context, name string, t *testing.T, dstArchive, src string, expectedCount int) {
	t.Run(name, func(t *testing.T) {
		fs.Debugf(nil, "check %q vs %q", dstArchive, src)
		Farchive, err := cache.Get(ctx, dstArchive)
		if err != fs.ErrorIsFile {
			require.NoError(t, err)
		}
		Fsrc, err := cache.Get(ctx, src)
		if err != fs.ErrorIsFile {
			require.NoError(t, err)
		}

		var matches bytes.Buffer
		opt := operations.CheckOpt{
			Fdst:  Farchive,
			Fsrc:  Fsrc,
			Match: &matches,
		}

		for _, action := range []string{"Check", "Download"} {
			t.Run(action, func(t *testing.T) {
				matches.Reset()
				if action == "Download" {
					assert.NoError(t, operations.CheckDownload(ctx, &opt))
				} else {
					assert.NoError(t, operations.Check(ctx, &opt))
				}
				if expectedCount > 0 {
					assert.Equal(t, expectedCount, strings.Count(matches.String(), "\n"))
				}
			})
		}

		t.Run("NewObject", func(t *testing.T) {
			// Check we can run NewObject on all files and read them
			assert.NoError(t, operations.ListFn(ctx, Fsrc, func(srcObj fs.Object) {
				if t.Failed() {
					return
				}
				remote := srcObj.Remote()
				archiveObj, err := Farchive.NewObject(ctx, remote)
				require.NoError(t, err, remote)
				assert.Equal(t, remote, archiveObj.Remote(), remote)

				// Test that the contents are the same
				archiveBuf := fstests.ReadObject(ctx, t, archiveObj, -1)
				srcBuf := fstests.ReadObject(ctx, t, srcObj, -1)
				assert.Equal(t, srcBuf, archiveBuf)

				if len(srcBuf) < 81 {
					return
				}

				// Tests that Open works with SeekOption
				assert.Equal(t, srcBuf[50:], fstests.ReadObject(ctx, t, archiveObj, -1, &fs.SeekOption{Offset: 50}), "contents differ after seek")

				// Tests that Open works with RangeOption
				for _, test := range []struct {
					ro                 fs.RangeOption
					wantStart, wantEnd int
				}{
					{fs.RangeOption{Start: 5, End: 15}, 5, 16},
					{fs.RangeOption{Start: 80, End: -1}, 80, len(srcBuf)},
					{fs.RangeOption{Start: 81, End: 100000}, 81, len(srcBuf)},
					{fs.RangeOption{Start: -1, End: 20}, len(srcBuf) - 20, len(srcBuf)}, // if start is omitted this means get the final bytes
					// {fs.RangeOption{Start: -1, End: -1}, 0, len(srcBuf)}, - this seems to work but the RFC doesn't define it
				} {
					got := fstests.ReadObject(ctx, t, archiveObj, -1, &test.ro)
					foundAt := strings.Index(srcBuf, got)
					help := fmt.Sprintf("%#v failed want [%d:%d] got [%d:%d]", test.ro, test.wantStart, test.wantEnd, foundAt, foundAt+len(got))
					assert.Equal(t, srcBuf[test.wantStart:test.wantEnd], got, help)
				}

				// Test that the modtimes are correct
				fstest.AssertTimeEqualWithPrecision(t, remote, srcObj.ModTime(ctx), archiveObj.ModTime(ctx), Farchive.Precision())

				// Test that the sizes are correct
				assert.Equal(t, srcObj.Size(), archiveObj.Size())

				// Test that Strings are OK
				assert.Equal(t, srcObj.String(), archiveObj.String())
			}))
		})

		// t.Logf("Fdst ------------- %v", Fdst)
		// operations.List(ctx, Fdst, os.Stdout)
		// t.Logf("Fsrc ------------- %v", Fsrc)
		// operations.List(ctx, Fsrc, os.Stdout)
	})
}

// test creating and reading back some archives
//
// Note that this uses rclone and zip as external binaries.
func testArchive(t *testing.T, archiveName string, archiveFn func(t *testing.T, output, input string)) {
	ctx := context.Background()
	checkFiles := 1000

	// create random test input files
	inputRoot := t.TempDir()
	input := filepath.Join(inputRoot, archiveName)
	require.NoError(t, os.Mkdir(input, 0777))
	run(t, "rclone", "test", "makefiles", "--files", strconv.Itoa(checkFiles), "--ascii", input)

	// Create the archive
	output := t.TempDir()
	zipFile := path.Join(output, archiveName)
	archiveFn(t, zipFile, input)

	// Check the archive itself
	checkTree(ctx, "Archive", t, ":archive:"+zipFile, input, checkFiles)

	// Now check a subdirectory
	fis, err := os.ReadDir(input)
	require.NoError(t, err)
	subDir := "NOT FOUND"
	aFile := "NOT FOUND"
	for _, fi := range fis {
		if fi.IsDir() {
			subDir = fi.Name()
		} else {
			aFile = fi.Name()
		}
	}
	checkTree(ctx, "SubDir", t, ":archive:"+zipFile+"/"+subDir, filepath.Join(input, subDir), 0)

	// Now check a single file
	fiCtx, fi := filter.AddConfig(ctx)
	require.NoError(t, fi.AddRule("+ "+aFile))
	require.NoError(t, fi.AddRule("- *"))
	checkTree(fiCtx, "SingleFile", t, ":archive:"+zipFile+"/"+aFile, filepath.Join(input, aFile), 0)

	// Now check the level above
	checkTree(ctx, "Root", t, ":archive:"+output, inputRoot, checkFiles)
	// run(t, "cp", "-a", inputRoot, output, "/tmp/test-"+archiveName)
}

// Make sure we have the executable named
func skipIfNoExe(t *testing.T, exeName string) {
	_, err := exec.LookPath(exeName)
	if err != nil {
		t.Skipf("%s executable not installed", exeName)
	}
}

// Test creating and reading back some archives
//
// Note that this uses rclone and zip as external binaries.
func TestArchiveZip(t *testing.T) {
	fstest.Initialise()
	skipIfNoExe(t, "zip")
	skipIfNoExe(t, "rclone")
	testArchive(t, "test.zip", func(t *testing.T, output, input string) {
		oldcwd, err := os.Getwd()
		require.NoError(t, err)
		require.NoError(t, os.Chdir(input))
		defer func() {
			require.NoError(t, os.Chdir(oldcwd))
		}()
		run(t, "zip", "-9r", output, ".")
	})
}

// Test creating and reading back some archives
//
// Note that this uses rclone and squashfs as external binaries.
func TestArchiveSquashfs(t *testing.T) {
	fstest.Initialise()
	skipIfNoExe(t, "mksquashfs")
	skipIfNoExe(t, "rclone")
	testArchive(t, "test.sqfs", func(t *testing.T, output, input string) {
		run(t, "mksquashfs", input, output)
	})
}
@@ -1,67 +0,0 @@
//go:build !plan9

// Test Archive filesystem interface
package archive_test

import (
	"testing"

	_ "github.com/rclone/rclone/backend/local"
	_ "github.com/rclone/rclone/backend/memory"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
)

var (
	unimplementableFsMethods = []string{"ListR", "ListP", "MkdirMetadata", "DirSetModTime"}
	// In these tests we receive objects from the underlying remote which don't implement these methods
	unimplementableObjectMethods = []string{"GetTier", "ID", "Metadata", "MimeType", "SetTier", "UnWrap", "SetMetadata"}
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	if *fstest.RemoteName == "" {
		t.Skip("Skipping as -remote not set")
	}
	fstests.Run(t, &fstests.Opt{
		RemoteName:                   *fstest.RemoteName,
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
	})
}

func TestLocal(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	remote := t.TempDir()
	name := "TestArchiveLocal"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":",
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "archive"},
			{Name: name, Key: "remote", Value: remote},
		},
		QuickTestOK:                  true,
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
	})
}

func TestMemory(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	remote := ":memory:"
	name := "TestArchiveMemory"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":",
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "archive"},
			{Name: name, Key: "remote", Value: remote},
		},
		QuickTestOK:                  true,
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
	})
}
@@ -1,7 +0,0 @@
// Build for archive for unsupported platforms to stop go complaining
// about "no buildable Go source files "

//go:build plan9

// Package archive implements a backend to access archive files in a remote
package archive
@@ -1,24 +0,0 @@
// Package archiver registers all the archivers
package archiver

import (
    "context"

    "github.com/rclone/rclone/fs"
)

// Archiver describes an archive package
type Archiver struct {
    // New constructs an Fs from the (wrappedFs, remote) with the objects
    // prefix with prefix and rooted at root
    New       func(ctx context.Context, f fs.Fs, remote, prefix, root string) (fs.Fs, error)
    Extension string
}

// Archivers is a slice of all registered archivers
var Archivers []Archiver

// Register adds the archivers provided to the list of known archivers
func Register(as ...Archiver) {
    Archivers = append(Archivers, as...)
}
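For orientation, here is a minimal sketch of how an archive format could hook into the Register/Archivers registry above, following the same init pattern the squashfs and zip packages use further down. The "tarball" package name and the newTarFs constructor are hypothetical; only the Archiver fields come from the registry shown above.

// Sketch only - a hypothetical tar archiver registering itself.
package tarball

import (
    "context"
    "errors"

    "github.com/rclone/rclone/backend/archive/archiver"
    "github.com/rclone/rclone/fs"
)

func init() {
    archiver.Register(archiver.Archiver{
        New:       newTarFs,
        Extension: ".tar",
    })
}

// newTarFs would wrap wrappedFs, exposing the contents of the tar file at
// remote under prefix, rooted at root, as described by Archiver.New above.
func newTarFs(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) {
    return nil, errors.New("tar archiver not implemented in this sketch")
}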
@@ -1,233 +0,0 @@
|
|||||||
// Package base is a base archive Fs
|
|
||||||
package base
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"path"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
|
||||||
"github.com/rclone/rclone/vfs"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Fs represents a wrapped fs.Fs
|
|
||||||
type Fs struct {
|
|
||||||
f fs.Fs
|
|
||||||
wrapper fs.Fs
|
|
||||||
name string
|
|
||||||
features *fs.Features // optional features
|
|
||||||
vfs *vfs.VFS
|
|
||||||
node vfs.Node // archive object
|
|
||||||
remote string // remote of the archive object
|
|
||||||
prefix string // position for objects
|
|
||||||
prefixSlash string // position for objects with a trailing slash
|
|
||||||
root string // position to read from within the archive
|
|
||||||
}
|
|
||||||
|
|
||||||
var errNotImplemented = errors.New("internal error: method not implemented in archiver")
|
|
||||||
|
|
||||||
// New constructs an Fs from the (wrappedFs, remote) with the objects
|
|
||||||
// prefix with prefix and rooted at root
|
|
||||||
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (*Fs, error) {
|
|
||||||
// FIXME vfs cache?
|
|
||||||
// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
|
|
||||||
fs.Debugf(nil, "New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
|
|
||||||
VFS := vfs.New(wrappedFs, nil)
|
|
||||||
node, err := VFS.Stat(remote)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
f := &Fs{
|
|
||||||
f: wrappedFs,
|
|
||||||
name: path.Join(fs.ConfigString(wrappedFs), remote),
|
|
||||||
vfs: VFS,
|
|
||||||
node: node,
|
|
||||||
remote: remote,
|
|
||||||
root: root,
|
|
||||||
prefix: prefix,
|
|
||||||
prefixSlash: prefix + "/",
|
|
||||||
}
|
|
||||||
|
|
||||||
// FIXME
|
|
||||||
// the features here are ones we could support, and they are
|
|
||||||
// ANDed with the ones from wrappedFs
|
|
||||||
//
|
|
||||||
// FIXME some of these need to be forced on - CanHaveEmptyDirectories
|
|
||||||
f.features = (&fs.Features{
|
|
||||||
CaseInsensitive: false,
|
|
||||||
DuplicateFiles: false,
|
|
||||||
ReadMimeType: false, // MimeTypes not supported with gzip
|
|
||||||
WriteMimeType: false,
|
|
||||||
BucketBased: false,
|
|
||||||
CanHaveEmptyDirectories: true,
|
|
||||||
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
|
||||||
|
|
||||||
return f, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name of the remote (as passed into NewFs)
|
|
||||||
func (f *Fs) Name() string {
|
|
||||||
return f.name
|
|
||||||
}
|
|
||||||
|
|
||||||
// Root of the remote (as passed into NewFs)
|
|
||||||
func (f *Fs) Root() string {
|
|
||||||
return f.root
|
|
||||||
}
|
|
||||||
|
|
||||||
// Features returns the optional features of this Fs
|
|
||||||
func (f *Fs) Features() *fs.Features {
|
|
||||||
return f.features
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns a description of the FS
|
|
||||||
func (f *Fs) String() string {
|
|
||||||
return f.name
|
|
||||||
}
|
|
||||||
|
|
||||||
// List the objects and directories in dir into entries. The
|
|
||||||
// entries can be returned in any order but should be for a
|
|
||||||
// complete directory.
|
|
||||||
//
|
|
||||||
// dir should be "" to list the root, and should not have
|
|
||||||
// trailing slashes.
|
|
||||||
//
|
|
||||||
// This should return ErrDirNotFound if the directory isn't
|
|
||||||
// found.
|
|
||||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
|
||||||
return nil, errNotImplemented
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewObject finds the Object at remote.
|
|
||||||
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
|
|
||||||
return nil, errNotImplemented
|
|
||||||
}
|
|
||||||
|
|
||||||
// Precision of the ModTimes in this Fs
|
|
||||||
func (f *Fs) Precision() time.Duration {
|
|
||||||
return time.Second
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mkdir makes the directory (container, bucket)
|
|
||||||
//
|
|
||||||
// Shouldn't return an error if it already exists
|
|
||||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
|
||||||
return vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rmdir removes the directory (container, bucket) if empty
|
|
||||||
//
|
|
||||||
// Return an error if it doesn't exist or isn't empty
|
|
||||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
|
||||||
return vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Put in to the remote path with the modTime given of the given size
|
|
||||||
//
|
|
||||||
// May create the object even if it returns an error - if so
|
|
||||||
// will return the object and the error, otherwise will return
|
|
||||||
// nil and the error
|
|
||||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
|
|
||||||
return nil, vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hashes returns the supported hash sets.
|
|
||||||
func (f *Fs) Hashes() hash.Set {
|
|
||||||
return hash.Set(hash.None)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnWrap returns the Fs that this Fs is wrapping
|
|
||||||
func (f *Fs) UnWrap() fs.Fs {
|
|
||||||
return f.f
|
|
||||||
}
|
|
||||||
|
|
||||||
// WrapFs returns the Fs that is wrapping this Fs
|
|
||||||
func (f *Fs) WrapFs() fs.Fs {
|
|
||||||
return f.wrapper
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetWrapper sets the Fs that is wrapping this Fs
|
|
||||||
func (f *Fs) SetWrapper(wrapper fs.Fs) {
|
|
||||||
f.wrapper = wrapper
|
|
||||||
}
|
|
||||||
|
|
||||||
// Object describes an object to be read from the raw zip file
|
|
||||||
type Object struct {
|
|
||||||
f *Fs
|
|
||||||
remote string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fs returns read only access to the Fs that this object is part of
|
|
||||||
func (o *Object) Fs() fs.Info {
|
|
||||||
return o.f
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return a string version
|
|
||||||
func (o *Object) String() string {
|
|
||||||
if o == nil {
|
|
||||||
return "<nil>"
|
|
||||||
}
|
|
||||||
return o.Remote()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remote returns the remote path
|
|
||||||
func (o *Object) Remote() string {
|
|
||||||
return o.remote
|
|
||||||
}
|
|
||||||
|
|
||||||
// Size returns the size of the file
|
|
||||||
func (o *Object) Size() int64 {
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
|
|
||||||
// ModTime returns the modification time of the object
|
|
||||||
//
|
|
||||||
// It attempts to read the object's mtime and if that isn't present the
|
|
||||||
// LastModified returned in the http headers
|
|
||||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
|
||||||
return time.Now()
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetModTime sets the modification time of the local fs object
|
|
||||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
|
||||||
return vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Storable returns a boolean indicating if this object is storable
|
|
||||||
func (o *Object) Storable() bool {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hash returns the selected checksum of the file
|
|
||||||
// If no checksum is available it returns ""
|
|
||||||
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
|
|
||||||
return "", hash.ErrUnsupported
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
|
||||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
|
||||||
return nil, errNotImplemented
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update in to the object with the modTime given of the given size
|
|
||||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
|
||||||
return vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove an object
|
|
||||||
func (o *Object) Remove(ctx context.Context) error {
|
|
||||||
return vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check the interfaces are satisfied
|
|
||||||
var (
|
|
||||||
_ fs.Fs = (*Fs)(nil)
|
|
||||||
_ fs.UnWrapper = (*Fs)(nil)
|
|
||||||
_ fs.Wrapper = (*Fs)(nil)
|
|
||||||
_ fs.Object = (*Object)(nil)
|
|
||||||
)
|
|
||||||
@@ -1,165 +0,0 @@
|
|||||||
package squashfs
|
|
||||||
|
|
||||||
// Could just be using bare object Open with RangeRequest which
|
|
||||||
// would transfer the minimum amount of data but may be slower.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io/fs"
|
|
||||||
"os"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/diskfs/go-diskfs/backend"
|
|
||||||
"github.com/rclone/rclone/vfs"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Cache file handles for accessing the file
|
|
||||||
type cache struct {
|
|
||||||
node vfs.Node
|
|
||||||
fhsMu sync.Mutex
|
|
||||||
fhs []cacheHandle
|
|
||||||
}
|
|
||||||
|
|
||||||
// A cached file handle
|
|
||||||
type cacheHandle struct {
|
|
||||||
offset int64
|
|
||||||
fh vfs.Handle
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make a new cache
|
|
||||||
func newCache(node vfs.Node) *cache {
|
|
||||||
return &cache{
|
|
||||||
node: node,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get a vfs.Handle from the pool or open one
|
|
||||||
//
|
|
||||||
// This tries to find an open file handle which doesn't require seeking.
|
|
||||||
func (c *cache) open(off int64) (fh vfs.Handle, err error) {
|
|
||||||
c.fhsMu.Lock()
|
|
||||||
defer c.fhsMu.Unlock()
|
|
||||||
|
|
||||||
if len(c.fhs) > 0 {
|
|
||||||
// Look for exact match first
|
|
||||||
for i, cfh := range c.fhs {
|
|
||||||
if cfh.offset == off {
|
|
||||||
// fs.Debugf(nil, "CACHE MATCH")
|
|
||||||
c.fhs = append(c.fhs[:i], c.fhs[i+1:]...)
|
|
||||||
return cfh.fh, nil
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// fs.Debugf(nil, "CACHE MISS")
|
|
||||||
// Just take the first one if not found
|
|
||||||
cfh := c.fhs[0]
|
|
||||||
c.fhs = c.fhs[1:]
|
|
||||||
return cfh.fh, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
fh, err = c.node.Open(os.O_RDONLY)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to open squashfs archive: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return fh, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close a vfs.Handle or return it to the pool
|
|
||||||
//
|
|
||||||
// off should be the offset the file handle would read from without seeking
|
|
||||||
func (c *cache) close(fh vfs.Handle, off int64) {
|
|
||||||
c.fhsMu.Lock()
|
|
||||||
defer c.fhsMu.Unlock()
|
|
||||||
|
|
||||||
c.fhs = append(c.fhs, cacheHandle{
|
|
||||||
offset: off,
|
|
||||||
fh: fh,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReadAt reads len(p) bytes into p starting at offset off in the underlying
|
|
||||||
// input source. It returns the number of bytes read (0 <= n <= len(p)) and any
|
|
||||||
// error encountered.
|
|
||||||
//
|
|
||||||
// When ReadAt returns n < len(p), it returns a non-nil error explaining why
|
|
||||||
// more bytes were not returned. In this respect, ReadAt is stricter than Read.
|
|
||||||
//
|
|
||||||
// Even if ReadAt returns n < len(p), it may use all of p as scratch
|
|
||||||
// space during the call. If some data is available but not len(p) bytes,
|
|
||||||
// ReadAt blocks until either all the data is available or an error occurs.
|
|
||||||
// In this respect ReadAt is different from Read.
|
|
||||||
//
|
|
||||||
// If the n = len(p) bytes returned by ReadAt are at the end of the input
|
|
||||||
// source, ReadAt may return either err == EOF or err == nil.
|
|
||||||
//
|
|
||||||
// If ReadAt is reading from an input source with a seek offset, ReadAt should
|
|
||||||
// not affect nor be affected by the underlying seek offset.
|
|
||||||
//
|
|
||||||
// Clients of ReadAt can execute parallel ReadAt calls on the same input
|
|
||||||
// source.
|
|
||||||
//
|
|
||||||
// Implementations must not retain p.
|
|
||||||
func (c *cache) ReadAt(p []byte, off int64) (n int, err error) {
|
|
||||||
fh, err := c.open(off)
|
|
||||||
if err != nil {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
c.close(fh, off+int64(len(p)))
|
|
||||||
}()
|
|
||||||
// fs.Debugf(nil, "ReadAt(p[%d], off=%d, fh=%p)", len(p), off, fh)
|
|
||||||
return fh.ReadAt(p, off)
|
|
||||||
}
|
|
||||||
|
|
||||||
var errCacheNotImplemented = errors.New("internal error: squashfs cache doesn't implement method")
|
|
||||||
|
|
||||||
// WriteAt method dummy stub to satisfy interface
|
|
||||||
func (c *cache) WriteAt(p []byte, off int64) (n int, err error) {
|
|
||||||
return 0, errCacheNotImplemented
|
|
||||||
}
|
|
||||||
|
|
||||||
// Seek method dummy stub to satisfy interface
|
|
||||||
func (c *cache) Seek(offset int64, whence int) (int64, error) {
|
|
||||||
return 0, errCacheNotImplemented
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read method dummy stub to satisfy interface
|
|
||||||
func (c *cache) Read(p []byte) (n int, err error) {
|
|
||||||
return 0, errCacheNotImplemented
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *cache) Stat() (fs.FileInfo, error) {
|
|
||||||
return nil, errCacheNotImplemented
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close the file
|
|
||||||
func (c *cache) Close() (err error) {
|
|
||||||
c.fhsMu.Lock()
|
|
||||||
defer c.fhsMu.Unlock()
|
|
||||||
|
|
||||||
// Close any open file handles
|
|
||||||
for i := range c.fhs {
|
|
||||||
fh := &c.fhs[i]
|
|
||||||
newErr := fh.fh.Close()
|
|
||||||
if err == nil {
|
|
||||||
err = newErr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
c.fhs = nil
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sys returns OS-specific file for ioctl calls via fd
|
|
||||||
func (c *cache) Sys() (*os.File, error) {
|
|
||||||
return nil, errCacheNotImplemented
|
|
||||||
}
|
|
||||||
|
|
||||||
// Writable returns file for read-write operations
|
|
||||||
func (c *cache) Writable() (backend.WritableFile, error) {
|
|
||||||
return nil, errCacheNotImplemented
|
|
||||||
}
|
|
||||||
|
|
||||||
// check interfaces
|
|
||||||
var _ backend.Storage = (*cache)(nil)
|
|
||||||
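The cache above satisfies go-diskfs's backend.Storage interface by pooling VFS read handles keyed by the offset they will next read from, so sequential ReadAt calls reuse a handle instead of seeking. A hedged sketch of how it is consumed follows; the openSquashfs helper and the remote path are assumptions, while the squashfs.Read call mirrors the one used in the backend code below.

// Sketch only, not part of the backend: wiring the cache above into go-diskfs.
package squashfs

import (
    "fmt"

    "github.com/diskfs/go-diskfs/filesystem/squashfs"
    "github.com/rclone/rclone/vfs"
)

func openSquashfs(VFS *vfs.VFS, remote string) (*squashfs.FileSystem, error) {
    node, err := VFS.Stat(remote) // locate the archive file on the wrapped remote
    if err != nil {
        return nil, fmt.Errorf("stat archive: %w", err)
    }
    c := newCache(node) // pool of vfs.Handles keyed by next read offset
    // Parse the superblock; 0 is the start offset and 1 MiB the block size,
    // matching the call made in the backend's New function below.
    return squashfs.Read(c, node.Size(), 0, 1024*1024)
}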
@@ -1,446 +0,0 @@
|
|||||||
// Package squashfs implements a squashfs archiver for the archive backend
|
|
||||||
package squashfs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"path"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/diskfs/go-diskfs/filesystem/squashfs"
|
|
||||||
"github.com/rclone/rclone/backend/archive/archiver"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
|
||||||
"github.com/rclone/rclone/fs/log"
|
|
||||||
"github.com/rclone/rclone/lib/readers"
|
|
||||||
"github.com/rclone/rclone/vfs"
|
|
||||||
"github.com/rclone/rclone/vfs/vfscommon"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
archiver.Register(archiver.Archiver{
|
|
||||||
New: New,
|
|
||||||
Extension: ".sqfs",
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fs represents a wrapped fs.Fs
|
|
||||||
type Fs struct {
|
|
||||||
f fs.Fs
|
|
||||||
wrapper fs.Fs
|
|
||||||
name string
|
|
||||||
features *fs.Features // optional features
|
|
||||||
vfs *vfs.VFS
|
|
||||||
sqfs *squashfs.FileSystem // interface to the squashfs
|
|
||||||
c *cache
|
|
||||||
node vfs.Node // squashfs file object - set if reading
|
|
||||||
remote string // remote of the squashfs file object
|
|
||||||
prefix string // position for objects
|
|
||||||
prefixSlash string // position for objects with a trailing slash
|
|
||||||
root string // position to read from within the archive
|
|
||||||
}
|
|
||||||
|
|
||||||
// New constructs an Fs from the (wrappedFs, remote) with the objects
|
|
||||||
// prefix with prefix and rooted at root
|
|
||||||
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) {
|
|
||||||
// FIXME vfs cache?
|
|
||||||
// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
|
|
||||||
fs.Debugf(nil, "Squashfs: New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
|
|
||||||
vfsOpt := vfscommon.Opt
|
|
||||||
vfsOpt.ReadWait = 0
|
|
||||||
VFS := vfs.New(wrappedFs, &vfsOpt)
|
|
||||||
node, err := VFS.Stat(remote)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
c := newCache(node)
|
|
||||||
|
|
||||||
// FIXME blocksize
|
|
||||||
sqfs, err := squashfs.Read(c, node.Size(), 0, 1024*1024)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to read squashfs: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
f := &Fs{
|
|
||||||
f: wrappedFs,
|
|
||||||
name: path.Join(fs.ConfigString(wrappedFs), remote),
|
|
||||||
vfs: VFS,
|
|
||||||
node: node,
|
|
||||||
sqfs: sqfs,
|
|
||||||
c: c,
|
|
||||||
remote: remote,
|
|
||||||
root: strings.Trim(root, "/"),
|
|
||||||
prefix: prefix,
|
|
||||||
prefixSlash: prefix + "/",
|
|
||||||
}
|
|
||||||
if prefix == "" {
|
|
||||||
f.prefixSlash = ""
|
|
||||||
}
|
|
||||||
|
|
||||||
singleObject := false
|
|
||||||
|
|
||||||
// Find the directory the root points to
|
|
||||||
if f.root != "" && !strings.HasSuffix(root, "/") {
|
|
||||||
native, err := f.toNative("")
|
|
||||||
if err == nil {
|
|
||||||
native = strings.TrimRight(native, "/")
|
|
||||||
_, err := f.newObjectNative(native)
|
|
||||||
if err == nil {
|
|
||||||
// If it pointed to a file, find the directory above
|
|
||||||
f.root = path.Dir(f.root)
|
|
||||||
if f.root == "." || f.root == "/" {
|
|
||||||
f.root = ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// FIXME
|
|
||||||
// the features here are ones we could support, and they are
|
|
||||||
// ANDed with the ones from wrappedFs
|
|
||||||
//
|
|
||||||
// FIXME some of these need to be forced on - CanHaveEmptyDirectories
|
|
||||||
f.features = (&fs.Features{
|
|
||||||
CaseInsensitive: false,
|
|
||||||
DuplicateFiles: false,
|
|
||||||
ReadMimeType: false, // MimeTypes not supported with squashfs
|
|
||||||
WriteMimeType: false,
|
|
||||||
BucketBased: false,
|
|
||||||
CanHaveEmptyDirectories: true,
|
|
||||||
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
|
||||||
|
|
||||||
if singleObject {
|
|
||||||
return f, fs.ErrorIsFile
|
|
||||||
}
|
|
||||||
return f, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name of the remote (as passed into NewFs)
|
|
||||||
func (f *Fs) Name() string {
|
|
||||||
return f.name
|
|
||||||
}
|
|
||||||
|
|
||||||
// Root of the remote (as passed into NewFs)
|
|
||||||
func (f *Fs) Root() string {
|
|
||||||
return f.root
|
|
||||||
}
|
|
||||||
|
|
||||||
// Features returns the optional features of this Fs
|
|
||||||
func (f *Fs) Features() *fs.Features {
|
|
||||||
return f.features
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns a description of the FS
|
|
||||||
func (f *Fs) String() string {
|
|
||||||
return fmt.Sprintf("Squashfs %q", f.name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// This turns a remote into a native path in the squashfs starting with a /
|
|
||||||
func (f *Fs) toNative(remote string) (string, error) {
|
|
||||||
native := strings.Trim(remote, "/")
|
|
||||||
if f.prefix == "" {
|
|
||||||
native = "/" + native
|
|
||||||
} else if native == f.prefix {
|
|
||||||
native = "/"
|
|
||||||
} else if !strings.HasPrefix(native, f.prefixSlash) {
|
|
||||||
return "", fmt.Errorf("internal error: %q doesn't start with prefix %q", native, f.prefixSlash)
|
|
||||||
} else {
|
|
||||||
native = native[len(f.prefix):]
|
|
||||||
}
|
|
||||||
if f.root != "" {
|
|
||||||
native = "/" + f.root + native
|
|
||||||
}
|
|
||||||
return native, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Turn a (nativeDir, leaf) into a remote
|
|
||||||
func (f *Fs) fromNative(nativeDir string, leaf string) string {
|
|
||||||
// fs.Debugf(nil, "nativeDir = %q, leaf = %q, root=%q", nativeDir, leaf, f.root)
|
|
||||||
dir := nativeDir
|
|
||||||
if f.root != "" {
|
|
||||||
dir = strings.TrimPrefix(dir, "/"+f.root)
|
|
||||||
}
|
|
||||||
remote := f.prefixSlash + strings.Trim(path.Join(dir, leaf), "/")
|
|
||||||
// fs.Debugf(nil, "dir = %q, remote=%q", dir, remote)
|
|
||||||
return remote
|
|
||||||
}
|
|
||||||
|
|
||||||
// Convert a FileInfo into an Object from native dir
|
|
||||||
func (f *Fs) objectFromFileInfo(nativeDir string, item squashfs.FileStat) *Object {
|
|
||||||
return &Object{
|
|
||||||
fs: f,
|
|
||||||
remote: f.fromNative(nativeDir, item.Name()),
|
|
||||||
size: item.Size(),
|
|
||||||
modTime: item.ModTime(),
|
|
||||||
item: item,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// List the objects and directories in dir into entries. The
|
|
||||||
// entries can be returned in any order but should be for a
|
|
||||||
// complete directory.
|
|
||||||
//
|
|
||||||
// dir should be "" to list the root, and should not have
|
|
||||||
// trailing slashes.
|
|
||||||
//
|
|
||||||
// This should return ErrDirNotFound if the directory isn't
|
|
||||||
// found.
|
|
||||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
|
||||||
defer log.Trace(f, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
|
|
||||||
|
|
||||||
nativeDir, err := f.toNative(dir)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
items, err := f.sqfs.ReadDir(nativeDir)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("read squashfs: couldn't read directory: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
entries = make(fs.DirEntries, 0, len(items))
|
|
||||||
for _, fi := range items {
|
|
||||||
item, ok := fi.(squashfs.FileStat)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("internal error: unexpected type for %q: %T", fi.Name(), fi)
|
|
||||||
}
|
|
||||||
// fs.Debugf(item.Name(), "entry = %#v", item)
|
|
||||||
var entry fs.DirEntry
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("error reading item %q: %q", item.Name(), err)
|
|
||||||
}
|
|
||||||
if item.IsDir() {
|
|
||||||
var remote = f.fromNative(nativeDir, item.Name())
|
|
||||||
entry = fs.NewDir(remote, item.ModTime())
|
|
||||||
} else {
|
|
||||||
if item.Mode().IsRegular() {
|
|
||||||
entry = f.objectFromFileInfo(nativeDir, item)
|
|
||||||
} else {
|
|
||||||
fs.Debugf(item.Name(), "FIXME Not regular file - skipping")
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
entries = append(entries, entry)
|
|
||||||
}
|
|
||||||
|
|
||||||
// fs.Debugf(f, "dir=%q, entries=%v", dir, entries)
|
|
||||||
return entries, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// newObjectNative finds the object at the native path passed in
|
|
||||||
func (f *Fs) newObjectNative(nativePath string) (o fs.Object, err error) {
|
|
||||||
// get the path and filename
|
|
||||||
dir, leaf := path.Split(nativePath)
|
|
||||||
dir = strings.TrimRight(dir, "/")
|
|
||||||
leaf = strings.Trim(leaf, "/")
|
|
||||||
|
|
||||||
// FIXME need to detect directory not found
|
|
||||||
fis, err := f.sqfs.ReadDir(dir)
|
|
||||||
if err != nil {
|
|
||||||
|
|
||||||
return nil, fs.ErrorObjectNotFound
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, fi := range fis {
|
|
||||||
if fi.Name() == leaf {
|
|
||||||
if fi.IsDir() {
|
|
||||||
return nil, fs.ErrorNotAFile
|
|
||||||
}
|
|
||||||
item, ok := fi.(squashfs.FileStat)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("internal error: unexpected type for %q: %T", fi.Name(), fi)
|
|
||||||
}
|
|
||||||
o = f.objectFromFileInfo(dir, item)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if o == nil {
|
|
||||||
return nil, fs.ErrorObjectNotFound
|
|
||||||
}
|
|
||||||
return o, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewObject finds the Object at remote.
|
|
||||||
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
|
|
||||||
defer log.Trace(f, "remote=%q", remote)("obj=%v, err=%v", &o, &err)
|
|
||||||
|
|
||||||
nativePath, err := f.toNative(remote)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return f.newObjectNative(nativePath)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Precision of the ModTimes in this Fs
|
|
||||||
func (f *Fs) Precision() time.Duration {
|
|
||||||
return time.Second
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mkdir makes the directory (container, bucket)
|
|
||||||
//
|
|
||||||
// Shouldn't return an error if it already exists
|
|
||||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
|
||||||
return vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rmdir removes the directory (container, bucket) if empty
|
|
||||||
//
|
|
||||||
// Return an error if it doesn't exist or isn't empty
|
|
||||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
|
||||||
return vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Put in to the remote path with the modTime given of the given size
|
|
||||||
//
|
|
||||||
// May create the object even if it returns an error - if so
|
|
||||||
// will return the object and the error, otherwise will return
|
|
||||||
// nil and the error
|
|
||||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
|
|
||||||
return nil, vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hashes returns the supported hash sets.
|
|
||||||
func (f *Fs) Hashes() hash.Set {
|
|
||||||
return hash.Set(hash.None)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnWrap returns the Fs that this Fs is wrapping
|
|
||||||
func (f *Fs) UnWrap() fs.Fs {
|
|
||||||
return f.f
|
|
||||||
}
|
|
||||||
|
|
||||||
// WrapFs returns the Fs that is wrapping this Fs
|
|
||||||
func (f *Fs) WrapFs() fs.Fs {
|
|
||||||
return f.wrapper
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetWrapper sets the Fs that is wrapping this Fs
|
|
||||||
func (f *Fs) SetWrapper(wrapper fs.Fs) {
|
|
||||||
f.wrapper = wrapper
|
|
||||||
}
|
|
||||||
|
|
||||||
// Object describes an object to be read from the raw squashfs file
|
|
||||||
type Object struct {
|
|
||||||
fs *Fs
|
|
||||||
remote string
|
|
||||||
size int64
|
|
||||||
modTime time.Time
|
|
||||||
item squashfs.FileStat
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fs returns read only access to the Fs that this object is part of
|
|
||||||
func (o *Object) Fs() fs.Info {
|
|
||||||
return o.fs
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return a string version
|
|
||||||
func (o *Object) String() string {
|
|
||||||
if o == nil {
|
|
||||||
return "<nil>"
|
|
||||||
}
|
|
||||||
return o.Remote()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Turn a squashfs path into a full path for the parent Fs
|
|
||||||
// func (o *Object) path(remote string) string {
|
|
||||||
// return path.Join(o.fs.prefix, remote)
|
|
||||||
// }
|
|
||||||
|
|
||||||
// Remote returns the remote path
|
|
||||||
func (o *Object) Remote() string {
|
|
||||||
return o.remote
|
|
||||||
}
|
|
||||||
|
|
||||||
// Size returns the size of the file
|
|
||||||
func (o *Object) Size() int64 {
|
|
||||||
return o.size
|
|
||||||
}
|
|
||||||
|
|
||||||
// ModTime returns the modification time of the object
|
|
||||||
//
|
|
||||||
// It attempts to read the object's mtime and if that isn't present the
|
|
||||||
// LastModified returned in the http headers
|
|
||||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
|
||||||
return o.modTime
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetModTime sets the modification time of the local fs object
|
|
||||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
|
||||||
return vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Storable returns a boolean indicating if this object is storable
|
|
||||||
func (o *Object) Storable() bool {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hash returns the selected checksum of the file
|
|
||||||
// If no checksum is available it returns ""
|
|
||||||
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
|
|
||||||
return "", hash.ErrUnsupported
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
|
||||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
|
||||||
var offset, limit int64 = 0, -1
|
|
||||||
for _, option := range options {
|
|
||||||
switch x := option.(type) {
|
|
||||||
case *fs.SeekOption:
|
|
||||||
offset = x.Offset
|
|
||||||
case *fs.RangeOption:
|
|
||||||
offset, limit = x.Decode(o.Size())
|
|
||||||
default:
|
|
||||||
if option.Mandatory() {
|
|
||||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
remote, err := o.fs.toNative(o.remote)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
fs.Debugf(o, "Opening %q", remote)
|
|
||||||
//fh, err := o.fs.sqfs.OpenFile(remote, os.O_RDONLY)
|
|
||||||
fh, err := o.item.Open()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// discard data from start as necessary
|
|
||||||
if offset > 0 {
|
|
||||||
_, err = fh.Seek(offset, io.SeekStart)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// If limited then don't return everything
|
|
||||||
if limit >= 0 {
|
|
||||||
fs.Debugf(nil, "limit=%d, offset=%d, options=%v", limit, offset, options)
|
|
||||||
return readers.NewLimitedReadCloser(fh, limit), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fh, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update in to the object with the modTime given of the given size
|
|
||||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
|
||||||
return vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove an object
|
|
||||||
func (o *Object) Remove(ctx context.Context) error {
|
|
||||||
return vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check the interfaces are satisfied
|
|
||||||
var (
|
|
||||||
_ fs.Fs = (*Fs)(nil)
|
|
||||||
_ fs.UnWrapper = (*Fs)(nil)
|
|
||||||
_ fs.Wrapper = (*Fs)(nil)
|
|
||||||
_ fs.Object = (*Object)(nil)
|
|
||||||
)
|
|
||||||
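The Open method above decodes fs.SeekOption and fs.RangeOption into an offset and a byte limit before handing back a reader. A small sketch of how that decoding behaves, assuming the usual rclone semantics where RangeOption.End is inclusive and -1 means "to the end"; the sizes and ranges are made up for illustration.

// Sketch only: how the RangeOption decoding used in Open above behaves.
package main

import (
    "fmt"

    "github.com/rclone/rclone/fs"
)

func main() {
    size := int64(1000)

    // Bytes 10..109 inclusive: offset 10, limit 100 bytes.
    r := &fs.RangeOption{Start: 10, End: 109}
    offset, limit := r.Decode(size)
    fmt.Println(offset, limit) // expected: 10 100

    // Open-ended range from byte 900: offset 900, limit -1 (read to EOF).
    r = &fs.RangeOption{Start: 900, End: -1}
    offset, limit = r.Decode(size)
    fmt.Println(offset, limit) // expected: 900 -1
}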
@@ -1,385 +0,0 @@
|
|||||||
// Package zip implements a zip archiver for the archive backend
|
|
||||||
package zip
|
|
||||||
|
|
||||||
import (
|
|
||||||
"archive/zip"
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/backend/archive/archiver"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/dirtree"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
|
||||||
"github.com/rclone/rclone/fs/log"
|
|
||||||
"github.com/rclone/rclone/lib/readers"
|
|
||||||
"github.com/rclone/rclone/vfs"
|
|
||||||
"github.com/rclone/rclone/vfs/vfscommon"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
archiver.Register(archiver.Archiver{
|
|
||||||
New: New,
|
|
||||||
Extension: ".zip",
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fs represents a wrapped fs.Fs
|
|
||||||
type Fs struct {
|
|
||||||
f fs.Fs
|
|
||||||
wrapper fs.Fs
|
|
||||||
name string
|
|
||||||
features *fs.Features // optional features
|
|
||||||
vfs *vfs.VFS
|
|
||||||
node vfs.Node // zip file object - set if reading
|
|
||||||
remote string // remote of the zip file object
|
|
||||||
prefix string // position for objects
|
|
||||||
prefixSlash string // position for objects with a trailing slash
|
|
||||||
root string // position to read from within the archive
|
|
||||||
dt dirtree.DirTree // read from zipfile
|
|
||||||
}
|
|
||||||
|
|
||||||
// New constructs an Fs from the (wrappedFs, remote) with the objects
|
|
||||||
// prefix with prefix and rooted at root
|
|
||||||
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) {
|
|
||||||
// FIXME vfs cache?
|
|
||||||
// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
|
|
||||||
fs.Debugf(nil, "Zip: New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
|
|
||||||
vfsOpt := vfscommon.Opt
|
|
||||||
vfsOpt.ReadWait = 0
|
|
||||||
VFS := vfs.New(wrappedFs, &vfsOpt)
|
|
||||||
node, err := VFS.Stat(remote)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
f := &Fs{
|
|
||||||
f: wrappedFs,
|
|
||||||
name: path.Join(fs.ConfigString(wrappedFs), remote),
|
|
||||||
vfs: VFS,
|
|
||||||
node: node,
|
|
||||||
remote: remote,
|
|
||||||
root: root,
|
|
||||||
prefix: prefix,
|
|
||||||
prefixSlash: prefix + "/",
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read the contents of the zip file
|
|
||||||
singleObject, err := f.readZip()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to open zip file: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// FIXME
|
|
||||||
// the features here are ones we could support, and they are
|
|
||||||
// ANDed with the ones from wrappedFs
|
|
||||||
//
|
|
||||||
// FIXME some of these need to be forced on - CanHaveEmptyDirectories
|
|
||||||
f.features = (&fs.Features{
|
|
||||||
CaseInsensitive: false,
|
|
||||||
DuplicateFiles: false,
|
|
||||||
ReadMimeType: false, // MimeTypes not supported with gzip
|
|
||||||
WriteMimeType: false,
|
|
||||||
BucketBased: false,
|
|
||||||
CanHaveEmptyDirectories: true,
|
|
||||||
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
|
||||||
|
|
||||||
if singleObject {
|
|
||||||
return f, fs.ErrorIsFile
|
|
||||||
}
|
|
||||||
return f, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name of the remote (as passed into NewFs)
|
|
||||||
func (f *Fs) Name() string {
|
|
||||||
return f.name
|
|
||||||
}
|
|
||||||
|
|
||||||
// Root of the remote (as passed into NewFs)
|
|
||||||
func (f *Fs) Root() string {
|
|
||||||
return f.root
|
|
||||||
}
|
|
||||||
|
|
||||||
// Features returns the optional features of this Fs
|
|
||||||
func (f *Fs) Features() *fs.Features {
|
|
||||||
return f.features
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns a description of the FS
|
|
||||||
func (f *Fs) String() string {
|
|
||||||
return fmt.Sprintf("Zip %q", f.name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// readZip the zip file into f
|
|
||||||
//
|
|
||||||
// Returns singleObject=true if f.root points to a file
|
|
||||||
func (f *Fs) readZip() (singleObject bool, err error) {
|
|
||||||
if f.node == nil {
|
|
||||||
return singleObject, fs.ErrorDirNotFound
|
|
||||||
}
|
|
||||||
size := f.node.Size()
|
|
||||||
if size < 0 {
|
|
||||||
return singleObject, errors.New("can't read from zip file with unknown size")
|
|
||||||
}
|
|
||||||
r, err := f.node.Open(os.O_RDONLY)
|
|
||||||
if err != nil {
|
|
||||||
return singleObject, fmt.Errorf("failed to open zip file: %w", err)
|
|
||||||
}
|
|
||||||
zr, err := zip.NewReader(r, size)
|
|
||||||
if err != nil {
|
|
||||||
return singleObject, fmt.Errorf("failed to read zip file: %w", err)
|
|
||||||
}
|
|
||||||
dt := dirtree.New()
|
|
||||||
for _, file := range zr.File {
|
|
||||||
remote := strings.Trim(path.Clean(file.Name), "/")
|
|
||||||
if remote == "." {
|
|
||||||
remote = ""
|
|
||||||
}
|
|
||||||
remote = path.Join(f.prefix, remote)
|
|
||||||
if f.root != "" {
|
|
||||||
// Ignore all files outside the root
|
|
||||||
if !strings.HasPrefix(remote, f.root) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if remote == f.root {
|
|
||||||
remote = ""
|
|
||||||
} else {
|
|
||||||
remote = strings.TrimPrefix(remote, f.root+"/")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if strings.HasSuffix(file.Name, "/") {
|
|
||||||
dir := fs.NewDir(remote, file.Modified)
|
|
||||||
dt.AddDir(dir)
|
|
||||||
} else {
|
|
||||||
if remote == "" {
|
|
||||||
remote = path.Base(f.root)
|
|
||||||
singleObject = true
|
|
||||||
dt = dirtree.New()
|
|
||||||
}
|
|
||||||
o := &Object{
|
|
||||||
f: f,
|
|
||||||
remote: remote,
|
|
||||||
fh: &file.FileHeader,
|
|
||||||
file: file,
|
|
||||||
}
|
|
||||||
dt.Add(o)
|
|
||||||
if singleObject {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
dt.CheckParents("")
|
|
||||||
dt.Sort()
|
|
||||||
f.dt = dt
|
|
||||||
//fs.Debugf(nil, "dt = %v", dt)
|
|
||||||
return singleObject, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// List the objects and directories in dir into entries. The
|
|
||||||
// entries can be returned in any order but should be for a
|
|
||||||
// complete directory.
|
|
||||||
//
|
|
||||||
// dir should be "" to list the root, and should not have
|
|
||||||
// trailing slashes.
|
|
||||||
//
|
|
||||||
// This should return ErrDirNotFound if the directory isn't
|
|
||||||
// found.
|
|
||||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
|
||||||
defer log.Trace(f, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
|
|
||||||
// _, err = f.strip(dir)
|
|
||||||
// if err != nil {
|
|
||||||
// return nil, err
|
|
||||||
// }
|
|
||||||
entries, ok := f.dt[dir]
|
|
||||||
if !ok {
|
|
||||||
return nil, fs.ErrorDirNotFound
|
|
||||||
}
|
|
||||||
fs.Debugf(f, "dir=%q, entries=%v", dir, entries)
|
|
||||||
return entries, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewObject finds the Object at remote.
|
|
||||||
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
|
|
||||||
defer log.Trace(f, "remote=%q", remote)("obj=%v, err=%v", &o, &err)
|
|
||||||
if f.dt == nil {
|
|
||||||
return nil, fs.ErrorObjectNotFound
|
|
||||||
}
|
|
||||||
_, entry := f.dt.Find(remote)
|
|
||||||
if entry == nil {
|
|
||||||
return nil, fs.ErrorObjectNotFound
|
|
||||||
}
|
|
||||||
o, ok := entry.(*Object)
|
|
||||||
if !ok {
|
|
||||||
return nil, fs.ErrorNotAFile
|
|
||||||
}
|
|
||||||
return o, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Precision of the ModTimes in this Fs
|
|
||||||
func (f *Fs) Precision() time.Duration {
|
|
||||||
return time.Second
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mkdir makes the directory (container, bucket)
|
|
||||||
//
|
|
||||||
// Shouldn't return an error if it already exists
|
|
||||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
|
||||||
return vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rmdir removes the directory (container, bucket) if empty
|
|
||||||
//
|
|
||||||
// Return an error if it doesn't exist or isn't empty
|
|
||||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
|
||||||
return vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Put in to the remote path with the modTime given of the given size
|
|
||||||
//
|
|
||||||
// May create the object even if it returns an error - if so
|
|
||||||
// will return the object and the error, otherwise will return
|
|
||||||
// nil and the error
|
|
||||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
|
|
||||||
return nil, vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hashes returns the supported hash sets.
|
|
||||||
func (f *Fs) Hashes() hash.Set {
|
|
||||||
return hash.Set(hash.CRC32)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnWrap returns the Fs that this Fs is wrapping
|
|
||||||
func (f *Fs) UnWrap() fs.Fs {
|
|
||||||
return f.f
|
|
||||||
}
|
|
||||||
|
|
||||||
// WrapFs returns the Fs that is wrapping this Fs
|
|
||||||
func (f *Fs) WrapFs() fs.Fs {
|
|
||||||
return f.wrapper
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetWrapper sets the Fs that is wrapping this Fs
|
|
||||||
func (f *Fs) SetWrapper(wrapper fs.Fs) {
|
|
||||||
f.wrapper = wrapper
|
|
||||||
}
|
|
||||||
|
|
||||||
// Object describes an object to be read from the raw zip file
|
|
||||||
type Object struct {
|
|
||||||
f *Fs
|
|
||||||
remote string
|
|
||||||
fh *zip.FileHeader
|
|
||||||
file *zip.File
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fs returns read only access to the Fs that this object is part of
|
|
||||||
func (o *Object) Fs() fs.Info {
|
|
||||||
return o.f
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return a string version
|
|
||||||
func (o *Object) String() string {
|
|
||||||
if o == nil {
|
|
||||||
return "<nil>"
|
|
||||||
}
|
|
||||||
return o.Remote()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remote returns the remote path
|
|
||||||
func (o *Object) Remote() string {
|
|
||||||
return o.remote
|
|
||||||
}
|
|
||||||
|
|
||||||
// Size returns the size of the file
|
|
||||||
func (o *Object) Size() int64 {
|
|
||||||
return int64(o.fh.UncompressedSize64)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ModTime returns the modification time of the object
|
|
||||||
//
|
|
||||||
// It attempts to read the object's mtime and if that isn't present the
|
|
||||||
// LastModified returned in the http headers
|
|
||||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
|
||||||
return o.fh.Modified
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetModTime sets the modification time of the local fs object
|
|
||||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
|
||||||
return vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Storable returns a boolean indicating if this object is storable
|
|
||||||
func (o *Object) Storable() bool {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hash returns the selected checksum of the file
|
|
||||||
// If no checksum is available it returns ""
|
|
||||||
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
|
|
||||||
if ht == hash.CRC32 {
|
|
||||||
// FIXME return empty CRC if writing
|
|
||||||
if o.f.dt == nil {
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%08x", o.fh.CRC32), nil
|
|
||||||
}
|
|
||||||
return "", hash.ErrUnsupported
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
|
||||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
|
||||||
var offset, limit int64 = 0, -1
|
|
||||||
for _, option := range options {
|
|
||||||
switch x := option.(type) {
|
|
||||||
case *fs.SeekOption:
|
|
||||||
offset = x.Offset
|
|
||||||
case *fs.RangeOption:
|
|
||||||
offset, limit = x.Decode(o.Size())
|
|
||||||
default:
|
|
||||||
if option.Mandatory() {
|
|
||||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
rc, err = o.file.Open()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// discard data from start as necessary
|
|
||||||
if offset > 0 {
|
|
||||||
_, err = io.CopyN(io.Discard, rc, offset)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// If limited then don't return everything
|
|
||||||
if limit >= 0 {
|
|
||||||
return readers.NewLimitedReadCloser(rc, limit), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return rc, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update in to the object with the modTime given of the given size
|
|
||||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
|
||||||
return vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove an object
|
|
||||||
func (o *Object) Remove(ctx context.Context) error {
|
|
||||||
return vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check the interfaces are satisfied
|
|
||||||
var (
|
|
||||||
_ fs.Fs = (*Fs)(nil)
|
|
||||||
_ fs.UnWrapper = (*Fs)(nil)
|
|
||||||
_ fs.Wrapper = (*Fs)(nil)
|
|
||||||
_ fs.Object = (*Object)(nil)
|
|
||||||
)
|
|
||||||
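The readZip method above depends on the standard library's ability to read a zip central directory from any io.ReaderAt of known length, which is why the backend refuses archives whose size is unknown. A hedged sketch of that underlying pattern, independent of rclone; the file name is hypothetical.

// Sketch only: the standard-library pattern readZip above is built on.
package main

import (
    "archive/zip"
    "fmt"
    "os"
)

func main() {
    f, err := os.Open("backup.zip")
    if err != nil {
        panic(err)
    }
    defer f.Close()

    fi, err := f.Stat()
    if err != nil {
        panic(err)
    }

    // An *os.File is an io.ReaderAt, just as the VFS handle is in readZip.
    zr, err := zip.NewReader(f, fi.Size())
    if err != nil {
        panic(err)
    }
    for _, file := range zr.File {
        // Directory entries end in "/", exactly the check readZip makes.
        fmt.Printf("%s (%d bytes)\n", file.Name, file.UncompressedSize64)
    }
}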
@@ -1,151 +1,35 @@
|
|||||||
//go:build !plan9 && !solaris && !js
|
// +build !plan9,!solaris
|
||||||
|
|
||||||
package azureblob
|
package azureblob
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"encoding/base64"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fstest"
|
|
||||||
"github.com/rclone/rclone/fstest/fstests"
|
|
||||||
"github.com/rclone/rclone/lib/random"
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestBlockIDCreator(t *testing.T) {
|
func (f *Fs) InternalTest(t *testing.T) {
|
||||||
// Check creation and random number
|
// Check first feature flags are set on this
|
||||||
bic, err := newBlockIDCreator()
|
// remote
|
||||||
require.NoError(t, err)
|
|
||||||
bic2, err := newBlockIDCreator()
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.NotEqual(t, bic.random, bic2.random)
|
|
||||||
assert.NotEqual(t, bic.random, [8]byte{})
|
|
||||||
|
|
||||||
// Set random to known value for tests
|
|
||||||
bic.random = [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
|
|
||||||
chunkNumber := uint64(0xFEDCBA9876543210)
|
|
||||||
|
|
||||||
// Check creation of ID
|
|
||||||
want := base64.StdEncoding.EncodeToString([]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10, 1, 2, 3, 4, 5, 6, 7, 8})
|
|
||||||
assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", want)
|
|
||||||
got := bic.newBlockID(chunkNumber)
|
|
||||||
assert.Equal(t, want, got)
|
|
||||||
assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", got)
|
|
||||||
|
|
||||||
// Test checkID is working
|
|
||||||
assert.NoError(t, bic.checkID(chunkNumber, got))
|
|
||||||
assert.ErrorContains(t, bic.checkID(chunkNumber, "$"+got), "illegal base64")
|
|
||||||
assert.ErrorContains(t, bic.checkID(chunkNumber, "AAAA"+got), "bad block ID length")
|
|
||||||
assert.ErrorContains(t, bic.checkID(chunkNumber+1, got), "expecting decoded")
|
|
||||||
assert.ErrorContains(t, bic2.checkID(chunkNumber, got), "random bytes")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) testFeatures(t *testing.T) {
|
|
||||||
// Check first feature flags are set on this remote
|
|
||||||
enabled := f.Features().SetTier
|
enabled := f.Features().SetTier
|
||||||
assert.True(t, enabled)
|
assert.True(t, enabled)
|
||||||
enabled = f.Features().GetTier
|
enabled = f.Features().GetTier
|
||||||
assert.True(t, enabled)
|
assert.True(t, enabled)
|
||||||
}
|
}
|
||||||
|
|
||||||
type ReadSeekCloser struct {
|
func TestIncrement(t *testing.T) {
|
||||||
*strings.Reader
|
for _, test := range []struct {
|
||||||
}
|
in []byte
|
||||||
|
want []byte
|
||||||
func (r *ReadSeekCloser) Close() error {
|
}{
|
||||||
return nil
|
{[]byte{0, 0, 0, 0}, []byte{1, 0, 0, 0}},
|
||||||
}
|
{[]byte{0xFE, 0, 0, 0}, []byte{0xFF, 0, 0, 0}},
|
||||||
|
{[]byte{0xFF, 0, 0, 0}, []byte{0, 1, 0, 0}},
|
||||||
// Stage a block at remote but don't commit it
|
{[]byte{0, 1, 0, 0}, []byte{1, 1, 0, 0}},
|
||||||
func (f *Fs) stageBlockWithoutCommit(ctx context.Context, t *testing.T, remote string) {
|
{[]byte{0xFF, 0xFF, 0xFF, 0xFE}, []byte{0, 0, 0, 0xFF}},
|
||||||
var (
|
{[]byte{0xFF, 0xFF, 0xFF, 0xFF}, []byte{0, 0, 0, 0}},
|
||||||
containerName, blobPath = f.split(remote)
|
} {
|
||||||
containerClient = f.cntSVC(containerName)
|
increment(test.in)
|
||||||
blobClient = containerClient.NewBlockBlobClient(blobPath)
|
assert.Equal(t, test.want, test.in)
|
||||||
data = "uncommitted data"
|
|
||||||
blockID = "1"
|
|
||||||
blockIDBase64 = base64.StdEncoding.EncodeToString([]byte(blockID))
|
|
||||||
)
|
|
||||||
r := &ReadSeekCloser{strings.NewReader(data)}
|
|
||||||
_, err := blobClient.StageBlock(ctx, blockIDBase64, r, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Verify the block is staged but not committed
|
|
||||||
blockList, err := blobClient.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
found := false
|
|
||||||
for _, block := range blockList.UncommittedBlocks {
|
|
||||||
if *block.Name == blockIDBase64 {
|
|
||||||
found = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
require.True(t, found, "Block ID not found in uncommitted blocks")
|
|
||||||
}
|
|
||||||
|
|
||||||
// This tests uploading a blob where it has uncommitted blocks with a different ID size.
|
|
||||||
//
|
|
||||||
// https://gauravmantri.com/2013/05/18/windows-azure-blob-storage-dealing-with-the-specified-blob-or-block-content-is-invalid-error/
|
|
||||||
//
|
|
||||||
// TestIntegration/FsMkdir/FsPutFiles/Internal/WriteUncommittedBlocks
|
|
||||||
func (f *Fs) testWriteUncommittedBlocks(t *testing.T) {
|
|
||||||
var (
|
|
||||||
ctx = context.Background()
|
|
||||||
remote = "testBlob"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Multipart copy the blob please
|
|
||||||
oldUseCopyBlob, oldCopyCutoff := f.opt.UseCopyBlob, f.opt.CopyCutoff
|
|
||||||
f.opt.UseCopyBlob = false
|
|
||||||
f.opt.CopyCutoff = f.opt.ChunkSize
|
|
||||||
defer func() {
|
|
||||||
f.opt.UseCopyBlob, f.opt.CopyCutoff = oldUseCopyBlob, oldCopyCutoff
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Create a blob with uncommitted blocks
|
|
||||||
f.stageBlockWithoutCommit(ctx, t, remote)
|
|
||||||
|
|
||||||
// Now attempt to overwrite the block with a different sized block ID to provoke this error
|
|
||||||
|
|
||||||
// Check the object does not exist
|
|
||||||
_, err := f.NewObject(ctx, remote)
|
|
||||||
require.Equal(t, fs.ErrorObjectNotFound, err)
|
|
||||||
|
|
||||||
// Upload a multipart file over the block with uncommitted chunks of a different ID size
|
|
||||||
size := 4*int(f.opt.ChunkSize) - 1
|
|
||||||
contents := random.String(size)
|
|
||||||
item := fstest.NewItem(remote, contents, fstest.Time("2001-05-06T04:05:06.499Z"))
|
|
||||||
o := fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
|
||||||
|
|
||||||
// Check size
|
|
||||||
assert.Equal(t, int64(size), o.Size())
|
|
||||||
|
|
||||||
// Create a new blob with uncommitted blocks
|
|
||||||
newRemote := "testBlob2"
|
|
||||||
f.stageBlockWithoutCommit(ctx, t, newRemote)
|
|
||||||
|
|
||||||
// Copy over that block
|
|
||||||
dst, err := f.Copy(ctx, o, newRemote)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Check basics
|
|
||||||
assert.Equal(t, int64(size), dst.Size())
|
|
||||||
assert.Equal(t, newRemote, dst.Remote())
|
|
||||||
|
|
||||||
// Check contents
|
|
||||||
gotContents := fstests.ReadObject(ctx, t, dst, -1)
|
|
||||||
assert.Equal(t, contents, gotContents)
|
|
||||||
|
|
||||||
// Remove the object
|
|
||||||
require.NoError(t, dst.Remove(ctx))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) InternalTest(t *testing.T) {
|
|
||||||
t.Run("Features", f.testFeatures)
|
|
||||||
t.Run("WriteUncommittedBlocks", f.testWriteUncommittedBlocks)
|
|
||||||
}
|
}
|
||||||
|
|||||||
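The TestBlockIDCreator case above pins down the block ID layout: an 8 byte big-endian chunk number followed by 8 per-upload random bytes, base64 encoded. A small sketch reproducing that layout with the standard library; run as-is it prints the same string the test expects.

// Sketch only: the block ID encoding implied by the test above.
package main

import (
    "encoding/base64"
    "encoding/binary"
    "fmt"
)

func main() {
    random := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}        // fixed value from the test
    chunkNumber := uint64(0xFEDCBA9876543210)

    var id [16]byte
    binary.BigEndian.PutUint64(id[:8], chunkNumber) // chunk number first
    copy(id[8:], random[:])                         // then the per-upload random bytes

    fmt.Println(base64.StdEncoding.EncodeToString(id[:])) // /ty6mHZUMhABAgMEBQYHCA==
}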
@@ -1,6 +1,6 @@
|
|||||||
// Test AzureBlob filesystem interface
|
// Test AzureBlob filesystem interface
|
||||||
|
|
||||||
//go:build !plan9 && !solaris && !js
|
// +build !plan9,!solaris
|
||||||
|
|
||||||
package azureblob
|
package azureblob
|
||||||
|
|
||||||
@@ -8,43 +8,17 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fstest"
|
|
||||||
"github.com/rclone/rclone/fstest/fstests"
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestIntegration runs integration tests against the remote
|
// TestIntegration runs integration tests against the remote
|
||||||
func TestIntegration(t *testing.T) {
|
func TestIntegration(t *testing.T) {
|
||||||
name := "TestAzureBlob"
|
|
||||||
fstests.Run(t, &fstests.Opt{
|
fstests.Run(t, &fstests.Opt{
|
||||||
RemoteName: name + ":",
|
RemoteName: "TestAzureBlob:",
|
||||||
NilObject: (*Object)(nil),
|
NilObject: (*Object)(nil),
|
||||||
TiersToTest: []string{"Hot", "Cool", "Cold"},
|
TiersToTest: []string{"Hot", "Cool"},
|
||||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||||
MinChunkSize: defaultChunkSize,
|
MaxChunkSize: maxChunkSize,
|
||||||
},
|
|
||||||
ExtraConfig: []fstests.ExtraConfigItem{
|
|
||||||
{Name: name, Key: "use_copy_blob", Value: "false"},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestIntegration2 runs integration tests against the remote
|
|
||||||
func TestIntegration2(t *testing.T) {
|
|
||||||
if *fstest.RemoteName != "" {
|
|
||||||
t.Skip("Skipping as -remote set")
|
|
||||||
}
|
|
||||||
name := "TestAzureBlob"
|
|
||||||
fstests.Run(t, &fstests.Opt{
|
|
||||||
RemoteName: name + ":",
|
|
||||||
NilObject: (*Object)(nil),
|
|
||||||
TiersToTest: []string{"Hot", "Cool", "Cold"},
|
|
||||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
|
||||||
MinChunkSize: defaultChunkSize,
|
|
||||||
},
|
|
||||||
ExtraConfig: []fstests.ExtraConfigItem{
|
|
||||||
{Name: name, Key: "directory_markers", Value: "true"},
|
|
||||||
{Name: name, Key: "use_copy_blob", Value: "false"},
|
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -53,34 +27,11 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
|||||||
return f.setUploadChunkSize(cs)
|
return f.setUploadChunkSize(cs)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||||
return f.setCopyCutoff(cs)
|
return f.setUploadCutoff(cs)
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||||
_ fstests.SetCopyCutoffer = (*Fs)(nil)
|
_ fstests.SetUploadCutoffer = (*Fs)(nil)
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestValidateAccessTier(t *testing.T) {
|
|
||||||
tests := map[string]struct {
|
|
||||||
accessTier string
|
|
||||||
want bool
|
|
||||||
}{
|
|
||||||
"hot": {"hot", true},
|
|
||||||
"HOT": {"HOT", true},
|
|
||||||
"Hot": {"Hot", true},
|
|
||||||
"cool": {"cool", true},
|
|
||||||
"cold": {"cold", true},
|
|
||||||
"archive": {"archive", true},
|
|
||||||
"empty": {"", false},
|
|
||||||
"unknown": {"unknown", false},
|
|
||||||
}
|
|
||||||
|
|
||||||
for name, test := range tests {
|
|
||||||
t.Run(name, func(t *testing.T) {
|
|
||||||
got := validateAccessTier(test.accessTier)
|
|
||||||
assert.Equal(t, test.want, got)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,7 +1,6 @@
|
|||||||
// Build for azureblob for unsupported platforms to stop go complaining
|
// Build for azureblob for unsupported platforms to stop go complaining
|
||||||
// about "no buildable Go source files "
|
// about "no buildable Go source files "
|
||||||
|
|
||||||
//go:build plan9 || solaris || js
|
// +build plan9 solaris
|
||||||
|
|
||||||
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
|
|
||||||
package azureblob
|
package azureblob
|
||||||
|
|||||||
@@ -1,69 +0,0 @@
//go:build !plan9 && !js

package azurefiles

import (
    "context"
    "math/rand"
    "strings"
    "testing"

    "github.com/rclone/rclone/fstest/fstests"
    "github.com/stretchr/testify/assert"
)

func (f *Fs) InternalTest(t *testing.T) {
    t.Run("Authentication", f.InternalTestAuth)
}

var _ fstests.InternalTester = (*Fs)(nil)

func (f *Fs) InternalTestAuth(t *testing.T) {
    t.Skip("skipping since this requires authentication credentials which are not part of repo")
    shareName := "test-rclone-oct-2023"
    testCases := []struct {
        name    string
        options *Options
    }{
        {
            name: "ConnectionString",
            options: &Options{
                ShareName:        shareName,
                ConnectionString: "",
            },
        },
        {
            name: "AccountAndKey",
            options: &Options{
                ShareName: shareName,
                Account:   "",
                Key:       "",
            }},
        {
            name: "SASUrl",
            options: &Options{
                ShareName: shareName,
                SASURL:    "",
            }},
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            fs, err := newFsFromOptions(context.TODO(), "TestAzureFiles", "", tc.options)
            assert.NoError(t, err)
            dirName := randomString(10)
            assert.NoError(t, fs.Mkdir(context.TODO(), dirName))
        })
    }
}

const chars = "abcdefghijklmnopqrstuvwzyxABCDEFGHIJKLMNOPQRSTUVWZYX"

func randomString(charCount int) string {
    strBldr := strings.Builder{}
    for range charCount {
        randPos := rand.Int63n(52)
        strBldr.WriteByte(chars[randPos])
    }
    return strBldr.String()
}
@@ -1,17 +0,0 @@
|
|||||||
//go:build !plan9 && !js
|
|
||||||
|
|
||||||
package azurefiles
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/fstest/fstests"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestIntegration(t *testing.T) {
|
|
||||||
var objPtr *Object
|
|
||||||
fstests.Run(t, &fstests.Opt{
|
|
||||||
RemoteName: "TestAzureFiles:",
|
|
||||||
NilObject: objPtr,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
// Build for azurefiles for unsupported platforms to stop go complaining
|
|
||||||
// about "no buildable Go source files "
|
|
||||||
|
|
||||||
//go:build plan9 || js
|
|
||||||
|
|
||||||
// Package azurefiles provides an interface to Microsoft Azure Files
|
|
||||||
package azurefiles
|
|
||||||
@@ -1,13 +1,13 @@
|
|||||||
// Package api provides types used by the Backblaze B2 API.
|
|
||||||
package api
|
package api
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"path"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs/fserrors"
|
"github.com/rclone/rclone/fs/fserrors"
|
||||||
"github.com/rclone/rclone/lib/version"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Error describes a B2 error response
|
// Error describes a B2 error response
|
||||||
@@ -33,27 +33,10 @@ var _ fserrors.Fataler = (*Error)(nil)
|
|||||||
|
|
||||||
// Bucket describes a B2 bucket
|
// Bucket describes a B2 bucket
|
||||||
type Bucket struct {
|
type Bucket struct {
|
||||||
ID string `json:"bucketId"`
|
ID string `json:"bucketId"`
|
||||||
AccountID string `json:"accountId"`
|
AccountID string `json:"accountId"`
|
||||||
Name string `json:"bucketName"`
|
Name string `json:"bucketName"`
|
||||||
Type string `json:"bucketType"`
|
Type string `json:"bucketType"`
|
||||||
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// LifecycleRule is a single lifecycle rule
|
|
||||||
type LifecycleRule struct {
|
|
||||||
DaysFromHidingToDeleting *int `json:"daysFromHidingToDeleting"`
|
|
||||||
DaysFromUploadingToHiding *int `json:"daysFromUploadingToHiding"`
|
|
||||||
DaysFromStartingToCancelingUnfinishedLargeFiles *int `json:"daysFromStartingToCancelingUnfinishedLargeFiles"`
|
|
||||||
FileNamePrefix string `json:"fileNamePrefix"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServerSideEncryption is a configuration object for B2 Server-Side Encryption
|
|
||||||
type ServerSideEncryption struct {
|
|
||||||
Mode string `json:"mode"`
|
|
||||||
Algorithm string `json:"algorithm"` // Encryption algorithm to use
|
|
||||||
CustomerKey string `json:"customerKey"` // User provided Base64 encoded key that is used by the server to encrypt files
|
|
||||||
CustomerKeyMd5 string `json:"customerKeyMd5"` // An MD5 hash of the decoded key
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Timestamp is a UTC time when this file was uploaded. It is a base
|
// Timestamp is a UTC time when this file was uploaded. It is a base
|
||||||
@@ -80,17 +63,16 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// HasVersion returns true if it looks like the passed filename has a timestamp on it.
|
const versionFormat = "-v2006-01-02-150405.000"
|
||||||
//
|
|
||||||
// Note that the passed filename's timestamp may still be invalid even if this
|
|
||||||
// function returns true.
|
|
||||||
func HasVersion(remote string) bool {
|
|
||||||
return version.Match(remote)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddVersion adds the timestamp as a version string into the filename passed in.
|
// AddVersion adds the timestamp as a version string into the filename passed in.
|
||||||
func (t Timestamp) AddVersion(remote string) string {
|
func (t Timestamp) AddVersion(remote string) string {
|
||||||
return version.Add(remote, time.Time(t))
|
ext := path.Ext(remote)
|
||||||
|
base := remote[:len(remote)-len(ext)]
|
||||||
|
s := time.Time(t).Format(versionFormat)
|
||||||
|
// Replace the '.' with a '-'
|
||||||
|
s = strings.Replace(s, ".", "-", -1)
|
||||||
|
return base + s + ext
|
||||||
}
|
}
|
||||||
|
|
||||||
// RemoveVersion removes the timestamp from a filename as a version string.
|
// RemoveVersion removes the timestamp from a filename as a version string.
|
||||||
@@ -98,9 +80,24 @@ func (t Timestamp) AddVersion(remote string) string {
|
|||||||
// It returns the new file name and a timestamp, or the old filename
|
// It returns the new file name and a timestamp, or the old filename
|
||||||
// and a zero timestamp.
|
// and a zero timestamp.
|
||||||
func RemoveVersion(remote string) (t Timestamp, newRemote string) {
|
func RemoveVersion(remote string) (t Timestamp, newRemote string) {
|
||||||
time, newRemote := version.Remove(remote)
|
newRemote = remote
|
||||||
t = Timestamp(time)
|
ext := path.Ext(remote)
|
||||||
return
|
base := remote[:len(remote)-len(ext)]
|
||||||
|
if len(base) < len(versionFormat) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
versionStart := len(base) - len(versionFormat)
|
||||||
|
// Check it ends in -xxx
|
||||||
|
if base[len(base)-4] != '-' {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Replace with .xxx for parsing
|
||||||
|
base = base[:len(base)-4] + "." + base[len(base)-3:]
|
||||||
|
newT, err := time.Parse(versionFormat, base[versionStart:])
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return Timestamp(newT), base[:versionStart] + ext
|
||||||
}
|
}
|
||||||
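The right-hand (v1.52) implementation above encodes the upload time into the filename with the reference layout -v2006-01-02-150405.000, swapping the final '.' for '-'. A self-contained sketch of the AddVersion half of that round trip, using only the standard library:

package main

import (
	"fmt"
	"path"
	"strings"
	"time"
)

const versionFormat = "-v2006-01-02-150405.000"

// addVersion inserts t before the extension, replacing the '.' in the
// fractional seconds with '-' so the name stays a single clean segment.
func addVersion(remote string, t time.Time) string {
	ext := path.Ext(remote)
	base := remote[:len(remote)-len(ext)]
	s := strings.ReplaceAll(t.Format(versionFormat), ".", "-")
	return base + s + ext
}

func main() {
	t := time.Date(2001, 2, 3, 4, 5, 6, 123000000, time.UTC)
	fmt.Println(addVersion("potato.txt", t)) // potato-v2001-02-03-040506-123.txt
}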
|
|
||||||
// IsZero returns true if the timestamp is uninitialized
|
// IsZero returns true if the timestamp is uninitialized
|
||||||
@@ -138,10 +135,10 @@ type AuthorizeAccountResponse struct {
|
|||||||
AbsoluteMinimumPartSize int `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
|
AbsoluteMinimumPartSize int `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
|
||||||
AccountID string `json:"accountId"` // The identifier for the account.
|
AccountID string `json:"accountId"` // The identifier for the account.
|
||||||
Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
|
Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
|
||||||
BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
|
BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
|
||||||
BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
|
BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
|
||||||
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
|
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
|
||||||
NamePrefix any `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
|
NamePrefix interface{} `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
|
||||||
} `json:"allowed"`
|
} `json:"allowed"`
|
||||||
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
|
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
|
||||||
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
|
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
|
||||||
@@ -223,10 +220,9 @@ type FileInfo struct {
|
|||||||
|
|
||||||
// CreateBucketRequest is used to create a bucket
|
// CreateBucketRequest is used to create a bucket
|
||||||
type CreateBucketRequest struct {
|
type CreateBucketRequest struct {
|
||||||
AccountID string `json:"accountId"`
|
AccountID string `json:"accountId"`
|
||||||
Name string `json:"bucketName"`
|
Name string `json:"bucketName"`
|
||||||
Type string `json:"bucketType"`
|
Type string `json:"bucketType"`
|
||||||
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteBucketRequest is used to create a bucket
|
// DeleteBucketRequest is used to create a bucket
|
||||||
@@ -257,7 +253,7 @@ type GetFileInfoRequest struct {
|
|||||||
// If the original source of the file being uploaded has a last
|
// If the original source of the file being uploaded has a last
|
||||||
// modified time concept, Backblaze recommends using
|
// modified time concept, Backblaze recommends using
|
||||||
// src_last_modified_millis as the name, and a string holding the base
|
// src_last_modified_millis as the name, and a string holding the base
|
||||||
// 10 number of milliseconds since midnight, January 1, 1970
|
// 10 number number of milliseconds since midnight, January 1, 1970
|
||||||
// UTC. This fits in a 64 bit integer such as the type "long" in the
|
// UTC. This fits in a 64 bit integer such as the type "long" in the
|
||||||
// programming language Java. It is intended to be compatible with
|
// programming language Java. It is intended to be compatible with
|
||||||
// Java's time long. For example, it can be passed directly into the
|
// Java's time long. For example, it can be passed directly into the
|
||||||
@@ -269,22 +265,21 @@ type GetFileInfoRequest struct {
|
|||||||
//
|
//
|
||||||
// Example: { "src_last_modified_millis" : "1452802803026", "large_file_sha1" : "a3195dc1e7b46a2ff5da4b3c179175b75671e80d", "color": "blue" }
|
// Example: { "src_last_modified_millis" : "1452802803026", "large_file_sha1" : "a3195dc1e7b46a2ff5da4b3c179175b75671e80d", "color": "blue" }
|
||||||
type StartLargeFileRequest struct {
|
type StartLargeFileRequest struct {
|
||||||
BucketID string `json:"bucketId"` // The ID of the bucket that the file will go in.
|
BucketID string `json:"bucketId"` //The ID of the bucket that the file will go in.
|
||||||
Name string `json:"fileName"` // The name of the file. See Files for requirements on file names.
|
Name string `json:"fileName"` // The name of the file. See Files for requirements on file names.
|
||||||
ContentType string `json:"contentType"` // The MIME type of the content of the file, which will be returned in the Content-Type header when downloading the file. Use the Content-Type b2/x-auto to automatically set the stored Content-Type post upload. In the case where a file extension is absent or the lookup fails, the Content-Type is set to application/octet-stream.
|
ContentType string `json:"contentType"` // The MIME type of the content of the file, which will be returned in the Content-Type header when downloading the file. Use the Content-Type b2/x-auto to automatically set the stored Content-Type post upload. In the case where a file extension is absent or the lookup fails, the Content-Type is set to application/octet-stream.
|
||||||
Info map[string]string `json:"fileInfo"` // A JSON object holding the name/value pairs for the custom file info.
|
Info map[string]string `json:"fileInfo"` // A JSON object holding the name/value pairs for the custom file info.
|
||||||
ServerSideEncryption *ServerSideEncryption `json:"serverSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption
|
|
||||||
}
|
}
|
||||||
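As the comment explains, src_last_modified_millis is a base-10 string of milliseconds since the Unix epoch, small enough for an int64 (or a Java long). A standalone sketch of producing and parsing that value; timeString and parseTimeString here are illustrative names, not necessarily the backend's helpers:

package main

import (
	"fmt"
	"strconv"
	"time"
)

// timeString renders t as milliseconds since the Unix epoch, the form
// B2 expects in the src_last_modified_millis file info field.
func timeString(t time.Time) string {
	return strconv.FormatInt(t.UnixNano()/int64(time.Millisecond), 10)
}

// parseTimeString reverses timeString.
func parseTimeString(s string) (time.Time, error) {
	ms, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return time.Time{}, err
	}
	return time.Unix(ms/1000, (ms%1000)*int64(time.Millisecond)).UTC(), nil
}

func main() {
	ms := "1452802803026" // example value quoted in the comment above
	t, err := parseTimeString(ms)
	if err != nil {
		panic(err)
	}
	fmt.Println(t, timeString(t) == ms) // round-trips back to the same string
}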
|
|
||||||
// StartLargeFileResponse is the response to StartLargeFileRequest
|
// StartLargeFileResponse is the response to StartLargeFileRequest
|
||||||
type StartLargeFileResponse struct {
|
type StartLargeFileResponse struct {
|
||||||
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
|
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
|
||||||
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
|
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
|
||||||
AccountID string `json:"accountId"` // The identifier for the account.
|
AccountID string `json:"accountId"` // The identifier for the account.
|
||||||
BucketID string `json:"bucketId"` // The unique ID of the bucket.
|
BucketID string `json:"bucketId"` // The unique ID of the bucket.
|
||||||
ContentType string `json:"contentType"` // The MIME type of the file.
|
ContentType string `json:"contentType"` // The MIME type of the file.
|
||||||
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
|
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
|
||||||
UploadTimestamp Timestamp `json:"uploadTimestamp,omitempty"` // This is a UTC time when this file was uploaded.
|
UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetUploadPartURLRequest is passed to b2_get_upload_part_url
|
// GetUploadPartURLRequest is passed to b2_get_upload_part_url
|
||||||
@@ -334,31 +329,11 @@ type CancelLargeFileResponse struct {
|
|||||||
|
|
||||||
// CopyFileRequest is as passed to b2_copy_file
|
// CopyFileRequest is as passed to b2_copy_file
|
||||||
type CopyFileRequest struct {
|
type CopyFileRequest struct {
|
||||||
SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
|
SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
|
||||||
Name string `json:"fileName"` // The name of the new file being created.
|
Name string `json:"fileName"` // The name of the new file being created.
|
||||||
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
|
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
|
||||||
MetadataDirective string `json:"metadataDirective,omitempty"` // The strategy for how to populate metadata for the new file: COPY or REPLACE
|
MetadataDirective string `json:"metadataDirective,omitempty"` // The strategy for how to populate metadata for the new file: COPY or REPLACE
|
||||||
ContentType string `json:"contentType,omitempty"` // The MIME type of the content of the file (REPLACE only)
|
ContentType string `json:"contentType,omitempty"` // The MIME type of the content of the file (REPLACE only)
|
||||||
Info map[string]string `json:"fileInfo,omitempty"` // This field stores the metadata that will be stored with the file. (REPLACE only)
|
Info map[string]string `json:"fileInfo,omitempty"` // This field stores the metadata that will be stored with the file. (REPLACE only)
|
||||||
DestBucketID string `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used
|
DestBucketID string `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used
|
||||||
SourceServerSideEncryption *ServerSideEncryption `json:"sourceServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the source file
|
|
||||||
DestinationServerSideEncryption *ServerSideEncryption `json:"destinationServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the destination file
|
|
||||||
}
|
|
||||||
|
|
||||||
// CopyPartRequest is the request for b2_copy_part - the response is UploadPartResponse
|
|
||||||
type CopyPartRequest struct {
|
|
||||||
SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
|
|
||||||
LargeFileID string `json:"largeFileId"` // The ID of the large file the part will belong to, as returned by b2_start_large_file.
|
|
||||||
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
|
|
||||||
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
|
|
||||||
SourceServerSideEncryption *ServerSideEncryption `json:"sourceServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the source file
|
|
||||||
DestinationServerSideEncryption *ServerSideEncryption `json:"destinationServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the destination file
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateBucketRequest describes a request to modify a B2 bucket
|
|
||||||
type UpdateBucketRequest struct {
|
|
||||||
ID string `json:"bucketId"`
|
|
||||||
AccountID string `json:"accountId"`
|
|
||||||
Type string `json:"bucketType,omitempty"`
|
|
||||||
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -13,6 +13,7 @@ import (
|
|||||||
var (
|
var (
|
||||||
emptyT api.Timestamp
|
emptyT api.Timestamp
|
||||||
t0 = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123456789Z"))
|
t0 = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123456789Z"))
|
||||||
|
t0r = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123000000Z"))
|
||||||
t1 = api.Timestamp(fstest.Time("2001-02-03T04:05:06.123000000Z"))
|
t1 = api.Timestamp(fstest.Time("2001-02-03T04:05:06.123000000Z"))
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -35,6 +36,40 @@ func TestTimestampUnmarshalJSON(t *testing.T) {
|
|||||||
assert.Equal(t, (time.Time)(t1), (time.Time)(tActual))
|
assert.Equal(t, (time.Time)(t1), (time.Time)(tActual))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestTimestampAddVersion(t *testing.T) {
|
||||||
|
for _, test := range []struct {
|
||||||
|
t api.Timestamp
|
||||||
|
in string
|
||||||
|
expected string
|
||||||
|
}{
|
||||||
|
{t0, "potato.txt", "potato-v1970-01-01-010101-123.txt"},
|
||||||
|
{t1, "potato", "potato-v2001-02-03-040506-123"},
|
||||||
|
{t1, "", "-v2001-02-03-040506-123"},
|
||||||
|
} {
|
||||||
|
actual := test.t.AddVersion(test.in)
|
||||||
|
assert.Equal(t, test.expected, actual, test.in)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTimestampRemoveVersion(t *testing.T) {
|
||||||
|
for _, test := range []struct {
|
||||||
|
in string
|
||||||
|
expectedT api.Timestamp
|
||||||
|
expectedRemote string
|
||||||
|
}{
|
||||||
|
{"potato.txt", emptyT, "potato.txt"},
|
||||||
|
{"potato-v1970-01-01-010101-123.txt", t0r, "potato.txt"},
|
||||||
|
{"potato-v2001-02-03-040506-123", t1, "potato"},
|
||||||
|
{"-v2001-02-03-040506-123", t1, ""},
|
||||||
|
{"potato-v2A01-02-03-040506-123", emptyT, "potato-v2A01-02-03-040506-123"},
|
||||||
|
{"potato-v2001-02-03-040506=123", emptyT, "potato-v2001-02-03-040506=123"},
|
||||||
|
} {
|
||||||
|
actualT, actualRemote := api.RemoveVersion(test.in)
|
||||||
|
assert.Equal(t, test.expectedT, actualT, test.in)
|
||||||
|
assert.Equal(t, test.expectedRemote, actualRemote, test.in)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestTimestampIsZero(t *testing.T) {
|
func TestTimestampIsZero(t *testing.T) {
|
||||||
assert.True(t, emptyT.IsZero())
|
assert.True(t, emptyT.IsZero())
|
||||||
assert.False(t, t0.IsZero())
|
assert.False(t, t0.IsZero())
|
||||||
@@ -42,11 +77,11 @@ func TestTimestampIsZero(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestTimestampEqual(t *testing.T) {
|
func TestTimestampEqual(t *testing.T) {
|
||||||
assert.False(t, emptyT.Equal(emptyT)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
|
assert.False(t, emptyT.Equal(emptyT))
|
||||||
assert.False(t, t0.Equal(emptyT))
|
assert.False(t, t0.Equal(emptyT))
|
||||||
assert.False(t, emptyT.Equal(t0))
|
assert.False(t, emptyT.Equal(t0))
|
||||||
assert.False(t, t0.Equal(t1))
|
assert.False(t, t0.Equal(t1))
|
||||||
assert.False(t, t1.Equal(t0))
|
assert.False(t, t1.Equal(t0))
|
||||||
assert.True(t, t0.Equal(t0)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
|
assert.True(t, t0.Equal(t0))
|
||||||
assert.True(t, t1.Equal(t1)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
|
assert.True(t, t1.Equal(t1))
|
||||||
}
|
}
|
||||||
|
|||||||
backend/b2/b2.go (1230 changes): file diff suppressed because it is too large
@@ -1,31 +1,14 @@
|
|||||||
package b2
|
package b2
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"crypto/sha1"
|
|
||||||
"fmt"
|
|
||||||
"path"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/rclone/rclone/backend/b2/api"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/cache"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
|
||||||
"github.com/rclone/rclone/fs/object"
|
|
||||||
"github.com/rclone/rclone/fstest"
|
"github.com/rclone/rclone/fstest"
|
||||||
"github.com/rclone/rclone/fstest/fstests"
|
|
||||||
"github.com/rclone/rclone/lib/bucket"
|
|
||||||
"github.com/rclone/rclone/lib/random"
|
|
||||||
"github.com/rclone/rclone/lib/version"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Test b2 string encoding
|
// Test b2 string encoding
|
||||||
// https://www.backblaze.com/docs/cloud-storage-native-api-string-encoding
|
// https://www.backblaze.com/b2/docs/string_encoding.html
|
||||||
|
|
||||||
var encodeTest = []struct {
|
var encodeTest = []struct {
|
||||||
fullyEncoded string
|
fullyEncoded string
|
||||||
@@ -185,435 +168,3 @@ func TestParseTimeString(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Return a map of the headers in the options with keys stripped of the "x-bz-info-" prefix
|
|
||||||
func OpenOptionToMetaData(options []fs.OpenOption) map[string]string {
|
|
||||||
var headers = make(map[string]string)
|
|
||||||
for _, option := range options {
|
|
||||||
k, v := option.Header()
|
|
||||||
k = strings.ToLower(k)
|
|
||||||
if strings.HasPrefix(k, headerPrefix) {
|
|
||||||
headers[k[len(headerPrefix):]] = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return headers
|
|
||||||
}
|
|
||||||
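OpenOptionToMetaData above recovers B2 file-info keys from HTTP options by stripping the x-bz-info- prefix (the assumed value of headerPrefix). The same idea in a self-contained form:

package main

import (
	"fmt"
	"strings"
)

const headerPrefix = "x-bz-info-" // assumed value of the backend's constant

// headersToMetadata lower-cases header names and keeps only those with
// the x-bz-info- prefix, stripping the prefix to recover the metadata key.
func headersToMetadata(headers map[string]string) map[string]string {
	out := make(map[string]string)
	for k, v := range headers {
		k = strings.ToLower(k)
		if strings.HasPrefix(k, headerPrefix) {
			out[k[len(headerPrefix):]] = v
		}
	}
	return out
}

func main() {
	h := map[string]string{
		"X-Bz-Info-a":  "1",
		"Content-Type": "text/html",
	}
	fmt.Println(headersToMetadata(h)) // map[a:1]
}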
|
|
||||||
func (f *Fs) internalTestMetadata(t *testing.T, size string, uploadCutoff string, chunkSize string) {
|
|
||||||
what := fmt.Sprintf("Size%s/UploadCutoff%s/ChunkSize%s", size, uploadCutoff, chunkSize)
|
|
||||||
t.Run(what, func(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
ss := fs.SizeSuffix(0)
|
|
||||||
err := ss.Set(size)
|
|
||||||
require.NoError(t, err)
|
|
||||||
original := random.String(int(ss))
|
|
||||||
|
|
||||||
contents := fstest.Gz(t, original)
|
|
||||||
mimeType := "text/html"
|
|
||||||
|
|
||||||
if chunkSize != "" {
|
|
||||||
ss := fs.SizeSuffix(0)
|
|
||||||
err := ss.Set(chunkSize)
|
|
||||||
require.NoError(t, err)
|
|
||||||
_, err = f.SetUploadChunkSize(ss)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if uploadCutoff != "" {
|
|
||||||
ss := fs.SizeSuffix(0)
|
|
||||||
err := ss.Set(uploadCutoff)
|
|
||||||
require.NoError(t, err)
|
|
||||||
_, err = f.SetUploadCutoff(ss)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499Z"))
|
|
||||||
btime := time.Now()
|
|
||||||
metadata := fs.Metadata{
|
|
||||||
// Just mtime for now - limit to milliseconds since x-bz-info-src_last_modified_millis can't support any
|
|
||||||
|
|
||||||
"mtime": "2009-05-06T04:05:06.499Z",
|
|
||||||
}
|
|
||||||
|
|
||||||
// Need to specify HTTP options with the header prefix since they are passed as-is
|
|
||||||
options := []fs.OpenOption{
|
|
||||||
&fs.HTTPOption{Key: "X-Bz-Info-a", Value: "1"},
|
|
||||||
&fs.HTTPOption{Key: "X-Bz-Info-b", Value: "2"},
|
|
||||||
}
|
|
||||||
|
|
||||||
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, mimeType, metadata, options...)
|
|
||||||
defer func() {
|
|
||||||
assert.NoError(t, obj.Remove(ctx))
|
|
||||||
}()
|
|
||||||
o := obj.(*Object)
|
|
||||||
gotMetadata, err := o.getMetaData(ctx)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// X-Bz-Info-a & X-Bz-Info-b
|
|
||||||
optMetadata := OpenOptionToMetaData(options)
|
|
||||||
for k, v := range optMetadata {
|
|
||||||
got := gotMetadata.Info[k]
|
|
||||||
assert.Equal(t, v, got, k)
|
|
||||||
}
|
|
||||||
|
|
||||||
assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")
|
|
||||||
|
|
||||||
// Modification time from the x-bz-info-src_last_modified_millis header
|
|
||||||
var mtime api.Timestamp
|
|
||||||
err = mtime.UnmarshalJSON([]byte(gotMetadata.Info[timeKey]))
|
|
||||||
if err != nil {
|
|
||||||
fs.Debugf(o, "Bad "+timeHeader+" header: %v", err)
|
|
||||||
}
|
|
||||||
assert.Equal(t, item.ModTime, time.Time(mtime), "Modification time")
|
|
||||||
|
|
||||||
// Upload time
|
|
||||||
gotBtime := time.Time(gotMetadata.UploadTimestamp)
|
|
||||||
dt := gotBtime.Sub(btime)
|
|
||||||
assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("btime more than 1 minute out want %v got %v delta %v", btime, gotBtime, dt))
|
|
||||||
|
|
||||||
t.Run("GzipEncoding", func(t *testing.T) {
|
|
||||||
// Test that the gzipped file we uploaded can be
|
|
||||||
// downloaded
|
|
||||||
checkDownload := func(wantContents string, wantSize int64, wantHash string) {
|
|
||||||
gotContents := fstests.ReadObject(ctx, t, o, -1)
|
|
||||||
assert.Equal(t, wantContents, gotContents)
|
|
||||||
assert.Equal(t, wantSize, o.Size())
|
|
||||||
gotHash, err := o.Hash(ctx, hash.SHA1)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, wantHash, gotHash)
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Run("NoDecompress", func(t *testing.T) {
|
|
||||||
checkDownload(contents, int64(len(contents)), sha1Sum(t, contents))
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) InternalTestMetadata(t *testing.T) {
|
|
||||||
// 1 kB regular file
|
|
||||||
f.internalTestMetadata(t, "1kiB", "", "")
|
|
||||||
|
|
||||||
// 10 MiB large file
|
|
||||||
f.internalTestMetadata(t, "10MiB", "6MiB", "6MiB")
|
|
||||||
}
|
|
||||||
|
|
||||||
func sha1Sum(t *testing.T, s string) string {
|
|
||||||
hash := sha1.Sum([]byte(s))
|
|
||||||
return fmt.Sprintf("%x", hash)
|
|
||||||
}
|
|
||||||
|
|
||||||
// This is adapted from the s3 equivalent.
|
|
||||||
func (f *Fs) InternalTestVersions(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
// Small pause to make the LastModified different since AWS
|
|
||||||
// only seems to track them to 1 second granularity
|
|
||||||
time.Sleep(2 * time.Second)
|
|
||||||
|
|
||||||
// Create an object
|
|
||||||
const dirName = "versions"
|
|
||||||
const fileName = dirName + "/" + "test-versions.txt"
|
|
||||||
contents := random.String(100)
|
|
||||||
item := fstest.NewItem(fileName, contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
|
|
||||||
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
|
||||||
defer func() {
|
|
||||||
assert.NoError(t, obj.Remove(ctx))
|
|
||||||
}()
|
|
||||||
objMetadata, err := obj.(*Object).getMetaData(ctx)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Small pause
|
|
||||||
time.Sleep(2 * time.Second)
|
|
||||||
|
|
||||||
// Remove it
|
|
||||||
assert.NoError(t, obj.Remove(ctx))
|
|
||||||
|
|
||||||
// Small pause to make the LastModified different since AWS only seems to track them to 1 second granularity
|
|
||||||
time.Sleep(2 * time.Second)
|
|
||||||
|
|
||||||
// And create it with different size and contents
|
|
||||||
newContents := random.String(101)
|
|
||||||
newItem := fstest.NewItem(fileName, newContents, fstest.Time("2002-05-06T04:05:06.499999999Z"))
|
|
||||||
newObj := fstests.PutTestContents(ctx, t, f, &newItem, newContents, true)
|
|
||||||
newObjMetadata, err := newObj.(*Object).getMetaData(ctx)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
t.Run("Versions", func(t *testing.T) {
|
|
||||||
// Set --b2-versions for this test
|
|
||||||
f.opt.Versions = true
|
|
||||||
defer func() {
|
|
||||||
f.opt.Versions = false
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Read the contents
|
|
||||||
entries, err := f.List(ctx, dirName)
|
|
||||||
require.NoError(t, err)
|
|
||||||
tests := 0
|
|
||||||
var fileNameVersion string
|
|
||||||
for _, entry := range entries {
|
|
||||||
t.Log(entry)
|
|
||||||
remote := entry.Remote()
|
|
||||||
if remote == fileName {
|
|
||||||
t.Run("ReadCurrent", func(t *testing.T) {
|
|
||||||
assert.Equal(t, newContents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1))
|
|
||||||
})
|
|
||||||
tests++
|
|
||||||
} else if versionTime, p := version.Remove(remote); !versionTime.IsZero() && p == fileName {
|
|
||||||
t.Run("ReadVersion", func(t *testing.T) {
|
|
||||||
assert.Equal(t, contents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1))
|
|
||||||
})
|
|
||||||
assert.WithinDuration(t, time.Time(objMetadata.UploadTimestamp), versionTime, time.Second, "object time must be within 1 second of version time")
|
|
||||||
fileNameVersion = remote
|
|
||||||
tests++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
assert.Equal(t, 2, tests, "object missing from listing")
|
|
||||||
|
|
||||||
// Check we can read the object with a version suffix
|
|
||||||
t.Run("NewObject", func(t *testing.T) {
|
|
||||||
o, err := f.NewObject(ctx, fileNameVersion)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NotNil(t, o)
|
|
||||||
assert.Equal(t, int64(100), o.Size(), o.Remote())
|
|
||||||
})
|
|
||||||
|
|
||||||
// Check we can make a NewFs from that object with a version suffix
|
|
||||||
t.Run("NewFs", func(t *testing.T) {
|
|
||||||
newPath := bucket.Join(fs.ConfigStringFull(f), fileNameVersion)
|
|
||||||
// Make sure --b2-versions is set in the config of the new remote
|
|
||||||
fs.Debugf(nil, "oldPath = %q", newPath)
|
|
||||||
lastColon := strings.LastIndex(newPath, ":")
|
|
||||||
require.True(t, lastColon >= 0)
|
|
||||||
newPath = newPath[:lastColon] + ",versions" + newPath[lastColon:]
|
|
||||||
fs.Debugf(nil, "newPath = %q", newPath)
|
|
||||||
fNew, err := cache.Get(ctx, newPath)
|
|
||||||
// This should return pointing to a file
|
|
||||||
require.Equal(t, fs.ErrorIsFile, err)
|
|
||||||
require.NotNil(t, fNew)
|
|
||||||
// With the directory above
|
|
||||||
assert.Equal(t, dirName, path.Base(fs.ConfigStringFull(fNew)))
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("VersionAt", func(t *testing.T) {
|
|
||||||
// We set --b2-version-at for this test so make sure we reset it at the end
|
|
||||||
defer func() {
|
|
||||||
f.opt.VersionAt = fs.Time{}
|
|
||||||
}()
|
|
||||||
|
|
||||||
var (
|
|
||||||
firstObjectTime = time.Time(objMetadata.UploadTimestamp)
|
|
||||||
secondObjectTime = time.Time(newObjMetadata.UploadTimestamp)
|
|
||||||
)
|
|
||||||
|
|
||||||
for _, test := range []struct {
|
|
||||||
what string
|
|
||||||
at time.Time
|
|
||||||
want []fstest.Item
|
|
||||||
wantErr error
|
|
||||||
wantSize int64
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
what: "Before",
|
|
||||||
at: firstObjectTime.Add(-time.Second),
|
|
||||||
want: fstests.InternalTestFiles,
|
|
||||||
wantErr: fs.ErrorObjectNotFound,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
what: "AfterOne",
|
|
||||||
at: firstObjectTime.Add(time.Second),
|
|
||||||
want: append([]fstest.Item{item}, fstests.InternalTestFiles...),
|
|
||||||
wantSize: 100,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
what: "AfterDelete",
|
|
||||||
at: secondObjectTime.Add(-time.Second),
|
|
||||||
want: fstests.InternalTestFiles,
|
|
||||||
wantErr: fs.ErrorObjectNotFound,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
what: "AfterTwo",
|
|
||||||
at: secondObjectTime.Add(time.Second),
|
|
||||||
want: append([]fstest.Item{newItem}, fstests.InternalTestFiles...),
|
|
||||||
wantSize: 101,
|
|
||||||
},
|
|
||||||
} {
|
|
||||||
t.Run(test.what, func(t *testing.T) {
|
|
||||||
f.opt.VersionAt = fs.Time(test.at)
|
|
||||||
t.Run("List", func(t *testing.T) {
|
|
||||||
fstest.CheckListing(t, f, test.want)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("NewObject", func(t *testing.T) {
|
|
||||||
gotObj, gotErr := f.NewObject(ctx, fileName)
|
|
||||||
assert.Equal(t, test.wantErr, gotErr)
|
|
||||||
if gotErr == nil {
|
|
||||||
assert.Equal(t, test.wantSize, gotObj.Size())
|
|
||||||
}
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("Cleanup", func(t *testing.T) {
|
|
||||||
t.Run("DryRun", func(t *testing.T) {
|
|
||||||
f.opt.Versions = true
|
|
||||||
defer func() {
|
|
||||||
f.opt.Versions = false
|
|
||||||
}()
|
|
||||||
// Listing should be unchanged after dry run
|
|
||||||
before := listAllFiles(ctx, t, f, dirName)
|
|
||||||
ctx, ci := fs.AddConfig(ctx)
|
|
||||||
ci.DryRun = true
|
|
||||||
require.NoError(t, f.cleanUp(ctx, true, false, 0))
|
|
||||||
after := listAllFiles(ctx, t, f, dirName)
|
|
||||||
assert.Equal(t, before, after)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("RealThing", func(t *testing.T) {
|
|
||||||
f.opt.Versions = true
|
|
||||||
defer func() {
|
|
||||||
f.opt.Versions = false
|
|
||||||
}()
|
|
||||||
// Listing should reflect current state after cleanup
|
|
||||||
require.NoError(t, f.cleanUp(ctx, true, false, 0))
|
|
||||||
items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
|
|
||||||
fstest.CheckListing(t, f, items)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
// Purge gets tested later
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) InternalTestCleanupUnfinished(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
// B2CleanupHidden tests cleaning up hidden files
|
|
||||||
t.Run("CleanupUnfinished", func(t *testing.T) {
|
|
||||||
dirName := "unfinished"
|
|
||||||
fileCount := 5
|
|
||||||
expectedFiles := []string{}
|
|
||||||
for i := 1; i < fileCount; i++ {
|
|
||||||
fileName := fmt.Sprintf("%s/unfinished-%d", dirName, i)
|
|
||||||
expectedFiles = append(expectedFiles, fileName)
|
|
||||||
obj := &Object{
|
|
||||||
fs: f,
|
|
||||||
remote: fileName,
|
|
||||||
}
|
|
||||||
objInfo := object.NewStaticObjectInfo(fileName, fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil)
|
|
||||||
_, err := f.newLargeUpload(ctx, obj, nil, objInfo, f.opt.ChunkSize, false, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
checkListing(ctx, t, f, dirName, expectedFiles)
|
|
||||||
|
|
||||||
t.Run("DryRun", func(t *testing.T) {
|
|
||||||
// Listing should not change after dry run
|
|
||||||
ctx, ci := fs.AddConfig(ctx)
|
|
||||||
ci.DryRun = true
|
|
||||||
require.NoError(t, f.cleanUp(ctx, false, true, 0))
|
|
||||||
checkListing(ctx, t, f, dirName, expectedFiles)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("RealThing", func(t *testing.T) {
|
|
||||||
// Listing should be empty after real cleanup
|
|
||||||
require.NoError(t, f.cleanUp(ctx, false, true, 0))
|
|
||||||
checkListing(ctx, t, f, dirName, []string{})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func listAllFiles(ctx context.Context, t *testing.T, f *Fs, dirName string) []string {
|
|
||||||
bucket, directory := f.split(dirName)
|
|
||||||
foundFiles := []string{}
|
|
||||||
require.NoError(t, f.list(ctx, bucket, directory, "", false, true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
|
|
||||||
if !isDirectory {
|
|
||||||
foundFiles = append(foundFiles, object.Name)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}))
|
|
||||||
sort.Strings(foundFiles)
|
|
||||||
return foundFiles
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkListing(ctx context.Context, t *testing.T, f *Fs, dirName string, expectedFiles []string) {
|
|
||||||
foundFiles := listAllFiles(ctx, t, f, dirName)
|
|
||||||
sort.Strings(expectedFiles)
|
|
||||||
assert.Equal(t, expectedFiles, foundFiles)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) InternalTestLifecycleRules(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
opt := map[string]string{}
|
|
||||||
|
|
||||||
t.Run("InitState", func(t *testing.T) {
|
|
||||||
// There should be no lifecycle rules at the outset
|
|
||||||
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
|
|
||||||
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, 0, len(lifecycleRules))
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("DryRun", func(t *testing.T) {
|
|
||||||
// There should still be no lifecycle rules after each dry run operation
|
|
||||||
ctx, ci := fs.AddConfig(ctx)
|
|
||||||
ci.DryRun = true
|
|
||||||
|
|
||||||
opt["daysFromHidingToDeleting"] = "30"
|
|
||||||
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
|
|
||||||
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, 0, len(lifecycleRules))
|
|
||||||
|
|
||||||
delete(opt, "daysFromHidingToDeleting")
|
|
||||||
opt["daysFromUploadingToHiding"] = "40"
|
|
||||||
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
|
|
||||||
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, 0, len(lifecycleRules))
|
|
||||||
|
|
||||||
opt["daysFromHidingToDeleting"] = "30"
|
|
||||||
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
|
|
||||||
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, 0, len(lifecycleRules))
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("RealThing", func(t *testing.T) {
|
|
||||||
opt["daysFromHidingToDeleting"] = "30"
|
|
||||||
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
|
|
||||||
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, 1, len(lifecycleRules))
|
|
||||||
assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
|
|
||||||
|
|
||||||
delete(opt, "daysFromHidingToDeleting")
|
|
||||||
opt["daysFromUploadingToHiding"] = "40"
|
|
||||||
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
|
|
||||||
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, 1, len(lifecycleRules))
|
|
||||||
assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
|
|
||||||
|
|
||||||
opt["daysFromHidingToDeleting"] = "30"
|
|
||||||
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
|
|
||||||
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, 1, len(lifecycleRules))
|
|
||||||
assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
|
|
||||||
assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
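The lifecycle test drives the backend's lifecycle command with daysFromHidingToDeleting and daysFromUploadingToHiding. The API struct uses *int fields so an unset value marshals as JSON null rather than 0; a small illustration with a struct shaped like LifecycleRule above:

package main

import (
	"encoding/json"
	"fmt"
)

// lifecycleRule mirrors the shape of api.LifecycleRule above; pointer
// fields let "not set" (null) be distinguished from an explicit 0.
type lifecycleRule struct {
	DaysFromHidingToDeleting  *int   `json:"daysFromHidingToDeleting"`
	DaysFromUploadingToHiding *int   `json:"daysFromUploadingToHiding"`
	FileNamePrefix            string `json:"fileNamePrefix"`
}

func main() {
	days := 30
	rule := lifecycleRule{DaysFromHidingToDeleting: &days}
	out, _ := json.Marshal(rule)
	fmt.Println(string(out))
	// {"daysFromHidingToDeleting":30,"daysFromUploadingToHiding":null,"fileNamePrefix":""}
}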
|
|
||||||
// -run TestIntegration/FsMkdir/FsPutFiles/Internal
|
|
||||||
func (f *Fs) InternalTest(t *testing.T) {
|
|
||||||
t.Run("Metadata", f.InternalTestMetadata)
|
|
||||||
t.Run("Versions", f.InternalTestVersions)
|
|
||||||
t.Run("CleanupUnfinished", f.InternalTestCleanupUnfinished)
|
|
||||||
t.Run("LifecycleRules", f.InternalTestLifecycleRules)
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ fstests.InternalTester = (*Fs)(nil)
|
|
||||||
|
|||||||
@@ -28,12 +28,7 @@ func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
|||||||
return f.setUploadCutoff(cs)
|
return f.setUploadCutoff(cs)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
|
||||||
return f.setCopyCutoff(cs)
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
var (
|
||||||
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||||
_ fstests.SetUploadCutoffer = (*Fs)(nil)
|
_ fstests.SetUploadCutoffer = (*Fs)(nil)
|
||||||
_ fstests.SetCopyCutoffer = (*Fs)(nil)
|
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -1,10 +1,11 @@
|
|||||||
// Upload large files for b2
|
// Upload large files for b2
|
||||||
//
|
//
|
||||||
// Docs - https://www.backblaze.com/docs/cloud-storage-large-files
|
// Docs - https://www.backblaze.com/b2/docs/large_files.html
|
||||||
|
|
||||||
package b2
|
package b2
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"crypto/sha1"
|
"crypto/sha1"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
@@ -14,15 +15,12 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
"github.com/rclone/rclone/backend/b2/api"
|
"github.com/rclone/rclone/backend/b2/api"
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/accounting"
|
"github.com/rclone/rclone/fs/accounting"
|
||||||
"github.com/rclone/rclone/fs/chunksize"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
"github.com/rclone/rclone/fs/hash"
|
||||||
"github.com/rclone/rclone/lib/atexit"
|
|
||||||
"github.com/rclone/rclone/lib/pool"
|
|
||||||
"github.com/rclone/rclone/lib/rest"
|
"github.com/rclone/rclone/lib/rest"
|
||||||
"golang.org/x/sync/errgroup"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type hashAppendingReader struct {
|
type hashAppendingReader struct {
|
||||||
@@ -70,39 +68,41 @@ func newHashAppendingReader(in io.Reader, h gohash.Hash) *hashAppendingReader {
|
|||||||
|
|
||||||
// largeUpload is used to control the upload of large files which need chunking
|
// largeUpload is used to control the upload of large files which need chunking
|
||||||
type largeUpload struct {
|
type largeUpload struct {
|
||||||
f *Fs // parent Fs
|
f *Fs // parent Fs
|
||||||
o *Object // object being uploaded
|
o *Object // object being uploaded
|
||||||
doCopy bool // doing copy rather than upload
|
in io.Reader // read the data from here
|
||||||
what string // text name of operation for logs
|
wrap accounting.WrapFn // account parts being transferred
|
||||||
in io.Reader // read the data from here
|
id string // ID of the file being uploaded
|
||||||
wrap accounting.WrapFn // account parts being transferred
|
size int64 // total size
|
||||||
id string // ID of the file being uploaded
|
parts int64 // calculated number of parts, if known
|
||||||
size int64 // total size
|
sha1s []string // slice of SHA1s for each part
|
||||||
parts int // calculated number of parts, if known
|
uploadMu sync.Mutex // lock for upload variable
|
||||||
sha1smu sync.Mutex // mutex to protect sha1s
|
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
|
||||||
sha1s []string // slice of SHA1s for each part
|
|
||||||
uploadMu sync.Mutex // lock for upload variable
|
|
||||||
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
|
|
||||||
chunkSize int64 // chunk size to use
|
|
||||||
src *Object // if copying, object we are reading from
|
|
||||||
info *api.FileInfo // final response with info about the object
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// newLargeUpload starts an upload of object o from in with metadata in src
|
// newLargeUpload starts an upload of object o from in with metadata in src
|
||||||
//
|
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
|
||||||
// If newInfo is set then metadata from that will be used instead of reading it from src
|
remote := o.remote
|
||||||
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File, options ...fs.OpenOption) (up *largeUpload, err error) {
|
|
||||||
size := src.Size()
|
size := src.Size()
|
||||||
parts := 0
|
parts := int64(0)
|
||||||
chunkSize := defaultChunkSize
|
sha1SliceSize := int64(maxParts)
|
||||||
if size == -1 {
|
if size == -1 {
|
||||||
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
|
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
|
||||||
} else {
|
} else {
|
||||||
chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
|
parts = size / int64(o.fs.opt.ChunkSize)
|
||||||
parts = int(size / int64(chunkSize))
|
if size%int64(o.fs.opt.ChunkSize) != 0 {
|
||||||
if size%int64(chunkSize) != 0 {
|
|
||||||
parts++
|
parts++
|
||||||
}
|
}
|
||||||
|
if parts > maxParts {
|
||||||
|
return nil, errors.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
|
||||||
|
}
|
||||||
|
sha1SliceSize = parts
|
||||||
|
}
|
||||||
|
|
||||||
|
modTime := src.ModTime(ctx)
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/b2_start_large_file",
|
||||||
}
|
}
|
||||||
bucket, bucketPath := o.split()
|
bucket, bucketPath := o.split()
|
||||||
bucketID, err := f.getBucketID(ctx, bucket)
|
bucketID, err := f.getBucketID(ctx, bucket)
|
||||||
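Both sides of the hunk above compute the part count as the size divided by the chunk size, rounded up, and the newer side rejects uploads that would need more than maxParts parts (10,000 is B2's documented limit per large file). A standalone illustration of that arithmetic; the chunk size below is only an example value:

package main

import "fmt"

const maxParts = 10000 // B2's documented limit on parts per large file

// partCount returns the number of chunks needed to upload size bytes in
// chunkSize pieces, i.e. ceil(size/chunkSize), and whether that fits
// within B2's part limit.
func partCount(size, chunkSize int64) (parts int64, ok bool) {
	parts = size / chunkSize
	if size%chunkSize != 0 {
		parts++
	}
	return parts, parts <= maxParts
}

func main() {
	for _, size := range []int64{100 << 20, 5 << 40} {
		parts, ok := partCount(size, 96<<20) // 96 MiB chunks as an example
		fmt.Printf("size=%d parts=%d within limit=%v\n", size, parts, ok)
	}
}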
@@ -110,53 +110,19 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
var request = api.StartLargeFileRequest{
|
var request = api.StartLargeFileRequest{
|
||||||
BucketID: bucketID,
|
BucketID: bucketID,
|
||||||
Name: f.opt.Enc.FromStandardPath(bucketPath),
|
Name: f.opt.Enc.FromStandardPath(bucketPath),
|
||||||
}
|
ContentType: fs.MimeType(ctx, src),
|
||||||
optionsToSend := make([]fs.OpenOption, 0, len(options))
|
Info: map[string]string{
|
||||||
if newInfo == nil {
|
|
||||||
modTime, err := o.getModTime(ctx, src, options)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
request.ContentType = fs.MimeType(ctx, src)
|
|
||||||
request.Info = map[string]string{
|
|
||||||
timeKey: timeString(modTime),
|
timeKey: timeString(modTime),
|
||||||
}
|
},
|
||||||
// Custom upload headers - remove header prefix since they are sent in the body
|
|
||||||
for _, option := range options {
|
|
||||||
k, v := option.Header()
|
|
||||||
k = strings.ToLower(k)
|
|
||||||
if strings.HasPrefix(k, headerPrefix) {
|
|
||||||
request.Info[k[len(headerPrefix):]] = v
|
|
||||||
} else {
|
|
||||||
optionsToSend = append(optionsToSend, option)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Set the SHA1 if known
|
|
||||||
if !o.fs.opt.DisableCheckSum || doCopy {
|
|
||||||
if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
|
|
||||||
request.Info[sha1Key] = calculatedSha1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
request.ContentType = newInfo.ContentType
|
|
||||||
request.Info = newInfo.Info
|
|
||||||
}
|
}
|
||||||
if o.fs.opt.SSECustomerKey != "" && o.fs.opt.SSECustomerKeyMD5 != "" {
|
// Set the SHA1 if known
|
||||||
request.ServerSideEncryption = &api.ServerSideEncryption{
|
if !o.fs.opt.DisableCheckSum {
|
||||||
Mode: "SSE-C",
|
if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
|
||||||
Algorithm: o.fs.opt.SSECustomerAlgorithm,
|
request.Info[sha1Key] = calculatedSha1
|
||||||
CustomerKey: o.fs.opt.SSECustomerKeyBase64,
|
|
||||||
CustomerKeyMd5: o.fs.opt.SSECustomerKeyMD5,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "POST",
|
|
||||||
Path: "/b2_start_large_file",
|
|
||||||
Options: optionsToSend,
|
|
||||||
}
|
|
||||||
var response api.StartLargeFileResponse
|
var response api.StartLargeFileResponse
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
|
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
|
||||||
@@ -165,24 +131,18 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
up = &largeUpload{
|
|
||||||
f: f,
|
|
||||||
o: o,
|
|
||||||
doCopy: doCopy,
|
|
||||||
what: "upload",
|
|
||||||
id: response.ID,
|
|
||||||
size: size,
|
|
||||||
parts: parts,
|
|
||||||
sha1s: make([]string, 0, 16),
|
|
||||||
chunkSize: int64(chunkSize),
|
|
||||||
}
|
|
||||||
// unwrap the accounting from the input, we use wrap to put it
|
// unwrap the accounting from the input, we use wrap to put it
|
||||||
// back on after the buffering
|
// back on after the buffering
|
||||||
if doCopy {
|
in, wrap := accounting.UnWrap(in)
|
||||||
up.what = "copy"
|
up = &largeUpload{
|
||||||
up.src = src.(*Object)
|
f: f,
|
||||||
} else {
|
o: o,
|
||||||
up.in, up.wrap = accounting.UnWrap(in)
|
in: in,
|
||||||
|
wrap: wrap,
|
||||||
|
id: response.ID,
|
||||||
|
size: size,
|
||||||
|
parts: parts,
|
||||||
|
sha1s: make([]string, sha1SliceSize),
|
||||||
}
|
}
|
||||||
return up, nil
|
return up, nil
|
||||||
}
|
}
|
||||||
@@ -192,26 +152,24 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
|
|||||||
// This should be returned with returnUploadURL when finished
|
// This should be returned with returnUploadURL when finished
|
||||||
func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
|
func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
|
||||||
up.uploadMu.Lock()
|
up.uploadMu.Lock()
|
||||||
if len(up.uploads) > 0 {
|
defer up.uploadMu.Unlock()
|
||||||
|
if len(up.uploads) == 0 {
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/b2_get_upload_part_url",
|
||||||
|
}
|
||||||
|
var request = api.GetUploadPartURLRequest{
|
||||||
|
ID: up.id,
|
||||||
|
}
|
||||||
|
err := up.f.pacer.Call(func() (bool, error) {
|
||||||
|
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
|
||||||
|
return up.f.shouldRetry(ctx, resp, err)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "failed to get upload URL")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
upload, up.uploads = up.uploads[0], up.uploads[1:]
|
upload, up.uploads = up.uploads[0], up.uploads[1:]
|
||||||
up.uploadMu.Unlock()
|
|
||||||
return upload, nil
|
|
||||||
}
|
|
||||||
up.uploadMu.Unlock()
|
|
||||||
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "POST",
|
|
||||||
Path: "/b2_get_upload_part_url",
|
|
||||||
}
|
|
||||||
var request = api.GetUploadPartURLRequest{
|
|
||||||
ID: up.id,
|
|
||||||
}
|
|
||||||
err = up.f.pacer.Call(func() (bool, error) {
|
|
||||||
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
|
|
||||||
return up.f.shouldRetry(ctx, resp, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to get upload URL: %w", err)
|
|
||||||
}
|
}
|
||||||
return upload, nil
|
return upload, nil
|
||||||
}
|
}
|
||||||
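Both variants of getUploadURL implement the same idea: upload URLs from b2_get_upload_part_url are kept in a mutex-protected slice so concurrent part uploads can reuse returned URLs instead of asking the server each time. A simplified, B2-agnostic sketch of that check-out/return pool:

package main

import (
	"fmt"
	"sync"
)

// urlPool hands out previously returned upload URLs and falls back to a
// fetch function when the pool is empty, mirroring getUploadURL and
// returnUploadURL above.
type urlPool struct {
	mu    sync.Mutex
	free  []string
	fetch func() (string, error)
}

func (p *urlPool) get() (string, error) {
	p.mu.Lock()
	if n := len(p.free); n > 0 {
		u := p.free[n-1]
		p.free = p.free[:n-1]
		p.mu.Unlock()
		return u, nil
	}
	p.mu.Unlock()
	return p.fetch() // slow path: ask the server for a new upload URL
}

func (p *urlPool) put(u string) {
	p.mu.Lock()
	p.free = append(p.free, u)
	p.mu.Unlock()
}

func main() {
	n := 0
	pool := &urlPool{fetch: func() (string, error) {
		n++
		return fmt.Sprintf("https://upload.example/%d", n), nil
	}}
	u, _ := pool.get()  // fetched from the server
	pool.put(u)         // returned for reuse
	u2, _ := pool.get() // reused, no new fetch
	fmt.Println(u, u2, "fetches:", n)
}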
@@ -226,39 +184,10 @@ func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
|
|||||||
up.uploadMu.Unlock()
|
up.uploadMu.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add an sha1 to the being built up sha1s
|
// Transfer a chunk
|
||||||
func (up *largeUpload) addSha1(chunkNumber int, sha1 string) {
|
func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
|
||||||
up.sha1smu.Lock()
|
err := up.f.pacer.Call(func() (bool, error) {
|
||||||
defer up.sha1smu.Unlock()
|
fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
|
||||||
if len(up.sha1s) < chunkNumber+1 {
|
|
||||||
up.sha1s = append(up.sha1s, make([]string, chunkNumber+1-len(up.sha1s))...)
|
|
||||||
}
|
|
||||||
up.sha1s[chunkNumber] = sha1
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
|
|
||||||
func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (size int64, err error) {
|
|
||||||
// Only account after the checksum reads have been done
|
|
||||||
if do, ok := reader.(pool.DelayAccountinger); ok {
|
|
||||||
// To figure out this number, do a transfer and if the accounted size is 0 or a
|
|
||||||
// multiple of what it should be, increase or decrease this number.
|
|
||||||
do.DelayAccounting(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = up.f.pacer.Call(func() (bool, error) {
|
|
||||||
// Discover the size by seeking to the end
|
|
||||||
size, err = reader.Seek(0, io.SeekEnd)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// rewind the reader on retry and after reading size
|
|
||||||
_, err = reader.Seek(0, io.SeekStart)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
fs.Debugf(up.o, "Sending chunk %d length %d", chunkNumber, size)
|
|
||||||
|
|
||||||
// Get upload URL
|
// Get upload URL
|
||||||
upload, err := up.getUploadURL(ctx)
|
upload, err := up.getUploadURL(ctx)
|
||||||
@@ -266,8 +195,8 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
|
|||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
in := newHashAppendingReader(reader, sha1.New())
|
in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
|
||||||
sizeWithHash := size + int64(in.AdditionalLength())
|
size := int64(len(body)) + int64(in.AdditionalLength())
|
||||||
|
|
||||||
// Authorization
|
// Authorization
|
||||||
//
|
//
|
||||||
@@ -282,14 +211,14 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
|
|||||||
//
|
//
|
||||||
// The number of bytes in the file being uploaded. Note that
|
// The number of bytes in the file being uploaded. Note that
|
||||||
// this header is required; you cannot leave it out and just
|
// this header is required; you cannot leave it out and just
|
||||||
// use chunked encoding. The minimum size of every part but
|
// use chunked encoding. The minimum size of every part but
|
||||||
// the last one is 100 MB (100,000,000 bytes)
|
// the last one is 100MB.
|
||||||
//
|
//
|
||||||
// X-Bz-Content-Sha1
|
// X-Bz-Content-Sha1
|
||||||
//
|
//
|
||||||
// The SHA1 checksum of this part of the file. B2 will
|
// The SHA1 checksum of this part of the file. B2 will
|
||||||
// check this when the part is uploaded, to make sure that the
|
// check this when the part is uploaded, to make sure that the
|
||||||
// data arrived correctly. The same SHA1 checksum must be
|
// data arrived correctly. The same SHA1 checksum must be
|
||||||
// passed to b2_finish_large_file.
|
// passed to b2_finish_large_file.
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "POST",
|
Method: "POST",
|
||||||
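The sha1Header value "hex_digits_at_end" set just below tells B2 that the 40 hex digits of the part's SHA1 follow the body, which is why the content length sent is the part size plus the hash length. A minimal sketch of a reader working that way; the backend's hashAppendingReader is the real, more general version:

package main

import (
	"crypto/sha1"
	"fmt"
	"hash"
	"io"
	"strings"
)

// hashAppendingReader streams from in while hashing, then yields the
// hex digest once in is exhausted - the "hex_digits_at_end" scheme.
type hashAppendingReader struct {
	in   io.Reader
	h    hash.Hash
	tail io.Reader // nil until in returns EOF
}

func (r *hashAppendingReader) Read(p []byte) (int, error) {
	if r.tail == nil {
		n, err := r.in.Read(p)
		r.h.Write(p[:n])
		if err == io.EOF {
			r.tail = strings.NewReader(fmt.Sprintf("%x", r.h.Sum(nil)))
			err = nil
			if n == 0 {
				return r.tail.Read(p)
			}
		}
		return n, err
	}
	return r.tail.Read(p)
}

func main() {
	body := "hello world"
	r := &hashAppendingReader{in: strings.NewReader(body), h: sha1.New()}
	out, _ := io.ReadAll(r)
	// total length = len(body) + 40 hex digits of the SHA1
	fmt.Println(len(out), string(out[len(body):]))
}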
@@ -297,16 +226,10 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
|
|||||||
Body: up.wrap(in),
|
Body: up.wrap(in),
|
||||||
ExtraHeaders: map[string]string{
|
ExtraHeaders: map[string]string{
|
||||||
"Authorization": upload.AuthorizationToken,
|
"Authorization": upload.AuthorizationToken,
|
||||||
"X-Bz-Part-Number": fmt.Sprintf("%d", chunkNumber+1),
|
"X-Bz-Part-Number": fmt.Sprintf("%d", part),
|
||||||
sha1Header: "hex_digits_at_end",
|
sha1Header: "hex_digits_at_end",
|
||||||
},
|
},
|
||||||
ContentLength: &sizeWithHash,
|
ContentLength: &size,
|
||||||
}
|
|
||||||
|
|
||||||
if up.o.fs.opt.SSECustomerKey != "" && up.o.fs.opt.SSECustomerKeyMD5 != "" {
|
|
||||||
opts.ExtraHeaders[sseAlgorithmHeader] = up.o.fs.opt.SSECustomerAlgorithm
|
|
||||||
opts.ExtraHeaders[sseKeyHeader] = up.o.fs.opt.SSECustomerKeyBase64
|
|
||||||
opts.ExtraHeaders[sseMd5Header] = up.o.fs.opt.SSECustomerKeyMD5
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var response api.UploadPartResponse
|
var response api.UploadPartResponse
|
||||||
@@ -314,7 +237,7 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
|
|||||||
resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
|
resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
|
||||||
retry, err := up.f.shouldRetry(ctx, resp, err)
|
retry, err := up.f.shouldRetry(ctx, resp, err)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", chunkNumber, retry, err, err)
|
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
|
||||||
}
|
}
|
||||||
// On retryable error clear PartUploadURL
|
// On retryable error clear PartUploadURL
|
||||||
if retry {
|
if retry {
|
||||||
@@ -322,63 +245,20 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
|
|||||||
upload = nil
|
upload = nil
|
||||||
}
|
}
|
||||||
up.returnUploadURL(upload)
|
up.returnUploadURL(upload)
|
||||||
up.addSha1(chunkNumber, in.HexSum())
|
up.sha1s[part-1] = in.HexSum()
|
||||||
return retry, err
|
return retry, err
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Debugf(up.o, "Error sending chunk %d: %v", chunkNumber, err)
|
fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
|
||||||
} else {
|
} else {
|
||||||
fs.Debugf(up.o, "Done sending chunk %d", chunkNumber)
|
fs.Debugf(up.o, "Done sending chunk %d", part)
|
||||||
}
|
|
||||||
return size, err
|
|
||||||
}
|
|
||||||
|
|
||||||
-// Copy a chunk
-func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64) error {
-err := up.f.pacer.Call(func() (bool, error) {
-fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize)
-opts := rest.Opts{
-Method: "POST",
-Path: "/b2_copy_part",
-}
-offset := int64(part) * up.chunkSize // where we are in the source file
-var request = api.CopyPartRequest{
-SourceID: up.src.id,
-LargeFileID: up.id,
-PartNumber: int64(part + 1),
-Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
-}
-
-if up.o.fs.opt.SSECustomerKey != "" && up.o.fs.opt.SSECustomerKeyMD5 != "" {
-serverSideEncryptionConfig := api.ServerSideEncryption{
-Mode: "SSE-C",
-Algorithm: up.o.fs.opt.SSECustomerAlgorithm,
-CustomerKey: up.o.fs.opt.SSECustomerKeyBase64,
-CustomerKeyMd5: up.o.fs.opt.SSECustomerKeyMD5,
-}
-request.SourceServerSideEncryption = &serverSideEncryptionConfig
-request.DestinationServerSideEncryption = &serverSideEncryptionConfig
-}
-var response api.UploadPartResponse
-resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
-retry, err := up.f.shouldRetry(ctx, resp, err)
-if err != nil {
-fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err)
-}
-up.addSha1(part, response.SHA1)
-return retry, err
-})
-if err != nil {
-fs.Debugf(up.o, "Error copying chunk %d: %v", part, err)
-} else {
-fs.Debugf(up.o, "Done copying chunk %d", part)
 }
 return err
 }

-// Close closes off the large upload
+// finish closes off the large upload
-func (up *largeUpload) Close(ctx context.Context) error {
+func (up *largeUpload) finish(ctx context.Context) error {
-fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts)
+fs.Debugf(up.o, "Finishing large file upload with %d parts", up.parts)
 opts := rest.Opts{
 Method: "POST",
 Path: "/b2_finish_large_file",
@@ -395,13 +275,11 @@ func (up *largeUpload) Close(ctx context.Context) error {
 if err != nil {
 return err
 }
-up.info = &response
+return up.o.decodeMetaDataFileInfo(&response)
-return nil
 }

-// Abort aborts the large upload
+// cancel aborts the large upload
-func (up *largeUpload) Abort(ctx context.Context) error {
+func (up *largeUpload) cancel(ctx context.Context) error {
-fs.Debugf(up.o, "Cancelling large file %s", up.what)
 opts := rest.Opts{
 Method: "POST",
 Path: "/b2_cancel_large_file",
@@ -414,113 +292,139 @@ func (up *largeUpload) Abort(ctx context.Context) error {
 resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
 return up.f.shouldRetry(ctx, resp, err)
 })
-if err != nil {
-fs.Errorf(up.o, "Failed to cancel large file %s: %v", up.what, err)
-}
 return err
 }

+func (up *largeUpload) managedTransferChunk(ctx context.Context, wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
+wg.Add(1)
+go func(part int64, buf []byte) {
+defer wg.Done()
+defer up.f.putUploadBlock(buf)
+err := up.transferChunk(ctx, part, buf)
+if err != nil {
+select {
+case errs <- err:
+default:
+}
+}
+}(part, buf)
+}
+
+func (up *largeUpload) finishOrCancelOnError(ctx context.Context, err error, errs chan error) error {
+if err == nil {
+select {
+case err = <-errs:
+default:
+}
+}
+if err != nil {
+fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err)
+cancelErr := up.cancel(ctx)
+if cancelErr != nil {
+fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr)
+}
+return err
+}
+return up.finish(ctx)
+}

 // Stream uploads the chunks from the input, starting with a required initial
 // chunk. Assumes the file size is unknown and will upload until the input
 // reaches EOF.
-//
+func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
-// Note that initialUploadBlock must be returned to f.putBuf()
-func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock *pool.RW) (err error) {
-defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
 fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
-var (
+errs := make(chan error, 1)
-g, gCtx = errgroup.WithContext(ctx)
+hasMoreParts := true
-hasMoreParts = true
+var wg sync.WaitGroup
-)
-up.size = initialUploadBlock.Size()
+// Transfer initial chunk
-up.parts = 0
+up.size = int64(len(initialUploadBlock))
-for part := 0; hasMoreParts; part++ {
+up.managedTransferChunk(ctx, &wg, errs, 1, initialUploadBlock)
-// Get a block of memory from the pool and token which limits concurrency.
-var rw *pool.RW
+outer:
-if part == 0 {
+for part := int64(2); hasMoreParts; part++ {
-rw = initialUploadBlock
+// Check any errors
-} else {
+select {
-rw = up.f.getRW(false)
+case err = <-errs:
+break outer
+default:
 }

-// Fail fast, in case an errgroup managed function returns an error
+// Get a block of memory
-// gCtx is cancelled. There is no point in uploading all the other parts.
+buf := up.f.getUploadBlock()
-if gCtx.Err() != nil {
-up.f.putRW(rw)
-break
-}

 // Read the chunk
-var n int64
+var n int
-if part == 0 {
+n, err = io.ReadFull(up.in, buf)
-n = rw.Size()
+if err == io.ErrUnexpectedEOF {
-} else {
+fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
-n, err = io.CopyN(rw, up.in, up.chunkSize)
+buf = buf[:n]
-if err == io.EOF {
+hasMoreParts = false
-if n == 0 {
+err = nil
-fs.Debugf(up.o, "Not sending empty chunk after EOF - ending.")
+} else if err == io.EOF {
-up.f.putRW(rw)
+fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
-break
+up.f.putUploadBlock(buf)
-} else {
+err = nil
-fs.Debugf(up.o, "Read less than a full chunk %d, making this the last one.", n)
+break outer
-}
+} else if err != nil {
-hasMoreParts = false
+// other kinds of errors indicate failure
-} else if err != nil {
+up.f.putUploadBlock(buf)
-// other kinds of errors indicate failure
+break outer
-up.f.putRW(rw)
-return err
-}
 }

 // Keep stats up to date
-up.parts += 1
+up.parts = part
-up.size += n
+up.size += int64(n)
 if part > maxParts {
-up.f.putRW(rw)
+err = errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
-return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
+break outer
 }

-part := part // for the closure
+// Transfer the chunk
-g.Go(func() (err error) {
+up.managedTransferChunk(ctx, &wg, errs, part, buf)
-defer up.f.putRW(rw)
-_, err = up.WriteChunk(gCtx, part, rw)
-return err
-})
 }
-err = g.Wait()
+wg.Wait()
-if err != nil {
+up.sha1s = up.sha1s[:up.parts]
-return err
-}
+return up.finishOrCancelOnError(ctx, err, errs)
-return up.Close(ctx)
 }

-// Copy the chunks from the source to the destination
+// Upload uploads the chunks from the input
-func (up *largeUpload) Copy(ctx context.Context) (err error) {
+func (up *largeUpload) Upload(ctx context.Context) error {
-defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
+fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
-fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
+remaining := up.size
-var (
+errs := make(chan error, 1)
-g, gCtx = errgroup.WithContext(ctx)
+var wg sync.WaitGroup
-remaining = up.size
+var err error
-)
+outer:
-g.SetLimit(up.f.opt.UploadConcurrency)
+for part := int64(1); part <= up.parts; part++ {
-for part := range up.parts {
+// Check any errors
-// Fail fast, in case an errgroup managed function returns an error
+select {
-// gCtx is cancelled. There is no point in copying all the other parts.
+case err = <-errs:
-if gCtx.Err() != nil {
+break outer
-break
+default:
 }

-reqSize := min(remaining, up.chunkSize)
+reqSize := remaining
+if reqSize >= int64(up.f.opt.ChunkSize) {
+reqSize = int64(up.f.opt.ChunkSize)
+}

-part := part // for the closure
+// Get a block of memory
-g.Go(func() (err error) {
+buf := up.f.getUploadBlock()[:reqSize]
-return up.copyChunk(gCtx, part, reqSize)
-})
+// Read the chunk
+_, err = io.ReadFull(up.in, buf)
+if err != nil {
+up.f.putUploadBlock(buf)
+break outer
+}
+
+// Transfer the chunk
+up.managedTransferChunk(ctx, &wg, errs, part, buf)
 remaining -= reqSize
 }
-err = g.Wait()
+wg.Wait()
-if err != nil {
-return err
+return up.finishOrCancelOnError(ctx, err, errs)
-}
-return up.Close(ctx)
 }
@@ -14,7 +14,7 @@ const (
 timeFormat = `"` + time.RFC3339 + `"`
 )

-// Time represents date and time information for the
+// Time represents represents date and time information for the
 // box API, by using RFC3339
 type Time time.Time

@@ -36,13 +36,13 @@ func (t *Time) UnmarshalJSON(data []byte) error {

 // Error is returned from box when things go wrong
 type Error struct {
 Type string `json:"type"`
 Status int `json:"status"`
 Code string `json:"code"`
-ContextInfo json.RawMessage `json:"context_info"`
+ContextInfo json.RawMessage
 HelpURL string `json:"help_url"`
 Message string `json:"message"`
 RequestID string `json:"request_id"`
 }

 // Error returns a string for the error and satisfies the error interface
@@ -52,7 +52,7 @@ func (e *Error) Error() string {
 out += ": " + e.Message
 }
 if e.ContextInfo != nil {
-out += fmt.Sprintf(" (%s)", string(e.ContextInfo))
+out += fmt.Sprintf(" (%+v)", e.ContextInfo)
 }
 return out
 }
@@ -61,9 +61,9 @@ func (e *Error) Error() string {
 var _ error = (*Error)(nil)

 // ItemFields are the fields needed for FileInfo
-var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by"
+var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link"

-// Types of things in Item/ItemMini
+// Types of things in Item
 const (
 ItemTypeFolder = "folder"
 ItemTypeFile = "file"
@@ -72,41 +72,24 @@ const (
 ItemStatusDeleted = "deleted"
 )

-// ItemMini is a subset of the elements in a full Item returned by some API calls
-type ItemMini struct {
-Type string `json:"type"`
-ID string `json:"id"`
-SequenceID int64 `json:"sequence_id,string"`
-Etag string `json:"etag"`
-SHA1 string `json:"sha1"`
-Name string `json:"name"`
-}
-
 // Item describes a folder or a file as returned by Get Folder Items and others
 type Item struct {
 Type string `json:"type"`
 ID string `json:"id"`
-SequenceID int64 `json:"sequence_id,string"`
+SequenceID string `json:"sequence_id"`
 Etag string `json:"etag"`
 SHA1 string `json:"sha1"`
 Name string `json:"name"`
 Size float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
 CreatedAt Time `json:"created_at"`
 ModifiedAt Time `json:"modified_at"`
 ContentCreatedAt Time `json:"content_created_at"`
 ContentModifiedAt Time `json:"content_modified_at"`
 ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
-Parent ItemMini `json:"parent"`
 SharedLink struct {
 URL string `json:"url,omitempty"`
 Access string `json:"access,omitempty"`
 } `json:"shared_link"`
-OwnedBy struct {
-Type string `json:"type"`
-ID string `json:"id"`
-Name string `json:"name"`
-Login string `json:"login"`
-} `json:"owned_by"`
 }

 // ModTime returns the modification time of the item
@@ -120,26 +103,14 @@ func (i *Item) ModTime() (t time.Time) {

 // FolderItems is returned from the GetFolderItems call
 type FolderItems struct {
 TotalCount int `json:"total_count"`
 Entries []Item `json:"entries"`
 Offset int `json:"offset"`
 Limit int `json:"limit"`
-NextMarker *string `json:"next_marker,omitempty"`
+Order []struct {
-// There is some confusion about how this is actually
+By string `json:"by"`
-// returned. The []struct has worked for many years, but in
+Direction string `json:"direction"`
-// https://github.com/rclone/rclone/issues/8776 box was
+} `json:"order"`
-// returning it returned not as a list. We don't actually use
-// this so comment it out.
-//
-// Order struct {
-// By string `json:"by"`
-// Direction string `json:"direction"`
-// } `json:"order"`
-//
-// Order []struct {
-// By string `json:"by"`
-// Direction string `json:"direction"`
-// } `json:"order"`
 }

 // Parent defined the ID of the parent directory
@@ -161,26 +132,6 @@ type UploadFile struct {
 ContentModifiedAt Time `json:"content_modified_at"`
 }

-// PreUploadCheck is the request for upload preflight check
-type PreUploadCheck struct {
-Name string `json:"name"`
-Parent Parent `json:"parent"`
-Size *int64 `json:"size,omitempty"`
-}
-
-// PreUploadCheckResponse is the response from upload preflight check
-// if successful
-type PreUploadCheckResponse struct {
-UploadToken string `json:"upload_token"`
-UploadURL string `json:"upload_url"`
-}
-
-// PreUploadCheckConflict is returned in the ContextInfo error field
-// from PreUploadCheck when the error code is "item_name_in_use"
-type PreUploadCheckConflict struct {
-Conflicts ItemMini `json:"conflicts"`
-}
-
 // UpdateFileModTime is used in Update File Info
 type UpdateFileModTime struct {
 ContentModifiedAt Time `json:"content_modified_at"`
@@ -282,39 +233,12 @@ type User struct {
 ModifiedAt time.Time `json:"modified_at"`
 Language string `json:"language"`
 Timezone string `json:"timezone"`
-SpaceAmount float64 `json:"space_amount"`
+SpaceAmount int64 `json:"space_amount"`
-SpaceUsed float64 `json:"space_used"`
+SpaceUsed int64 `json:"space_used"`
-MaxUploadSize float64 `json:"max_upload_size"`
+MaxUploadSize int64 `json:"max_upload_size"`
 Status string `json:"status"`
 JobTitle string `json:"job_title"`
 Phone string `json:"phone"`
 Address string `json:"address"`
 AvatarURL string `json:"avatar_url"`
 }

-// FileTreeChangeEventTypes are the events that can require cache invalidation
-var FileTreeChangeEventTypes = map[string]struct{}{
-"ITEM_COPY": {},
-"ITEM_CREATE": {},
-"ITEM_MAKE_CURRENT_VERSION": {},
-"ITEM_MODIFY": {},
-"ITEM_MOVE": {},
-"ITEM_RENAME": {},
-"ITEM_TRASH": {},
-"ITEM_UNDELETE_VIA_TRASH": {},
-"ITEM_UPLOAD": {},
-}
-
-// Event is an array element in the response returned from /events
-type Event struct {
-EventType string `json:"event_type"`
-EventID string `json:"event_id"`
-Source Item `json:"source"`
-}
-
-// Events is returned from /events
-type Events struct {
-ChunkSize int64 `json:"chunk_size"`
-Entries []Event `json:"entries"`
-NextStreamPosition int64 `json:"next_stream_position"`
-}
(File diff suppressed because it is too large)
@@ -1,4 +1,4 @@
-// multipart upload for box
+// multpart upload for box

 package box

@@ -8,7 +8,6 @@ import (
 "crypto/sha1"
 "encoding/base64"
 "encoding/json"
-"errors"
 "fmt"
 "io"
 "net/http"
@@ -16,10 +15,10 @@ import (
 "sync"
 "time"

+"github.com/pkg/errors"
 "github.com/rclone/rclone/backend/box/api"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/accounting"
-"github.com/rclone/rclone/lib/atexit"
 "github.com/rclone/rclone/lib/rest"
 )

@@ -44,7 +43,7 @@ func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID stri
 var resp *http.Response
 err = o.fs.pacer.Call(func() (bool, error) {
 resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, &response)
-return shouldRetry(ctx, resp, err)
+return shouldRetry(resp, err)
 })
 return
 }
@@ -74,7 +73,7 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
 err = o.fs.pacer.Call(func() (bool, error) {
 opts.Body = wrap(bytes.NewReader(chunk))
 resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &response)
-return shouldRetry(ctx, resp, err)
+return shouldRetry(resp, err)
 })
 if err != nil {
 return nil, err
@@ -105,14 +104,14 @@ func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api
 const defaultDelay = 10
 var tries int
 outer:
-for tries = range maxTries {
+for tries = 0; tries < maxTries; tries++ {
 err = o.fs.pacer.Call(func() (bool, error) {
 resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
 if err != nil {
-return shouldRetry(ctx, resp, err)
+return shouldRetry(resp, err)
 }
 body, err = rest.ReadBody(resp)
-return shouldRetry(ctx, resp, err)
+return shouldRetry(resp, err)
 })
 delay := defaultDelay
 var why string
@@ -140,7 +139,7 @@ outer:
 }
 }
 default:
-return nil, fmt.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
+return nil, errors.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
 }
 }
 fs.Debugf(o, "commit multipart upload failed %d/%d - trying again in %d seconds (%s)", tries+1, maxTries, delay, why)
@@ -151,7 +150,7 @@ outer:
 }
 err = json.Unmarshal(body, &result)
 if err != nil {
-return nil, fmt.Errorf("couldn't decode commit response: %q: %w", body, err)
+return nil, errors.Wrapf(err, "couldn't decode commit response: %q", body)
 }
 return result, nil
 }
@@ -167,7 +166,7 @@ func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error)
 var resp *http.Response
 err = o.fs.pacer.Call(func() (bool, error) {
 resp, err = o.fs.srv.Call(ctx, &opts)
-return shouldRetry(ctx, resp, err)
+return shouldRetry(resp, err)
 })
 return err
 }
@@ -177,19 +176,21 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
 // Create upload session
 session, err := o.createUploadSession(ctx, leaf, directoryID, size)
 if err != nil {
-return fmt.Errorf("multipart upload create session failed: %w", err)
+return errors.Wrap(err, "multipart upload create session failed")
 }
 chunkSize := session.PartSize
 fs.Debugf(o, "Multipart upload session started for %d parts of size %v", session.TotalParts, fs.SizeSuffix(chunkSize))

 // Cancel the session if something went wrong
-defer atexit.OnError(&err, func() {
+defer func() {
-fs.Debugf(o, "Cancelling multipart upload: %v", err)
+if err != nil {
-cancelErr := o.abortUpload(ctx, session.ID)
+fs.Debugf(o, "Cancelling multipart upload: %v", err)
-if cancelErr != nil {
+cancelErr := o.abortUpload(ctx, session.ID)
-fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
+if cancelErr != nil {
+fs.Logf(o, "Failed to cancel multipart upload: %v", err)
+}
 }
-})()
+}()

 // unwrap the accounting from the input, we use wrap to put it
 // back on after the buffering
@@ -203,7 +204,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
 errs := make(chan error, 1)
 var wg sync.WaitGroup
 outer:
-for part := range session.TotalParts {
+for part := 0; part < session.TotalParts; part++ {
 // Check any errors
 select {
 case err = <-errs:
@@ -211,7 +212,10 @@ outer:
 default:
 }

-reqSize := min(remaining, chunkSize)
+reqSize := remaining
+if reqSize >= chunkSize {
+reqSize = chunkSize
+}

 // Make a block of memory
 buf := make([]byte, reqSize)
@@ -219,7 +223,7 @@ outer:
 // Read the chunk
 _, err = io.ReadFull(in, buf)
 if err != nil {
-err = fmt.Errorf("multipart upload failed to read source: %w", err)
+err = errors.Wrap(err, "multipart upload failed to read source")
 break outer
 }

@@ -235,7 +239,7 @@ outer:
 fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
 partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap, options...)
 if err != nil {
-err = fmt.Errorf("multipart upload failed to upload part: %w", err)
+err = errors.Wrap(err, "multipart upload failed to upload part")
 select {
 case errs <- err:
 default:
@@ -263,11 +267,11 @@ outer:
 // Finalise the upload session
 result, err := o.commitUpload(ctx, session.ID, parts, modTime, hash.Sum(nil))
 if err != nil {
-return fmt.Errorf("multipart upload failed to finalize: %w", err)
+return errors.Wrap(err, "multipart upload failed to finalize")
 }

 if result.TotalCount != 1 || len(result.Entries) != 1 {
-return fmt.Errorf("multipart upload failed %v - not sure why", o)
+return errors.Errorf("multipart upload failed %v - not sure why", o)
 }
 return o.setMetaData(&result.Entries[0])
 }
227 backend/cache/cache.go vendored
@@ -1,11 +1,9 @@
-//go:build !plan9 && !js
+// +build !plan9

-// Package cache implements a virtual provider to cache existing remotes.
 package cache

 import (
 "context"
-"errors"
 "fmt"
 "io"
 "math"
@@ -20,6 +18,7 @@ import (
 "syscall"
 "time"

+"github.com/pkg/errors"
 "github.com/rclone/rclone/backend/crypt"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/cache"
@@ -29,7 +28,6 @@ import (
 "github.com/rclone/rclone/fs/config/obscure"
 "github.com/rclone/rclone/fs/fspath"
 "github.com/rclone/rclone/fs/hash"
-"github.com/rclone/rclone/fs/list"
 "github.com/rclone/rclone/fs/rc"
 "github.com/rclone/rclone/fs/walk"
 "github.com/rclone/rclone/lib/atexit"
@@ -70,28 +68,26 @@ func init() {
 CommandHelp: commandHelp,
 Options: []fs.Option{{
 Name: "remote",
-Help: "Remote to cache.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
+Help: "Remote to cache.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
 Required: true,
 }, {
 Name: "plex_url",
-Help: "The URL of the Plex server.",
+Help: "The URL of the Plex server",
 }, {
 Name: "plex_username",
-Help: "The username of the Plex user.",
+Help: "The username of the Plex user",
-Sensitive: true,
 }, {
 Name: "plex_password",
-Help: "The password of the Plex user.",
+Help: "The password of the Plex user",
 IsPassword: true,
 }, {
 Name: "plex_token",
-Help: "The plex token for authentication - auto set normally.",
+Help: "The plex token for authentication - auto set normally",
 Hide: fs.OptionHideBoth,
 Advanced: true,
-Sensitive: true,
 }, {
 Name: "plex_insecure",
-Help: "Skip all certificate verification when connecting to the Plex server.",
+Help: "Skip all certificate verification when connecting to the Plex server",
 Advanced: true,
 }, {
 Name: "chunk_size",
@@ -102,18 +98,18 @@ changed, any downloaded chunks will be invalid and cache-chunk-path
 will need to be cleared or unexpected EOF errors will occur.`,
 Default: DefCacheChunkSize,
 Examples: []fs.OptionExample{{
-Value: "1M",
+Value: "1m",
-Help: "1 MiB",
+Help: "1MB",
 }, {
 Value: "5M",
-Help: "5 MiB",
+Help: "5 MB",
 }, {
 Value: "10M",
-Help: "10 MiB",
+Help: "10 MB",
 }},
 }, {
 Name: "info_age",
-Help: `How long to cache file structure information (directory listings, file size, times, etc.).
+Help: `How long to cache file structure information (directory listings, file size, times etc).
 If all write operations are done through the cache then you can safely make
 this value very large as the cache store will also be updated in real time.`,
 Default: DefCacheInfoAge,
@@ -136,22 +132,22 @@ oldest chunks until it goes under this value.`,
 Default: DefCacheTotalChunkSize,
 Examples: []fs.OptionExample{{
 Value: "500M",
-Help: "500 MiB",
+Help: "500 MB",
 }, {
 Value: "1G",
-Help: "1 GiB",
+Help: "1 GB",
 }, {
 Value: "10G",
-Help: "10 GiB",
+Help: "10 GB",
 }},
 }, {
 Name: "db_path",
-Default: filepath.Join(config.GetCacheDir(), "cache-backend"),
+Default: filepath.Join(config.CacheDir, "cache-backend"),
-Help: "Directory to store file structure metadata DB.\n\nThe remote name is used as the DB file name.",
+Help: "Directory to store file structure metadata DB.\nThe remote name is used as the DB file name.",
 Advanced: true,
 }, {
 Name: "chunk_path",
-Default: filepath.Join(config.GetCacheDir(), "cache-backend"),
+Default: filepath.Join(config.CacheDir, "cache-backend"),
 Help: `Directory to cache chunk files.

 Path to where partial file data (chunks) are stored locally. The remote
@@ -171,7 +167,6 @@ then "--cache-chunk-path" will use the same path as "--cache-db-path".`,
 Name: "chunk_clean_interval",
 Default: DefCacheChunkCleanInterval,
 Help: `How often should the cache perform cleanups of the chunk storage.
-
 The default value should be ok for most people. If you find that the
 cache goes over "cache-chunk-total-size" too often then try to lower
 this value to force it to perform cleanups more often.`,
@@ -225,7 +220,7 @@ available on the local machine.`,
 }, {
 Name: "rps",
 Default: int(DefCacheRps),
-Help: `Limits the number of requests per second to the source FS (-1 to disable).
+Help: `Limits the number of requests per second to the source FS (-1 to disable)

 This setting places a hard limit on the number of requests per second
 that cache will be doing to the cloud provider remote and try to
@@ -246,7 +241,7 @@ still pass.`,
 }, {
 Name: "writes",
 Default: DefCacheWrites,
-Help: `Cache file data on writes through the FS.
+Help: `Cache file data on writes through the FS

 If you need to read files immediately after you upload them through
 cache you can enable this flag to have their data stored in the
@@ -267,7 +262,7 @@ provider`,
 }, {
 Name: "tmp_wait_time",
 Default: DefCacheTmpWaitTime,
-Help: `How long should files be stored in local cache before being uploaded.
+Help: `How long should files be stored in local cache before being uploaded

 This is the duration that a file must wait in the temporary location
 _cache-tmp-upload-path_ before it is selected for upload.
@@ -278,7 +273,7 @@ to start the upload if a queue formed for this purpose.`,
 }, {
 Name: "db_wait_time",
 Default: DefCacheDbWaitTime,
-Help: `How long to wait for the DB to be available - 0 is unlimited.
+Help: `How long to wait for the DB to be available - 0 is unlimited

 Only one process can have the DB open at any one time, so rclone waits
 for this duration for the DB to become available before it gives an
@@ -344,14 +339,8 @@ func parseRootPath(path string) (string, error) {
 return strings.Trim(path, "/"), nil
 }

-var warnDeprecated sync.Once
-
 // NewFs constructs an Fs from the path, container:path
-func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
+func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
-warnDeprecated.Do(func() {
-fs.Logf(nil, "WARNING: Cache backend is deprecated and may be removed in future. Please use VFS instead.")
-})
-
 // Parse config into Options struct
 opt := new(Options)
 err := configstruct.Set(m, opt)
@@ -359,7 +348,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 return nil, err
 }
 if opt.ChunkTotalSize < opt.ChunkSize*fs.SizeSuffix(opt.TotalWorkers) {
-return nil, fmt.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
+return nil, errors.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
 opt.ChunkTotalSize, opt.ChunkSize, opt.TotalWorkers)
 }

@@ -369,13 +358,18 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F

 rpath, err := parseRootPath(rootPath)
 if err != nil {
-return nil, fmt.Errorf("failed to clean root path %q: %w", rootPath, err)
+return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
 }

-remotePath := fspath.JoinRootPath(opt.Remote, rootPath)
+wInfo, wName, wPath, wConfig, err := fs.ConfigFs(opt.Remote)
-wrappedFs, wrapErr := cache.Get(ctx, remotePath)
+if err != nil {
+return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", opt.Remote)
+}
+
+remotePath := fspath.JoinRootPath(wPath, rootPath)
+wrappedFs, wrapErr := wInfo.NewFs(wName, remotePath, wConfig)
 if wrapErr != nil && wrapErr != fs.ErrorIsFile {
-return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remotePath, wrapErr)
+return nil, errors.Wrapf(wrapErr, "failed to make remote %s:%s to wrap", wName, remotePath)
 }
 var fsErr error
 fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
@@ -396,30 +390,27 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 cleanupChan: make(chan bool, 1),
 notifiedRemotes: make(map[string]bool),
 }
-cache.PinUntilFinalized(f.Fs, f)
+f.rateLimiter = rate.NewLimiter(rate.Limit(float64(opt.Rps)), opt.TotalWorkers)
-rps := rate.Inf
-if opt.Rps > 0 {
-rps = rate.Limit(float64(opt.Rps))
-}
-f.rateLimiter = rate.NewLimiter(rps, opt.TotalWorkers)

 f.plexConnector = &plexConnector{}
 if opt.PlexURL != "" {
 if opt.PlexToken != "" {
 f.plexConnector, err = newPlexConnectorWithToken(f, opt.PlexURL, opt.PlexToken, opt.PlexInsecure)
 if err != nil {
-return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
+return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
 }
-} else if opt.PlexPassword != "" && opt.PlexUsername != "" {
+} else {
-decPass, err := obscure.Reveal(opt.PlexPassword)
+if opt.PlexPassword != "" && opt.PlexUsername != "" {
-if err != nil {
+decPass, err := obscure.Reveal(opt.PlexPassword)
-decPass = opt.PlexPassword
+if err != nil {
-}
+decPass = opt.PlexPassword
-f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
+}
-m.Set("plex_token", token)
+f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
-})
+m.Set("plex_token", token)
-if err != nil {
+})
-return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
+if err != nil {
+return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
+}
 }
 }
 }
@@ -427,8 +418,8 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 dbPath := f.opt.DbPath
 chunkPath := f.opt.ChunkPath
 // if the dbPath is non default but the chunk path is default, we overwrite the last to follow the same one as dbPath
-if dbPath != filepath.Join(config.GetCacheDir(), "cache-backend") &&
+if dbPath != filepath.Join(config.CacheDir, "cache-backend") &&
-chunkPath == filepath.Join(config.GetCacheDir(), "cache-backend") {
+chunkPath == filepath.Join(config.CacheDir, "cache-backend") {
 chunkPath = dbPath
 }
 if filepath.Ext(dbPath) != "" {
@@ -439,11 +430,11 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 }
 err = os.MkdirAll(dbPath, os.ModePerm)
 if err != nil {
-return nil, fmt.Errorf("failed to create cache directory %v: %w", dbPath, err)
+return nil, errors.Wrapf(err, "failed to create cache directory %v", dbPath)
 }
 err = os.MkdirAll(chunkPath, os.ModePerm)
 if err != nil {
-return nil, fmt.Errorf("failed to create cache directory %v: %w", chunkPath, err)
+return nil, errors.Wrapf(err, "failed to create cache directory %v", chunkPath)
 }

 dbPath = filepath.Join(dbPath, name+".db")
@@ -455,7 +446,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 DbWaitTime: time.Duration(opt.DbWaitTime),
 })
 if err != nil {
-return nil, fmt.Errorf("failed to start cache db: %w", err)
+return nil, errors.Wrapf(err, "failed to start cache db")
 }
 // Trap SIGINT and SIGTERM to close the DB handle gracefully
 c := make(chan os.Signal, 1)
@@ -489,12 +480,12 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 if f.opt.TempWritePath != "" {
 err = os.MkdirAll(f.opt.TempWritePath, os.ModePerm)
 if err != nil {
-return nil, fmt.Errorf("failed to create cache directory %v: %w", f.opt.TempWritePath, err)
+return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
 }
 f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
-f.tempFs, err = cache.Get(ctx, f.opt.TempWritePath)
+f.tempFs, err = cache.Get(f.opt.TempWritePath)
 if err != nil {
-return nil, fmt.Errorf("failed to create temp fs: %w", err)
+return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
 }
 fs.Infof(name, "Upload Temp Rest Time: %v", f.opt.TempWaitTime)
 fs.Infof(name, "Upload Temp FS: %v", f.opt.TempWritePath)
@@ -519,13 +510,13 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
 pollInterval := make(chan time.Duration, 1)
 pollInterval <- time.Duration(f.opt.ChunkCleanInterval)
-doChangeNotify(ctx, f.receiveChangeNotify, pollInterval)
+doChangeNotify(context.Background(), f.receiveChangeNotify, pollInterval)
 }

 f.features = (&fs.Features{
 CanHaveEmptyDirectories: true,
 DuplicateFiles: false, // storage doesn't permit this
-}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
+}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
 // override only those features that use a temp fs and it doesn't support them
 //f.features.ChangeNotify = f.ChangeNotify
 if f.opt.TempWritePath != "" {
@@ -594,7 +585,7 @@ Some valid examples are:
 "0:10" -> the first ten chunks

 Any parameter with a key that starts with "file" can be used to
-specify files to fetch, e.g.
+specify files to fetch, eg

 rclone rc cache/fetch chunks=0 file=hello file2=home/goodbye

@@ -611,7 +602,7 @@ func (f *Fs) httpStats(ctx context.Context, in rc.Params) (out rc.Params, err er
 out = make(rc.Params)
 m, err := f.Stats()
 if err != nil {
-return out, fmt.Errorf("error while getting cache stats")
+return out, errors.Errorf("error while getting cache stats")
 }
 out["status"] = "ok"
 out["stats"] = m
@@ -638,7 +629,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
 out = make(rc.Params)
 remoteInt, ok := in["remote"]
 if !ok {
-return out, fmt.Errorf("remote is needed")
+return out, errors.Errorf("remote is needed")
 }
 remote := remoteInt.(string)
 withData := false
@@ -649,7 +640,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,

 remote = f.unwrapRemote(remote)
 if !f.cache.HasEntry(path.Join(f.Root(), remote)) {
-return out, fmt.Errorf("%s doesn't exist in cache", remote)
+return out, errors.Errorf("%s doesn't exist in cache", remote)
 }

 co := NewObject(f, remote)
@@ -658,7 +649,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
 cd := NewDirectory(f, remote)
 err := f.cache.ExpireDir(cd)
 if err != nil {
-return out, fmt.Errorf("error expiring directory: %w", err)
+return out, errors.WithMessage(err, "error expiring directory")
 }
 // notify vfs too
 f.notifyChangeUpstream(cd.Remote(), fs.EntryDirectory)
@@ -669,7 +660,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
 // expire the entry
 err = f.cache.ExpireObject(co, withData)
 if err != nil {
-return out, fmt.Errorf("error expiring file: %w", err)
+return out, errors.WithMessage(err, "error expiring file")
 }
 // notify vfs too
 f.notifyChangeUpstream(co.Remote(), fs.EntryObject)
@@ -684,30 +675,30 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
 start, end int64
 }
 parseChunks := func(ranges string) (crs []chunkRange, err error) {
-for part := range strings.SplitSeq(ranges, ",") {
+for _, part := range strings.Split(ranges, ",") {
 var start, end int64 = 0, math.MaxInt64
 switch ints := strings.Split(part, ":"); len(ints) {
 case 1:
 start, err = strconv.ParseInt(ints[0], 10, 64)
 if err != nil {
-return nil, fmt.Errorf("invalid range: %q", part)
+return nil, errors.Errorf("invalid range: %q", part)
 }
 end = start + 1
 case 2:
 if ints[0] != "" {
 start, err = strconv.ParseInt(ints[0], 10, 64)
 if err != nil {
-return nil, fmt.Errorf("invalid range: %q", part)
+return nil, errors.Errorf("invalid range: %q", part)
 }
 }
 if ints[1] != "" {
 end, err = strconv.ParseInt(ints[1], 10, 64)
 if err != nil {
-return nil, fmt.Errorf("invalid range: %q", part)
+return nil, errors.Errorf("invalid range: %q", part)
 }
 }
 default:
-return nil, fmt.Errorf("invalid range: %q", part)
+return nil, errors.Errorf("invalid range: %q", part)
 }
 crs = append(crs, chunkRange{start: start, end: end})
 }
@@ -762,18 +753,18 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
 delete(in, "chunks")
 crs, err := parseChunks(s)
 if err != nil {
-return nil, fmt.Errorf("invalid chunks parameter: %w", err)
+return nil, errors.Wrap(err, "invalid chunks parameter")
 }
 var files [][2]string
 for k, v := range in {
 if !strings.HasPrefix(k, "file") {
-return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
+return nil, errors.Errorf("invalid parameter %s=%s", k, v)
 }
 switch v := v.(type) {
 case string:
 files = append(files, [2]string{v, f.unwrapRemote(v)})
 default:
-return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
+return nil, errors.Errorf("invalid parameter %s=%s", k, v)
 }
 }
 type fileStatus struct {
@@ -1038,7 +1029,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 }
 fs.Debugf(dir, "list: remove entry: %v", entryRemote)
 }
-entries = nil //nolint:ineffassign
+entries = nil

 // and then iterate over the ones from source (temp Objects will override source ones)
 var batchDirectories []*Directory
@@ -1087,13 +1078,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 return cachedEntries, nil
 }

-func (f *Fs) recurse(ctx context.Context, dir string, list *list.Helper) error {
+func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error {
 entries, err := f.List(ctx, dir)
 if err != nil {
 return err
 }

-for i := range entries {
+for i := 0; i < len(entries); i++ {
 innerDir, ok := entries[i].(fs.Directory)
 if ok {
 err := f.recurse(ctx, innerDir.Remote(), list)
@@ -1129,7 +1120,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 case fs.Directory:
 _ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o))
 default:
-return fmt.Errorf("unknown object type %T", entry)
+return errors.Errorf("Unknown object type %T", entry)
 }
 }

@@ -1139,7 +1130,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
|
|||||||
}
|
}
|
||||||
|
|
||||||
// if we're here, we're gonna do a standard recursive traversal and cache everything
|
// if we're here, we're gonna do a standard recursive traversal and cache everything
|
||||||
list := list.NewHelper(callback)
|
list := walk.NewListRHelper(callback)
|
||||||
err = f.recurse(ctx, dir, list)
|
err = f.recurse(ctx, dir, list)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -1249,7 +1240,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||||
// using server-side move operations.
|
// using server side move operations.
|
||||||
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
|
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
|
||||||
fs.Debugf(f, "move dir '%s'/'%s' -> '%s'/'%s'", src.Root(), srcRemote, f.Root(), dstRemote)
|
fs.Debugf(f, "move dir '%s'/'%s' -> '%s'/'%s'", src.Root(), srcRemote, f.Root(), dstRemote)
|
||||||
|
|
||||||
@@ -1429,7 +1420,7 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
// wait until both are done
|
// wait until both are done
|
||||||
for range 2 {
|
for c := 0; c < 2; c++ {
|
||||||
<-done
|
<-done
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1530,7 +1521,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
|
|||||||
return f.put(ctx, in, src, options, do)
|
return f.put(ctx, in, src, options, do)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copy src to this remote using server-side copy operations.
|
// Copy src to this remote using server side copy operations.
|
||||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||||
fs.Debugf(f, "copy obj '%s' -> '%s'", src, remote)
|
fs.Debugf(f, "copy obj '%s' -> '%s'", src, remote)
|
||||||
|
|
||||||
@@ -1607,7 +1598,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
return co, nil
|
return co, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Move src to this remote using server-side move operations.
|
// Move src to this remote using server side move operations.
|
||||||
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||||
fs.Debugf(f, "moving obj '%s' -> %s", src, remote)
|
fs.Debugf(f, "moving obj '%s' -> %s", src, remote)
|
||||||
|
|
||||||
@@ -1711,20 +1702,17 @@ func (f *Fs) Hashes() hash.Set {
|
|||||||
return f.Fs.Hashes()
|
return f.Fs.Hashes()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Purge all files in the directory
|
// Purge all files in the root and the root directory
|
||||||
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
func (f *Fs) Purge(ctx context.Context) error {
|
||||||
if dir == "" {
|
fs.Infof(f, "purging cache")
|
||||||
// FIXME this isn't quite right as it should purge the dir prefix
|
f.cache.Purge()
|
||||||
fs.Infof(f, "purging cache")
|
|
||||||
f.cache.Purge()
|
|
||||||
}
|
|
||||||
|
|
||||||
do := f.Fs.Features().Purge
|
do := f.Fs.Features().Purge
|
||||||
if do == nil {
|
if do == nil {
|
||||||
return fs.ErrorCantPurge
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
err := do(ctx, dir)
|
err := do(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -1748,13 +1736,13 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
|||||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||||
do := f.Fs.Features().About
|
do := f.Fs.Features().About
|
||||||
if do == nil {
|
if do == nil {
|
||||||
return nil, errors.New("not supported by underlying remote")
|
return nil, errors.New("About not supported")
|
||||||
}
|
}
|
||||||
return do(ctx)
|
return do(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stats returns stats about the cache storage
|
// Stats returns stats about the cache storage
|
||||||
func (f *Fs) Stats() (map[string]map[string]any, error) {
|
func (f *Fs) Stats() (map[string]map[string]interface{}, error) {
|
||||||
return f.cache.Stats()
|
return f.cache.Stats()
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1787,7 +1775,7 @@ func (f *Fs) CleanUpCache(ignoreLastTs bool) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// StopBackgroundRunners will signal all the runners to stop their work
|
// StopBackgroundRunners will signall all the runners to stop their work
|
||||||
// can be triggered from a terminate signal or from testing between runs
|
// can be triggered from a terminate signal or from testing between runs
|
||||||
func (f *Fs) StopBackgroundRunners() {
|
func (f *Fs) StopBackgroundRunners() {
|
||||||
f.cleanupChan <- false
|
f.cleanupChan <- false
|
||||||
@@ -1841,19 +1829,6 @@ func (f *Fs) isRootInPath(p string) bool {
|
|||||||
return strings.HasPrefix(p, f.Root()+"/")
|
return strings.HasPrefix(p, f.Root()+"/")
|
||||||
}
|
}
|
||||||
|
|
||||||
// MergeDirs merges the contents of all the directories passed
|
|
||||||
// in into the first one and rmdirs the other directories.
|
|
||||||
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
|
|
||||||
do := f.Fs.Features().MergeDirs
|
|
||||||
if do == nil {
|
|
||||||
return errors.New("MergeDirs not supported")
|
|
||||||
}
|
|
||||||
for _, dir := range dirs {
|
|
||||||
_ = f.cache.RemoveDir(dir.Remote())
|
|
||||||
}
|
|
||||||
return do(ctx, dirs)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DirCacheFlush flushes the dir cache
|
// DirCacheFlush flushes the dir cache
|
||||||
func (f *Fs) DirCacheFlush() {
|
func (f *Fs) DirCacheFlush() {
|
||||||
_ = f.cache.RemoveDir("")
|
_ = f.cache.RemoveDir("")
|
||||||
@@ -1908,16 +1883,6 @@ func (f *Fs) Disconnect(ctx context.Context) error {
|
|||||||
return do(ctx)
|
return do(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Shutdown the backend, closing any background tasks and any
|
|
||||||
// cached connections.
|
|
||||||
func (f *Fs) Shutdown(ctx context.Context) error {
|
|
||||||
do := f.Fs.Features().Shutdown
|
|
||||||
if do == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return do(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
var commandHelp = []fs.CommandHelp{
|
var commandHelp = []fs.CommandHelp{
|
||||||
{
|
{
|
||||||
Name: "stats",
|
Name: "stats",
|
||||||
@@ -1934,7 +1899,7 @@ var commandHelp = []fs.CommandHelp{
|
|||||||
// The result should be capable of being JSON encoded
|
// The result should be capable of being JSON encoded
|
||||||
// If it is a string or a []string it will be shown to the user
|
// If it is a string or a []string it will be shown to the user
|
||||||
// otherwise it will be JSON encoded and shown to the user like that
|
// otherwise it will be JSON encoded and shown to the user like that
|
||||||
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (any, error) {
|
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
|
||||||
switch name {
|
switch name {
|
||||||
case "stats":
|
case "stats":
|
||||||
return f.Stats()
|
return f.Stats()
|
||||||
@@ -1961,6 +1926,4 @@ var (
|
|||||||
_ fs.UserInfoer = (*Fs)(nil)
|
_ fs.UserInfoer = (*Fs)(nil)
|
||||||
_ fs.Disconnecter = (*Fs)(nil)
|
_ fs.Disconnecter = (*Fs)(nil)
|
||||||
_ fs.Commander = (*Fs)(nil)
|
_ fs.Commander = (*Fs)(nil)
|
||||||
_ fs.MergeDirser = (*Fs)(nil)
|
|
||||||
_ fs.Shutdowner = (*Fs)(nil)
|
|
||||||
)
|
)
|
||||||
601 backend/cache/cache_internal_test.go (vendored): file diff suppressed because it is too large
21 backend/cache/cache_mount_other_test.go (vendored, new file)
@@ -0,0 +1,21 @@
+// +build !linux !go1.13
+// +build !darwin !go1.13
+// +build !freebsd !go1.13
+// +build !windows
+// +build !race
+
+package cache_test
+
+import (
+	"testing"
+
+	"github.com/rclone/rclone/fs"
+)
+
+func (r *run) mountFs(t *testing.T, f fs.Fs) {
+	panic("mountFs not defined for this platform")
+}
+
+func (r *run) unmountFs(t *testing.T, f fs.Fs) {
+	panic("unmountFs not defined for this platform")
+}
79 backend/cache/cache_mount_unix_test.go (vendored, new file)
@@ -0,0 +1,79 @@
|
|||||||
|
// +build linux,go1.13 darwin,go1.13 freebsd,go1.13
|
||||||
|
// +build !race
|
||||||
|
|
||||||
|
package cache_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"bazil.org/fuse"
|
||||||
|
fusefs "bazil.org/fuse/fs"
|
||||||
|
"github.com/rclone/rclone/cmd/mount"
|
||||||
|
"github.com/rclone/rclone/cmd/mountlib"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (r *run) mountFs(t *testing.T, f fs.Fs) {
|
||||||
|
device := f.Name() + ":" + f.Root()
|
||||||
|
var options = []fuse.MountOption{
|
||||||
|
fuse.MaxReadahead(uint32(mountlib.MaxReadAhead)),
|
||||||
|
fuse.Subtype("rclone"),
|
||||||
|
fuse.FSName(device), fuse.VolumeName(device),
|
||||||
|
fuse.NoAppleDouble(),
|
||||||
|
fuse.NoAppleXattr(),
|
||||||
|
//fuse.AllowOther(),
|
||||||
|
}
|
||||||
|
err := os.MkdirAll(r.mntDir, os.ModePerm)
|
||||||
|
require.NoError(t, err)
|
||||||
|
c, err := fuse.Mount(r.mntDir, options...)
|
||||||
|
require.NoError(t, err)
|
||||||
|
filesys := mount.NewFS(f)
|
||||||
|
server := fusefs.New(c, nil)
|
||||||
|
|
||||||
|
// Serve the mount point in the background returning error to errChan
|
||||||
|
r.unmountRes = make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
err := server.Serve(filesys)
|
||||||
|
closeErr := c.Close()
|
||||||
|
if err == nil {
|
||||||
|
err = closeErr
|
||||||
|
}
|
||||||
|
r.unmountRes <- err
|
||||||
|
}()
|
||||||
|
|
||||||
|
// check if the mount process has an error to report
|
||||||
|
<-c.Ready
|
||||||
|
require.NoError(t, c.MountError)
|
||||||
|
|
||||||
|
r.unmountFn = func() error {
|
||||||
|
// Shutdown the VFS
|
||||||
|
filesys.VFS.Shutdown()
|
||||||
|
return fuse.Unmount(r.mntDir)
|
||||||
|
}
|
||||||
|
|
||||||
|
r.vfs = filesys.VFS
|
||||||
|
r.isMounted = true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *run) unmountFs(t *testing.T, f fs.Fs) {
|
||||||
|
var err error
|
||||||
|
|
||||||
|
for i := 0; i < 4; i++ {
|
||||||
|
err = r.unmountFn()
|
||||||
|
if err != nil {
|
||||||
|
//log.Printf("signal to umount failed - retrying: %v", err)
|
||||||
|
time.Sleep(3 * time.Second)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
require.NoError(t, err)
|
||||||
|
err = <-r.unmountRes
|
||||||
|
require.NoError(t, err)
|
||||||
|
err = r.vfs.CleanUp()
|
||||||
|
require.NoError(t, err)
|
||||||
|
r.isMounted = false
|
||||||
|
}
|
||||||
125 backend/cache/cache_mount_windows_test.go (vendored, new file)
@@ -0,0 +1,125 @@
|
|||||||
|
// +build windows
|
||||||
|
// +build !race
|
||||||
|
|
||||||
|
package cache_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/billziss-gh/cgofuse/fuse"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/cmd/cmount"
|
||||||
|
"github.com/rclone/rclone/cmd/mountlib"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// waitFor runs fn() until it returns true or the timeout expires
|
||||||
|
func waitFor(fn func() bool) (ok bool) {
|
||||||
|
const totalWait = 10 * time.Second
|
||||||
|
const individualWait = 10 * time.Millisecond
|
||||||
|
for i := 0; i < int(totalWait/individualWait); i++ {
|
||||||
|
ok = fn()
|
||||||
|
if ok {
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
time.Sleep(individualWait)
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *run) mountFs(t *testing.T, f fs.Fs) {
|
||||||
|
// FIXME implement cmount
|
||||||
|
t.Skip("windows not supported yet")
|
||||||
|
|
||||||
|
device := f.Name() + ":" + f.Root()
|
||||||
|
options := []string{
|
||||||
|
"-o", "fsname=" + device,
|
||||||
|
"-o", "subtype=rclone",
|
||||||
|
"-o", fmt.Sprintf("max_readahead=%d", mountlib.MaxReadAhead),
|
||||||
|
"-o", "uid=-1",
|
||||||
|
"-o", "gid=-1",
|
||||||
|
"-o", "allow_other",
|
||||||
|
// This causes FUSE to supply O_TRUNC with the Open
|
||||||
|
// call which is more efficient for cmount. However
|
||||||
|
// it does not work with cgofuse on Windows with
|
||||||
|
// WinFSP so cmount must work with or without it.
|
||||||
|
"-o", "atomic_o_trunc",
|
||||||
|
"--FileSystemName=rclone",
|
||||||
|
}
|
||||||
|
|
||||||
|
fsys := cmount.NewFS(f)
|
||||||
|
host := fuse.NewFileSystemHost(fsys)
|
||||||
|
|
||||||
|
// Serve the mount point in the background returning error to errChan
|
||||||
|
r.unmountRes = make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
var err error
|
||||||
|
ok := host.Mount(r.mntDir, options)
|
||||||
|
if !ok {
|
||||||
|
err = errors.New("mount failed")
|
||||||
|
}
|
||||||
|
r.unmountRes <- err
|
||||||
|
}()
|
||||||
|
|
||||||
|
// unmount
|
||||||
|
r.unmountFn = func() error {
|
||||||
|
// Shutdown the VFS
|
||||||
|
fsys.VFS.Shutdown()
|
||||||
|
if host.Unmount() {
|
||||||
|
if !waitFor(func() bool {
|
||||||
|
_, err := os.Stat(r.mntDir)
|
||||||
|
return err != nil
|
||||||
|
}) {
|
||||||
|
t.Fatalf("mountpoint %q didn't disappear after unmount - continuing anyway", r.mntDir)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return errors.New("host unmount failed")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for the filesystem to become ready, checking the file
|
||||||
|
// system didn't blow up before starting
|
||||||
|
select {
|
||||||
|
case err := <-r.unmountRes:
|
||||||
|
require.NoError(t, err)
|
||||||
|
case <-time.After(time.Second * 3):
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for the mount point to be available on Windows
|
||||||
|
// On Windows the Init signal comes slightly before the mount is ready
|
||||||
|
if !waitFor(func() bool {
|
||||||
|
_, err := os.Stat(r.mntDir)
|
||||||
|
return err == nil
|
||||||
|
}) {
|
||||||
|
t.Errorf("mountpoint %q didn't became available on mount", r.mntDir)
|
||||||
|
}
|
||||||
|
|
||||||
|
r.vfs = fsys.VFS
|
||||||
|
r.isMounted = true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *run) unmountFs(t *testing.T, f fs.Fs) {
|
||||||
|
// FIXME implement cmount
|
||||||
|
t.Skip("windows not supported yet")
|
||||||
|
var err error
|
||||||
|
|
||||||
|
for i := 0; i < 4; i++ {
|
||||||
|
err = r.unmountFn()
|
||||||
|
if err != nil {
|
||||||
|
//log.Printf("signal to umount failed - retrying: %v", err)
|
||||||
|
time.Sleep(3 * time.Second)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
require.NoError(t, err)
|
||||||
|
err = <-r.unmountRes
|
||||||
|
require.NoError(t, err)
|
||||||
|
err = r.vfs.CleanUp()
|
||||||
|
require.NoError(t, err)
|
||||||
|
r.isMounted = false
|
||||||
|
}
|
||||||
14 backend/cache/cache_test.go (vendored)
@@ -1,6 +1,7 @@
 // Test Cache filesystem interface
 
-//go:build !plan9 && !js && !race
+// +build !plan9
+// +build !race
 
 package cache_test
 
@@ -15,11 +16,10 @@ import (
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: "TestCache:",
 		NilObject:  (*cache.Object)(nil),
-		UnimplementableFsMethods:        []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata", "ListP"},
-		UnimplementableObjectMethods:    []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata", "SetMetadata"},
-		UnimplementableDirectoryMethods: []string{"Metadata", "SetMetadata", "SetModTime"},
-		SkipInvalidUTF8:                 true, // invalid UTF-8 confuses the cache
+		UnimplementableFsMethods:     []string{"PublicLink", "MergeDirs", "OpenWriterAt"},
+		UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
+		SkipInvalidUTF8:              true, // invalid UTF-8 confuses the cache
 	})
 }
3 backend/cache/cache_unsupported.go (vendored)
@@ -1,7 +1,6 @@
 // Build for cache for unsupported platforms to stop go complaining
 // about "no buildable Go source files "
 
-//go:build plan9 || js
+// +build plan9
 
-// Package cache implements a virtual provider to cache existing remotes.
 package cache
33 backend/cache/cache_upload_test.go (vendored)
@@ -1,4 +1,5 @@
|
|||||||
//go:build !plan9 && !js && !race
|
// +build !plan9
|
||||||
|
// +build !race
|
||||||
|
|
||||||
package cache_test
|
package cache_test
|
||||||
|
|
||||||
@@ -20,8 +21,10 @@ import (
|
|||||||
|
|
||||||
func TestInternalUploadTempDirCreated(t *testing.T) {
|
func TestInternalUploadTempDirCreated(t *testing.T) {
|
||||||
id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
|
id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
|
||||||
runInstance.newCacheFs(t, remoteName, id, false, true,
|
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
|
||||||
|
nil,
|
||||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)})
|
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)})
|
||||||
|
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||||
|
|
||||||
_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
|
_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -60,7 +63,9 @@ func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltD
|
|||||||
func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
|
func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
|
||||||
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
|
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
|
||||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||||
|
nil,
|
||||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"})
|
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"})
|
||||||
|
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||||
|
|
||||||
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
|
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
|
||||||
}
|
}
|
||||||
@@ -68,15 +73,19 @@ func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
|
|||||||
func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
|
func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
|
||||||
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
|
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
|
||||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||||
|
nil,
|
||||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"})
|
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"})
|
||||||
|
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||||
|
|
||||||
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
|
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestInternalUploadMoveExistingFile(t *testing.T) {
|
func TestInternalUploadMoveExistingFile(t *testing.T) {
|
||||||
id := fmt.Sprintf("tiumef%v", time.Now().Unix())
|
id := fmt.Sprintf("tiumef%v", time.Now().Unix())
|
||||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
|
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||||
|
nil,
|
||||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
|
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
|
||||||
|
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||||
|
|
||||||
err := rootFs.Mkdir(context.Background(), "one")
|
err := rootFs.Mkdir(context.Background(), "one")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -110,8 +119,10 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {
|
|||||||
|
|
||||||
func TestInternalUploadTempPathCleaned(t *testing.T) {
|
func TestInternalUploadTempPathCleaned(t *testing.T) {
|
||||||
id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
|
id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
|
||||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
|
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||||
|
nil,
|
||||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
|
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
|
||||||
|
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||||
|
|
||||||
err := rootFs.Mkdir(context.Background(), "one")
|
err := rootFs.Mkdir(context.Background(), "one")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -151,19 +162,21 @@ func TestInternalUploadTempPathCleaned(t *testing.T) {
|
|||||||
|
|
||||||
func TestInternalUploadQueueMoreFiles(t *testing.T) {
|
func TestInternalUploadQueueMoreFiles(t *testing.T) {
|
||||||
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
|
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
|
||||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
|
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||||
|
nil,
|
||||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
|
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
|
||||||
|
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||||
|
|
||||||
err := rootFs.Mkdir(context.Background(), "test")
|
err := rootFs.Mkdir(context.Background(), "test")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
minSize := 5242880
|
minSize := 5242880
|
||||||
maxSize := 10485760
|
maxSize := 10485760
|
||||||
totalFiles := 10
|
totalFiles := 10
|
||||||
randInstance := rand.New(rand.NewSource(time.Now().Unix()))
|
rand.Seed(time.Now().Unix())
|
||||||
|
|
||||||
lastFile := ""
|
lastFile := ""
|
||||||
for i := range totalFiles {
|
for i := 0; i < totalFiles; i++ {
|
||||||
size := int64(randInstance.Intn(maxSize-minSize) + minSize)
|
size := int64(rand.Intn(maxSize-minSize) + minSize)
|
||||||
testReader := runInstance.randomReader(t, size)
|
testReader := runInstance.randomReader(t, size)
|
||||||
remote := "test/" + strconv.Itoa(i) + ".bin"
|
remote := "test/" + strconv.Itoa(i) + ".bin"
|
||||||
runInstance.writeRemoteReader(t, rootFs, remote, testReader)
|
runInstance.writeRemoteReader(t, rootFs, remote, testReader)
|
||||||
@@ -200,7 +213,9 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
|
|||||||
func TestInternalUploadTempFileOperations(t *testing.T) {
|
func TestInternalUploadTempFileOperations(t *testing.T) {
|
||||||
id := "tiutfo"
|
id := "tiutfo"
|
||||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||||
|
nil,
|
||||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
|
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
|
||||||
|
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||||
|
|
||||||
boltDb.PurgeTempUploads()
|
boltDb.PurgeTempUploads()
|
||||||
|
|
||||||
@@ -328,7 +343,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
|
|||||||
func TestInternalUploadUploadingFileOperations(t *testing.T) {
|
func TestInternalUploadUploadingFileOperations(t *testing.T) {
|
||||||
id := "tiuufo"
|
id := "tiuufo"
|
||||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||||
|
nil,
|
||||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
|
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
|
||||||
|
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||||
|
|
||||||
boltDb.PurgeTempUploads()
|
boltDb.PurgeTempUploads()
|
||||||
|
|
||||||
2 backend/cache/directory.go (vendored)
@@ -1,4 +1,4 @@
-//go:build !plan9 && !js
+// +build !plan9
 
 package cache
 
24 backend/cache/handle.go (vendored)
@@ -1,10 +1,9 @@
|
|||||||
//go:build !plan9 && !js
|
// +build !plan9
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"path"
|
"path"
|
||||||
@@ -13,6 +12,7 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/operations"
|
"github.com/rclone/rclone/fs/operations"
|
||||||
)
|
)
|
||||||
@@ -118,7 +118,7 @@ func (r *Handle) startReadWorkers() {
|
|||||||
r.scaleWorkers(totalWorkers)
|
r.scaleWorkers(totalWorkers)
|
||||||
}
|
}
|
||||||
|
|
||||||
// scaleWorkers will increase the worker pool count by the provided amount
|
// scaleOutWorkers will increase the worker pool count by the provided amount
|
||||||
func (r *Handle) scaleWorkers(desired int) {
|
func (r *Handle) scaleWorkers(desired int) {
|
||||||
current := r.workers
|
current := r.workers
|
||||||
if current == desired {
|
if current == desired {
|
||||||
@@ -182,7 +182,7 @@ func (r *Handle) queueOffset(offset int64) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := range r.workers {
|
for i := 0; i < r.workers; i++ {
|
||||||
o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
|
o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
|
||||||
if o < 0 || o >= r.cachedObject.Size() {
|
if o < 0 || o >= r.cachedObject.Size() {
|
||||||
continue
|
continue
|
||||||
@@ -208,7 +208,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
|
|||||||
offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)
|
offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)
|
||||||
|
|
||||||
// we align the start offset of the first chunk to a likely chunk in the storage
|
// we align the start offset of the first chunk to a likely chunk in the storage
|
||||||
chunkStart -= offset
|
chunkStart = chunkStart - offset
|
||||||
r.queueOffset(chunkStart)
|
r.queueOffset(chunkStart)
|
||||||
found := false
|
found := false
|
||||||
|
|
||||||
@@ -222,7 +222,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
|
|||||||
if !found {
|
if !found {
|
||||||
// we're gonna give the workers a chance to pickup the chunk
|
// we're gonna give the workers a chance to pickup the chunk
|
||||||
// and retry a couple of times
|
// and retry a couple of times
|
||||||
for i := range r.cacheFs().opt.ReadRetries * 8 {
|
for i := 0; i < r.cacheFs().opt.ReadRetries*8; i++ {
|
||||||
data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
|
data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
found = true
|
found = true
|
||||||
@@ -242,7 +242,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
|
|||||||
return nil, io.ErrUnexpectedEOF
|
return nil, io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil, fmt.Errorf("chunk not found %v", chunkStart)
|
return nil, errors.Errorf("chunk not found %v", chunkStart)
|
||||||
}
|
}
|
||||||
|
|
||||||
// first chunk will be aligned with the start
|
// first chunk will be aligned with the start
|
||||||
@@ -322,12 +322,12 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
|
|||||||
fs.Debugf(r, "moving offset end (%v) from %v to %v", r.cachedObject.Size(), r.offset, r.cachedObject.Size()+offset)
|
fs.Debugf(r, "moving offset end (%v) from %v to %v", r.cachedObject.Size(), r.offset, r.cachedObject.Size()+offset)
|
||||||
r.offset = r.cachedObject.Size() + offset
|
r.offset = r.cachedObject.Size() + offset
|
||||||
default:
|
default:
|
||||||
err = fmt.Errorf("cache: unimplemented seek whence %v", whence)
|
err = errors.Errorf("cache: unimplemented seek whence %v", whence)
|
||||||
}
|
}
|
||||||
|
|
||||||
chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
|
chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
|
||||||
if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
|
if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
|
||||||
chunkStart -= int64(r.cacheFs().opt.ChunkSize)
|
chunkStart = chunkStart - int64(r.cacheFs().opt.ChunkSize)
|
||||||
}
|
}
|
||||||
r.queueOffset(chunkStart)
|
r.queueOffset(chunkStart)
|
||||||
|
|
||||||
@@ -415,8 +415,10 @@ func (w *worker) run() {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
|
} else {
|
||||||
continue
|
if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)
|
chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)
|
||||||
16 backend/cache/object.go (vendored)
@@ -1,15 +1,15 @@
-//go:build !plan9 && !js
+// +build !plan9
 
 package cache
 
 import (
 	"context"
-	"fmt"
 	"io"
 	"path"
 	"sync"
 	"time"
 
+	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/lib/readers"
@@ -177,14 +177,10 @@ func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
 	}
 	if o.isTempFile() {
 		liveObject, err = o.ParentFs.NewObject(ctx, o.Remote())
-		if err != nil {
-			err = fmt.Errorf("in parent fs %v: %w", o.ParentFs, err)
-		}
+		err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
 	} else {
 		liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote())
-		if err != nil {
-			err = fmt.Errorf("in cache fs %v: %w", o.CacheFs.Fs, err)
-		}
+		err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
 	}
 	if err != nil {
 		fs.Errorf(o, "error refreshing object in : %v", err)
@@ -256,7 +252,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	defer o.CacheFs.backgroundRunner.play()
 	// don't allow started uploads
 	if o.isTempFile() && o.tempFileStartedUpload() {
-		return fmt.Errorf("%v is currently uploading, can't update", o)
+		return errors.Errorf("%v is currently uploading, can't update", o)
 	}
 	}
 	fs.Debugf(o, "updating object contents with size %v", src.Size())
@@ -295,7 +291,7 @@ func (o *Object) Remove(ctx context.Context) error {
 	defer o.CacheFs.backgroundRunner.play()
 	// don't allow started uploads
 	if o.isTempFile() && o.tempFileStartedUpload() {
-		return fmt.Errorf("%v is currently uploading, can't delete", o)
+		return errors.Errorf("%v is currently uploading, can't delete", o)
 	}
 	}
 	err := o.Object.Remove(ctx)
16 backend/cache/plex.go (vendored)
@@ -1,4 +1,4 @@
-//go:build !plan9 && !js
+// +build !plan9
 
 package cache
 
@@ -7,7 +7,7 @@ import (
 	"crypto/tls"
 	"encoding/json"
 	"fmt"
-	"io"
+	"io/ioutil"
 	"net/http"
 	"net/url"
 	"strings"
@@ -166,7 +166,7 @@ func (p *plexConnector) listenWebsocket() {
 			continue
 		}
 		var data []byte
-		data, err = io.ReadAll(resp.Body)
+		data, err = ioutil.ReadAll(resp.Body)
 		if err != nil {
 			continue
 		}
@@ -209,10 +209,10 @@ func (p *plexConnector) authenticate() error {
 	if err != nil {
 		return err
 	}
-	var data map[string]any
+	var data map[string]interface{}
 	err = json.NewDecoder(resp.Body).Decode(&data)
 	if err != nil {
-		return fmt.Errorf("failed to obtain token: %w", err)
+		return fmt.Errorf("failed to obtain token: %v", err)
 	}
 	tokenGen, ok := get(data, "user", "authToken")
 	if !ok {
@@ -273,11 +273,11 @@ func (p *plexConnector) isPlaying(co *Object) bool {
 }
 
 // adapted from: https://stackoverflow.com/a/28878037 (credit)
-func get(m any, path ...any) (any, bool) {
+func get(m interface{}, path ...interface{}) (interface{}, bool) {
 	for _, p := range path {
 		switch idx := p.(type) {
 		case string:
-			if mm, ok := m.(map[string]any); ok {
+			if mm, ok := m.(map[string]interface{}); ok {
 				if val, found := mm[idx]; found {
 					m = val
 					continue
@@ -285,7 +285,7 @@ func get(m any, path ...any) (any, bool) {
 			}
 			return nil, false
 		case int:
-			if mm, ok := m.([]any); ok {
+			if mm, ok := m.([]interface{}); ok {
 				if len(mm) > idx {
 					m = mm[idx]
 					continue
11 backend/cache/storage_memory.go (vendored)
@@ -1,14 +1,14 @@
-//go:build !plan9 && !js
+// +build !plan9
 
 package cache
 
 import (
-	"fmt"
 	"strconv"
 	"strings"
 	"time"
 
 	cache "github.com/patrickmn/go-cache"
+	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 )
 
@@ -52,7 +52,7 @@ func (m *Memory) GetChunk(cachedObject *Object, offset int64) ([]byte, error) {
 		return data, nil
 	}
 
-	return nil, fmt.Errorf("couldn't get cached object data at offset %v", offset)
+	return nil, errors.Errorf("couldn't get cached object data at offset %v", offset)
 }
 
 // AddChunk adds a new chunk of a cached object
@@ -75,7 +75,10 @@ func (m *Memory) CleanChunksByAge(chunkAge time.Duration) {
 
 // CleanChunksByNeed will cleanup chunks after the FS passes a specific chunk
 func (m *Memory) CleanChunksByNeed(offset int64) {
-	for key := range m.db.Items() {
+	var items map[string]cache.Item
+
+	items = m.db.Items()
+	for key := range items {
 		sepIdx := strings.LastIndex(key, "-")
 		keyOffset, err := strconv.ParseInt(key[sepIdx+1:], 10, 64)
 		if err != nil {
106 backend/cache/storage_persistent.go (vendored)
@@ -1,4 +1,4 @@
|
|||||||
//go:build !plan9 && !js
|
// +build !plan9
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
@@ -8,6 +8,7 @@ import (
|
|||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"strconv"
|
"strconv"
|
||||||
@@ -15,10 +16,10 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/walk"
|
"github.com/rclone/rclone/fs/walk"
|
||||||
bolt "go.etcd.io/bbolt"
|
bolt "go.etcd.io/bbolt"
|
||||||
"go.etcd.io/bbolt/errors"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Constants
|
// Constants
|
||||||
@@ -118,11 +119,11 @@ func (b *Persistent) connect() error {
|
|||||||
|
|
||||||
err = os.MkdirAll(b.dataPath, os.ModePerm)
|
err = os.MkdirAll(b.dataPath, os.ModePerm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to create a data directory %q: %w", b.dataPath, err)
|
return errors.Wrapf(err, "failed to create a data directory %q", b.dataPath)
|
||||||
}
|
}
|
||||||
b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: b.features.DbWaitTime})
|
b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: b.features.DbWaitTime})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to open a cache connection to %q: %w", b.dbPath, err)
|
return errors.Wrapf(err, "failed to open a cache connection to %q", b.dbPath)
|
||||||
}
|
}
|
||||||
if b.features.PurgeDb {
|
if b.features.PurgeDb {
|
||||||
b.Purge()
|
b.Purge()
|
||||||
@@ -174,7 +175,7 @@ func (b *Persistent) GetDir(remote string) (*Directory, error) {
|
|||||||
err := b.db.View(func(tx *bolt.Tx) error {
|
err := b.db.View(func(tx *bolt.Tx) error {
|
||||||
bucket := b.getBucket(remote, false, tx)
|
bucket := b.getBucket(remote, false, tx)
|
||||||
if bucket == nil {
|
if bucket == nil {
|
||||||
return fmt.Errorf("couldn't open bucket (%v)", remote)
|
return errors.Errorf("couldn't open bucket (%v)", remote)
|
||||||
}
|
}
|
||||||
|
|
||||||
data := bucket.Get([]byte("."))
|
data := bucket.Get([]byte("."))
|
||||||
@@ -182,7 +183,7 @@ func (b *Persistent) GetDir(remote string) (*Directory, error) {
|
|||||||
return json.Unmarshal(data, cd)
|
return json.Unmarshal(data, cd)
|
||||||
}
|
}
|
||||||
|
|
||||||
return fmt.Errorf("%v not found", remote)
|
return errors.Errorf("%v not found", remote)
|
||||||
})
|
})
|
||||||
|
|
||||||
return cd, err
|
return cd, err
|
||||||
@@ -207,7 +208,7 @@ func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error {
|
|||||||
bucket = b.getBucket(cachedDirs[0].Dir, true, tx)
|
bucket = b.getBucket(cachedDirs[0].Dir, true, tx)
|
||||||
}
|
}
|
||||||
if bucket == nil {
|
if bucket == nil {
|
||||||
return fmt.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
|
return errors.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, cachedDir := range cachedDirs {
|
for _, cachedDir := range cachedDirs {
|
||||||
@@ -224,7 +225,7 @@ func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error {
|
|||||||
|
|
||||||
encoded, err := json.Marshal(cachedDir)
|
encoded, err := json.Marshal(cachedDir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
|
return errors.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
|
||||||
}
|
}
|
||||||
err = b.Put([]byte("."), encoded)
|
err = b.Put([]byte("."), encoded)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -242,17 +243,17 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
|
|||||||
err := b.db.View(func(tx *bolt.Tx) error {
|
err := b.db.View(func(tx *bolt.Tx) error {
|
||||||
bucket := b.getBucket(cachedDir.abs(), false, tx)
|
bucket := b.getBucket(cachedDir.abs(), false, tx)
|
||||||
if bucket == nil {
|
if bucket == nil {
|
||||||
return fmt.Errorf("couldn't open bucket (%v)", cachedDir.abs())
|
return errors.Errorf("couldn't open bucket (%v)", cachedDir.abs())
|
||||||
}
|
}
|
||||||
|
|
||||||
val := bucket.Get([]byte("."))
|
val := bucket.Get([]byte("."))
|
||||||
if val != nil {
|
if val != nil {
|
||||||
err := json.Unmarshal(val, cachedDir)
|
err := json.Unmarshal(val, cachedDir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error during unmarshalling obj: %w", err)
|
return errors.Errorf("error during unmarshalling obj: %v", err)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
return fmt.Errorf("missing cached dir: %v", cachedDir)
|
return errors.Errorf("missing cached dir: %v", cachedDir)
|
||||||
}
|
}
|
||||||
|
|
||||||
c := bucket.Cursor()
|
c := bucket.Cursor()
|
||||||
@@ -267,7 +268,7 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
|
|||||||
// we try to find a cached meta for the dir
|
// we try to find a cached meta for the dir
|
||||||
currentBucket := c.Bucket().Bucket(k)
|
currentBucket := c.Bucket().Bucket(k)
|
||||||
if currentBucket == nil {
|
if currentBucket == nil {
|
||||||
return fmt.Errorf("couldn't open bucket (%v)", string(k))
|
return errors.Errorf("couldn't open bucket (%v)", string(k))
|
||||||
}
|
}
|
||||||
|
|
||||||
metaKey := currentBucket.Get([]byte("."))
|
metaKey := currentBucket.Get([]byte("."))
|
||||||
@@ -316,7 +317,7 @@ func (b *Persistent) RemoveDir(fp string) error {
|
|||||||
err = b.db.Update(func(tx *bolt.Tx) error {
|
err = b.db.Update(func(tx *bolt.Tx) error {
|
||||||
bucket := b.getBucket(cleanPath(parentDir), false, tx)
|
bucket := b.getBucket(cleanPath(parentDir), false, tx)
|
||||||
if bucket == nil {
|
if bucket == nil {
|
||||||
return fmt.Errorf("couldn't open bucket (%v)", fp)
|
return errors.Errorf("couldn't open bucket (%v)", fp)
|
||||||
}
|
}
|
||||||
// delete the cached dir
|
// delete the cached dir
|
||||||
err := bucket.DeleteBucket([]byte(cleanPath(dirName)))
|
err := bucket.DeleteBucket([]byte(cleanPath(dirName)))
|
||||||
@@ -376,13 +377,13 @@ func (b *Persistent) GetObject(cachedObject *Object) (err error) {
|
|||||||
return b.db.View(func(tx *bolt.Tx) error {
|
return b.db.View(func(tx *bolt.Tx) error {
|
||||||
bucket := b.getBucket(cachedObject.Dir, false, tx)
|
bucket := b.getBucket(cachedObject.Dir, false, tx)
|
||||||
if bucket == nil {
|
if bucket == nil {
|
||||||
return fmt.Errorf("couldn't open parent bucket for %v", cachedObject.Dir)
|
return errors.Errorf("couldn't open parent bucket for %v", cachedObject.Dir)
|
||||||
}
|
}
|
||||||
val := bucket.Get([]byte(cachedObject.Name))
|
val := bucket.Get([]byte(cachedObject.Name))
|
||||||
if val != nil {
|
if val != nil {
|
||||||
return json.Unmarshal(val, cachedObject)
|
return json.Unmarshal(val, cachedObject)
|
||||||
}
|
}
|
||||||
return fmt.Errorf("couldn't find object (%v)", cachedObject.Name)
|
return errors.Errorf("couldn't find object (%v)", cachedObject.Name)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -391,16 +392,16 @@ func (b *Persistent) AddObject(cachedObject *Object) error {
|
|||||||
return b.db.Update(func(tx *bolt.Tx) error {
|
return b.db.Update(func(tx *bolt.Tx) error {
|
||||||
bucket := b.getBucket(cachedObject.Dir, true, tx)
|
bucket := b.getBucket(cachedObject.Dir, true, tx)
|
||||||
if bucket == nil {
|
if bucket == nil {
|
||||||
return fmt.Errorf("couldn't open parent bucket for %v", cachedObject)
|
return errors.Errorf("couldn't open parent bucket for %v", cachedObject)
|
||||||
}
|
}
|
||||||
// cache Object Info
|
// cache Object Info
|
||||||
encoded, err := json.Marshal(cachedObject)
|
encoded, err := json.Marshal(cachedObject)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
|
return errors.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
|
||||||
}
|
}
|
||||||
err = bucket.Put([]byte(cachedObject.Name), encoded)
|
err = bucket.Put([]byte(cachedObject.Name), encoded)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
|
return errors.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
@@ -412,7 +413,7 @@ func (b *Persistent) RemoveObject(fp string) error {
|
|||||||
return b.db.Update(func(tx *bolt.Tx) error {
|
return b.db.Update(func(tx *bolt.Tx) error {
|
||||||
bucket := b.getBucket(cleanPath(parentDir), false, tx)
|
bucket := b.getBucket(cleanPath(parentDir), false, tx)
|
||||||
if bucket == nil {
|
if bucket == nil {
|
||||||
return fmt.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir))
|
return errors.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir))
|
||||||
}
|
}
|
||||||
err := bucket.Delete([]byte(cleanPath(objName)))
|
err := bucket.Delete([]byte(cleanPath(objName)))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -444,7 +445,7 @@ func (b *Persistent) HasEntry(remote string) bool {
|
|||||||
err := b.db.View(func(tx *bolt.Tx) error {
|
err := b.db.View(func(tx *bolt.Tx) error {
|
||||||
bucket := b.getBucket(dir, false, tx)
|
bucket := b.getBucket(dir, false, tx)
|
||||||
if bucket == nil {
|
if bucket == nil {
|
||||||
return fmt.Errorf("couldn't open parent bucket for %v", remote)
|
return errors.Errorf("couldn't open parent bucket for %v", remote)
|
||||||
}
|
}
|
||||||
if f := bucket.Bucket([]byte(name)); f != nil {
|
if f := bucket.Bucket([]byte(name)); f != nil {
|
||||||
return nil
|
return nil
|
||||||
@@ -453,9 +454,12 @@ func (b *Persistent) HasEntry(remote string) bool {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return fmt.Errorf("couldn't find object (%v)", remote)
|
return errors.Errorf("couldn't find object (%v)", remote)
|
||||||
})
|
})
|
||||||
return err == nil
|
if err == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// HasChunk confirms the existence of a single chunk of an object
|
// HasChunk confirms the existence of a single chunk of an object
|
||||||
@@ -472,7 +476,7 @@ func (b *Persistent) GetChunk(cachedObject *Object, offset int64) ([]byte, error
|
|||||||
var data []byte
|
var data []byte
|
||||||
|
|
||||||
fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10))
|
fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10))
|
||||||
data, err := os.ReadFile(fp)
|
data, err := ioutil.ReadFile(fp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -485,7 +489,7 @@ func (b *Persistent) AddChunk(fp string, data []byte, offset int64) error {
|
|||||||
_ = os.MkdirAll(path.Join(b.dataPath, fp), os.ModePerm)
|
_ = os.MkdirAll(path.Join(b.dataPath, fp), os.ModePerm)
|
||||||
|
 	filePath := path.Join(b.dataPath, fp, strconv.FormatInt(offset, 10))
-	err := os.WriteFile(filePath, data, os.ModePerm)
+	err := ioutil.WriteFile(filePath, data, os.ModePerm)
 	if err != nil {
 		return err
 	}
@@ -550,7 +554,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
 	err := b.db.Update(func(tx *bolt.Tx) error {
 		dataTsBucket := tx.Bucket([]byte(DataTsBucket))
 		if dataTsBucket == nil {
-			return fmt.Errorf("couldn't open (%v) bucket", DataTsBucket)
+			return errors.Errorf("Couldn't open (%v) bucket", DataTsBucket)
 		}
 		// iterate through ts
 		c := dataTsBucket.Cursor()
@@ -598,7 +602,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
 	})

 	if err != nil {
-		if err == errors.ErrDatabaseNotOpen {
+		if err == bolt.ErrDatabaseNotOpen {
 			// we're likely a late janitor and we need to end quietly as there's no guarantee of what exists anymore
 			return
 		}
@@ -607,16 +611,16 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
 }

 // Stats returns a go map with the stats key values
-func (b *Persistent) Stats() (map[string]map[string]any, error) {
+func (b *Persistent) Stats() (map[string]map[string]interface{}, error) {
-	r := make(map[string]map[string]any)
+	r := make(map[string]map[string]interface{})
-	r["data"] = make(map[string]any)
+	r["data"] = make(map[string]interface{})
 	r["data"]["oldest-ts"] = time.Now()
 	r["data"]["oldest-file"] = ""
 	r["data"]["newest-ts"] = time.Now()
 	r["data"]["newest-file"] = ""
 	r["data"]["total-chunks"] = 0
 	r["data"]["total-size"] = int64(0)
-	r["files"] = make(map[string]any)
+	r["files"] = make(map[string]interface{})
 	r["files"]["oldest-ts"] = time.Now()
 	r["files"]["oldest-name"] = ""
 	r["files"]["newest-ts"] = time.Now()
@@ -728,7 +732,7 @@ func (b *Persistent) GetChunkTs(path string, offset int64) (time.Time, error) {
 				return nil
 			}
 		}
-		return fmt.Errorf("not found %v-%v", path, offset)
+		return errors.Errorf("not found %v-%v", path, offset)
 	})

 	return t, err
@@ -768,7 +772,7 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
 	return b.db.Update(func(tx *bolt.Tx) error {
 		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
 		if err != nil {
-			return fmt.Errorf("couldn't bucket for %v", tempBucket)
+			return errors.Errorf("couldn't bucket for %v", tempBucket)
 		}
 		tempObj := &tempUploadInfo{
 			DestPath: destPath,
@@ -779,11 +783,11 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
 		// cache Object Info
 		encoded, err := json.Marshal(tempObj)
 		if err != nil {
-			return fmt.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
+			return errors.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
 		}
 		err = bucket.Put([]byte(destPath), encoded)
 		if err != nil {
-			return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err)
+			return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
 		}

 		return nil
@@ -798,7 +802,7 @@ func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (de
 	err = b.db.Update(func(tx *bolt.Tx) error {
 		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
 		if err != nil {
-			return fmt.Errorf("couldn't bucket for %v", tempBucket)
+			return errors.Errorf("couldn't bucket for %v", tempBucket)
 		}

 		c := bucket.Cursor()
@@ -831,7 +835,7 @@ func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (de
 			return nil
 		}

-		return fmt.Errorf("no pending upload found")
+		return errors.Errorf("no pending upload found")
 	})

 	return destPath, err
@@ -842,14 +846,14 @@ func (b *Persistent) SearchPendingUpload(remote string) (started bool, err error
 	err = b.db.View(func(tx *bolt.Tx) error {
 		bucket := tx.Bucket([]byte(tempBucket))
 		if bucket == nil {
-			return fmt.Errorf("couldn't bucket for %v", tempBucket)
+			return errors.Errorf("couldn't bucket for %v", tempBucket)
 		}

 		var tempObj = &tempUploadInfo{}
 		v := bucket.Get([]byte(remote))
 		err = json.Unmarshal(v, tempObj)
 		if err != nil {
-			return fmt.Errorf("pending upload (%v) not found %v", remote, err)
+			return errors.Errorf("pending upload (%v) not found %v", remote, err)
 		}

 		started = tempObj.Started
@@ -864,7 +868,7 @@ func (b *Persistent) searchPendingUploadFromDir(dir string) (remotes []string, e
 	err = b.db.View(func(tx *bolt.Tx) error {
 		bucket := tx.Bucket([]byte(tempBucket))
 		if bucket == nil {
-			return fmt.Errorf("couldn't bucket for %v", tempBucket)
+			return errors.Errorf("couldn't bucket for %v", tempBucket)
 		}

 		c := bucket.Cursor()
@@ -894,22 +898,22 @@ func (b *Persistent) rollbackPendingUpload(remote string) error {
 	return b.db.Update(func(tx *bolt.Tx) error {
 		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
 		if err != nil {
-			return fmt.Errorf("couldn't bucket for %v", tempBucket)
+			return errors.Errorf("couldn't bucket for %v", tempBucket)
 		}
 		var tempObj = &tempUploadInfo{}
 		v := bucket.Get([]byte(remote))
 		err = json.Unmarshal(v, tempObj)
 		if err != nil {
-			return fmt.Errorf("pending upload (%v) not found: %w", remote, err)
+			return errors.Errorf("pending upload (%v) not found %v", remote, err)
 		}
 		tempObj.Started = false
 		v2, err := json.Marshal(tempObj)
 		if err != nil {
-			return fmt.Errorf("pending upload not updated: %w", err)
+			return errors.Errorf("pending upload not updated %v", err)
 		}
 		err = bucket.Put([]byte(tempObj.DestPath), v2)
 		if err != nil {
-			return fmt.Errorf("pending upload not updated: %w", err)
+			return errors.Errorf("pending upload not updated %v", err)
 		}
 		return nil
 	})
@@ -922,7 +926,7 @@ func (b *Persistent) removePendingUpload(remote string) error {
 	return b.db.Update(func(tx *bolt.Tx) error {
 		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
 		if err != nil {
-			return fmt.Errorf("couldn't bucket for %v", tempBucket)
+			return errors.Errorf("couldn't bucket for %v", tempBucket)
 		}
 		return bucket.Delete([]byte(remote))
 	})
@@ -937,17 +941,17 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
 	return b.db.Update(func(tx *bolt.Tx) error {
 		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
 		if err != nil {
-			return fmt.Errorf("couldn't bucket for %v", tempBucket)
+			return errors.Errorf("couldn't bucket for %v", tempBucket)
 		}

 		var tempObj = &tempUploadInfo{}
 		v := bucket.Get([]byte(remote))
 		err = json.Unmarshal(v, tempObj)
 		if err != nil {
-			return fmt.Errorf("pending upload (%v) not found %v", remote, err)
+			return errors.Errorf("pending upload (%v) not found %v", remote, err)
 		}
 		if tempObj.Started {
-			return fmt.Errorf("pending upload already started %v", remote)
+			return errors.Errorf("pending upload already started %v", remote)
 		}
 		err = fn(tempObj)
 		if err != nil {
@@ -965,11 +969,11 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
 		}
 		v2, err := json.Marshal(tempObj)
 		if err != nil {
-			return fmt.Errorf("pending upload not updated: %w", err)
+			return errors.Errorf("pending upload not updated %v", err)
 		}
 		err = bucket.Put([]byte(tempObj.DestPath), v2)
 		if err != nil {
-			return fmt.Errorf("pending upload not updated: %w", err)
+			return errors.Errorf("pending upload not updated %v", err)
 		}

 		return nil
@@ -1010,11 +1014,11 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) erro
 			// cache Object Info
 			encoded, err := json.Marshal(tempObj)
 			if err != nil {
-				return fmt.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
+				return errors.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
 			}
 			err = bucket.Put([]byte(destPath), encoded)
 			if err != nil {
-				return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err)
+				return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
 			}
 			fs.Debugf(cacheFs, "reconciled temporary upload: %v", destPath)
 		}
backend/cache/utils_test.go vendored (2 changes)
@@ -1,5 +1,3 @@
-//go:build !plan9 && !js
-
 package cache

 import bolt "go.etcd.io/bbolt"
(File diff suppressed because it is too large)
@@ -5,17 +5,14 @@ import (
 	"context"
 	"flag"
 	"fmt"
-	"io"
+	"io/ioutil"
 	"path"
 	"regexp"
 	"strings"
 	"testing"

 	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/config/configmap"
-	"github.com/rclone/rclone/fs/fspath"
 	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/fs/object"
 	"github.com/rclone/rclone/fs/operations"
 	"github.com/rclone/rclone/fstest"
 	"github.com/rclone/rclone/fstest/fstests"
@@ -35,35 +32,11 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
 		fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{
 			ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
 			Path:    fmt.Sprintf("chunker-upload-%dk", kilobytes),
-			Size:    int64(kilobytes) * int64(fs.Kibi),
+			Size:    int64(kilobytes) * int64(fs.KibiByte),
 		})
 	})
 }

-type settings map[string]any
-
-func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, path string, opts settings) fs.Fs {
-	fsName := strings.Split(f.Name(), "{")[0] // strip off hash
-	configMap := configmap.Simple{}
-	for key, val := range opts {
-		configMap[key] = fmt.Sprintf("%v", val)
-	}
-	rpath := fspath.JoinRootPath(f.Root(), path)
-	remote := fmt.Sprintf("%s,%s:%s", fsName, configMap.String(), rpath)
-	fixFs, err := fs.NewFs(ctx, remote)
-	require.NoError(t, err)
-	return fixFs
-}
-
-var mtime1 = fstest.Time("2001-02-03T04:05:06.499999999Z")
-
-func testPutFile(ctx context.Context, t *testing.T, f fs.Fs, name, contents, message string, check bool) fs.Object {
-	item := fstest.Item{Path: name, ModTime: mtime1}
-	obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
-	assert.NotNil(t, obj, message)
-	return obj
-}
-
 // test chunk name parser
 func testChunkNameFormat(t *testing.T, f *Fs) {
 	saveOpt := f.opt
@@ -413,7 +386,7 @@ func testSmallFileInternals(t *testing.T, f *Fs) {
 		if r == nil {
 			return
 		}
-		data, err := io.ReadAll(r)
+		data, err := ioutil.ReadAll(r)
 		assert.NoError(t, err)
 		assert.Equal(t, contents, string(data))
 		_ = r.Close()
@@ -440,7 +413,7 @@ func testSmallFileInternals(t *testing.T, f *Fs) {
 	checkSmallFile := func(name, contents string) {
 		filename := path.Join(dir, name)
 		item := fstest.Item{Path: filename, ModTime: modTime}
-		put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
+		_, put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
 		assert.NotNil(t, put)
 		checkSmallFileInternals(put)
 		checkContents(put, contents)
@@ -489,20 +462,14 @@ func testPreventCorruption(t *testing.T, f *Fs) {

 	newFile := func(name string) fs.Object {
 		item := fstest.Item{Path: path.Join(dir, name), ModTime: modTime}
-		obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
+		_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
 		require.NotNil(t, obj)
 		return obj
 	}
 	billyObj := newFile("billy")
-	billyTxn := billyObj.(*Object).xactID
-	if f.useNoRename {
-		require.True(t, billyTxn != "")
-	} else {
-		require.True(t, billyTxn == "")
-	}

 	billyChunkName := func(chunkNo int) string {
-		return f.makeChunkName(billyObj.Remote(), chunkNo, "", billyTxn)
+		return f.makeChunkName(billyObj.Remote(), chunkNo, "", "")
 	}

 	err := f.Mkdir(ctx, billyChunkName(1))
@@ -519,13 +486,11 @@ func testPreventCorruption(t *testing.T, f *Fs) {
 	// accessing chunks in strict mode is prohibited
 	f.opt.FailHard = true
 	billyChunk4Name := billyChunkName(4)
-	_, err = f.base.NewObject(ctx, billyChunk4Name)
-	require.NoError(t, err)
-	_, err = f.NewObject(ctx, billyChunk4Name)
+	billyChunk4, err := f.NewObject(ctx, billyChunk4Name)
 	assertOverlapError(err)

 	f.opt.FailHard = false
-	billyChunk4, err := f.NewObject(ctx, billyChunk4Name)
+	billyChunk4, err = f.NewObject(ctx, billyChunk4Name)
 	assert.NoError(t, err)
 	require.NotNil(t, billyChunk4)

@@ -538,7 +503,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
 	assert.NoError(t, err)
 	var chunkContents []byte
 	assert.NotPanics(t, func() {
-		chunkContents, err = io.ReadAll(r)
+		chunkContents, err = ioutil.ReadAll(r)
 		_ = r.Close()
 	})
 	assert.NoError(t, err)
@@ -554,8 +519,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {

 	// recreate billy in case it was anyhow corrupted
 	willyObj := newFile("willy")
-	willyTxn := willyObj.(*Object).xactID
-	willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", willyTxn)
+	willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", "")
 	f.opt.FailHard = false
 	willyChunk, err := f.NewObject(ctx, willyChunkName)
 	f.opt.FailHard = true
@@ -573,7 +537,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
 	r, err = willyChunk.Open(ctx)
 	assert.NoError(t, err)
 	assert.NotPanics(t, func() {
-		_, err = io.ReadAll(r)
+		_, err = ioutil.ReadAll(r)
 		_ = r.Close()
 	})
 	assert.NoError(t, err)
@@ -596,20 +560,17 @@ func testChunkNumberOverflow(t *testing.T, f *Fs) {
 	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
 	contents := random.String(100)

-	newFile := func(f fs.Fs, name string) (obj fs.Object, filename string, txnID string) {
-		filename = path.Join(dir, name)
+	newFile := func(f fs.Fs, name string) (fs.Object, string) {
+		filename := path.Join(dir, name)
 		item := fstest.Item{Path: filename, ModTime: modTime}
-		obj = fstests.PutTestContents(ctx, t, f, &item, contents, true)
+		_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
 		require.NotNil(t, obj)
-		if chunkObj, isChunkObj := obj.(*Object); isChunkObj {
-			txnID = chunkObj.xactID
-		}
-		return
+		return obj, filename
 	}

 	f.opt.FailHard = false
-	file, fileName, fileTxn := newFile(f, "wreaker")
-	wreak, _, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", fileTxn))
+	file, fileName := newFile(f, "wreaker")
+	wreak, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", ""))

 	f.opt.FailHard = false
 	fstest.CheckListingWithRoot(t, f, dir, nil, nil, f.Precision())
@@ -643,13 +604,22 @@ func testMetadataInput(t *testing.T, f *Fs) {
 	}()
 	f.opt.FailHard = false

+	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
+
+	putFile := func(f fs.Fs, name, contents, message string, check bool) fs.Object {
+		item := fstest.Item{Path: name, ModTime: modTime}
+		_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
+		assert.NotNil(t, obj, message)
+		return obj
+	}
+
 	runSubtest := func(contents, name string) {
 		description := fmt.Sprintf("file with %s metadata", name)
 		filename := path.Join(dir, name)
 		require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct")

-		part := testPutFile(ctx, t, f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
-		_ = testPutFile(ctx, t, f, filename, contents, "upload "+description, false)
+		part := putFile(f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
+		_ = putFile(f, filename, contents, "upload "+description, false)

 		obj, err := f.NewObject(ctx, filename)
 		assert.NoError(t, err, "access "+description)
@@ -672,14 +642,14 @@ func testMetadataInput(t *testing.T, f *Fs) {
 		assert.NoError(t, err, "open "+description)
 		assert.NotNil(t, r, "open stream of "+description)
 		if err == nil && r != nil {
-			data, err := io.ReadAll(r)
+			data, err := ioutil.ReadAll(r)
 			assert.NoError(t, err, "read all of "+description)
 			assert.Equal(t, contents, string(data), description+" contents is ok")
 			_ = r.Close()
 		}
 	}

-	metaData, err := marshalSimpleJSON(ctx, 3, 1, "", "", "")
+	metaData, err := marshalSimpleJSON(ctx, 3, 1, "", "")
 	require.NoError(t, err)
 	todaysMeta := string(metaData)
 	runSubtest(todaysMeta, "today")
@@ -693,212 +663,6 @@ func testMetadataInput(t *testing.T, f *Fs) {
|
|||||||
runSubtest(futureMeta, "future")
|
runSubtest(futureMeta, "future")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test that chunker refuses to change on objects with future/unknown metadata
|
|
||||||
func testFutureProof(t *testing.T, f *Fs) {
|
|
||||||
if !f.useMeta {
|
|
||||||
t.Skip("this test requires metadata support")
|
|
||||||
}
|
|
||||||
|
|
||||||
saveOpt := f.opt
|
|
||||||
ctx := context.Background()
|
|
||||||
f.opt.FailHard = true
|
|
||||||
const dir = "future"
|
|
||||||
const file = dir + "/test"
|
|
||||||
defer func() {
|
|
||||||
f.opt.FailHard = false
|
|
||||||
_ = operations.Purge(ctx, f.base, dir)
|
|
||||||
f.opt = saveOpt
|
|
||||||
}()
|
|
||||||
|
|
||||||
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
|
|
||||||
putPart := func(name string, part int, data, msg string) {
|
|
||||||
if part > 0 {
|
|
||||||
name = f.makeChunkName(name, part-1, "", "")
|
|
||||||
}
|
|
||||||
item := fstest.Item{Path: name, ModTime: modTime}
|
|
||||||
obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true)
|
|
||||||
assert.NotNil(t, obj, msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
// simulate chunked object from future
|
|
||||||
meta := `{"ver":999,"nchunks":3,"size":9,"garbage":"litter","sha1":"0707f2970043f9f7c22029482db27733deaec029"}`
|
|
||||||
putPart(file, 0, meta, "metaobject")
|
|
||||||
putPart(file, 1, "abc", "chunk1")
|
|
||||||
putPart(file, 2, "def", "chunk2")
|
|
||||||
putPart(file, 3, "ghi", "chunk3")
|
|
||||||
|
|
||||||
// List should succeed
|
|
||||||
ls, err := f.List(ctx, dir)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, 1, len(ls))
|
|
||||||
assert.Equal(t, int64(9), ls[0].Size())
|
|
||||||
|
|
||||||
// NewObject should succeed
|
|
||||||
obj, err := f.NewObject(ctx, file)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, file, obj.Remote())
|
|
||||||
assert.Equal(t, int64(9), obj.Size())
|
|
||||||
|
|
||||||
// Hash must fail
|
|
||||||
_, err = obj.Hash(ctx, hash.SHA1)
|
|
||||||
assert.Equal(t, ErrMetaUnknown, err)
|
|
||||||
|
|
||||||
// Move must fail
|
|
||||||
mobj, err := operations.Move(ctx, f, nil, file+"2", obj)
|
|
||||||
assert.Nil(t, mobj)
|
|
||||||
assert.Error(t, err)
|
|
||||||
if err != nil {
|
|
||||||
assert.Contains(t, err.Error(), "please upgrade rclone")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Put must fail
|
|
||||||
oi := object.NewStaticObjectInfo(file, modTime, 3, true, nil, nil)
|
|
||||||
buf := bytes.NewBufferString("abc")
|
|
||||||
_, err = f.Put(ctx, buf, oi)
|
|
||||||
assert.Error(t, err)
|
|
||||||
|
|
||||||
// Rcat must fail
|
|
||||||
in := io.NopCloser(bytes.NewBufferString("abc"))
|
|
||||||
robj, err := operations.Rcat(ctx, f, file, in, modTime, nil)
|
|
||||||
assert.Nil(t, robj)
|
|
||||||
assert.NotNil(t, err)
|
|
||||||
if err != nil {
|
|
||||||
assert.Contains(t, err.Error(), "please upgrade rclone")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// The newer method of doing transactions without renaming should still be able to correctly process chunks that were created with renaming
|
|
||||||
// If you attempt to do the inverse, however, the data chunks will be ignored causing commands to perform incorrectly
|
|
||||||
func testBackwardsCompatibility(t *testing.T, f *Fs) {
|
|
||||||
if !f.useMeta {
|
|
||||||
t.Skip("Can't do norename transactions without metadata")
|
|
||||||
}
|
|
||||||
const dir = "backcomp"
|
|
||||||
ctx := context.Background()
|
|
||||||
saveOpt := f.opt
|
|
||||||
saveUseNoRename := f.useNoRename
|
|
||||||
defer func() {
|
|
||||||
f.opt.FailHard = false
|
|
||||||
_ = operations.Purge(ctx, f.base, dir)
|
|
||||||
f.opt = saveOpt
|
|
||||||
f.useNoRename = saveUseNoRename
|
|
||||||
}()
|
|
||||||
f.opt.ChunkSize = fs.SizeSuffix(10)
|
|
||||||
|
|
||||||
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
|
|
||||||
contents := random.String(250)
|
|
||||||
newFile := func(f fs.Fs, name string) (fs.Object, string) {
|
|
||||||
filename := path.Join(dir, name)
|
|
||||||
item := fstest.Item{Path: filename, ModTime: modTime}
|
|
||||||
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
|
||||||
require.NotNil(t, obj)
|
|
||||||
return obj, filename
|
|
||||||
}
|
|
||||||
|
|
||||||
f.opt.FailHard = false
|
|
||||||
f.useNoRename = false
|
|
||||||
file, fileName := newFile(f, "renamefile")
|
|
||||||
|
|
||||||
f.opt.FailHard = false
|
|
||||||
item := fstest.NewItem(fileName, contents, modTime)
|
|
||||||
|
|
||||||
var items []fstest.Item
|
|
||||||
items = append(items, item)
|
|
||||||
|
|
||||||
f.useNoRename = true
|
|
||||||
fstest.CheckListingWithRoot(t, f, dir, items, nil, f.Precision())
|
|
||||||
_, err := f.NewObject(ctx, fileName)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
f.opt.FailHard = true
|
|
||||||
_, err = f.List(ctx, dir)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
f.opt.FailHard = false
|
|
||||||
_ = file.Remove(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testChunkerServerSideMove(t *testing.T, f *Fs) {
|
|
||||||
if !f.useMeta {
|
|
||||||
t.Skip("Can't test norename transactions without metadata")
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
const dir = "servermovetest"
|
|
||||||
subRemote := fmt.Sprintf("%s:%s/%s", f.Name(), f.Root(), dir)
|
|
||||||
|
|
||||||
subFs1, err := fs.NewFs(ctx, subRemote+"/subdir1")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
fs1, isChunkerFs := subFs1.(*Fs)
|
|
||||||
assert.True(t, isChunkerFs)
|
|
||||||
fs1.useNoRename = false
|
|
||||||
fs1.opt.ChunkSize = fs.SizeSuffix(3)
|
|
||||||
|
|
||||||
subFs2, err := fs.NewFs(ctx, subRemote+"/subdir2")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
fs2, isChunkerFs := subFs2.(*Fs)
|
|
||||||
assert.True(t, isChunkerFs)
|
|
||||||
fs2.useNoRename = true
|
|
||||||
fs2.opt.ChunkSize = fs.SizeSuffix(3)
|
|
||||||
|
|
||||||
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
|
|
||||||
item := fstest.Item{Path: "movefile", ModTime: modTime}
|
|
||||||
contents := "abcdef"
|
|
||||||
file := fstests.PutTestContents(ctx, t, fs1, &item, contents, true)
|
|
||||||
|
|
||||||
dstOverwritten, _ := fs2.NewObject(ctx, "movefile")
|
|
||||||
dstFile, err := operations.Move(ctx, fs2, dstOverwritten, "movefile", file)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, int64(len(contents)), dstFile.Size())
|
|
||||||
|
|
||||||
r, err := dstFile.Open(ctx)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.NotNil(t, r)
|
|
||||||
data, err := io.ReadAll(r)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, contents, string(data))
|
|
||||||
_ = r.Close()
|
|
||||||
_ = operations.Purge(ctx, f.base, dir)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test that md5all creates metadata even for small files
|
|
||||||
func testMD5AllSlow(t *testing.T, f *Fs) {
|
|
||||||
ctx := context.Background()
|
|
||||||
fsResult := deriveFs(ctx, t, f, "md5all", settings{
|
|
||||||
"chunk_size": "1P",
|
|
||||||
"name_format": "*.#",
|
|
||||||
"hash_type": "md5all",
|
|
||||||
"transactions": "rename",
|
|
||||||
"meta_format": "simplejson",
|
|
||||||
})
|
|
||||||
chunkFs, ok := fsResult.(*Fs)
|
|
||||||
require.True(t, ok, "fs must be a chunker remote")
|
|
||||||
baseFs := chunkFs.base
|
|
||||||
if !baseFs.Features().SlowHash {
|
|
||||||
t.Skipf("this test needs a base fs with slow hash, e.g. local")
|
|
||||||
}
|
|
||||||
|
|
||||||
assert.True(t, chunkFs.useMD5, "must use md5")
|
|
||||||
assert.True(t, chunkFs.hashAll, "must hash all files")
|
|
||||||
|
|
||||||
_ = testPutFile(ctx, t, chunkFs, "file", "-", "error", true)
|
|
||||||
obj, err := chunkFs.NewObject(ctx, "file")
|
|
||||||
require.NoError(t, err)
|
|
||||||
sum, err := obj.Hash(ctx, hash.MD5)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, "336d5ebc5436534e61d16e63ddfca327", sum)
|
|
||||||
|
|
||||||
list, err := baseFs.List(ctx, "")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, 2, len(list))
|
|
||||||
_, err = baseFs.NewObject(ctx, "file")
|
|
||||||
assert.NoError(t, err, "metadata must be created")
|
|
||||||
_, err = baseFs.NewObject(ctx, "file.1")
|
|
||||||
assert.NoError(t, err, "first chunk must be created")
|
|
||||||
|
|
||||||
require.NoError(t, operations.Purge(ctx, baseFs, ""))
|
|
||||||
}
|
|
||||||
|
|
||||||
// InternalTest dispatches all internal tests
|
// InternalTest dispatches all internal tests
|
||||||
func (f *Fs) InternalTest(t *testing.T) {
|
func (f *Fs) InternalTest(t *testing.T) {
|
||||||
t.Run("PutLarge", func(t *testing.T) {
|
t.Run("PutLarge", func(t *testing.T) {
|
||||||
@@ -922,18 +686,6 @@ func (f *Fs) InternalTest(t *testing.T) {
|
|||||||
t.Run("MetadataInput", func(t *testing.T) {
|
t.Run("MetadataInput", func(t *testing.T) {
|
||||||
testMetadataInput(t, f)
|
testMetadataInput(t, f)
|
||||||
})
|
})
|
||||||
t.Run("FutureProof", func(t *testing.T) {
|
|
||||||
testFutureProof(t, f)
|
|
||||||
})
|
|
||||||
t.Run("BackwardsCompatibility", func(t *testing.T) {
|
|
||||||
testBackwardsCompatibility(t, f)
|
|
||||||
})
|
|
||||||
t.Run("ChunkerServerSideMove", func(t *testing.T) {
|
|
||||||
testChunkerServerSideMove(t, f)
|
|
||||||
})
|
|
||||||
t.Run("MD5AllSlow", func(t *testing.T) {
|
|
||||||
testMD5AllSlow(t, f)
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ fstests.InternalTester = (*Fs)(nil)
|
var _ fstests.InternalTester = (*Fs)(nil)
|
||||||
|
@@ -15,10 +15,10 @@ import (

 // Command line flags
 var (
-	// Invalid characters are not supported by some remotes, e.g. Mailru.
+	// Invalid characters are not supported by some remotes, eg. Mailru.
 	// We enable testing with invalid characters when -remote is not set, so
 	// chunker overlays a local directory, but invalid characters are disabled
-	// by default when -remote is set, e.g. when test_all runs backend tests.
+	// by default when -remote is set, eg. when test_all runs backend tests.
 	// You can still test with invalid characters using the below flag.
 	UseBadChars = flag.Bool("bad-chars", false, "Set to test bad characters in file names when -remote is set")
 )
@@ -35,18 +35,14 @@ func TestIntegration(t *testing.T) {
 			"MimeType",
 			"GetTier",
 			"SetTier",
-			"Metadata",
-			"SetMetadata",
 		},
 		UnimplementableFsMethods: []string{
 			"PublicLink",
 			"OpenWriterAt",
-			"OpenChunkWriter",
 			"MergeDirs",
 			"DirCacheFlush",
 			"UserInfo",
 			"Disconnect",
-			"ListP",
 		},
 	}
 	if *fstest.RemoteName == "" {
@@ -57,7 +53,6 @@ func TestIntegration(t *testing.T) {
 			{Name: name, Key: "type", Value: "chunker"},
 			{Name: name, Key: "remote", Value: tempDir},
 		}
-		opt.QuickTestOK = true
 	}
 	fstests.Run(t, &opt)
 }
@@ -1,48 +0,0 @@
-// Package api has type definitions for cloudinary
-package api
-
-import (
-	"fmt"
-)
-
-// CloudinaryEncoder extends the built-in encoder
-type CloudinaryEncoder interface {
-	// FromStandardPath takes a / separated path in Standard encoding
-	// and converts it to a / separated path in this encoding.
-	FromStandardPath(string) string
-	// FromStandardName takes name in Standard encoding and converts
-	// it in this encoding.
-	FromStandardName(string) string
-	// ToStandardPath takes a / separated path in this encoding
-	// and converts it to a / separated path in Standard encoding.
-	ToStandardPath(string) string
-	// ToStandardName takes name in this encoding and converts
-	// it in Standard encoding.
-	ToStandardName(string, string) string
-	// Encoded root of the remote (as passed into NewFs)
-	FromStandardFullPath(string) string
-}
-
-// UpdateOptions was created to pass options from Update to Put
-type UpdateOptions struct {
-	PublicID     string
-	ResourceType string
-	DeliveryType string
-	AssetFolder  string
-	DisplayName  string
-}
-
-// Header formats the option as a string
-func (o *UpdateOptions) Header() (string, string) {
-	return "UpdateOption", fmt.Sprintf("%s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
-}
-
-// Mandatory returns whether the option must be parsed or can be ignored
-func (o *UpdateOptions) Mandatory() bool {
-	return false
-}
-
-// String formats the option into human-readable form
-func (o *UpdateOptions) String() string {
-	return fmt.Sprintf("Fully qualified Public ID: %s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
-}
@@ -1,754 +0,0 @@
|
|||||||
// Package cloudinary provides an interface to the Cloudinary DAM
|
|
||||||
package cloudinary
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/hex"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"path"
|
|
||||||
"slices"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/cloudinary/cloudinary-go/v2"
|
|
||||||
SDKApi "github.com/cloudinary/cloudinary-go/v2/api"
|
|
||||||
"github.com/cloudinary/cloudinary-go/v2/api/admin"
|
|
||||||
"github.com/cloudinary/cloudinary-go/v2/api/admin/search"
|
|
||||||
"github.com/cloudinary/cloudinary-go/v2/api/uploader"
|
|
||||||
"github.com/rclone/rclone/backend/cloudinary/api"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/config"
|
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
|
||||||
"github.com/rclone/rclone/fs/config/configstruct"
|
|
||||||
"github.com/rclone/rclone/fs/fserrors"
|
|
||||||
"github.com/rclone/rclone/fs/fshttp"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
|
||||||
"github.com/rclone/rclone/lib/encoder"
|
|
||||||
"github.com/rclone/rclone/lib/pacer"
|
|
||||||
"github.com/rclone/rclone/lib/rest"
|
|
||||||
"github.com/zeebo/blake3"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Cloudinary shouldn't have a trailing dot if there is no path
|
|
||||||
func cldPathDir(somePath string) string {
|
|
||||||
if somePath == "" || somePath == "." {
|
|
||||||
return somePath
|
|
||||||
}
|
|
||||||
dir := path.Dir(somePath)
|
|
||||||
if dir == "." {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return dir
|
|
||||||
}
|
|
||||||
|
|
||||||
// Register with Fs
|
|
||||||
func init() {
|
|
||||||
fs.Register(&fs.RegInfo{
|
|
||||||
Name: "cloudinary",
|
|
||||||
Description: "Cloudinary",
|
|
||||||
NewFs: NewFs,
|
|
||||||
Options: []fs.Option{
|
|
||||||
{
|
|
||||||
Name: "cloud_name",
|
|
||||||
Help: "Cloudinary Environment Name",
|
|
||||||
Required: true,
|
|
||||||
Sensitive: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "api_key",
|
|
||||||
Help: "Cloudinary API Key",
|
|
||||||
Required: true,
|
|
||||||
Sensitive: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "api_secret",
|
|
||||||
Help: "Cloudinary API Secret",
|
|
||||||
Required: true,
|
|
||||||
Sensitive: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "upload_prefix",
|
|
||||||
Help: "Specify the API endpoint for environments out of the US",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "upload_preset",
|
|
||||||
Help: "Upload Preset to select asset manipulation on upload",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: config.ConfigEncoding,
|
|
||||||
Help: config.ConfigEncodingHelp,
|
|
||||||
Advanced: true,
|
|
||||||
Default: (encoder.Base | // Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
|
|
||||||
encoder.EncodeSlash |
|
|
||||||
encoder.EncodeLtGt |
|
|
||||||
encoder.EncodeDoubleQuote |
|
|
||||||
encoder.EncodeQuestion |
|
|
||||||
encoder.EncodeAsterisk |
|
|
||||||
encoder.EncodePipe |
|
|
||||||
encoder.EncodeHash |
|
|
||||||
encoder.EncodePercent |
|
|
||||||
encoder.EncodeBackSlash |
|
|
||||||
encoder.EncodeDel |
|
|
||||||
encoder.EncodeCtl |
|
|
||||||
encoder.EncodeRightSpace |
|
|
||||||
encoder.EncodeInvalidUtf8 |
|
|
||||||
encoder.EncodeDot),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "eventually_consistent_delay",
|
|
||||||
Default: fs.Duration(0),
|
|
||||||
Advanced: true,
|
|
||||||
Help: "Wait N seconds for eventual consistency of the databases that support the backend operation",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "adjust_media_files_extensions",
|
|
||||||
Default: true,
|
|
||||||
Advanced: true,
|
|
||||||
Help: "Cloudinary handles media formats as a file attribute and strips it from the name, which is unlike most other file systems",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "media_extensions",
|
|
||||||
Default: []string{
|
|
||||||
"3ds", "3g2", "3gp", "ai", "arw", "avi", "avif", "bmp", "bw",
|
|
||||||
"cr2", "cr3", "djvu", "dng", "eps3", "fbx", "flif", "flv", "gif",
|
|
||||||
"glb", "gltf", "hdp", "heic", "heif", "ico", "indd", "jp2", "jpe",
|
|
||||||
"jpeg", "jpg", "jxl", "jxr", "m2ts", "mov", "mp4", "mpeg", "mts",
|
|
||||||
"mxf", "obj", "ogv", "pdf", "ply", "png", "psd", "svg", "tga",
|
|
||||||
"tif", "tiff", "ts", "u3ma", "usdz", "wdp", "webm", "webp", "wmv"},
|
|
||||||
Advanced: true,
|
|
||||||
Help: "Cloudinary supported media extensions",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Options defines the configuration for this backend
|
|
||||||
type Options struct {
|
|
||||||
CloudName string `config:"cloud_name"`
|
|
||||||
APIKey string `config:"api_key"`
|
|
||||||
APISecret string `config:"api_secret"`
|
|
||||||
UploadPrefix string `config:"upload_prefix"`
|
|
||||||
UploadPreset string `config:"upload_preset"`
|
|
||||||
Enc encoder.MultiEncoder `config:"encoding"`
|
|
||||||
EventuallyConsistentDelay fs.Duration `config:"eventually_consistent_delay"`
|
|
||||||
MediaExtensions []string `config:"media_extensions"`
|
|
||||||
AdjustMediaFilesExtensions bool `config:"adjust_media_files_extensions"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fs represents a remote cloudinary server
|
|
||||||
type Fs struct {
|
|
||||||
name string
|
|
||||||
root string
|
|
||||||
opt Options
|
|
||||||
features *fs.Features
|
|
||||||
pacer *fs.Pacer
|
|
||||||
srv *rest.Client // For downloading assets via the Cloudinary CDN
|
|
||||||
cld *cloudinary.Cloudinary // API calls are going through the Cloudinary SDK
|
|
||||||
lastCRUD time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// Object describes a cloudinary object
|
|
||||||
type Object struct {
|
|
||||||
fs *Fs
|
|
||||||
remote string
|
|
||||||
size int64
|
|
||||||
modTime time.Time
|
|
||||||
url string
|
|
||||||
md5sum string
|
|
||||||
publicID string
|
|
||||||
resourceType string
|
|
||||||
deliveryType string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewFs constructs an Fs from the path, bucket:path
|
|
||||||
func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
|
|
||||||
opt := new(Options)
|
|
||||||
err := configstruct.Set(m, opt)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Initialize the Cloudinary client
|
|
||||||
cld, err := cloudinary.NewFromParams(opt.CloudName, opt.APIKey, opt.APISecret)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to create Cloudinary client: %w", err)
|
|
||||||
}
|
|
||||||
cld.Admin.Client = *fshttp.NewClient(ctx)
|
|
||||||
cld.Upload.Client = *fshttp.NewClient(ctx)
|
|
||||||
if opt.UploadPrefix != "" {
|
|
||||||
cld.Config.API.UploadPrefix = opt.UploadPrefix
|
|
||||||
}
|
|
||||||
client := fshttp.NewClient(ctx)
|
|
||||||
f := &Fs{
|
|
||||||
name: name,
|
|
||||||
root: root,
|
|
||||||
opt: *opt,
|
|
||||||
cld: cld,
|
|
||||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(1000), pacer.MaxSleep(10000), pacer.DecayConstant(2))),
|
|
||||||
srv: rest.NewClient(client),
|
|
||||||
}
|
|
||||||
|
|
||||||
f.features = (&fs.Features{
|
|
||||||
CanHaveEmptyDirectories: true,
|
|
||||||
}).Fill(ctx, f)
|
|
||||||
|
|
||||||
if root != "" {
|
|
||||||
// Check to see if the root actually an existing file
|
|
||||||
remote := path.Base(root)
|
|
||||||
f.root = cldPathDir(root)
|
|
||||||
_, err := f.NewObject(ctx, remote)
|
|
||||||
if err != nil {
|
|
||||||
if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) {
|
|
||||||
// File doesn't exist so return the previous root
|
|
||||||
f.root = root
|
|
||||||
return f, nil
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// return an error with an fs which points to the parent
|
|
||||||
return f, fs.ErrorIsFile
|
|
||||||
}
|
|
||||||
return f, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ------------------------------------------------------------
|
|
||||||
|
|
||||||
// FromStandardPath implementation of the api.CloudinaryEncoder
|
|
||||||
func (f *Fs) FromStandardPath(s string) string {
|
|
||||||
return strings.ReplaceAll(f.opt.Enc.FromStandardPath(s), "&", "\uFF06")
|
|
||||||
}
|
|
||||||
|
|
||||||
// FromStandardName implementation of the api.CloudinaryEncoder
|
|
||||||
func (f *Fs) FromStandardName(s string) string {
|
|
||||||
if f.opt.AdjustMediaFilesExtensions {
|
|
||||||
parsedURL, err := url.Parse(s)
|
|
||||||
ext := ""
|
|
||||||
if err != nil {
|
|
||||||
fs.Logf(nil, "Error parsing URL: %v", err)
|
|
||||||
} else {
|
|
||||||
ext = path.Ext(parsedURL.Path)
|
|
||||||
if slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
|
|
||||||
s = strings.TrimSuffix(parsedURL.Path, ext)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return strings.ReplaceAll(f.opt.Enc.FromStandardName(s), "&", "\uFF06")
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToStandardPath implementation of the api.CloudinaryEncoder
|
|
||||||
func (f *Fs) ToStandardPath(s string) string {
|
|
||||||
return strings.ReplaceAll(f.opt.Enc.ToStandardPath(s), "\uFF06", "&")
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToStandardName implementation of the api.CloudinaryEncoder
|
|
||||||
func (f *Fs) ToStandardName(s string, assetURL string) string {
|
|
||||||
ext := ""
|
|
||||||
if f.opt.AdjustMediaFilesExtensions {
|
|
||||||
parsedURL, err := url.Parse(assetURL)
|
|
||||||
if err != nil {
|
|
||||||
fs.Logf(nil, "Error parsing URL: %v", err)
|
|
||||||
} else {
|
|
||||||
ext = path.Ext(parsedURL.Path)
|
|
||||||
if !slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
|
|
||||||
ext = ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return strings.ReplaceAll(f.opt.Enc.ToStandardName(s), "\uFF06", "&") + ext
|
|
||||||
}
|
|
||||||
|
|
||||||
// FromStandardFullPath encodes a full path to Cloudinary standard
|
|
||||||
func (f *Fs) FromStandardFullPath(dir string) string {
|
|
||||||
return path.Join(api.CloudinaryEncoder.FromStandardPath(f, f.root), api.CloudinaryEncoder.FromStandardPath(f, dir))
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToAssetFolderAPI encodes folders as expected by the Cloudinary SDK
|
|
||||||
func (f *Fs) ToAssetFolderAPI(dir string) string {
|
|
||||||
return strings.ReplaceAll(dir, "%", "%25")
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToDisplayNameElastic encodes a special case of elasticsearch
|
|
||||||
func (f *Fs) ToDisplayNameElastic(dir string) string {
|
|
||||||
return strings.ReplaceAll(dir, "!", "\\!")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name of the remote (as passed into NewFs)
|
|
||||||
func (f *Fs) Name() string {
|
|
||||||
return f.name
|
|
||||||
}
|
|
||||||
|
|
||||||
// Root of the remote (as passed into NewFs)
|
|
||||||
func (f *Fs) Root() string {
|
|
||||||
return f.root
|
|
||||||
}
|
|
||||||
|
|
||||||
// WaitEventuallyConsistent waits till the FS is eventually consistent
|
|
||||||
func (f *Fs) WaitEventuallyConsistent() {
|
|
||||||
if f.opt.EventuallyConsistentDelay == fs.Duration(0) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
delay := time.Duration(f.opt.EventuallyConsistentDelay)
|
|
||||||
timeSinceLastCRUD := time.Since(f.lastCRUD)
|
|
||||||
if timeSinceLastCRUD < delay {
|
|
||||||
time.Sleep(delay - timeSinceLastCRUD)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// String converts this Fs to a string
|
|
||||||
func (f *Fs) String() string {
|
|
||||||
return fmt.Sprintf("Cloudinary root '%s'", f.root)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Features returns the optional features of this Fs
|
|
||||||
func (f *Fs) Features() *fs.Features {
|
|
||||||
return f.features
|
|
||||||
}
|
|
||||||
|
|
||||||
// List the objects and directories in dir into entries
|
|
||||||
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
|
|
||||||
remotePrefix := f.FromStandardFullPath(dir)
|
|
||||||
if remotePrefix != "" && !strings.HasSuffix(remotePrefix, "/") {
|
|
||||||
remotePrefix += "/"
|
|
||||||
}
|
|
||||||
|
|
||||||
var entries fs.DirEntries
|
|
||||||
dirs := make(map[string]struct{})
|
|
||||||
nextCursor := ""
|
|
||||||
f.WaitEventuallyConsistent()
|
|
||||||
for {
|
|
||||||
// user the folders api to list folders.
|
|
||||||
folderParams := admin.SubFoldersParams{
|
|
||||||
Folder: f.ToAssetFolderAPI(remotePrefix),
|
|
||||||
MaxResults: 500,
|
|
||||||
}
|
|
||||||
if nextCursor != "" {
|
|
||||||
folderParams.NextCursor = nextCursor
|
|
||||||
}
|
|
||||||
|
|
||||||
results, err := f.cld.Admin.SubFolders(ctx, folderParams)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to list sub-folders: %w", err)
|
|
||||||
}
|
|
||||||
if results.Error.Message != "" {
|
|
||||||
if strings.HasPrefix(results.Error.Message, "Can't find folder with path") {
|
|
||||||
return nil, fs.ErrorDirNotFound
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, fmt.Errorf("failed to list sub-folders: %s", results.Error.Message)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, folder := range results.Folders {
|
|
||||||
relativePath := api.CloudinaryEncoder.ToStandardPath(f, strings.TrimPrefix(folder.Path, remotePrefix))
|
|
||||||
parts := strings.Split(relativePath, "/")
|
|
||||||
|
|
||||||
// It's a directory
|
|
||||||
dirName := parts[len(parts)-1]
|
|
||||||
if _, found := dirs[dirName]; !found {
|
|
||||||
d := fs.NewDir(path.Join(dir, dirName), time.Time{})
|
|
||||||
entries = append(entries, d)
|
|
||||||
dirs[dirName] = struct{}{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Break if there are no more results
|
|
||||||
if results.NextCursor == "" {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
nextCursor = results.NextCursor
|
|
||||||
}
|
|
||||||
|
|
||||||
for {
|
|
||||||
// Use the assets.AssetsByAssetFolder API to list assets
|
|
||||||
assetsParams := admin.AssetsByAssetFolderParams{
|
|
||||||
AssetFolder: remotePrefix,
|
|
||||||
MaxResults: 500,
|
|
||||||
}
|
|
||||||
if nextCursor != "" {
|
|
||||||
assetsParams.NextCursor = nextCursor
|
|
||||||
}
|
|
||||||
|
|
||||||
results, err := f.cld.Admin.AssetsByAssetFolder(ctx, assetsParams)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to list assets: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, asset := range results.Assets {
|
|
||||||
remote := path.Join(dir, api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName, asset.SecureURL))
|
|
||||||
o := &Object{
|
|
||||||
fs: f,
|
|
||||||
remote: remote,
|
|
||||||
size: int64(asset.Bytes),
|
|
||||||
modTime: asset.CreatedAt,
|
|
||||||
url: asset.SecureURL,
|
|
||||||
publicID: asset.PublicID,
|
|
||||||
resourceType: asset.AssetType,
|
|
||||||
deliveryType: asset.Type,
|
|
||||||
}
|
|
||||||
entries = append(entries, o)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Break if there are no more results
|
|
||||||
if results.NextCursor == "" {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
nextCursor = results.NextCursor
|
|
||||||
}
|
|
||||||
|
|
||||||
return entries, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewObject finds the Object at remote. If it can't be found it returns the error fs.ErrorObjectNotFound.
|
|
||||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
|
||||||
searchParams := search.Query{
|
|
||||||
Expression: fmt.Sprintf("asset_folder:\"%s\" AND display_name:\"%s\"",
|
|
||||||
f.FromStandardFullPath(cldPathDir(remote)),
|
|
||||||
f.ToDisplayNameElastic(api.CloudinaryEncoder.FromStandardName(f, path.Base(remote)))),
|
|
||||||
SortBy: []search.SortByField{{"uploaded_at": "desc"}},
|
|
||||||
MaxResults: 2,
|
|
||||||
}
|
|
||||||
var results *admin.SearchResult
|
|
||||||
f.WaitEventuallyConsistent()
|
|
||||||
err := f.pacer.Call(func() (bool, error) {
|
|
||||||
var err1 error
|
|
||||||
results, err1 = f.cld.Admin.Search(ctx, searchParams)
|
|
||||||
if err1 == nil && results.TotalCount != len(results.Assets) {
|
|
||||||
err1 = errors.New("partial response so waiting for eventual consistency")
|
|
||||||
}
|
|
||||||
return shouldRetry(ctx, nil, err1)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, fs.ErrorObjectNotFound
|
|
||||||
}
|
|
||||||
if results.TotalCount == 0 || len(results.Assets) == 0 {
|
|
||||||
return nil, fs.ErrorObjectNotFound
|
|
||||||
}
|
|
||||||
asset := results.Assets[0]
|
|
||||||
|
|
||||||
o := &Object{
|
|
||||||
fs: f,
|
|
||||||
remote: remote,
|
|
||||||
size: int64(asset.Bytes),
|
|
||||||
modTime: asset.UploadedAt,
|
|
||||||
url: asset.SecureURL,
|
|
||||||
md5sum: asset.Etag,
|
|
||||||
publicID: asset.PublicID,
|
|
||||||
resourceType: asset.ResourceType,
|
|
||||||
deliveryType: asset.Type,
|
|
||||||
}
|
|
||||||
|
|
||||||
return o, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) getSuggestedPublicID(assetFolder string, displayName string, modTime time.Time) string {
|
|
||||||
payload := []byte(path.Join(assetFolder, displayName))
|
|
||||||
hash := blake3.Sum256(payload)
|
|
||||||
return hex.EncodeToString(hash[:])
|
|
||||||
}
|
|
||||||
|
|
||||||
// Put uploads content to Cloudinary
|
|
||||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
|
||||||
if src.Size() == 0 {
|
|
||||||
return nil, fs.ErrorCantUploadEmptyFiles
|
|
||||||
}
|
|
||||||
|
|
||||||
params := uploader.UploadParams{
|
|
||||||
UploadPreset: f.opt.UploadPreset,
|
|
||||||
}
|
|
||||||
|
|
||||||
updateObject := false
|
|
||||||
var modTime time.Time
|
|
||||||
for _, option := range options {
|
|
||||||
if updateOptions, ok := option.(*api.UpdateOptions); ok {
|
|
||||||
if updateOptions.PublicID != "" {
|
|
||||||
updateObject = true
|
|
||||||
params.Overwrite = SDKApi.Bool(true)
|
|
||||||
params.Invalidate = SDKApi.Bool(true)
|
|
||||||
params.PublicID = updateOptions.PublicID
|
|
||||||
params.ResourceType = updateOptions.ResourceType
|
|
||||||
params.Type = SDKApi.DeliveryType(updateOptions.DeliveryType)
|
|
||||||
params.AssetFolder = updateOptions.AssetFolder
|
|
||||||
params.DisplayName = updateOptions.DisplayName
|
|
||||||
modTime = src.ModTime(ctx)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !updateObject {
|
|
||||||
params.AssetFolder = f.FromStandardFullPath(cldPathDir(src.Remote()))
|
|
||||||
params.DisplayName = api.CloudinaryEncoder.FromStandardName(f, path.Base(src.Remote()))
|
|
||||||
// We want to conform to the unique asset ID of rclone, which is (asset_folder,display_name,last_modified).
|
|
||||||
// We also want to enable customers to choose their own public_id, in case duplicate names are not a crucial use case.
|
|
||||||
// Upload_presets that apply randomness to the public ID would not work well with rclone duplicate assets support.
|
|
||||||
params.FilenameOverride = f.getSuggestedPublicID(params.AssetFolder, params.DisplayName, src.ModTime(ctx))
|
|
||||||
}
|
|
||||||
uploadResult, err := f.cld.Upload.Upload(ctx, in, params)
|
|
||||||
f.lastCRUD = time.Now()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to upload to Cloudinary: %w", err)
|
|
||||||
}
|
|
||||||
if !updateObject {
|
|
||||||
modTime = uploadResult.CreatedAt
|
|
||||||
}
|
|
||||||
if uploadResult.Error.Message != "" {
|
|
||||||
return nil, errors.New(uploadResult.Error.Message)
|
|
||||||
}
|
|
||||||
|
|
||||||
o := &Object{
|
|
||||||
fs: f,
|
|
||||||
remote: src.Remote(),
|
|
||||||
size: int64(uploadResult.Bytes),
|
|
||||||
modTime: modTime,
|
|
||||||
url: uploadResult.SecureURL,
|
|
||||||
md5sum: uploadResult.Etag,
|
|
||||||
publicID: uploadResult.PublicID,
|
|
||||||
resourceType: uploadResult.ResourceType,
|
|
||||||
deliveryType: uploadResult.Type,
|
|
||||||
}
|
|
||||||
return o, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Precision of the remote
|
|
||||||
func (f *Fs) Precision() time.Duration {
|
|
||||||
return fs.ModTimeNotSupported
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hashes returns the supported hash sets
|
|
||||||
func (f *Fs) Hashes() hash.Set {
|
|
||||||
return hash.Set(hash.MD5)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mkdir creates empty folders
|
|
||||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
|
||||||
params := admin.CreateFolderParams{Folder: f.ToAssetFolderAPI(f.FromStandardFullPath(dir))}
|
|
||||||
res, err := f.cld.Admin.CreateFolder(ctx, params)
|
|
||||||
f.lastCRUD = time.Now()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if res.Error.Message != "" {
|
|
||||||
return errors.New(res.Error.Message)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rmdir deletes empty folders
|
|
||||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
|
||||||
// Additional test because Cloudinary will delete folders without
|
|
||||||
// assets, regardless of empty sub-folders
|
|
||||||
folder := f.ToAssetFolderAPI(f.FromStandardFullPath(dir))
|
|
||||||
folderParams := admin.SubFoldersParams{
|
|
||||||
Folder: folder,
|
|
||||||
MaxResults: 1,
|
|
||||||
}
|
|
||||||
results, err := f.cld.Admin.SubFolders(ctx, folderParams)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if results.TotalCount > 0 {
|
|
||||||
return fs.ErrorDirectoryNotEmpty
|
|
||||||
}
|
|
||||||
|
|
||||||
params := admin.DeleteFolderParams{Folder: folder}
|
|
||||||
res, err := f.cld.Admin.DeleteFolder(ctx, params)
|
|
||||||
f.lastCRUD = time.Now()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if res.Error.Message != "" {
|
|
||||||
if strings.HasPrefix(res.Error.Message, "Can't find folder with path") {
|
|
||||||
return fs.ErrorDirNotFound
|
|
||||||
}
|
|
||||||
|
|
||||||
return errors.New(res.Error.Message)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	420, // Too Many Requests (legacy)
	429, // Too Many Requests
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	if err != nil {
		tryAgain := "Try again on "
		if idx := strings.Index(err.Error(), tryAgain); idx != -1 {
			layout := "2006-01-02 15:04:05 UTC"
			dateStr := err.Error()[idx+len(tryAgain) : idx+len(tryAgain)+len(layout)]
			timestamp, err2 := time.Parse(layout, dateStr)
			if err2 == nil {
				return true, fserrors.NewErrorRetryAfter(time.Until(timestamp))
			}
		}

		fs.Debugf(nil, "Retrying API error %v", err)
		return true, err
	}

	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

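For context on the `Try again on ...` branch above, here is a minimal, self-contained sketch (not part of the diff) of how such a rate-limit message is converted into a retry delay with `time.Parse`; the sample error text is invented.

```go
package main

import (
	"fmt"
	"strings"
	"time"
)

func main() {
	// Invented example of a Cloudinary-style rate limit message.
	msg := "Rate limit reached. Try again on 2030-01-02 15:04:05 UTC."
	const tryAgain = "Try again on "
	const layout = "2006-01-02 15:04:05 UTC"

	if idx := strings.Index(msg, tryAgain); idx != -1 {
		dateStr := msg[idx+len(tryAgain) : idx+len(tryAgain)+len(layout)]
		if ts, err := time.Parse(layout, dateStr); err == nil {
			// A negative duration would mean the retry window has already passed.
			fmt.Println("retry after:", time.Until(ts))
		}
	}
}
```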
// ------------------------------------------------------------

// Hash returns the MD5 of an object
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
	if ty != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	return o.md5sum, nil
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.modTime
}

// Size of object in bytes
func (o *Object) Size() int64 {
	return o.size
}

// Storable returns if this object is storable
func (o *Object) Storable() bool {
	return true
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	return fs.ErrorCantSetModTime
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	var resp *http.Response
	opts := rest.Opts{
		Method:  "GET",
		RootURL: o.url,
		Options: options,
	}
	var offset int64
	var count int64
	var key string
	var value string
	fs.FixRangeOption(options, o.size)
	for _, option := range options {
		switch x := option.(type) {
		case *fs.RangeOption:
			offset, count = x.Decode(o.size)
			if count < 0 {
				count = o.size - offset
			}
			key, value = option.Header()
		case *fs.SeekOption:
			offset = x.Offset
			count = o.size - offset
			key, value = option.Header()
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}
	if key != "" && value != "" {
		opts.ExtraHeaders = make(map[string]string)
		opts.ExtraHeaders[key] = value
	}
	// Make sure that the asset is fully available
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(ctx, &opts)
		if err == nil {
			cl, clErr := strconv.Atoi(resp.Header.Get("content-length"))
			if clErr == nil && count == int64(cl) {
				return false, nil
			}
		}
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, fmt.Errorf("failed download of \"%s\": %w", o.url, err)
	}
	return resp.Body, err
}

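For readers unfamiliar with the option handling in `Open` above: a `RangeOption` or `SeekOption` ends up as a standard HTTP `Range` header on the GET request. A small illustration (not backend code) of the header value that corresponds to an offset and count:

```go
package main

import "fmt"

// rangeHeader returns the HTTP Range header value for reading count bytes
// starting at offset; count < 0 means "to the end of the object".
func rangeHeader(offset, count int64) string {
	if count < 0 {
		return fmt.Sprintf("bytes=%d-", offset)
	}
	return fmt.Sprintf("bytes=%d-%d", offset, offset+count-1)
}

func main() {
	fmt.Println(rangeHeader(1024, 4096)) // bytes=1024-5119
	fmt.Println(rangeHeader(1024, -1))   // bytes=1024-
}
```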
// Update the object with the contents of the io.Reader
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	options = append(options, &api.UpdateOptions{
		PublicID:     o.publicID,
		ResourceType: o.resourceType,
		DeliveryType: o.deliveryType,
		DisplayName:  api.CloudinaryEncoder.FromStandardName(o.fs, path.Base(o.Remote())),
		AssetFolder:  o.fs.FromStandardFullPath(cldPathDir(o.Remote())),
	})
	updatedObj, err := o.fs.Put(ctx, in, src, options...)
	if err != nil {
		return err
	}
	if uo, ok := updatedObj.(*Object); ok {
		o.size = uo.size
		o.modTime = time.Now() // Skipping uo.modTime because the API returns the create time
		o.url = uo.url
		o.md5sum = uo.md5sum
		o.publicID = uo.publicID
		o.resourceType = uo.resourceType
		o.deliveryType = uo.deliveryType
	}
	return nil
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	params := uploader.DestroyParams{
		PublicID:     o.publicID,
		ResourceType: o.resourceType,
		Type:         o.deliveryType,
	}
	res, dErr := o.fs.cld.Upload.Destroy(ctx, params)
	o.fs.lastCRUD = time.Now()
	if dErr != nil {
		return dErr
	}

	if res.Error.Message != "" {
		return errors.New(res.Error.Message)
	}

	if res.Result != "ok" {
		return errors.New(res.Result)
	}

	return nil
}
@@ -1,23 +0,0 @@
// Test Cloudinary filesystem interface

package cloudinary_test

import (
	"testing"

	"github.com/rclone/rclone/backend/cloudinary"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	name := "TestCloudinary"
	fstests.Run(t, &fstests.Opt{
		RemoteName:      name + ":",
		NilObject:       (*cloudinary.Object)(nil),
		SkipInvalidUTF8: true,
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "eventually_consistent_delay", Value: "7"},
		},
	})
}
File diff suppressed because it is too large
@@ -1,94 +0,0 @@
|
|||||||
package combine
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestAdjustmentDo(t *testing.T) {
|
|
||||||
for _, test := range []struct {
|
|
||||||
root string
|
|
||||||
mountpoint string
|
|
||||||
in string
|
|
||||||
want string
|
|
||||||
wantErr error
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
root: "",
|
|
||||||
mountpoint: "mountpoint",
|
|
||||||
in: "path/to/file.txt",
|
|
||||||
want: "mountpoint/path/to/file.txt",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
root: "mountpoint",
|
|
||||||
mountpoint: "mountpoint",
|
|
||||||
in: "path/to/file.txt",
|
|
||||||
want: "path/to/file.txt",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
root: "mountpoint/path",
|
|
||||||
mountpoint: "mountpoint",
|
|
||||||
in: "path/to/file.txt",
|
|
||||||
want: "to/file.txt",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
root: "mountpoint/path",
|
|
||||||
mountpoint: "mountpoint",
|
|
||||||
in: "wrongpath/to/file.txt",
|
|
||||||
want: "",
|
|
||||||
wantErr: errNotUnderRoot,
|
|
||||||
},
|
|
||||||
} {
|
|
||||||
what := fmt.Sprintf("%+v", test)
|
|
||||||
a := newAdjustment(test.root, test.mountpoint)
|
|
||||||
got, gotErr := a.do(test.in)
|
|
||||||
assert.Equal(t, test.wantErr, gotErr)
|
|
||||||
assert.Equal(t, test.want, got, what)
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAdjustmentUndo(t *testing.T) {
|
|
||||||
for _, test := range []struct {
|
|
||||||
root string
|
|
||||||
mountpoint string
|
|
||||||
in string
|
|
||||||
want string
|
|
||||||
wantErr error
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
root: "",
|
|
||||||
mountpoint: "mountpoint",
|
|
||||||
in: "mountpoint/path/to/file.txt",
|
|
||||||
want: "path/to/file.txt",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
root: "mountpoint",
|
|
||||||
mountpoint: "mountpoint",
|
|
||||||
in: "path/to/file.txt",
|
|
||||||
want: "path/to/file.txt",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
root: "mountpoint/path",
|
|
||||||
mountpoint: "mountpoint",
|
|
||||||
in: "to/file.txt",
|
|
||||||
want: "path/to/file.txt",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
root: "wrongmountpoint/path",
|
|
||||||
mountpoint: "mountpoint",
|
|
||||||
in: "to/file.txt",
|
|
||||||
want: "",
|
|
||||||
wantErr: errNotUnderRoot,
|
|
||||||
},
|
|
||||||
} {
|
|
||||||
what := fmt.Sprintf("%+v", test)
|
|
||||||
a := newAdjustment(test.root, test.mountpoint)
|
|
||||||
got, gotErr := a.undo(test.in)
|
|
||||||
assert.Equal(t, test.wantErr, gotErr)
|
|
||||||
assert.Equal(t, test.want, got, what)
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
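To make the expectations in the two test tables above easier to follow, here is a self-contained sketch of the path mapping they imply: `do` prefixes the upstream mountpoint and then re-expresses the path relative to the combine root, and `undo` is the inverse. This is only an illustration of the behaviour the tests encode, not the combine backend's actual implementation.

```go
package main

import (
	"errors"
	"fmt"
	"path"
	"strings"
)

var errNotUnderRoot = errors.New("file not under root")

type adjustment struct{ root, mountpoint string }

// do maps a path from an upstream (relative to its mountpoint) into a path
// relative to the combine root, as the TestAdjustmentDo table expects.
func (a adjustment) do(p string) (string, error) {
	full := path.Join(a.mountpoint, p)
	if a.root == "" {
		return full, nil
	}
	if !strings.HasPrefix(full+"/", a.root+"/") {
		return "", errNotUnderRoot
	}
	return strings.TrimPrefix(strings.TrimPrefix(full, a.root), "/"), nil
}

func main() {
	a := adjustment{root: "mountpoint/path", mountpoint: "mountpoint"}
	fmt.Println(a.do("path/to/file.txt"))      // to/file.txt <nil>
	fmt.Println(a.do("wrongpath/to/file.txt")) // "" file not under root
}
```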
@@ -1,92 +0,0 @@
|
|||||||
// Test Combine filesystem interface
|
|
||||||
package combine_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
_ "github.com/rclone/rclone/backend/local"
|
|
||||||
_ "github.com/rclone/rclone/backend/memory"
|
|
||||||
"github.com/rclone/rclone/fstest"
|
|
||||||
"github.com/rclone/rclone/fstest/fstests"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "OpenChunkWriter"}
|
|
||||||
unimplementableObjectMethods = []string{}
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestIntegration runs integration tests against the remote
|
|
||||||
func TestIntegration(t *testing.T) {
|
|
||||||
if *fstest.RemoteName == "" {
|
|
||||||
t.Skip("Skipping as -remote not set")
|
|
||||||
}
|
|
||||||
fstests.Run(t, &fstests.Opt{
|
|
||||||
RemoteName: *fstest.RemoteName,
|
|
||||||
UnimplementableFsMethods: unimplementableFsMethods,
|
|
||||||
UnimplementableObjectMethods: unimplementableObjectMethods,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLocal(t *testing.T) {
|
|
||||||
if *fstest.RemoteName != "" {
|
|
||||||
t.Skip("Skipping as -remote set")
|
|
||||||
}
|
|
||||||
dirs := MakeTestDirs(t, 3)
|
|
||||||
upstreams := "dir1=" + dirs[0] + " dir2=" + dirs[1] + " dir3=" + dirs[2]
|
|
||||||
name := "TestCombineLocal"
|
|
||||||
fstests.Run(t, &fstests.Opt{
|
|
||||||
RemoteName: name + ":dir1",
|
|
||||||
ExtraConfig: []fstests.ExtraConfigItem{
|
|
||||||
{Name: name, Key: "type", Value: "combine"},
|
|
||||||
{Name: name, Key: "upstreams", Value: upstreams},
|
|
||||||
},
|
|
||||||
QuickTestOK: true,
|
|
||||||
UnimplementableFsMethods: unimplementableFsMethods,
|
|
||||||
UnimplementableObjectMethods: unimplementableObjectMethods,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMemory(t *testing.T) {
|
|
||||||
if *fstest.RemoteName != "" {
|
|
||||||
t.Skip("Skipping as -remote set")
|
|
||||||
}
|
|
||||||
upstreams := "dir1=:memory:dir1 dir2=:memory:dir2 dir3=:memory:dir3"
|
|
||||||
name := "TestCombineMemory"
|
|
||||||
fstests.Run(t, &fstests.Opt{
|
|
||||||
RemoteName: name + ":dir1",
|
|
||||||
ExtraConfig: []fstests.ExtraConfigItem{
|
|
||||||
{Name: name, Key: "type", Value: "combine"},
|
|
||||||
{Name: name, Key: "upstreams", Value: upstreams},
|
|
||||||
},
|
|
||||||
QuickTestOK: true,
|
|
||||||
UnimplementableFsMethods: unimplementableFsMethods,
|
|
||||||
UnimplementableObjectMethods: unimplementableObjectMethods,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMixed(t *testing.T) {
|
|
||||||
if *fstest.RemoteName != "" {
|
|
||||||
t.Skip("Skipping as -remote set")
|
|
||||||
}
|
|
||||||
dirs := MakeTestDirs(t, 2)
|
|
||||||
upstreams := "dir1=" + dirs[0] + " dir2=" + dirs[1] + " dir3=:memory:dir3"
|
|
||||||
name := "TestCombineMixed"
|
|
||||||
fstests.Run(t, &fstests.Opt{
|
|
||||||
RemoteName: name + ":dir1",
|
|
||||||
ExtraConfig: []fstests.ExtraConfigItem{
|
|
||||||
{Name: name, Key: "type", Value: "combine"},
|
|
||||||
{Name: name, Key: "upstreams", Value: upstreams},
|
|
||||||
},
|
|
||||||
UnimplementableFsMethods: unimplementableFsMethods,
|
|
||||||
UnimplementableObjectMethods: unimplementableObjectMethods,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// MakeTestDirs makes directories in /tmp for testing
|
|
||||||
func MakeTestDirs(t *testing.T, n int) (dirs []string) {
|
|
||||||
for i := 1; i <= n; i++ {
|
|
||||||
dir := t.TempDir()
|
|
||||||
dirs = append(dirs, dir)
|
|
||||||
}
|
|
||||||
return dirs
|
|
||||||
}
|
|
||||||
1 backend/compress/.gitignore vendored
@@ -1 +0,0 @@
test
File diff suppressed because it is too large
@@ -1,75 +0,0 @@
|
|||||||
// Test Crypt filesystem interface
|
|
||||||
package compress
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
_ "github.com/rclone/rclone/backend/drive"
|
|
||||||
_ "github.com/rclone/rclone/backend/local"
|
|
||||||
_ "github.com/rclone/rclone/backend/s3"
|
|
||||||
_ "github.com/rclone/rclone/backend/swift"
|
|
||||||
"github.com/rclone/rclone/fstest"
|
|
||||||
"github.com/rclone/rclone/fstest/fstests"
|
|
||||||
)
|
|
||||||
|
|
||||||
var defaultOpt = fstests.Opt{
|
|
||||||
RemoteName: "TestCompress:",
|
|
||||||
NilObject: (*Object)(nil),
|
|
||||||
UnimplementableFsMethods: []string{
|
|
||||||
"OpenWriterAt",
|
|
||||||
"OpenChunkWriter",
|
|
||||||
"MergeDirs",
|
|
||||||
"DirCacheFlush",
|
|
||||||
"PutUnchecked",
|
|
||||||
"PutStream",
|
|
||||||
"UserInfo",
|
|
||||||
"Disconnect",
|
|
||||||
},
|
|
||||||
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
|
|
||||||
UnimplementableObjectMethods: []string{},
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestIntegration runs integration tests against the remote
|
|
||||||
func TestIntegration(t *testing.T) {
|
|
||||||
fstests.Run(t, &defaultOpt)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestRemoteGzip tests GZIP compression
|
|
||||||
func TestRemoteGzip(t *testing.T) {
|
|
||||||
if *fstest.RemoteName != "" {
|
|
||||||
t.Skip("Skipping as -remote set")
|
|
||||||
}
|
|
||||||
tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip")
|
|
||||||
name := "TestCompressGzip"
|
|
||||||
opt := defaultOpt
|
|
||||||
opt.RemoteName = name + ":"
|
|
||||||
opt.ExtraConfig = []fstests.ExtraConfigItem{
|
|
||||||
{Name: name, Key: "type", Value: "compress"},
|
|
||||||
{Name: name, Key: "remote", Value: tempdir},
|
|
||||||
{Name: name, Key: "mode", Value: "gzip"},
|
|
||||||
{Name: name, Key: "level", Value: "-1"},
|
|
||||||
}
|
|
||||||
opt.QuickTestOK = true
|
|
||||||
fstests.Run(t, &opt)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestRemoteZstd tests ZSTD compression
|
|
||||||
func TestRemoteZstd(t *testing.T) {
|
|
||||||
if *fstest.RemoteName != "" {
|
|
||||||
t.Skip("Skipping as -remote set")
|
|
||||||
}
|
|
||||||
tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-zstd")
|
|
||||||
name := "TestCompressZstd"
|
|
||||||
opt := defaultOpt
|
|
||||||
opt.RemoteName = name + ":"
|
|
||||||
opt.ExtraConfig = []fstests.ExtraConfigItem{
|
|
||||||
{Name: name, Key: "type", Value: "compress"},
|
|
||||||
{Name: name, Key: "remote", Value: tempdir},
|
|
||||||
{Name: name, Key: "mode", Value: "zstd"},
|
|
||||||
{Name: name, Key: "level", Value: "2"},
|
|
||||||
}
|
|
||||||
opt.QuickTestOK = true
|
|
||||||
fstests.Run(t, &opt)
|
|
||||||
}
|
|
||||||
@@ -1,207 +0,0 @@
|
|||||||
package compress
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"crypto/md5"
|
|
||||||
"encoding/hex"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
|
|
||||||
"github.com/buengese/sgzip"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/accounting"
|
|
||||||
"github.com/rclone/rclone/fs/chunkedreader"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
|
||||||
)
|
|
||||||
|
|
||||||
// gzipModeHandler implements compressionModeHandler for gzip
|
|
||||||
type gzipModeHandler struct{}
|
|
||||||
|
|
||||||
// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
|
|
||||||
// the configured threshold
|
|
||||||
func (g *gzipModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
|
|
||||||
var b bytes.Buffer
|
|
||||||
var n int64
|
|
||||||
w, err := sgzip.NewWriterLevel(&b, sgzip.DefaultCompression)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
n, err = io.Copy(w, r)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
err = w.Close()
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
ratio := float64(n) / float64(b.Len())
|
|
||||||
return ratio > minCompressionRatio, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// newObjectGetOriginalSize returns the original file size from the metadata
|
|
||||||
func (g *gzipModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
|
|
||||||
if meta.CompressionMetadataGzip == nil {
|
|
||||||
return 0, errors.New("missing gzip metadata")
|
|
||||||
}
|
|
||||||
return meta.CompressionMetadataGzip.Size, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
|
|
||||||
func (g *gzipModeHandler) openGetReadCloser(
|
|
||||||
ctx context.Context,
|
|
||||||
o *Object,
|
|
||||||
offset int64,
|
|
||||||
limit int64,
|
|
||||||
cr chunkedreader.ChunkedReader,
|
|
||||||
closer io.Closer,
|
|
||||||
options ...fs.OpenOption,
|
|
||||||
) (rc io.ReadCloser, err error) {
|
|
||||||
var file io.Reader
|
|
||||||
|
|
||||||
if offset != 0 {
|
|
||||||
file, err = sgzip.NewReaderAt(cr, o.meta.CompressionMetadataGzip, offset)
|
|
||||||
} else {
|
|
||||||
file, err = sgzip.NewReader(cr)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var fileReader io.Reader
|
|
||||||
if limit != -1 {
|
|
||||||
fileReader = io.LimitReader(file, limit)
|
|
||||||
} else {
|
|
||||||
fileReader = file
|
|
||||||
}
|
|
||||||
// Return a ReadCloser
|
|
||||||
return ReadCloserWrapper{Reader: fileReader, Closer: closer}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// processFileNameGetFileExtension returns the file extension for the given compression mode
|
|
||||||
func (g *gzipModeHandler) processFileNameGetFileExtension(compressionMode int) string {
|
|
||||||
if compressionMode == Gzip {
|
|
||||||
return gzFileExt
|
|
||||||
}
|
|
||||||
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
|
|
||||||
func (g *gzipModeHandler) putCompress(
|
|
||||||
ctx context.Context,
|
|
||||||
f *Fs,
|
|
||||||
in io.Reader,
|
|
||||||
src fs.ObjectInfo,
|
|
||||||
options []fs.OpenOption,
|
|
||||||
mimeType string,
|
|
||||||
) (fs.Object, *ObjectMetadata, error) {
|
|
||||||
// Unwrap reader accounting
|
|
||||||
in, wrap := accounting.UnWrap(in)
|
|
||||||
|
|
||||||
// Add the metadata hasher
|
|
||||||
metaHasher := md5.New()
|
|
||||||
in = io.TeeReader(in, metaHasher)
|
|
||||||
|
|
||||||
// Compress the file
|
|
||||||
pipeReader, pipeWriter := io.Pipe()
|
|
||||||
|
|
||||||
resultsGzip := make(chan compressionResult[sgzip.GzipMetadata])
|
|
||||||
go func() {
|
|
||||||
gz, err := sgzip.NewWriterLevel(pipeWriter, f.opt.CompressionLevel)
|
|
||||||
if err != nil {
|
|
||||||
resultsGzip <- compressionResult[sgzip.GzipMetadata]{err: err, meta: sgzip.GzipMetadata{}}
|
|
||||||
close(resultsGzip)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
_, err = io.Copy(gz, in)
|
|
||||||
gzErr := gz.Close()
|
|
||||||
if gzErr != nil && err == nil {
|
|
||||||
err = gzErr
|
|
||||||
}
|
|
||||||
closeErr := pipeWriter.Close()
|
|
||||||
if closeErr != nil && err == nil {
|
|
||||||
err = closeErr
|
|
||||||
}
|
|
||||||
resultsGzip <- compressionResult[sgzip.GzipMetadata]{err: err, meta: gz.MetaData()}
|
|
||||||
close(resultsGzip)
|
|
||||||
}()
|
|
||||||
|
|
||||||
	wrappedIn := wrap(bufio.NewReaderSize(pipeReader, bufferSize)) // Probably no longer needed as sgzip has its own buffering
|
|
||||||
|
|
||||||
// Find a hash the destination supports to compute a hash of
|
|
||||||
// the compressed data.
|
|
||||||
ht := f.Fs.Hashes().GetOne()
|
|
||||||
var hasher *hash.MultiHasher
|
|
||||||
var err error
|
|
||||||
if ht != hash.None {
|
|
||||||
// unwrap the accounting again
|
|
||||||
wrappedIn, wrap = accounting.UnWrap(wrappedIn)
|
|
||||||
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
// add the hasher and re-wrap the accounting
|
|
||||||
wrappedIn = io.TeeReader(wrappedIn, hasher)
|
|
||||||
wrappedIn = wrap(wrappedIn)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Transfer the data
|
|
||||||
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
|
|
||||||
if err != nil {
|
|
||||||
if o != nil {
|
|
||||||
if removeErr := o.Remove(ctx); removeErr != nil {
|
|
||||||
fs.Errorf(o, "Failed to remove partially transferred object: %v", removeErr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
// Check whether we got an error during compression
|
|
||||||
result := <-resultsGzip
|
|
||||||
if result.err != nil {
|
|
||||||
if o != nil {
|
|
||||||
if removeErr := o.Remove(ctx); removeErr != nil {
|
|
||||||
fs.Errorf(o, "Failed to remove partially compressed object: %v", removeErr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil, nil, result.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Generate metadata
|
|
||||||
meta := g.newMetadata(result.meta.Size, f.mode, result.meta, hex.EncodeToString(metaHasher.Sum(nil)), mimeType)
|
|
||||||
|
|
||||||
// Check the hashes of the compressed data if we were comparing them
|
|
||||||
if ht != hash.None && hasher != nil {
|
|
||||||
err = f.verifyObjectHash(ctx, o, hasher, ht)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return o, meta, nil
|
|
||||||
}
|
|
||||||
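The `putCompress` implementation above streams data through an `io.Pipe` so compression and upload run concurrently, with the compression error reported over a channel. A stripped-down, self-contained sketch of that pattern using the standard library gzip writer (rather than the backend's `sgzip`) looks like this:

```go
package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader(strings.Repeat("rclone compress example ", 100))

	pr, pw := io.Pipe()
	errs := make(chan error, 1)

	// Compress in the background while the consumer reads from pr.
	go func() {
		gz := gzip.NewWriter(pw)
		_, err := io.Copy(gz, src)
		if cerr := gz.Close(); err == nil {
			err = cerr
		}
		pw.CloseWithError(err) // nil err closes the pipe normally (EOF)
		errs <- err
	}()

	// Here the backend would upload pr; we just count the compressed bytes.
	n, err := io.Copy(io.Discard, pr)
	if err == nil {
		err = <-errs
	}
	fmt.Println("compressed bytes:", n, "err:", err)
}
```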
|
|
||||||
// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
|
|
||||||
func (g *gzipModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
|
|
||||||
return o, g.newMetadata(o.Size(), mode, sgzip.GzipMetadata{}, hex.EncodeToString(sum), mimeType), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
|
|
||||||
// Warning: This function panics if cmeta is not of the expected type.
|
|
||||||
func (g *gzipModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
|
|
||||||
meta, ok := cmeta.(sgzip.GzipMetadata)
|
|
||||||
if !ok {
|
|
||||||
panic("invalid cmeta type: expected sgzip.GzipMetadata")
|
|
||||||
}
|
|
||||||
|
|
||||||
objMeta := new(ObjectMetadata)
|
|
||||||
objMeta.Size = size
|
|
||||||
objMeta.Mode = mode
|
|
||||||
objMeta.CompressionMetadataGzip = &meta
|
|
||||||
objMeta.CompressionMetadataZstd = nil
|
|
||||||
objMeta.MD5 = md5
|
|
||||||
objMeta.MimeType = mimeType
|
|
||||||
|
|
||||||
return objMeta
|
|
||||||
}
|
|
||||||
@@ -1,327 +0,0 @@
|
|||||||
package compress
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"runtime"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
szstd "github.com/a1ex3/zstd-seekable-format-go/pkg"
|
|
||||||
"github.com/klauspost/compress/zstd"
|
|
||||||
)
|
|
||||||
|
|
||||||
const szstdChunkSize int = 1 << 20 // 1 MiB chunk size
|
|
||||||
|
|
||||||
// SzstdMetadata holds metadata for szstd compressed files.
|
|
||||||
type SzstdMetadata struct {
|
|
||||||
BlockSize int // BlockSize is the size of the blocks in the zstd file
|
|
||||||
Size int64 // Size is the uncompressed size of the file
|
|
||||||
BlockData []uint32 // BlockData is the block data for the zstd file, used for seeking
|
|
||||||
}
|
|
||||||
|
|
||||||
// SzstdWriter is a writer that compresses data in szstd format.
|
|
||||||
type SzstdWriter struct {
|
|
||||||
enc *zstd.Encoder
|
|
||||||
w szstd.ConcurrentWriter
|
|
||||||
metadata SzstdMetadata
|
|
||||||
mu sync.Mutex
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWriterSzstd creates a new szstd writer with the specified options.
|
|
||||||
// It initializes the szstd writer with a zstd encoder and returns a pointer to the SzstdWriter.
|
|
||||||
// The writer can be used to write data in chunks, and it will automatically handle block sizes and metadata.
|
|
||||||
func NewWriterSzstd(w io.Writer, opts ...zstd.EOption) (*SzstdWriter, error) {
|
|
||||||
encoder, err := zstd.NewWriter(nil, opts...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
sw, err := szstd.NewWriter(w, encoder)
|
|
||||||
if err != nil {
|
|
||||||
if err := encoder.Close(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &SzstdWriter{
|
|
||||||
enc: encoder,
|
|
||||||
w: sw,
|
|
||||||
metadata: SzstdMetadata{
|
|
||||||
BlockSize: szstdChunkSize,
|
|
||||||
Size: 0,
|
|
||||||
},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write writes data to the szstd writer in chunks of szstdChunkSize.
|
|
||||||
// It handles the block size and metadata updates automatically.
|
|
||||||
func (w *SzstdWriter) Write(p []byte) (int, error) {
|
|
||||||
if len(p) == 0 {
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if w.metadata.BlockData == nil {
|
|
||||||
numBlocks := (len(p) + w.metadata.BlockSize - 1) / w.metadata.BlockSize
|
|
||||||
w.metadata.BlockData = make([]uint32, 1, numBlocks+1)
|
|
||||||
w.metadata.BlockData[0] = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
start := 0
|
|
||||||
total := len(p)
|
|
||||||
|
|
||||||
var writerFunc szstd.FrameSource = func() ([]byte, error) {
|
|
||||||
if start >= total {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
end := min(start+w.metadata.BlockSize, total)
|
|
||||||
chunk := p[start:end]
|
|
||||||
size := end - start
|
|
||||||
|
|
||||||
w.mu.Lock()
|
|
||||||
w.metadata.Size += int64(size)
|
|
||||||
w.mu.Unlock()
|
|
||||||
|
|
||||||
start = end
|
|
||||||
return chunk, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// write sizes of compressed blocks in the callback
|
|
||||||
err := w.w.WriteMany(context.Background(), writerFunc,
|
|
||||||
szstd.WithWriteCallback(func(size uint32) {
|
|
||||||
w.mu.Lock()
|
|
||||||
lastOffset := w.metadata.BlockData[len(w.metadata.BlockData)-1]
|
|
||||||
w.metadata.BlockData = append(w.metadata.BlockData, lastOffset+size)
|
|
||||||
w.mu.Unlock()
|
|
||||||
}),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return total, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the SzstdWriter and its underlying encoder.
|
|
||||||
func (w *SzstdWriter) Close() error {
|
|
||||||
if err := w.w.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := w.enc.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetMetadata returns the metadata of the szstd writer.
|
|
||||||
func (w *SzstdWriter) GetMetadata() SzstdMetadata {
|
|
||||||
return w.metadata
|
|
||||||
}
|
|
||||||
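A short sketch of how the `SzstdWriter` defined above could be driven, assuming it sits in the same package and that `bytes` and `github.com/klauspost/compress/zstd` are imported; it compresses a buffer into the seekable format and then inspects the block index that `SzstdReaderAt` later seeks with.

```go
// Sketch only: exercise SzstdWriter end to end inside this package.
func exampleSzstdRoundTrip(data []byte) (SzstdMetadata, error) {
	var buf bytes.Buffer
	w, err := NewWriterSzstd(&buf, zstd.WithEncoderLevel(zstd.SpeedDefault))
	if err != nil {
		return SzstdMetadata{}, err
	}
	if _, err := w.Write(data); err != nil {
		return SzstdMetadata{}, err
	}
	if err := w.Close(); err != nil {
		return SzstdMetadata{}, err
	}
	// BlockData holds cumulative compressed offsets (starting at 0), with one
	// new entry appended per 1 MiB uncompressed block that was written.
	return w.GetMetadata(), nil
}
```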
|
|
||||||
// SzstdReaderAt is a reader that allows random access in szstd compressed data.
|
|
||||||
type SzstdReaderAt struct {
|
|
||||||
r szstd.Reader
|
|
||||||
decoder *zstd.Decoder
|
|
||||||
metadata *SzstdMetadata
|
|
||||||
pos int64
|
|
||||||
mu sync.Mutex
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewReaderAtSzstd creates a new SzstdReaderAt at the specified io.ReadSeeker.
|
|
||||||
func NewReaderAtSzstd(rs io.ReadSeeker, meta *SzstdMetadata, offset int64, opts ...zstd.DOption) (*SzstdReaderAt, error) {
|
|
||||||
decoder, err := zstd.NewReader(nil, opts...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
r, err := szstd.NewReader(rs, decoder)
|
|
||||||
if err != nil {
|
|
||||||
decoder.Close()
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
sr := &SzstdReaderAt{
|
|
||||||
r: r,
|
|
||||||
decoder: decoder,
|
|
||||||
metadata: meta,
|
|
||||||
pos: 0,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set initial position to the provided offset
|
|
||||||
if _, err := sr.Seek(offset, io.SeekStart); err != nil {
|
|
||||||
if err := sr.Close(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return sr, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Seek sets the offset for the next Read.
|
|
||||||
func (s *SzstdReaderAt) Seek(offset int64, whence int) (int64, error) {
|
|
||||||
s.mu.Lock()
|
|
||||||
defer s.mu.Unlock()
|
|
||||||
|
|
||||||
pos, err := s.r.Seek(offset, whence)
|
|
||||||
if err == nil {
|
|
||||||
s.pos = pos
|
|
||||||
}
|
|
||||||
return pos, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *SzstdReaderAt) Read(p []byte) (int, error) {
|
|
||||||
s.mu.Lock()
|
|
||||||
defer s.mu.Unlock()
|
|
||||||
|
|
||||||
n, err := s.r.Read(p)
|
|
||||||
if err == nil {
|
|
||||||
s.pos += int64(n)
|
|
||||||
}
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReadAt reads data at the specified offset.
|
|
||||||
func (s *SzstdReaderAt) ReadAt(p []byte, off int64) (int, error) {
|
|
||||||
if off < 0 {
|
|
||||||
return 0, errors.New("invalid offset")
|
|
||||||
}
|
|
||||||
if off >= s.metadata.Size {
|
|
||||||
return 0, io.EOF
|
|
||||||
}
|
|
||||||
|
|
||||||
endOff := min(off+int64(len(p)), s.metadata.Size)
|
|
||||||
|
|
||||||
// Find all blocks covered by the range
|
|
||||||
type blockInfo struct {
|
|
||||||
index int // Block index
|
|
||||||
offsetInBlock int64 // Offset within the block for starting reading
|
|
||||||
bytesToRead int64 // How many bytes to read from this block
|
|
||||||
}
|
|
||||||
|
|
||||||
var blocks []blockInfo
|
|
||||||
uncompressedOffset := int64(0)
|
|
||||||
currentOff := off
|
|
||||||
|
|
||||||
for i := 0; i < len(s.metadata.BlockData)-1; i++ {
|
|
||||||
blockUncompressedEnd := min(uncompressedOffset+int64(s.metadata.BlockSize), s.metadata.Size)
|
|
||||||
|
|
||||||
if currentOff < blockUncompressedEnd && endOff > uncompressedOffset {
|
|
||||||
offsetInBlock := max(0, currentOff-uncompressedOffset)
|
|
||||||
bytesToRead := min(blockUncompressedEnd-uncompressedOffset-offsetInBlock, endOff-currentOff)
|
|
||||||
|
|
||||||
blocks = append(blocks, blockInfo{
|
|
||||||
index: i,
|
|
||||||
offsetInBlock: offsetInBlock,
|
|
||||||
bytesToRead: bytesToRead,
|
|
||||||
})
|
|
||||||
|
|
||||||
currentOff += bytesToRead
|
|
||||||
if currentOff >= endOff {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
uncompressedOffset = blockUncompressedEnd
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(blocks) == 0 {
|
|
||||||
return 0, io.EOF
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parallel block decoding
|
|
||||||
type decodeResult struct {
|
|
||||||
index int
|
|
||||||
data []byte
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
resultCh := make(chan decodeResult, len(blocks))
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
sem := make(chan struct{}, runtime.NumCPU())
|
|
||||||
|
|
||||||
for _, block := range blocks {
|
|
||||||
wg.Add(1)
|
|
||||||
go func(block blockInfo) {
|
|
||||||
defer wg.Done()
|
|
||||||
sem <- struct{}{}
|
|
||||||
defer func() { <-sem }()
|
|
||||||
|
|
||||||
startOffset := int64(s.metadata.BlockData[block.index])
|
|
||||||
endOffset := int64(s.metadata.BlockData[block.index+1])
|
|
||||||
compressedSize := endOffset - startOffset
|
|
||||||
|
|
||||||
compressed := make([]byte, compressedSize)
|
|
||||||
_, err := s.r.ReadAt(compressed, startOffset)
|
|
||||||
if err != nil && err != io.EOF {
|
|
||||||
resultCh <- decodeResult{index: block.index, err: err}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
decoded, err := s.decoder.DecodeAll(compressed, nil)
|
|
||||||
if err != nil {
|
|
||||||
resultCh <- decodeResult{index: block.index, err: err}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
resultCh <- decodeResult{index: block.index, data: decoded, err: nil}
|
|
||||||
}(block)
|
|
||||||
}
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
wg.Wait()
|
|
||||||
close(resultCh)
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Collect results in block index order
|
|
||||||
totalRead := 0
|
|
||||||
results := make(map[int]decodeResult)
|
|
||||||
expected := len(blocks)
|
|
||||||
minIndex := blocks[0].index
|
|
||||||
|
|
||||||
for res := range resultCh {
|
|
||||||
results[res.index] = res
|
|
||||||
for {
|
|
||||||
if result, ok := results[minIndex]; ok {
|
|
||||||
if result.err != nil {
|
|
||||||
return 0, result.err
|
|
||||||
}
|
|
||||||
// find the corresponding blockInfo
|
|
||||||
var blk blockInfo
|
|
||||||
for _, b := range blocks {
|
|
||||||
if b.index == result.index {
|
|
||||||
blk = b
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
start := blk.offsetInBlock
|
|
||||||
end := start + blk.bytesToRead
|
|
||||||
copy(p[totalRead:totalRead+int(blk.bytesToRead)], result.data[start:end])
|
|
||||||
totalRead += int(blk.bytesToRead)
|
|
||||||
minIndex++
|
|
||||||
if minIndex-blocks[0].index >= len(blocks) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(results) == expected && minIndex-blocks[0].index >= len(blocks) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return totalRead, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the SzstdReaderAt and underlying decoder.
|
|
||||||
func (s *SzstdReaderAt) Close() error {
|
|
||||||
if err := s.r.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
s.decoder.Close()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -1,65 +0,0 @@
|
|||||||
package compress
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/chunkedreader"
|
|
||||||
)
|
|
||||||
|
|
||||||
// uncompressedModeHandler implements compressionModeHandler for uncompressed files
|
|
||||||
type uncompressedModeHandler struct{}
|
|
||||||
|
|
||||||
// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
|
|
||||||
// the configured threshold
|
|
||||||
func (u *uncompressedModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// newObjectGetOriginalSize returns the original file size from the metadata
|
|
||||||
func (u *uncompressedModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
|
|
||||||
func (u *uncompressedModeHandler) openGetReadCloser(
|
|
||||||
ctx context.Context,
|
|
||||||
o *Object,
|
|
||||||
offset int64,
|
|
||||||
limit int64,
|
|
||||||
cr chunkedreader.ChunkedReader,
|
|
||||||
closer io.Closer,
|
|
||||||
options ...fs.OpenOption,
|
|
||||||
) (rc io.ReadCloser, err error) {
|
|
||||||
return o.Object.Open(ctx, options...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// processFileNameGetFileExtension returns the file extension for the given compression mode
|
|
||||||
func (u *uncompressedModeHandler) processFileNameGetFileExtension(compressionMode int) string {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
|
|
||||||
func (u *uncompressedModeHandler) putCompress(
|
|
||||||
ctx context.Context,
|
|
||||||
f *Fs,
|
|
||||||
in io.Reader,
|
|
||||||
src fs.ObjectInfo,
|
|
||||||
options []fs.OpenOption,
|
|
||||||
mimeType string,
|
|
||||||
) (fs.Object, *ObjectMetadata, error) {
|
|
||||||
return nil, nil, fmt.Errorf("unsupported compression mode %d", f.mode)
|
|
||||||
}
|
|
||||||
|
|
||||||
// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
|
|
||||||
func (u *uncompressedModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
|
|
||||||
return nil, nil, fmt.Errorf("unsupported compression mode %d", Uncompressed)
|
|
||||||
}
|
|
||||||
|
|
||||||
// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
|
|
||||||
// Warning: This function panics if cmeta is not of the expected type.
|
|
||||||
func (u *uncompressedModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -1,65 +0,0 @@
|
|||||||
package compress
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/chunkedreader"
|
|
||||||
)
|
|
||||||
|
|
||||||
// unknownModeHandler implements compressionModeHandler for unknown compression types
|
|
||||||
type unknownModeHandler struct{}
|
|
||||||
|
|
||||||
// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
|
|
||||||
// the configured threshold
|
|
||||||
func (unk *unknownModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
|
|
||||||
return false, fmt.Errorf("unknown compression mode %d", compressionMode)
|
|
||||||
}
|
|
||||||
|
|
||||||
// newObjectGetOriginalSize returns the original file size from the metadata
|
|
||||||
func (unk *unknownModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
|
|
||||||
func (unk *unknownModeHandler) openGetReadCloser(
|
|
||||||
ctx context.Context,
|
|
||||||
o *Object,
|
|
||||||
offset int64,
|
|
||||||
limit int64,
|
|
||||||
cr chunkedreader.ChunkedReader,
|
|
||||||
closer io.Closer,
|
|
||||||
options ...fs.OpenOption,
|
|
||||||
) (rc io.ReadCloser, err error) {
|
|
||||||
return nil, fmt.Errorf("unknown compression mode %d", o.meta.Mode)
|
|
||||||
}
|
|
||||||
|
|
||||||
// processFileNameGetFileExtension returns the file extension for the given compression mode
|
|
||||||
func (unk *unknownModeHandler) processFileNameGetFileExtension(compressionMode int) string {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
|
|
||||||
func (unk *unknownModeHandler) putCompress(
|
|
||||||
ctx context.Context,
|
|
||||||
f *Fs,
|
|
||||||
in io.Reader,
|
|
||||||
src fs.ObjectInfo,
|
|
||||||
options []fs.OpenOption,
|
|
||||||
mimeType string,
|
|
||||||
) (fs.Object, *ObjectMetadata, error) {
|
|
||||||
return nil, nil, fmt.Errorf("unknown compression mode %d", f.mode)
|
|
||||||
}
|
|
||||||
|
|
||||||
// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
|
|
||||||
func (unk *unknownModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
|
|
||||||
	return nil, nil, fmt.Errorf("unknown compression mode %d", mode)
|
|
||||||
}
|
|
||||||
|
|
||||||
// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
|
|
||||||
// Warning: This function panics if cmeta is not of the expected type.
|
|
||||||
func (unk *unknownModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -1,192 +0,0 @@
|
|||||||
package compress
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"crypto/md5"
|
|
||||||
"encoding/hex"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
|
|
||||||
"github.com/klauspost/compress/zstd"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/accounting"
|
|
||||||
"github.com/rclone/rclone/fs/chunkedreader"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
|
||||||
)
|
|
||||||
|
|
||||||
// zstdModeHandler implements compressionModeHandler for zstd
|
|
||||||
type zstdModeHandler struct{}
|
|
||||||
|
|
||||||
// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
|
|
||||||
// the configured threshold
|
|
||||||
func (z *zstdModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
|
|
||||||
var b bytes.Buffer
|
|
||||||
var n int64
|
|
||||||
w, err := NewWriterSzstd(&b, zstd.WithEncoderLevel(zstd.SpeedDefault))
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
n, err = io.Copy(w, r)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
err = w.Close()
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
ratio := float64(n) / float64(b.Len())
|
|
||||||
return ratio > minCompressionRatio, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// newObjectGetOriginalSize returns the original file size from the metadata
|
|
||||||
func (z *zstdModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
|
|
||||||
if meta.CompressionMetadataZstd == nil {
|
|
||||||
return 0, errors.New("missing zstd metadata")
|
|
||||||
}
|
|
||||||
return meta.CompressionMetadataZstd.Size, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
|
|
||||||
func (z *zstdModeHandler) openGetReadCloser(
|
|
||||||
ctx context.Context,
|
|
||||||
o *Object,
|
|
||||||
offset int64,
|
|
||||||
limit int64,
|
|
||||||
cr chunkedreader.ChunkedReader,
|
|
||||||
closer io.Closer,
|
|
||||||
options ...fs.OpenOption,
|
|
||||||
) (rc io.ReadCloser, err error) {
|
|
||||||
var file io.Reader
|
|
||||||
|
|
||||||
if offset != 0 {
|
|
||||||
file, err = NewReaderAtSzstd(cr, o.meta.CompressionMetadataZstd, offset)
|
|
||||||
} else {
|
|
||||||
file, err = zstd.NewReader(cr)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var fileReader io.Reader
|
|
||||||
if limit != -1 {
|
|
||||||
fileReader = io.LimitReader(file, limit)
|
|
||||||
} else {
|
|
||||||
fileReader = file
|
|
||||||
}
|
|
||||||
// Return a ReadCloser
|
|
||||||
return ReadCloserWrapper{Reader: fileReader, Closer: closer}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// processFileNameGetFileExtension returns the file extension for the given compression mode
|
|
||||||
func (z *zstdModeHandler) processFileNameGetFileExtension(compressionMode int) string {
|
|
||||||
if compressionMode == Zstd {
|
|
||||||
return zstdFileExt
|
|
||||||
}
|
|
||||||
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
|
|
||||||
func (z *zstdModeHandler) putCompress(
|
|
||||||
ctx context.Context,
|
|
||||||
f *Fs,
|
|
||||||
in io.Reader,
|
|
||||||
src fs.ObjectInfo,
|
|
||||||
options []fs.OpenOption,
|
|
||||||
mimeType string,
|
|
||||||
) (fs.Object, *ObjectMetadata, error) {
|
|
||||||
// Unwrap reader accounting
|
|
||||||
in, wrap := accounting.UnWrap(in)
|
|
||||||
|
|
||||||
// Add the metadata hasher
|
|
||||||
metaHasher := md5.New()
|
|
||||||
in = io.TeeReader(in, metaHasher)
|
|
||||||
|
|
||||||
// Compress the file
|
|
||||||
pipeReader, pipeWriter := io.Pipe()
|
|
||||||
|
|
||||||
resultsZstd := make(chan compressionResult[SzstdMetadata])
|
|
||||||
go func() {
|
|
||||||
writer, err := NewWriterSzstd(pipeWriter, zstd.WithEncoderLevel(zstd.EncoderLevel(f.opt.CompressionLevel)))
|
|
||||||
if err != nil {
|
|
||||||
resultsZstd <- compressionResult[SzstdMetadata]{err: err}
|
|
||||||
close(resultsZstd)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
_, err = io.Copy(writer, in)
|
|
||||||
if wErr := writer.Close(); wErr != nil && err == nil {
|
|
||||||
err = wErr
|
|
||||||
}
|
|
||||||
if cErr := pipeWriter.Close(); cErr != nil && err == nil {
|
|
||||||
err = cErr
|
|
||||||
}
|
|
||||||
|
|
||||||
resultsZstd <- compressionResult[SzstdMetadata]{err: err, meta: writer.GetMetadata()}
|
|
||||||
close(resultsZstd)
|
|
||||||
}()
|
|
||||||
|
|
||||||
wrappedIn := wrap(bufio.NewReaderSize(pipeReader, bufferSize))
|
|
||||||
|
|
||||||
ht := f.Fs.Hashes().GetOne()
|
|
||||||
var hasher *hash.MultiHasher
|
|
||||||
var err error
|
|
||||||
if ht != hash.None {
|
|
||||||
wrappedIn, wrap = accounting.UnWrap(wrappedIn)
|
|
||||||
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
wrappedIn = io.TeeReader(wrappedIn, hasher)
|
|
||||||
wrappedIn = wrap(wrappedIn)
|
|
||||||
}
|
|
||||||
|
|
||||||
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
result := <-resultsZstd
|
|
||||||
if result.err != nil {
|
|
||||||
if o != nil {
|
|
||||||
_ = o.Remove(ctx)
|
|
||||||
}
|
|
||||||
return nil, nil, result.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build metadata using uncompressed size for filename
|
|
||||||
meta := z.newMetadata(result.meta.Size, f.mode, result.meta, hex.EncodeToString(metaHasher.Sum(nil)), mimeType)
|
|
||||||
if ht != hash.None && hasher != nil {
|
|
||||||
err = f.verifyObjectHash(ctx, o, hasher, ht)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return o, meta, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
|
|
||||||
func (z *zstdModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
|
|
||||||
return o, z.newMetadata(o.Size(), mode, SzstdMetadata{}, hex.EncodeToString(sum), mimeType), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
|
|
||||||
// Warning: This function panics if cmeta is not of the expected type.
|
|
||||||
func (z *zstdModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
|
|
||||||
meta, ok := cmeta.(SzstdMetadata)
|
|
||||||
if !ok {
|
|
||||||
panic("invalid cmeta type: expected SzstdMetadata")
|
|
||||||
}
|
|
||||||
|
|
||||||
objMeta := new(ObjectMetadata)
|
|
||||||
objMeta.Size = size
|
|
||||||
objMeta.Mode = mode
|
|
||||||
objMeta.CompressionMetadataGzip = nil
|
|
||||||
objMeta.CompressionMetadataZstd = &meta
|
|
||||||
objMeta.MD5 = md5
|
|
||||||
objMeta.MimeType = mimeType
|
|
||||||
|
|
||||||
return objMeta
|
|
||||||
}
|
|
||||||
@@ -7,22 +7,17 @@ import (
 	gocipher "crypto/cipher"
 	"crypto/rand"
 	"encoding/base32"
-	"encoding/base64"
-	"errors"
 	"fmt"
 	"io"
 	"strconv"
 	"strings"
 	"sync"
-	"time"
 	"unicode/utf8"

-	"github.com/Max-Sum/base32768"
+	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/crypt/pkcs7"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
-	"github.com/rclone/rclone/lib/readers"
-	"github.com/rclone/rclone/lib/version"
 	"github.com/rfjakob/eme"
 	"golang.org/x/crypto/nacl/secretbox"
 	"golang.org/x/crypto/scrypt"
@@ -38,6 +33,7 @@ const (
 	blockHeaderSize = secretbox.Overhead
 	blockDataSize   = 64 * 1024
 	blockSize       = blockHeaderSize + blockDataSize
+	encryptedSuffix = ".bin" // when file name encryption is off we add this suffix to make sure the cloud provider doesn't process the file
 )

 // Errors returned by cipher
@@ -53,9 +49,8 @@ var (
 	ErrorEncryptedBadBlock  = errors.New("failed to authenticate decrypted block - bad password?")
 	ErrorBadBase32Encoding  = errors.New("bad base32 filename encoding")
 	ErrorFileClosed         = errors.New("file already closed")
-	ErrorNotAnEncryptedFile = errors.New("not an encrypted file - does not match suffix")
+	ErrorNotAnEncryptedFile = errors.New("not an encrypted file - no \"" + encryptedSuffix + "\" suffix")
 	ErrorBadSeek            = errors.New("Seek beyond end of file")
-	ErrorSuffixMissingDot   = errors.New("suffix config setting should include a '.'")
 	defaultSalt             = []byte{0xA8, 0x0D, 0xF4, 0x3A, 0x8F, 0xBD, 0x03, 0x08, 0xA7, 0xCA, 0xB8, 0x3E, 0x58, 0x1F, 0x86, 0xB1}
 	obfuscQuoteRune         = '!'
 )
@@ -97,12 +92,12 @@ func NewNameEncryptionMode(s string) (mode NameEncryptionMode, err error) {
 	case "obfuscate":
 		mode = NameEncryptionObfuscated
 	default:
-		err = fmt.Errorf("unknown file name encryption mode %q", s)
+		err = errors.Errorf("Unknown file name encryption mode %q", s)
 	}
 	return mode, err
 }

-// String turns mode into a human-readable string
+// String turns mode into a human readable string
 func (mode NameEncryptionMode) String() (out string) {
 	switch mode {
 	case NameEncryptionOff:
@@ -117,83 +112,27 @@ func (mode NameEncryptionMode) String() (out string) {
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
// fileNameEncoding are the encoding methods dealing with encrypted file names
|
|
||||||
type fileNameEncoding interface {
|
|
||||||
EncodeToString(src []byte) string
|
|
||||||
DecodeString(s string) ([]byte, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// caseInsensitiveBase32Encoding defines a file name encoding
|
|
||||||
// using a modified version of standard base32 as described in
|
|
||||||
// RFC4648
|
|
||||||
//
|
|
||||||
// The standard encoding is modified in two ways
|
|
||||||
// - it becomes lower case (no-one likes upper case filenames!)
|
|
||||||
// - we strip the padding character `=`
|
|
||||||
type caseInsensitiveBase32Encoding struct{}
|
|
||||||
|
|
||||||
// EncodeToString encodes a string using the modified version of
|
|
||||||
// base32 encoding.
|
|
||||||
func (caseInsensitiveBase32Encoding) EncodeToString(src []byte) string {
|
|
||||||
encoded := base32.HexEncoding.EncodeToString(src)
|
|
||||||
encoded = strings.TrimRight(encoded, "=")
|
|
||||||
return strings.ToLower(encoded)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeString decodes a string as encoded by EncodeToString
|
|
||||||
func (caseInsensitiveBase32Encoding) DecodeString(s string) ([]byte, error) {
|
|
||||||
if strings.HasSuffix(s, "=") {
|
|
||||||
return nil, ErrorBadBase32Encoding
|
|
||||||
}
|
|
||||||
// First figure out how many padding characters to add
|
|
||||||
roundUpToMultipleOf8 := (len(s) + 7) &^ 7
|
|
||||||
equals := roundUpToMultipleOf8 - len(s)
|
|
||||||
s = strings.ToUpper(s) + "========"[:equals]
|
|
||||||
return base32.HexEncoding.DecodeString(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewNameEncoding creates a NameEncoding from a string
|
|
||||||
func NewNameEncoding(s string) (enc fileNameEncoding, err error) {
|
|
||||||
s = strings.ToLower(s)
|
|
||||||
switch s {
|
|
||||||
case "base32":
|
|
||||||
enc = caseInsensitiveBase32Encoding{}
|
|
||||||
case "base64":
|
|
||||||
enc = base64.RawURLEncoding
|
|
||||||
case "base32768":
|
|
||||||
enc = base32768.SafeEncoding
|
|
||||||
default:
|
|
||||||
err = fmt.Errorf("unknown file name encoding mode %q", s)
|
|
||||||
}
|
|
||||||
return enc, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Cipher defines an encoding and decoding cipher for the crypt backend
|
// Cipher defines an encoding and decoding cipher for the crypt backend
|
||||||
type Cipher struct {
|
type Cipher struct {
|
||||||
dataKey [32]byte // Key for secretbox
|
dataKey [32]byte // Key for secretbox
|
||||||
nameKey [32]byte // 16,24 or 32 bytes
|
nameKey [32]byte // 16,24 or 32 bytes
|
||||||
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
|
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
|
||||||
block gocipher.Block
|
block gocipher.Block
|
||||||
mode NameEncryptionMode
|
mode NameEncryptionMode
|
||||||
fileNameEnc fileNameEncoding
|
buffers sync.Pool // encrypt/decrypt buffers
|
||||||
buffers sync.Pool // encrypt/decrypt buffers
|
cryptoRand io.Reader // read crypto random numbers from here
|
||||||
cryptoRand io.Reader // read crypto random numbers from here
|
dirNameEncrypt bool
|
||||||
dirNameEncrypt bool
|
|
||||||
passBadBlocks bool // if set passed bad blocks as zeroed blocks
|
|
||||||
encryptedSuffix string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// newCipher initialises the cipher. If salt is "" then it uses a built in salt val
|
// newCipher initialises the cipher. If salt is "" then it uses a built in salt val
|
||||||
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool, enc fileNameEncoding) (*Cipher, error) {
|
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*Cipher, error) {
|
||||||
c := &Cipher{
|
c := &Cipher{
|
||||||
mode: mode,
|
mode: mode,
|
||||||
fileNameEnc: enc,
|
cryptoRand: rand.Reader,
|
||||||
cryptoRand: rand.Reader,
|
dirNameEncrypt: dirNameEncrypt,
|
||||||
dirNameEncrypt: dirNameEncrypt,
|
|
||||||
encryptedSuffix: ".bin",
|
|
||||||
}
|
}
|
||||||
c.buffers.New = func() any {
|
c.buffers.New = func() interface{} {
|
||||||
return new([blockSize]byte)
|
return make([]byte, blockSize)
|
||||||
}
|
}
|
||||||
err := c.Key(password, salt)
|
err := c.Key(password, salt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -202,31 +141,13 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
|
|||||||
return c, nil
|
return c, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// setEncryptedSuffix set suffix, or an empty string
|
|
||||||
func (c *Cipher) setEncryptedSuffix(suffix string) {
|
|
||||||
if strings.EqualFold(suffix, "none") {
|
|
||||||
c.encryptedSuffix = ""
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if !strings.HasPrefix(suffix, ".") {
|
|
||||||
fs.Errorf(nil, "crypt: bad suffix: %v", ErrorSuffixMissingDot)
|
|
||||||
suffix = "." + suffix
|
|
||||||
}
|
|
||||||
c.encryptedSuffix = suffix
|
|
||||||
}
|
|
||||||
|
|
||||||
// Call to set bad block pass through
|
|
||||||
func (c *Cipher) setPassBadBlocks(passBadBlocks bool) {
|
|
||||||
c.passBadBlocks = passBadBlocks
|
|
||||||
}
|
|
||||||
|
|
||||||
// Key creates all the internal keys from the password passed in using
|
// Key creates all the internal keys from the password passed in using
|
||||||
// scrypt.
|
// scrypt.
|
||||||
//
|
//
|
||||||
// If salt is "" we use a fixed salt just to make attackers lives
|
// If salt is "" we use a fixed salt just to make attackers lives
|
||||||
// slightly harder than using no salt.
|
// slighty harder than using no salt.
|
||||||
//
|
//
|
||||||
// Note that empty password makes all 0x00 keys which is used in the
|
// Note that empty passsword makes all 0x00 keys which is used in the
|
||||||
// tests.
|
// tests.
|
||||||
func (c *Cipher) Key(password, salt string) (err error) {
|
func (c *Cipher) Key(password, salt string) (err error) {
|
||||||
const keySize = len(c.dataKey) + len(c.nameKey) + len(c.nameTweak)
|
const keySize = len(c.dataKey) + len(c.nameKey) + len(c.nameTweak)
|
||||||
@@ -252,18 +173,45 @@ func (c *Cipher) Key(password, salt string) (err error) {
|
|||||||
}
|
}
|
||||||
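Key above feeds scrypt to produce the 80 bytes that back dataKey, nameKey and nameTweak. A rough, self-contained sketch of that derivation follows; the scrypt cost parameters (16384, 8, 1) and the placeholder fallback salt are assumptions for illustration, not values read out of this diff.

package main

import (
    "fmt"

    "golang.org/x/crypto/scrypt"
)

// deriveKeys stretches the password into dataKey+nameKey+nameTweak
// (32+32+16 = 80 bytes) and splits the result.
func deriveKeys(password, salt string) (dataKey [32]byte, nameKey [32]byte, nameTweak [16]byte, err error) {
    const keySize = 32 + 32 + 16
    saltBytes := []byte(salt)
    if salt == "" {
        // stand-in fixed salt; the real code ships its own constant
        saltBytes = []byte("fixed-but-arbitrary-salt")
    }
    var key []byte
    if password == "" {
        key = make([]byte, keySize) // all-zero keys, as the comment above notes
    } else {
        key, err = scrypt.Key([]byte(password), saltBytes, 16384, 8, 1, keySize)
        if err != nil {
            return
        }
    }
    copy(dataKey[:], key)
    copy(nameKey[:], key[len(dataKey):])
    copy(nameTweak[:], key[len(dataKey)+len(nameKey):])
    return
}

func main() {
    d, n, t, err := deriveKeys("correct horse battery staple", "")
    fmt.Printf("%x\n%x\n%x\n%v\n", d, n, t, err)
}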
|
|
||||||
// getBlock gets a block from the pool of size blockSize
|
// getBlock gets a block from the pool of size blockSize
|
||||||
func (c *Cipher) getBlock() *[blockSize]byte {
|
func (c *Cipher) getBlock() []byte {
|
||||||
return c.buffers.Get().(*[blockSize]byte)
|
return c.buffers.Get().([]byte)
|
||||||
}
|
}
|
||||||
|
|
||||||
// putBlock returns a block to the pool of size blockSize
|
// putBlock returns a block to the pool of size blockSize
|
||||||
func (c *Cipher) putBlock(buf *[blockSize]byte) {
|
func (c *Cipher) putBlock(buf []byte) {
|
||||||
|
if len(buf) != blockSize {
|
||||||
|
panic("bad blocksize returned to pool")
|
||||||
|
}
|
||||||
c.buffers.Put(buf)
|
c.buffers.Put(buf)
|
||||||
}
|
}
|
||||||
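getBlock/putBlock above lean on sync.Pool so the per-block buffers are reused rather than reallocated on every Read. A minimal standalone version of that pattern; the blockSize value here is a stand-in, not the constant from this file.

package main

import (
    "fmt"
    "sync"
)

const blockSize = 64 * 1024 // stand-in value

// buffers hands out pointers to fixed-size arrays so hot read loops
// do not allocate a fresh block on every call.
var buffers = sync.Pool{
    New: func() any { return new([blockSize]byte) },
}

func getBlock() *[blockSize]byte  { return buffers.Get().(*[blockSize]byte) }
func putBlock(b *[blockSize]byte) { buffers.Put(b) }

func main() {
    b := getBlock()
    copy(b[:], []byte("some plaintext"))
    fmt.Println(len(b))
    putBlock(b) // return it for reuse instead of letting it be GCed
}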
|
|
||||||
|
// encodeFileName encodes a filename using a modified version of
|
||||||
|
// standard base32 as described in RFC4648
|
||||||
|
//
|
||||||
|
// The standard encoding is modified in two ways
|
||||||
|
// * it becomes lower case (no-one likes upper case filenames!)
|
||||||
|
// * we strip the padding character `=`
|
||||||
|
func encodeFileName(in []byte) string {
|
||||||
|
encoded := base32.HexEncoding.EncodeToString(in)
|
||||||
|
encoded = strings.TrimRight(encoded, "=")
|
||||||
|
return strings.ToLower(encoded)
|
||||||
|
}
|
||||||
|
|
||||||
|
// decodeFileName decodes a filename as encoded by encodeFileName
|
||||||
|
func decodeFileName(in string) ([]byte, error) {
|
||||||
|
if strings.HasSuffix(in, "=") {
|
||||||
|
return nil, ErrorBadBase32Encoding
|
||||||
|
}
|
||||||
|
// First figure out how many padding characters to add
|
||||||
|
roundUpToMultipleOf8 := (len(in) + 7) &^ 7
|
||||||
|
equals := roundUpToMultipleOf8 - len(in)
|
||||||
|
in = strings.ToUpper(in) + "========"[:equals]
|
||||||
|
return base32.HexEncoding.DecodeString(in)
|
||||||
|
}
|
||||||
|
|
||||||
// encryptSegment encrypts a path segment
|
// encryptSegment encrypts a path segment
|
||||||
//
|
//
|
||||||
// This uses EME with AES.
|
// This uses EME with AES
|
||||||
//
|
//
|
||||||
// EME (ECB-Mix-ECB) is a wide-block encryption mode presented in the
|
// EME (ECB-Mix-ECB) is a wide-block encryption mode presented in the
|
||||||
// 2003 paper "A Parallelizable Enciphering Mode" by Halevi and
|
// 2003 paper "A Parallelizable Enciphering Mode" by Halevi and
|
||||||
@@ -273,15 +221,15 @@ func (c *Cipher) putBlock(buf *[blockSize]byte) {
|
|||||||
// same filename must encrypt to the same thing.
|
// same filename must encrypt to the same thing.
|
||||||
//
|
//
|
||||||
// This means that
|
// This means that
|
||||||
// - filenames with the same name will encrypt the same
|
// * filenames with the same name will encrypt the same
|
||||||
// - filenames which start the same won't have a common prefix
|
// * filenames which start the same won't have a common prefix
|
||||||
func (c *Cipher) encryptSegment(plaintext string) string {
|
func (c *Cipher) encryptSegment(plaintext string) string {
|
||||||
if plaintext == "" {
|
if plaintext == "" {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
paddedPlaintext := pkcs7.Pad(nameCipherBlockSize, []byte(plaintext))
|
paddedPlaintext := pkcs7.Pad(nameCipherBlockSize, []byte(plaintext))
|
||||||
ciphertext := eme.Transform(c.block, c.nameTweak[:], paddedPlaintext, eme.DirectionEncrypt)
|
ciphertext := eme.Transform(c.block, c.nameTweak[:], paddedPlaintext, eme.DirectionEncrypt)
|
||||||
return c.fileNameEnc.EncodeToString(ciphertext)
|
return encodeFileName(ciphertext)
|
||||||
}
|
}
|
||||||
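To make encryptSegment concrete, here is a hedged end-to-end sketch: PKCS#7 pad the name to the AES block size, run it through EME with a per-config tweak, then apply the unpadded lower-case base32 encoding. It assumes the github.com/rfjakob/eme package whose Transform call appears in the diff; the zero key and tweak are dummies, and the tiny pad helper stands in for the real pkcs7 package.

package main

import (
    "crypto/aes"
    "encoding/base32"
    "fmt"
    "strings"

    "github.com/rfjakob/eme"
)

// pkcs7Pad pads b up to a multiple of n bytes, each pad byte holding the pad length.
func pkcs7Pad(n int, b []byte) []byte {
    pad := n - len(b)%n
    out := make([]byte, len(b)+pad)
    copy(out, b)
    for i := len(b); i < len(out); i++ {
        out[i] = byte(pad)
    }
    return out
}

func main() {
    var nameKey [32]byte   // would come from the key derivation in the real cipher
    var nameTweak [16]byte // ditto
    block, _ := aes.NewCipher(nameKey[:])

    padded := pkcs7Pad(16, []byte("hello world.txt"))
    ciphertext := eme.Transform(block, nameTweak[:], padded, eme.DirectionEncrypt)

    // unpadded, lower-case base32hex, as in encryptSegment above
    enc := strings.ToLower(strings.TrimRight(base32.HexEncoding.EncodeToString(ciphertext), "="))
    fmt.Println(enc)
}

Because EME is deterministic for a given key and tweak, the same name always encrypts to the same string, which is exactly the property the comment above calls out.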
|
|
||||||
// decryptSegment decrypts a path segment
|
// decryptSegment decrypts a path segment
|
||||||
@@ -289,7 +237,7 @@ func (c *Cipher) decryptSegment(ciphertext string) (string, error) {
|
|||||||
if ciphertext == "" {
|
if ciphertext == "" {
|
||||||
return "", nil
|
return "", nil
|
||||||
}
|
}
|
||||||
rawCiphertext, err := c.fileNameEnc.DecodeString(ciphertext)
|
rawCiphertext, err := decodeFileName(ciphertext)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
@@ -329,14 +277,14 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
|
|||||||
for _, runeValue := range plaintext {
|
for _, runeValue := range plaintext {
|
||||||
dir += int(runeValue)
|
dir += int(runeValue)
|
||||||
}
|
}
|
||||||
dir %= 256
|
dir = dir % 256
|
||||||
|
|
||||||
// We'll use this number to store in the result filename...
|
// We'll use this number to store in the result filename...
|
||||||
var result bytes.Buffer
|
var result bytes.Buffer
|
||||||
_, _ = result.WriteString(strconv.Itoa(dir) + ".")
|
_, _ = result.WriteString(strconv.Itoa(dir) + ".")
|
||||||
|
|
||||||
// but we'll augment it with the nameKey for real calculation
|
// but we'll augment it with the nameKey for real calculation
|
||||||
for i := range len(c.nameKey) {
|
for i := 0; i < len(c.nameKey); i++ {
|
||||||
dir += int(c.nameKey[i])
|
dir += int(c.nameKey[i])
|
||||||
}
|
}
|
||||||
|
|
||||||
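The obfuscation mode in this hunk derives a number from the file name (sum of its runes mod 256), stores it as a numeric prefix so the step is reversible, and uses it to rotate the characters. Below is a deliberately simplified toy version, which skips the nameKey mixing and the extra rune ranges the real obfuscateSegment handles.

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// obfuscate prefixes the name with its rune-sum (mod 256) and rotates
// ASCII letters by that amount, leaving everything else untouched.
func obfuscate(plain string) string {
    dir := 0
    for _, r := range plain {
        dir += int(r)
    }
    dir %= 256

    var out strings.Builder
    out.WriteString(strconv.Itoa(dir) + ".")
    rot := dir % 26
    for _, r := range plain {
        switch {
        case r >= 'a' && r <= 'z':
            out.WriteRune('a' + (r-'a'+rune(rot))%26)
        case r >= 'A' && r <= 'Z':
            out.WriteRune('A' + (r-'A'+rune(rot))%26)
        default:
            out.WriteRune(r)
        }
    }
    return out.String()
}

func main() {
    fmt.Println(obfuscate("hello.txt")) // numeric prefix followed by the rotated name
}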
@@ -418,7 +366,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// add the nameKey to get the real rotate distance
|
// add the nameKey to get the real rotate distance
|
||||||
for i := range len(c.nameKey) {
|
for i := 0; i < len(c.nameKey); i++ {
|
||||||
dir += int(c.nameKey[i])
|
dir += int(c.nameKey[i])
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -450,7 +398,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
|
|||||||
if pos >= 26 {
|
if pos >= 26 {
|
||||||
pos -= 6
|
pos -= 6
|
||||||
}
|
}
|
||||||
pos -= thisdir
|
pos = pos - thisdir
|
||||||
if pos < 0 {
|
if pos < 0 {
|
||||||
pos += 52
|
pos += 52
|
||||||
}
|
}
|
||||||
@@ -494,32 +442,11 @@ func (c *Cipher) encryptFileName(in string) string {
|
|||||||
if !c.dirNameEncrypt && i != (len(segments)-1) {
|
if !c.dirNameEncrypt && i != (len(segments)-1) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Strip version string so that only the non-versioned part
|
|
||||||
// of the file name gets encrypted/obfuscated
|
|
||||||
hasVersion := false
|
|
||||||
var t time.Time
|
|
||||||
if i == (len(segments)-1) && version.Match(segments[i]) {
|
|
||||||
var s string
|
|
||||||
t, s = version.Remove(segments[i])
|
|
||||||
// version.Remove can fail, in which case it returns segments[i]
|
|
||||||
if s != segments[i] {
|
|
||||||
segments[i] = s
|
|
||||||
hasVersion = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.mode == NameEncryptionStandard {
|
if c.mode == NameEncryptionStandard {
|
||||||
segments[i] = c.encryptSegment(segments[i])
|
segments[i] = c.encryptSegment(segments[i])
|
||||||
} else {
|
} else {
|
||||||
segments[i] = c.obfuscateSegment(segments[i])
|
segments[i] = c.obfuscateSegment(segments[i])
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add back a version to the encrypted/obfuscated
|
|
||||||
// file name, if we stripped it off earlier
|
|
||||||
if hasVersion {
|
|
||||||
segments[i] = version.Add(segments[i], t)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return strings.Join(segments, "/")
|
return strings.Join(segments, "/")
|
||||||
}
|
}
|
||||||
@@ -527,7 +454,7 @@ func (c *Cipher) encryptFileName(in string) string {
|
|||||||
// EncryptFileName encrypts a file path
|
// EncryptFileName encrypts a file path
|
||||||
func (c *Cipher) EncryptFileName(in string) string {
|
func (c *Cipher) EncryptFileName(in string) string {
|
||||||
if c.mode == NameEncryptionOff {
|
if c.mode == NameEncryptionOff {
|
||||||
return in + c.encryptedSuffix
|
return in + encryptedSuffix
|
||||||
}
|
}
|
||||||
return c.encryptFileName(in)
|
return c.encryptFileName(in)
|
||||||
}
|
}
|
||||||
@@ -550,21 +477,6 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
|
|||||||
if !c.dirNameEncrypt && i != (len(segments)-1) {
|
if !c.dirNameEncrypt && i != (len(segments)-1) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Strip version string so that only the non-versioned part
|
|
||||||
// of the file name gets decrypted/deobfuscated
|
|
||||||
hasVersion := false
|
|
||||||
var t time.Time
|
|
||||||
if i == (len(segments)-1) && version.Match(segments[i]) {
|
|
||||||
var s string
|
|
||||||
t, s = version.Remove(segments[i])
|
|
||||||
// version.Remove can fail, in which case it returns segments[i]
|
|
||||||
if s != segments[i] {
|
|
||||||
segments[i] = s
|
|
||||||
hasVersion = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.mode == NameEncryptionStandard {
|
if c.mode == NameEncryptionStandard {
|
||||||
segments[i], err = c.decryptSegment(segments[i])
|
segments[i], err = c.decryptSegment(segments[i])
|
||||||
} else {
|
} else {
|
||||||
@@ -574,12 +486,6 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add back a version to the decrypted/deobfuscated
|
|
||||||
// file name, if we stripped it off earlier
|
|
||||||
if hasVersion {
|
|
||||||
segments[i] = version.Add(segments[i], t)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return strings.Join(segments, "/"), nil
|
return strings.Join(segments, "/"), nil
|
||||||
}
|
}
|
||||||
@@ -587,19 +493,11 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
|
|||||||
// DecryptFileName decrypts a file path
|
// DecryptFileName decrypts a file path
|
||||||
func (c *Cipher) DecryptFileName(in string) (string, error) {
|
func (c *Cipher) DecryptFileName(in string) (string, error) {
|
||||||
if c.mode == NameEncryptionOff {
|
if c.mode == NameEncryptionOff {
|
||||||
remainingLength := len(in) - len(c.encryptedSuffix)
|
remainingLength := len(in) - len(encryptedSuffix)
|
||||||
if remainingLength == 0 || !strings.HasSuffix(in, c.encryptedSuffix) {
|
if remainingLength > 0 && strings.HasSuffix(in, encryptedSuffix) {
|
||||||
return "", ErrorNotAnEncryptedFile
|
return in[:remainingLength], nil
|
||||||
}
|
}
|
||||||
decrypted := in[:remainingLength]
|
return "", ErrorNotAnEncryptedFile
|
||||||
if version.Match(decrypted) {
|
|
||||||
_, unversioned := version.Remove(decrypted)
|
|
||||||
if unversioned == "" {
|
|
||||||
return "", ErrorNotAnEncryptedFile
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Leave the version string on, if it was there
|
|
||||||
return decrypted, nil
|
|
||||||
}
|
}
|
||||||
return c.decryptFileName(in)
|
return c.decryptFileName(in)
|
||||||
}
|
}
|
||||||
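With filename encryption off, names are only tagged with a configurable suffix (".bin" by default) and DecryptFileName just strips it again, refusing names that lack it. A minimal sketch of that round trip with illustrative helper names:

package main

import (
    "errors"
    "fmt"
    "strings"
)

var errNotEncrypted = errors.New("not an encrypted file")

func encryptName(in, suffix string) string { return in + suffix }

// decryptName strips the suffix, rejecting names that do not carry it
// or that would be left empty after stripping.
func decryptName(in, suffix string) (string, error) {
    rest := len(in) - len(suffix)
    if rest <= 0 || !strings.HasSuffix(in, suffix) {
        return "", errNotEncrypted
    }
    return in[:rest], nil
}

func main() {
    enc := encryptName("report.pdf", ".bin")
    dec, err := decryptName(enc, ".bin")
    fmt.Println(enc, dec, err) // report.pdf.bin report.pdf <nil>
}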
@@ -628,9 +526,9 @@ func (n *nonce) pointer() *[fileNonceSize]byte {
|
|||||||
// fromReader fills the nonce from an io.Reader - normally the OSes
|
// fromReader fills the nonce from an io.Reader - normally the OSes
|
||||||
// crypto random number generator
|
// crypto random number generator
|
||||||
func (n *nonce) fromReader(in io.Reader) error {
|
func (n *nonce) fromReader(in io.Reader) error {
|
||||||
read, err := readers.ReadFill(in, (*n)[:])
|
read, err := io.ReadFull(in, (*n)[:])
|
||||||
if read != fileNonceSize {
|
if read != fileNonceSize {
|
||||||
return fmt.Errorf("short read of nonce: %w", err)
|
return errors.Wrap(err, "short read of nonce")
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -664,7 +562,7 @@ func (n *nonce) increment() {
|
|||||||
// add a uint64 to the nonce
|
// add a uint64 to the nonce
|
||||||
func (n *nonce) add(x uint64) {
|
func (n *nonce) add(x uint64) {
|
||||||
carry := uint16(0)
|
carry := uint16(0)
|
||||||
for i := range 8 {
|
for i := 0; i < 8; i++ {
|
||||||
digit := (*n)[i]
|
digit := (*n)[i]
|
||||||
xDigit := byte(x)
|
xDigit := byte(x)
|
||||||
x >>= 8
|
x >>= 8
|
||||||
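The nonce here behaves as a little-endian counter, so seeking N blocks forward is just adding N with a manual carry. A compact standalone illustration follows; fileNonceSize matches the diff, the demo values are arbitrary.

package main

import "fmt"

const fileNonceSize = 24

type nonce [fileNonceSize]byte

// add adds x to the low 8 bytes of the nonce, propagating any carry upwards.
func (n *nonce) add(x uint64) {
    carry := uint16(0)
    for i := 0; i < 8; i++ {
        digit := (*n)[i]
        xDigit := byte(x)
        x >>= 8
        carry += uint16(digit) + uint16(xDigit)
        (*n)[i] = byte(carry)
        carry >>= 8
    }
    if carry != 0 {
        for i := 8; i < fileNonceSize; i++ {
            digit := (*n)[i]
            newDigit := digit + 1
            (*n)[i] = newDigit
            if newDigit >= digit {
                break // no overflow, carry absorbed
            }
        }
    }
}

func main() {
    var n nonce
    n[0] = 0xff // force a carry out of the first byte
    n.add(1)
    fmt.Printf("% x\n", n[:3]) // 00 01 00
}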
@@ -683,8 +581,8 @@ type encrypter struct {
|
|||||||
in io.Reader
|
in io.Reader
|
||||||
c *Cipher
|
c *Cipher
|
||||||
nonce nonce
|
nonce nonce
|
||||||
buf *[blockSize]byte
|
buf []byte
|
||||||
readBuf *[blockSize]byte
|
readBuf []byte
|
||||||
bufIndex int
|
bufIndex int
|
||||||
bufSize int
|
bufSize int
|
||||||
err error
|
err error
|
||||||
@@ -709,9 +607,9 @@ func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Copy magic into buffer
|
// Copy magic into buffer
|
||||||
copy((*fh.buf)[:], fileMagicBytes)
|
copy(fh.buf, fileMagicBytes)
|
||||||
// Copy nonce into buffer
|
// Copy nonce into buffer
|
||||||
copy((*fh.buf)[fileMagicSize:], fh.nonce[:])
|
copy(fh.buf[fileMagicSize:], fh.nonce[:])
|
||||||
return fh, nil
|
return fh, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -726,20 +624,25 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
|
|||||||
if fh.bufIndex >= fh.bufSize {
|
if fh.bufIndex >= fh.bufSize {
|
||||||
// Read data
|
// Read data
|
||||||
// FIXME should overlap the reads with a go-routine and 2 buffers?
|
// FIXME should overlap the reads with a go-routine and 2 buffers?
|
||||||
readBuf := (*fh.readBuf)[:blockDataSize]
|
readBuf := fh.readBuf[:blockDataSize]
|
||||||
n, err = readers.ReadFill(fh.in, readBuf)
|
n, err = io.ReadFull(fh.in, readBuf)
|
||||||
if n == 0 {
|
if n == 0 {
|
||||||
|
// err can't be nil since:
|
||||||
|
// n == len(buf) if and only if err == nil.
|
||||||
return fh.finish(err)
|
return fh.finish(err)
|
||||||
}
|
}
|
||||||
// possibly err != nil here, but we will process the
|
// possibly err != nil here, but we will process the
|
||||||
// data and the next call to ReadFill will return 0, err
|
// data and the next call to ReadFull will return 0, err
|
||||||
|
// Write nonce to start of block
|
||||||
|
copy(fh.buf, fh.nonce[:])
|
||||||
// Encrypt the block using the nonce
|
// Encrypt the block using the nonce
|
||||||
secretbox.Seal((*fh.buf)[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
|
block := fh.buf
|
||||||
|
secretbox.Seal(block[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
|
||||||
fh.bufIndex = 0
|
fh.bufIndex = 0
|
||||||
fh.bufSize = blockHeaderSize + n
|
fh.bufSize = blockHeaderSize + n
|
||||||
fh.nonce.increment()
|
fh.nonce.increment()
|
||||||
}
|
}
|
||||||
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufSize])
|
n = copy(p, fh.buf[fh.bufIndex:fh.bufSize])
|
||||||
fh.bufIndex += n
|
fh.bufIndex += n
|
||||||
return n, nil
|
return n, nil
|
||||||
}
|
}
|
||||||
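Each plaintext block read by the encrypter is sealed with NaCl secretbox under the current nonce, which is what adds the 16-byte per-block header, and the nonce is then incremented for the next block. A self-contained demo of that framing, using a zero key and nonce purely for illustration:

package main

import (
    "fmt"

    "golang.org/x/crypto/nacl/secretbox"
)

func main() {
    var key [32]byte
    var nonce [24]byte

    plaintext := []byte("block 0 of the file")

    // Seal appends the ciphertext plus a 16-byte Poly1305 tag to the first argument.
    sealed := secretbox.Seal(nil, plaintext, &nonce, &key)
    fmt.Println(len(sealed)-len(plaintext), "bytes of per-block overhead") // 16

    opened, ok := secretbox.Open(nil, sealed, &nonce, &key)
    fmt.Println(ok, string(opened))

    // flipping any ciphertext bit makes authentication fail for that block
    sealed[0] ^= 1
    _, ok = secretbox.Open(nil, sealed, &nonce, &key)
    fmt.Println(ok) // false
}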
@@ -780,8 +683,8 @@ type decrypter struct {
|
|||||||
nonce nonce
|
nonce nonce
|
||||||
initialNonce nonce
|
initialNonce nonce
|
||||||
c *Cipher
|
c *Cipher
|
||||||
buf *[blockSize]byte
|
buf []byte
|
||||||
readBuf *[blockSize]byte
|
readBuf []byte
|
||||||
bufIndex int
|
bufIndex int
|
||||||
bufSize int
|
bufSize int
|
||||||
err error
|
err error
|
||||||
@@ -799,12 +702,12 @@ func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
|
|||||||
limit: -1,
|
limit: -1,
|
||||||
}
|
}
|
||||||
// Read file header (magic + nonce)
|
// Read file header (magic + nonce)
|
||||||
readBuf := (*fh.readBuf)[:fileHeaderSize]
|
readBuf := fh.readBuf[:fileHeaderSize]
|
||||||
n, err := readers.ReadFill(fh.rc, readBuf)
|
_, err := io.ReadFull(fh.rc, readBuf)
|
||||||
if n < fileHeaderSize && err == io.EOF {
|
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||||
// This read from 0..fileHeaderSize-1 bytes
|
// This read from 0..fileHeaderSize-1 bytes
|
||||||
return nil, fh.finishAndClose(ErrorEncryptedFileTooShort)
|
return nil, fh.finishAndClose(ErrorEncryptedFileTooShort)
|
||||||
} else if err != io.EOF && err != nil {
|
} else if err != nil {
|
||||||
return nil, fh.finishAndClose(err)
|
return nil, fh.finishAndClose(err)
|
||||||
}
|
}
|
||||||
// check the magic
|
// check the magic
|
||||||
@@ -862,8 +765,10 @@ func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offse
|
|||||||
func (fh *decrypter) fillBuffer() (err error) {
|
func (fh *decrypter) fillBuffer() (err error) {
|
||||||
// FIXME should overlap the reads with a go-routine and 2 buffers?
|
// FIXME should overlap the reads with a go-routine and 2 buffers?
|
||||||
readBuf := fh.readBuf
|
readBuf := fh.readBuf
|
||||||
n, err := readers.ReadFill(fh.rc, (*readBuf)[:])
|
n, err := io.ReadFull(fh.rc, readBuf)
|
||||||
if n == 0 {
|
if n == 0 {
|
||||||
|
// err can't be nil since:
|
||||||
|
// n == len(buf) if and only if err == nil.
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// possibly err != nil here, but we will process the data and
|
// possibly err != nil here, but we will process the data and
|
||||||
@@ -871,25 +776,19 @@ func (fh *decrypter) fillBuffer() (err error) {
|
|||||||
|
|
||||||
// Check header + 1 byte exists
|
// Check header + 1 byte exists
|
||||||
if n <= blockHeaderSize {
|
if n <= blockHeaderSize {
|
||||||
if err != nil && err != io.EOF {
|
if err != nil {
|
||||||
return err // return pending error as it is likely more accurate
|
return err // return pending error as it is likely more accurate
|
||||||
}
|
}
|
||||||
return ErrorEncryptedFileBadHeader
|
return ErrorEncryptedFileBadHeader
|
||||||
}
|
}
|
||||||
// Decrypt the block using the nonce
|
// Decrypt the block using the nonce
|
||||||
_, ok := secretbox.Open((*fh.buf)[:0], (*readBuf)[:n], fh.nonce.pointer(), &fh.c.dataKey)
|
block := fh.buf
|
||||||
|
_, ok := secretbox.Open(block[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
|
||||||
if !ok {
|
if !ok {
|
||||||
if err != nil && err != io.EOF {
|
if err != nil {
|
||||||
return err // return pending error as it is likely more accurate
|
return err // return pending error as it is likely more accurate
|
||||||
}
|
}
|
||||||
if !fh.c.passBadBlocks {
|
return ErrorEncryptedBadBlock
|
||||||
return ErrorEncryptedBadBlock
|
|
||||||
}
|
|
||||||
fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
|
|
||||||
// Zero out the bad block and continue
|
|
||||||
for i := range (*fh.buf)[:n] {
|
|
||||||
fh.buf[i] = 0
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
fh.bufIndex = 0
|
fh.bufIndex = 0
|
||||||
fh.bufSize = n - blockHeaderSize
|
fh.bufSize = n - blockHeaderSize
|
||||||
@@ -915,7 +814,7 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
|
|||||||
if fh.limit >= 0 && fh.limit < int64(toCopy) {
|
if fh.limit >= 0 && fh.limit < int64(toCopy) {
|
||||||
toCopy = int(fh.limit)
|
toCopy = int(fh.limit)
|
||||||
}
|
}
|
||||||
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufIndex+toCopy])
|
n = copy(p, fh.buf[fh.bufIndex:fh.bufIndex+toCopy])
|
||||||
fh.bufIndex += n
|
fh.bufIndex += n
|
||||||
if fh.limit >= 0 {
|
if fh.limit >= 0 {
|
||||||
fh.limit -= int64(n)
|
fh.limit -= int64(n)
|
||||||
@@ -926,8 +825,9 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
|
|||||||
return n, nil
|
return n, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// calculateUnderlying converts an (offset, limit) in an encrypted file
|
// calculateUnderlying converts an (offset, limit) in a crypted file
|
||||||
// into an (underlyingOffset, underlyingLimit) for the underlying file.
|
// into an (underlyingOffset, underlyingLimit) for the underlying
|
||||||
|
// file.
|
||||||
//
|
//
|
||||||
// It also returns number of bytes to discard after reading the first
|
// It also returns number of bytes to discard after reading the first
|
||||||
// block and number of blocks this is from the start so the nonce can
|
// block and number of blocks this is from the start so the nonce can
|
||||||
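A back-of-the-envelope version of the offset arithmetic this comment describes: map a plaintext offset to a block number plus a discard count within that block, then to an offset in the underlying encrypted file, which carries a file header and a per-block header. The constants below mirror the ones used in this file but are restated here as assumptions, and the real calculateUnderlying also handles the limit.

package main

import "fmt"

const (
    fileHeaderSize  = 32        // assumed: magic (8) + nonce (24)
    blockHeaderSize = 16        // assumed: secretbox overhead per block
    blockDataSize   = 64 * 1024 // assumed: plaintext bytes per block
    blockSize       = blockHeaderSize + blockDataSize
)

// calculateUnderlying maps a plaintext offset to where to start reading in the
// encrypted file and how much of the first decrypted block to throw away.
func calculateUnderlying(offset int64) (underlyingOffset int64, discard int64, blocks int64) {
    blocks = offset / blockDataSize
    discard = offset % blockDataSize
    underlyingOffset = fileHeaderSize + blocks*blockSize
    return
}

func main() {
    for _, off := range []int64{0, 100, blockDataSize, 3*blockDataSize + 7} {
        u, d, b := calculateUnderlying(off)
        fmt.Printf("plaintext offset %8d -> underlying %8d, discard %5d, block %d\n", off, u, d, b)
    }
}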
@@ -1008,7 +908,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
|
|||||||
// Re-open the underlying object with the offset given
|
// Re-open the underlying object with the offset given
|
||||||
rc, err := fh.open(ctx, underlyingOffset, underlyingLimit)
|
rc, err := fh.open(ctx, underlyingOffset, underlyingLimit)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, fh.finish(fmt.Errorf("couldn't reopen file with offset and limit: %w", err))
|
return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit"))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set the file handle
|
// Set the file handle
|
||||||
@@ -1106,7 +1006,7 @@ func (c *Cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
|
|||||||
|
|
||||||
// DecryptDataSeek decrypts the data stream from offset
|
// DecryptDataSeek decrypts the data stream from offset
|
||||||
//
|
//
|
||||||
// The open function must return a ReadCloser opened to the offset supplied.
|
// The open function must return a ReadCloser opened to the offset supplied
|
||||||
//
|
//
|
||||||
// You must use this form of DecryptData if you might want to Seek the file handle
|
// You must use this form of DecryptData if you might want to Seek the file handle
|
||||||
func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
|
func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
|
||||||
|
|||||||
File diff suppressed because it is too large
@@ -3,22 +3,20 @@ package crypt
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"path"
|
"path"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/accounting"
|
"github.com/rclone/rclone/fs/accounting"
|
||||||
"github.com/rclone/rclone/fs/cache"
|
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
"github.com/rclone/rclone/fs/config/configstruct"
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
"github.com/rclone/rclone/fs/config/obscure"
|
"github.com/rclone/rclone/fs/config/obscure"
|
||||||
"github.com/rclone/rclone/fs/fspath"
|
"github.com/rclone/rclone/fs/fspath"
|
||||||
"github.com/rclone/rclone/fs/hash"
|
"github.com/rclone/rclone/fs/hash"
|
||||||
"github.com/rclone/rclone/fs/list"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Globals
|
// Globals
|
||||||
@@ -29,12 +27,9 @@ func init() {
|
|||||||
Description: "Encrypt/Decrypt a remote",
|
Description: "Encrypt/Decrypt a remote",
|
||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
CommandHelp: commandHelp,
|
CommandHelp: commandHelp,
|
||||||
MetadataInfo: &fs.MetadataInfo{
|
|
||||||
Help: `Any metadata supported by the underlying remote is read and written.`,
|
|
||||||
},
|
|
||||||
Options: []fs.Option{{
|
Options: []fs.Option{{
|
||||||
Name: "remote",
|
Name: "remote",
|
||||||
Help: "Remote to encrypt/decrypt.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
|
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
|
||||||
Required: true,
|
Required: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "filename_encryption",
|
Name: "filename_encryption",
|
||||||
@@ -43,13 +38,13 @@ func init() {
|
|||||||
Examples: []fs.OptionExample{
|
Examples: []fs.OptionExample{
|
||||||
{
|
{
|
||||||
Value: "standard",
|
Value: "standard",
|
||||||
Help: "Encrypt the filenames.\nSee the docs for the details.",
|
Help: "Encrypt the filenames see the docs for the details.",
|
||||||
}, {
|
}, {
|
||||||
Value: "obfuscate",
|
Value: "obfuscate",
|
||||||
Help: "Very simple filename obfuscation.",
|
Help: "Very simple filename obfuscation.",
|
||||||
}, {
|
}, {
|
||||||
Value: "off",
|
Value: "off",
|
||||||
Help: "Don't encrypt the file names.\nAdds a \".bin\", or \"suffix\" extension only.",
|
Help: "Don't encrypt the file names. Adds a \".bin\" extension only.",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
@@ -75,24 +70,8 @@ NB If filename_encryption is "off" then this option will do nothing.`,
|
|||||||
Required: true,
|
Required: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "password2",
|
Name: "password2",
|
||||||
Help: "Password or pass phrase for salt.\n\nOptional but recommended.\nShould be different to the previous password.",
|
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
|
||||||
IsPassword: true,
|
IsPassword: true,
|
||||||
}, {
|
|
||||||
Name: "server_side_across_configs",
|
|
||||||
Default: false,
|
|
||||||
Help: `Deprecated: use --server-side-across-configs instead.
|
|
||||||
|
|
||||||
Allow server-side operations (e.g. copy) to work across different crypt configs.
|
|
||||||
|
|
||||||
Normally this option is not what you want, but if you have two crypts
|
|
||||||
pointing to the same backend you can use it.
|
|
||||||
|
|
||||||
This can be used, for example, to change file name encryption type
|
|
||||||
without re-uploading all the data. Just make two crypt backends
|
|
||||||
pointing to two different directories with the single changed
|
|
||||||
parameter and use rclone move to move the files between the crypt
|
|
||||||
remotes.`,
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
}, {
|
||||||
Name: "show_mapping",
|
Name: "show_mapping",
|
||||||
Help: `For all files listed show how the names encrypt.
|
Help: `For all files listed show how the names encrypt.
|
||||||
@@ -107,71 +86,6 @@ names, or for debugging purposes.`,
|
|||||||
Default: false,
|
Default: false,
|
||||||
Hide: fs.OptionHideConfigurator,
|
Hide: fs.OptionHideConfigurator,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
|
||||||
Name: "no_data_encryption",
|
|
||||||
Help: "Option to either encrypt file data or leave it unencrypted.",
|
|
||||||
Default: false,
|
|
||||||
Advanced: true,
|
|
||||||
Examples: []fs.OptionExample{
|
|
||||||
{
|
|
||||||
Value: "true",
|
|
||||||
Help: "Don't encrypt file data, leave it unencrypted.",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Value: "false",
|
|
||||||
Help: "Encrypt file data.",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}, {
|
|
||||||
Name: "pass_bad_blocks",
|
|
||||||
Help: `If set this will pass bad blocks through as all 0.
|
|
||||||
|
|
||||||
This should not be set in normal operation, it should only be set if
|
|
||||||
trying to recover an encrypted file with errors and it is desired to
|
|
||||||
recover as much of the file as possible.`,
|
|
||||||
Default: false,
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
|
||||||
Name: "strict_names",
|
|
||||||
Help: `If set, this will raise an error when crypt comes across a filename that can't be decrypted.
|
|
||||||
|
|
||||||
(By default, rclone will just log a NOTICE and continue as normal.)
|
|
||||||
This can happen if encrypted and unencrypted files are stored in the same
|
|
||||||
directory (which is not recommended.) It may also indicate a more serious
|
|
||||||
problem that should be investigated.`,
|
|
||||||
Default: false,
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
|
||||||
Name: "filename_encoding",
|
|
||||||
Help: `How to encode the encrypted filename to text string.
|
|
||||||
|
|
||||||
This option could help with shortening the encrypted filename. The
|
|
||||||
suitable option would depend on the way your remote counts the filename
|
|
||||||
length and if it's case sensitive.`,
|
|
||||||
Default: "base32",
|
|
||||||
Examples: []fs.OptionExample{
|
|
||||||
{
|
|
||||||
Value: "base32",
|
|
||||||
Help: "Encode using base32. Suitable for all remote.",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Value: "base64",
|
|
||||||
Help: "Encode using base64. Suitable for case sensitive remote.",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Value: "base32768",
|
|
||||||
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive, Dropbox)",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
|
||||||
Name: "suffix",
|
|
||||||
Help: `If this is set it will override the default suffix of ".bin".
|
|
||||||
|
|
||||||
Setting suffix to "none" will result in an empty suffix. This may be useful
|
|
||||||
when the path length is critical.`,
|
|
||||||
Default: ".bin",
|
|
||||||
Advanced: true,
|
|
||||||
}},
|
}},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -187,25 +101,19 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
|
|||||||
}
|
}
|
||||||
password, err := obscure.Reveal(opt.Password)
|
password, err := obscure.Reveal(opt.Password)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to decrypt password: %w", err)
|
return nil, errors.Wrap(err, "failed to decrypt password")
|
||||||
}
|
}
|
||||||
var salt string
|
var salt string
|
||||||
if opt.Password2 != "" {
|
if opt.Password2 != "" {
|
||||||
salt, err = obscure.Reveal(opt.Password2)
|
salt, err = obscure.Reveal(opt.Password2)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to decrypt password2: %w", err)
|
return nil, errors.Wrap(err, "failed to decrypt password2")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
enc, err := NewNameEncoding(opt.FilenameEncoding)
|
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, errors.Wrap(err, "failed to make cipher")
|
||||||
}
|
}
|
||||||
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption, enc)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to make cipher: %w", err)
|
|
||||||
}
|
|
||||||
cipher.setEncryptedSuffix(opt.Suffix)
|
|
||||||
cipher.setPassBadBlocks(opt.PassBadBlocks)
|
|
||||||
return cipher, nil
|
return cipher, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -221,7 +129,7 @@ func NewCipher(m configmap.Mapper) (*Cipher, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewFs constructs an Fs from the path, container:path
|
// NewFs constructs an Fs from the path, container:path
|
||||||
func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) {
|
func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
// Parse config into Options struct
|
// Parse config into Options struct
|
||||||
opt := new(Options)
|
opt := new(Options)
|
||||||
err := configstruct.Set(m, opt)
|
err := configstruct.Set(m, opt)
|
||||||
@@ -236,25 +144,24 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
|||||||
if strings.HasPrefix(remote, name+":") {
|
if strings.HasPrefix(remote, name+":") {
|
||||||
return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
|
return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
|
||||||
}
|
}
|
||||||
// Make sure to remove trailing . referring to the current dir
|
wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
|
||||||
|
}
|
||||||
|
// Make sure to remove trailing . reffering to the current dir
|
||||||
if path.Base(rpath) == "." {
|
if path.Base(rpath) == "." {
|
||||||
rpath = strings.TrimSuffix(rpath, ".")
|
rpath = strings.TrimSuffix(rpath, ".")
|
||||||
}
|
}
|
||||||
// Look for a file first
|
// Look for a file first
|
||||||
var wrappedFs fs.Fs
|
remotePath := fspath.JoinRootPath(wPath, cipher.EncryptFileName(rpath))
|
||||||
if rpath == "" {
|
wrappedFs, err := wInfo.NewFs(wName, remotePath, wConfig)
|
||||||
wrappedFs, err = cache.Get(ctx, remote)
|
// if that didn't produce a file, look for a directory
|
||||||
} else {
|
if err != fs.ErrorIsFile {
|
||||||
remotePath := fspath.JoinRootPath(remote, cipher.EncryptFileName(rpath))
|
remotePath = fspath.JoinRootPath(wPath, cipher.EncryptDirName(rpath))
|
||||||
wrappedFs, err = cache.Get(ctx, remotePath)
|
wrappedFs, err = wInfo.NewFs(wName, remotePath, wConfig)
|
||||||
// if that didn't produce a file, look for a directory
|
|
||||||
if err != fs.ErrorIsFile {
|
|
||||||
remotePath = fspath.JoinRootPath(remote, cipher.EncryptDirName(rpath))
|
|
||||||
wrappedFs, err = cache.Get(ctx, remotePath)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if err != fs.ErrorIsFile && err != nil {
|
if err != fs.ErrorIsFile && err != nil {
|
||||||
return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remote, err)
|
return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", wName, remotePath)
|
||||||
}
|
}
|
||||||
f := &Fs{
|
f := &Fs{
|
||||||
Fs: wrappedFs,
|
Fs: wrappedFs,
|
||||||
@@ -263,39 +170,18 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
|||||||
opt: *opt,
|
opt: *opt,
|
||||||
cipher: cipher,
|
cipher: cipher,
|
||||||
}
|
}
|
||||||
cache.PinUntilFinalized(f.Fs, f)
|
|
||||||
// Correct root if definitely pointing to a file
|
|
||||||
if err == fs.ErrorIsFile {
|
|
||||||
f.root = path.Dir(f.root)
|
|
||||||
if f.root == "." || f.root == "/" {
|
|
||||||
f.root = ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// the features here are ones we could support, and they are
|
// the features here are ones we could support, and they are
|
||||||
// ANDed with the ones from wrappedFs
|
// ANDed with the ones from wrappedFs
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
CaseInsensitive: !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff,
|
CaseInsensitive: cipher.NameEncryptionMode() == NameEncryptionOff,
|
||||||
DuplicateFiles: true,
|
DuplicateFiles: true,
|
||||||
ReadMimeType: false, // MimeTypes not supported with crypt
|
ReadMimeType: false, // MimeTypes not supported with crypt
|
||||||
WriteMimeType: false,
|
WriteMimeType: false,
|
||||||
BucketBased: true,
|
BucketBased: true,
|
||||||
CanHaveEmptyDirectories: true,
|
CanHaveEmptyDirectories: true,
|
||||||
SetTier: true,
|
SetTier: true,
|
||||||
GetTier: true,
|
GetTier: true,
|
||||||
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
|
}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
|
||||||
ReadMetadata: true,
|
|
||||||
WriteMetadata: true,
|
|
||||||
UserMetadata: true,
|
|
||||||
ReadDirMetadata: true,
|
|
||||||
WriteDirMetadata: true,
|
|
||||||
WriteDirSetModTime: true,
|
|
||||||
UserDirMetadata: true,
|
|
||||||
DirModTimeUpdatesOnWrite: true,
|
|
||||||
PartialUploads: true,
|
|
||||||
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
|
||||||
|
|
||||||
// Enable ListP always
|
|
||||||
f.features.ListP = f.ListP
|
|
||||||
|
|
||||||
return f, err
|
return f, err
|
||||||
}
|
}
|
||||||
@@ -305,15 +191,9 @@ type Options struct {
|
|||||||
Remote string `config:"remote"`
|
Remote string `config:"remote"`
|
||||||
FilenameEncryption string `config:"filename_encryption"`
|
FilenameEncryption string `config:"filename_encryption"`
|
||||||
DirectoryNameEncryption bool `config:"directory_name_encryption"`
|
DirectoryNameEncryption bool `config:"directory_name_encryption"`
|
||||||
NoDataEncryption bool `config:"no_data_encryption"`
|
|
||||||
Password string `config:"password"`
|
Password string `config:"password"`
|
||||||
Password2 string `config:"password2"`
|
Password2 string `config:"password2"`
|
||||||
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
|
|
||||||
ShowMapping bool `config:"show_mapping"`
|
ShowMapping bool `config:"show_mapping"`
|
||||||
PassBadBlocks bool `config:"pass_bad_blocks"`
|
|
||||||
FilenameEncoding string `config:"filename_encoding"`
|
|
||||||
Suffix string `config:"suffix"`
|
|
||||||
StrictNames bool `config:"strict_names"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fs represents a wrapped fs.Fs
|
// Fs represents a wrapped fs.Fs
|
||||||
@@ -348,64 +228,45 @@ func (f *Fs) String() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Encrypt an object file name to entries.
|
// Encrypt an object file name to entries.
|
||||||
func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) error {
|
func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
|
||||||
remote := obj.Remote()
|
remote := obj.Remote()
|
||||||
decryptedRemote, err := f.cipher.DecryptFileName(remote)
|
decryptedRemote, err := f.cipher.DecryptFileName(remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if f.opt.StrictNames {
|
fs.Debugf(remote, "Skipping undecryptable file name: %v", err)
|
||||||
return fmt.Errorf("%s: undecryptable file name detected: %v", remote, err)
|
return
|
||||||
}
|
|
||||||
fs.Logf(remote, "Skipping undecryptable file name: %v", err)
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
if f.opt.ShowMapping {
|
if f.opt.ShowMapping {
|
||||||
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
|
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
|
||||||
}
|
}
|
||||||
*entries = append(*entries, f.newObject(obj))
|
*entries = append(*entries, f.newObject(obj))
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Encrypt a directory file name to entries.
|
// Encrypt a directory file name to entries.
|
||||||
func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) error {
|
func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) {
|
||||||
remote := dir.Remote()
|
remote := dir.Remote()
|
||||||
decryptedRemote, err := f.cipher.DecryptDirName(remote)
|
decryptedRemote, err := f.cipher.DecryptDirName(remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if f.opt.StrictNames {
|
fs.Debugf(remote, "Skipping undecryptable dir name: %v", err)
|
||||||
return fmt.Errorf("%s: undecryptable dir name detected: %v", remote, err)
|
return
|
||||||
}
|
|
||||||
fs.Logf(remote, "Skipping undecryptable dir name: %v", err)
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
if f.opt.ShowMapping {
|
if f.opt.ShowMapping {
|
||||||
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
|
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
|
||||||
}
|
}
|
||||||
*entries = append(*entries, f.newDir(ctx, dir))
|
*entries = append(*entries, f.newDir(ctx, dir))
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Encrypt some directory entries. This alters entries returning it as newEntries.
|
// Encrypt some directory entries. This alters entries returning it as newEntries.
|
||||||
func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
|
func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
|
||||||
newEntries = entries[:0] // in place filter
|
newEntries = entries[:0] // in place filter
|
||||||
errors := 0
|
|
||||||
var firsterr error
|
|
||||||
for _, entry := range entries {
|
for _, entry := range entries {
|
||||||
switch x := entry.(type) {
|
switch x := entry.(type) {
|
||||||
case fs.Object:
|
case fs.Object:
|
||||||
err = f.add(&newEntries, x)
|
f.add(&newEntries, x)
|
||||||
case fs.Directory:
|
case fs.Directory:
|
||||||
err = f.addDir(ctx, &newEntries, x)
|
f.addDir(ctx, &newEntries, x)
|
||||||
default:
|
default:
|
||||||
return nil, fmt.Errorf("unknown object type %T", entry)
|
return nil, errors.Errorf("Unknown object type %T", entry)
|
||||||
}
|
}
|
||||||
if err != nil {
|
|
||||||
errors++
|
|
||||||
if firsterr == nil {
|
|
||||||
firsterr = err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if firsterr != nil {
|
|
||||||
return nil, fmt.Errorf("there were %v undecryptable name errors. first error: %v", errors, firsterr)
|
|
||||||
}
|
}
|
||||||
return newEntries, nil
|
return newEntries, nil
|
||||||
}
|
}
|
||||||
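encryptEntries filters the listing in place by reusing the input slice's backing array and remembers only the first error while counting the rest. A generic sketch of that idiom with made-up helper names:

package main

import "fmt"

// filterInPlace keeps the items accepted by keep, reusing the input slice's
// backing array, and reports the first error together with the error count.
func filterInPlace(items []string, keep func(string) (bool, error)) ([]string, error) {
    out := items[:0] // reuse the same backing array, no allocation
    errCount := 0
    var firstErr error
    for _, it := range items {
        ok, err := keep(it)
        if err != nil {
            errCount++
            if firstErr == nil {
                firstErr = err
            }
            continue
        }
        if ok {
            out = append(out, it)
        }
    }
    if firstErr != nil {
        return nil, fmt.Errorf("there were %d errors, first error: %w", errCount, firstErr)
    }
    return out, nil
}

func main() {
    names := []string{"good1", "bad!", "good2"}
    kept, err := filterInPlace(names, func(s string) (bool, error) {
        if s == "bad!" {
            return false, fmt.Errorf("cannot decrypt %q", s)
        }
        return true, nil
    })
    fmt.Println(kept, err)
}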
@@ -420,40 +281,11 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
|
|||||||
// This should return ErrDirNotFound if the directory isn't
|
// This should return ErrDirNotFound if the directory isn't
|
||||||
// found.
|
// found.
|
||||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
return list.WithListP(ctx, dir, f)
|
entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir))
|
||||||
}
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
// ListP lists the objects and directories of the Fs starting
|
|
||||||
// from dir non recursively into out.
|
|
||||||
//
|
|
||||||
// dir should be "" to start from the root, and should not
|
|
||||||
// have trailing slashes.
|
|
||||||
//
|
|
||||||
// This should return ErrDirNotFound if the directory isn't
|
|
||||||
// found.
|
|
||||||
//
|
|
||||||
// It should call callback for each tranche of entries read.
|
|
||||||
// These need not be returned in any particular order. If
|
|
||||||
// callback returns an error then the listing will stop
|
|
||||||
// immediately.
|
|
||||||
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
|
|
||||||
wrappedCallback := func(entries fs.DirEntries) error {
|
|
||||||
entries, err := f.encryptEntries(ctx, entries)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return callback(entries)
|
|
||||||
}
|
}
|
||||||
listP := f.Fs.Features().ListP
|
return f.encryptEntries(ctx, entries)
|
||||||
encryptedDir := f.cipher.EncryptDirName(dir)
|
|
||||||
if listP == nil {
|
|
||||||
entries, err := f.Fs.List(ctx, encryptedDir)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return wrappedCallback(entries)
|
|
||||||
}
|
|
||||||
return listP(ctx, encryptedDir, wrappedCallback)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListR lists the objects and directories of the Fs starting
|
// ListR lists the objects and directories of the Fs starting
|
||||||
@@ -495,16 +327,6 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
|
|||||||
|
|
||||||
// put implements Put or PutStream
|
// put implements Put or PutStream
|
||||||
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
|
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
|
||||||
ci := fs.GetConfig(ctx)
|
|
||||||
|
|
||||||
if f.opt.NoDataEncryption {
|
|
||||||
o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
|
|
||||||
if err == nil && o != nil {
|
|
||||||
o = f.newObject(o)
|
|
||||||
}
|
|
||||||
return o, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encrypt the data into wrappedIn
|
// Encrypt the data into wrappedIn
|
||||||
wrappedIn, encrypter, err := f.cipher.encryptData(in)
|
wrappedIn, encrypter, err := f.cipher.encryptData(in)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -514,9 +336,6 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
|
|||||||
// Find a hash the destination supports to compute a hash of
|
// Find a hash the destination supports to compute a hash of
|
||||||
// the encrypted data
|
// the encrypted data
|
||||||
ht := f.Fs.Hashes().GetOne()
|
ht := f.Fs.Hashes().GetOne()
|
||||||
if ci.IgnoreChecksum {
|
|
||||||
ht = hash.None
|
|
||||||
}
|
|
||||||
var hasher *hash.MultiHasher
|
var hasher *hash.MultiHasher
|
||||||
if ht != hash.None {
|
if ht != hash.None {
|
||||||
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
|
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
|
||||||
@@ -544,18 +363,15 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
|
|||||||
var dstHash string
|
var dstHash string
|
||||||
dstHash, err = o.Hash(ctx, ht)
|
dstHash, err = o.Hash(ctx, ht)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to read destination hash: %w", err)
|
return nil, errors.Wrap(err, "failed to read destination hash")
|
||||||
}
|
}
|
||||||
if srcHash != "" && dstHash != "" {
|
if srcHash != "" && dstHash != "" && srcHash != dstHash {
|
||||||
if srcHash != dstHash {
|
// remove object
|
||||||
// remove object
|
err = o.Remove(ctx)
|
||||||
err = o.Remove(ctx)
|
if err != nil {
|
||||||
if err != nil {
|
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
|
||||||
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("corrupted on transfer: %v encrypted hashes differ src(%s) %q vs dst(%s) %q", ht, f.Fs, srcHash, o.Fs(), dstHash)
|
|
||||||
}
|
}
|
||||||
fs.Debugf(src, "%v = %s OK", ht, srcHash)
|
return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -588,37 +404,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
|||||||
return f.Fs.Mkdir(ctx, f.cipher.EncryptDirName(dir))
|
return f.Fs.Mkdir(ctx, f.cipher.EncryptDirName(dir))
|
||||||
}
|
}
|
||||||
|
|
||||||
// MkdirMetadata makes the root directory of the Fs object
|
|
||||||
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
|
|
||||||
do := f.Fs.Features().MkdirMetadata
|
|
||||||
if do == nil {
|
|
||||||
return nil, fs.ErrorNotImplemented
|
|
||||||
}
|
|
||||||
newDir, err := do(ctx, f.cipher.EncryptDirName(dir), metadata)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var entries = make(fs.DirEntries, 0, 1)
|
|
||||||
err = f.addDir(ctx, &entries, newDir)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
newDir, ok := entries[0].(fs.Directory)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("internal error: expecting %T to be fs.Directory", entries[0])
|
|
||||||
}
|
|
||||||
return newDir, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DirSetModTime sets the directory modtime for dir
|
|
||||||
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
|
|
||||||
do := f.Fs.Features().DirSetModTime
|
|
||||||
if do == nil {
|
|
||||||
return fs.ErrorNotImplemented
|
|
||||||
}
|
|
||||||
return do(ctx, f.cipher.EncryptDirName(dir), modTime)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rmdir removes the directory (container, bucket) if empty
|
// Rmdir removes the directory (container, bucket) if empty
|
||||||
//
|
//
|
||||||
// Return an error if it doesn't exist or isn't empty
|
// Return an error if it doesn't exist or isn't empty
|
||||||
@@ -626,25 +411,25 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
|||||||
return f.Fs.Rmdir(ctx, f.cipher.EncryptDirName(dir))
|
return f.Fs.Rmdir(ctx, f.cipher.EncryptDirName(dir))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Purge all files in the directory specified
|
// Purge all files in the root and the root directory
|
||||||
//
|
//
|
||||||
// Implement this if you have a way of deleting all the files
|
// Implement this if you have a way of deleting all the files
|
||||||
// quicker than just running Remove() on the result of List()
|
// quicker than just running Remove() on the result of List()
|
||||||
//
|
//
|
||||||
// Return an error if it doesn't exist
|
// Return an error if it doesn't exist
|
||||||
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
func (f *Fs) Purge(ctx context.Context) error {
|
||||||
do := f.Fs.Features().Purge
|
do := f.Fs.Features().Purge
|
||||||
if do == nil {
|
if do == nil {
|
||||||
return fs.ErrorCantPurge
|
return fs.ErrorCantPurge
|
||||||
}
|
}
|
||||||
return do(ctx, f.cipher.EncryptDirName(dir))
|
return do(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copy src to this remote using server-side copy operations.
|
// Copy src to this remote using server side copy operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given.
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
// It returns the destination Object and a possible error.
|
// It returns the destination Object and a possible error
|
||||||
//
|
//
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
@@ -665,11 +450,11 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
return f.newObject(oResult), nil
|
return f.newObject(oResult), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Move src to this remote using server-side move operations.
|
// Move src to this remote using server side move operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given.
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
// It returns the destination Object and a possible error.
|
// It returns the destination Object and a possible error
|
||||||
//
|
//
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
@@ -691,7 +476,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
}
|
}
|
||||||
|
|
||||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||||
// using server-side move operations.
|
// using server side move operations.
|
||||||
//
|
//
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
@@ -738,7 +523,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
|
|||||||
func (f *Fs) CleanUp(ctx context.Context) error {
|
func (f *Fs) CleanUp(ctx context.Context) error {
|
||||||
do := f.Fs.Features().CleanUp
|
do := f.Fs.Features().CleanUp
|
||||||
if do == nil {
|
if do == nil {
|
||||||
return errors.New("not supported by underlying remote")
|
return errors.New("can't CleanUp")
|
||||||
}
|
}
|
||||||
return do(ctx)
|
return do(ctx)
|
||||||
}
|
}
|
||||||
@@ -747,7 +532,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
|||||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||||
do := f.Fs.Features().About
|
do := f.Fs.Features().About
|
||||||
if do == nil {
|
if do == nil {
|
||||||
return nil, errors.New("not supported by underlying remote")
|
return nil, errors.New("About not supported")
|
||||||
}
|
}
|
||||||
return do(ctx)
|
return do(ctx)
|
||||||
}
|
}
|
||||||
@@ -785,24 +570,24 @@ func (f *Fs) computeHashWithNonce(ctx context.Context, nonce nonce, src fs.Objec
|
|||||||
// Open the src for input
|
// Open the src for input
|
||||||
in, err := src.Open(ctx)
|
in, err := src.Open(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", fmt.Errorf("failed to open src: %w", err)
|
return "", errors.Wrap(err, "failed to open src")
|
||||||
}
|
}
|
||||||
defer fs.CheckClose(in, &err)
|
defer fs.CheckClose(in, &err)
|
||||||
|
|
||||||
// Now encrypt the src with the nonce
|
// Now encrypt the src with the nonce
|
||||||
out, err := f.cipher.newEncrypter(in, &nonce)
|
out, err := f.cipher.newEncrypter(in, &nonce)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", fmt.Errorf("failed to make encrypter: %w", err)
|
return "", errors.Wrap(err, "failed to make encrypter")
|
||||||
}
|
}
|
||||||
|
|
||||||
// pipe into hash
|
// pipe into hash
|
||||||
m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
|
m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", fmt.Errorf("failed to make hasher: %w", err)
|
return "", errors.Wrap(err, "failed to make hasher")
|
||||||
}
|
}
|
||||||
_, err = io.Copy(m, out)
|
_, err = io.Copy(m, out)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", fmt.Errorf("failed to hash data: %w", err)
|
return "", errors.Wrap(err, "failed to hash data")
|
||||||
}
|
}
|
||||||
|
|
||||||
return m.Sums()[hashType], nil
|
return m.Sums()[hashType], nil
|
||||||
@@ -813,20 +598,16 @@ func (f *Fs) computeHashWithNonce(ctx context.Context, nonce nonce, src fs.Objec
|
|||||||
//
|
//
|
||||||
// Note that we break lots of encapsulation in this function.
|
// Note that we break lots of encapsulation in this function.
|
||||||
func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
|
func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
|
||||||
if f.opt.NoDataEncryption {
|
|
||||||
return src.Hash(ctx, hashType)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read the nonce - opening the file is sufficient to read the nonce in
|
// Read the nonce - opening the file is sufficient to read the nonce in
|
||||||
// use a limited read so we only read the header
|
// use a limited read so we only read the header
|
||||||
in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
|
in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", fmt.Errorf("failed to open object to read nonce: %w", err)
|
return "", errors.Wrap(err, "failed to open object to read nonce")
|
||||||
}
|
}
|
||||||
d, err := f.cipher.newDecrypter(in)
|
d, err := f.cipher.newDecrypter(in)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
_ = in.Close()
|
_ = in.Close()
|
||||||
return "", fmt.Errorf("failed to open object to read nonce: %w", err)
|
return "", errors.Wrap(err, "failed to open object to read nonce")
|
||||||
}
|
}
|
||||||
nonce := d.nonce
|
nonce := d.nonce
|
||||||
// fs.Debugf(o, "Read nonce % 2x", nonce)
|
// fs.Debugf(o, "Read nonce % 2x", nonce)
|
||||||
@@ -845,7 +626,7 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
|
|||||||
// Close d (and hence in) once we have read the nonce
|
// Close d (and hence in) once we have read the nonce
|
||||||
err = d.Close()
|
err = d.Close()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", fmt.Errorf("failed to close nonce read: %w", err)
|
return "", errors.Wrap(err, "failed to close nonce read")
|
||||||
}
|
}
|
||||||
|
|
||||||
return f.computeHashWithNonce(ctx, nonce, src, hashType)
|
return f.computeHashWithNonce(ctx, nonce, src, hashType)
|
||||||
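The idea behind ComputeHash, reduced to its core: re-encrypt the local plaintext under the same nonce the remote object was written with and hash that stream, so the result can be compared with the remote's stored hash. In this standalone sketch the encrypting reader is a toy XOR stream purely so the example runs; the real code uses the cipher's newEncrypter seeded with the nonce read from the object header.

package main

import (
    "crypto/md5"
    "fmt"
    "io"
    "strings"
)

// xorReader is a toy stand-in for the real encrypting reader.
type xorReader struct {
    r io.Reader
    k byte
}

func (x xorReader) Read(p []byte) (int, error) {
    n, err := x.r.Read(p)
    for i := 0; i < n; i++ {
        p[i] ^= x.k
    }
    return n, err
}

func main() {
    src := strings.NewReader("contents of the local file")
    enc := xorReader{r: src, k: 0x5a} // pretend this is the nonce-seeded encrypter

    // hash the encrypted stream, not the plaintext, so it matches what the
    // remote actually stores
    h := md5.New()
    if _, err := io.Copy(h, enc); err != nil {
        fmt.Println("hash failed:", err)
        return
    }
    fmt.Printf("hash of the encrypted stream: %x\n", h.Sum(nil))
}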
@@ -860,7 +641,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
|
|||||||
}
|
}
|
||||||
out := make([]fs.Directory, len(dirs))
|
out := make([]fs.Directory, len(dirs))
|
||||||
for i, dir := range dirs {
|
for i, dir := range dirs {
|
||||||
out[i] = fs.NewDirWrapper(f.cipher.EncryptDirName(dir.Remote()), dir)
|
out[i] = fs.NewDirCopy(ctx, dir).SetRemote(f.cipher.EncryptDirName(dir.Remote()))
|
||||||
}
|
}
|
||||||
return do(ctx, out)
|
return do(ctx, out)
|
||||||
}
|
}
|
||||||
@@ -875,7 +656,7 @@ func (f *Fs) DirCacheFlush() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// PublicLink generates a public link to the remote path (usually readable by anyone)
|
// PublicLink generates a public link to the remote path (usually readable by anyone)
|
||||||
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
|
func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
|
||||||
do := f.Fs.Features().PublicLink
|
do := f.Fs.Features().PublicLink
|
||||||
if do == nil {
|
if do == nil {
|
||||||
return "", errors.New("PublicLink not supported")
|
return "", errors.New("PublicLink not supported")
|
||||||
@@ -883,9 +664,9 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
|||||||
o, err := f.NewObject(ctx, remote)
|
o, err := f.NewObject(ctx, remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// assume it is a directory
|
// assume it is a directory
|
||||||
return do(ctx, f.cipher.EncryptDirName(remote), expire, unlink)
|
return do(ctx, f.cipher.EncryptDirName(remote))
|
||||||
}
|
}
|
||||||
return do(ctx, o.(*Object).Object.Remote(), expire, unlink)
|
return do(ctx, o.(*Object).Object.Remote())
|
||||||
}
|
}
|
||||||
|
|
||||||
// ChangeNotify calls the passed function with a path
|
// ChangeNotify calls the passed function with a path
|
||||||
@@ -923,30 +704,28 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
|
|||||||
var commandHelp = []fs.CommandHelp{
|
var commandHelp = []fs.CommandHelp{
|
||||||
{
|
{
|
||||||
Name: "encode",
|
Name: "encode",
|
||||||
Short: "Encode the given filename(s).",
|
Short: "Encode the given filename(s)",
|
||||||
Long: `This encodes the filenames given as arguments returning a list of
|
Long: `This encodes the filenames given as arguments returning a list of
|
||||||
strings of the encoded results.
|
strings of the encoded results.
|
||||||
|
|
||||||
Usage examples:
|
Usage Example:
|
||||||
|
|
||||||
` + "```console" + `
|
rclone backend encode crypt: file1 [file2...]
|
||||||
rclone backend encode crypt: file1 [file2...]
|
rclone rc backend/command command=encode fs=crypt: file1 [file2...]
|
||||||
rclone rc backend/command command=encode fs=crypt: file1 [file2...]
|
`,
|
||||||
` + "```",
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "decode",
|
Name: "decode",
|
||||||
Short: "Decode the given filename(s).",
|
Short: "Decode the given filename(s)",
|
||||||
Long: `This decodes the filenames given as arguments returning a list of
|
Long: `This decodes the filenames given as arguments returning a list of
|
||||||
strings of the decoded results. It will return an error if any of the
|
strings of the decoded results. It will return an error if any of the
|
||||||
inputs are invalid.
|
inputs are invalid.
|
||||||
|
|
||||||
Usage examples:
|
Usage Example:
|
||||||
|
|
||||||
` + "```console" + `
|
rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
|
||||||
rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
|
rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
|
||||||
rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
|
`,
|
||||||
` + "```",
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -959,14 +738,14 @@ rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile
|
|||||||
// The result should be capable of being JSON encoded
|
// The result should be capable of being JSON encoded
|
||||||
// If it is a string or a []string it will be shown to the user
|
// If it is a string or a []string it will be shown to the user
|
||||||
// otherwise it will be JSON encoded and shown to the user like that
|
// otherwise it will be JSON encoded and shown to the user like that
|
||||||
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
|
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
|
||||||
switch name {
|
switch name {
|
||||||
case "decode":
|
case "decode":
|
||||||
out := make([]string, 0, len(arg))
|
out := make([]string, 0, len(arg))
|
||||||
for _, encryptedFileName := range arg {
|
for _, encryptedFileName := range arg {
|
||||||
fileName, err := f.DecryptFileName(encryptedFileName)
|
fileName, err := f.DecryptFileName(encryptedFileName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return out, fmt.Errorf("failed to decrypt: %s: %w", encryptedFileName, err)
|
return out, errors.Wrap(err, fmt.Sprintf("Failed to decrypt : %s", encryptedFileName))
|
||||||
}
|
}
|
||||||
out = append(out, fileName)
|
out = append(out, fileName)
|
||||||
}
|
}
|
||||||
@@ -1024,13 +803,9 @@ func (o *Object) Remote() string {
|
|||||||
|
|
||||||
// Size returns the size of the file
|
// Size returns the size of the file
|
||||||
func (o *Object) Size() int64 {
|
func (o *Object) Size() int64 {
|
||||||
size := o.Object.Size()
|
size, err := o.f.cipher.DecryptedSize(o.Object.Size())
|
||||||
if !o.f.opt.NoDataEncryption {
|
if err != nil {
|
||||||
var err error
|
fs.Debugf(o, "Bad size for decrypt: %v", err)
|
||||||
size, err = o.f.cipher.DecryptedSize(size)
|
|
||||||
if err != nil {
|
|
||||||
fs.Debugf(o, "Bad size for decrypt: %v", err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return size
|
return size
|
||||||
}
|
}
|
||||||
@@ -1048,10 +823,6 @@ func (o *Object) UnWrap() fs.Object {
|
|||||||
|
|
||||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
||||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
||||||
if o.f.opt.NoDataEncryption {
|
|
||||||
return o.Object.Open(ctx, options...)
|
|
||||||
}
|
|
||||||
|
|
||||||
var openOptions []fs.OpenOption
|
var openOptions []fs.OpenOption
|
||||||
var offset, limit int64 = 0, -1
|
var offset, limit int64 = 0, -1
|
||||||
for _, option := range options {
|
for _, option := range options {
|
||||||
@@ -1098,14 +869,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
|
|
||||||
// newDir returns a dir with the Name decrypted
|
// newDir returns a dir with the Name decrypted
|
||||||
func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
|
func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
|
||||||
|
newDir := fs.NewDirCopy(ctx, dir)
|
||||||
remote := dir.Remote()
|
remote := dir.Remote()
|
||||||
decryptedRemote, err := f.cipher.DecryptDirName(remote)
|
decryptedRemote, err := f.cipher.DecryptDirName(remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Debugf(remote, "Undecryptable dir name: %v", err)
|
fs.Debugf(remote, "Undecryptable dir name: %v", err)
|
||||||
} else {
|
} else {
|
||||||
remote = decryptedRemote
|
newDir.SetRemote(decryptedRemote)
|
||||||
}
|
}
|
||||||
newDir := fs.NewDirWrapper(remote, dir)
|
|
||||||
return newDir
|
return newDir
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1127,16 +898,6 @@ func (f *Fs) Disconnect(ctx context.Context) error {
|
|||||||
return do(ctx)
|
return do(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Shutdown the backend, closing any background tasks and any
|
|
||||||
// cached connections.
|
|
||||||
func (f *Fs) Shutdown(ctx context.Context) error {
|
|
||||||
do := f.Fs.Features().Shutdown
|
|
||||||
if do == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return do(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source
|
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source
|
||||||
//
|
//
|
||||||
// This encrypts the remote name and adjusts the size
|
// This encrypts the remote name and adjusts the size
|
||||||
@@ -1170,9 +931,6 @@ func (o *ObjectInfo) Size() int64 {
|
|||||||
if size < 0 {
|
if size < 0 {
|
||||||
return size
|
return size
|
||||||
}
|
}
|
||||||
if o.f.opt.NoDataEncryption {
|
|
||||||
return size
|
|
||||||
}
|
|
||||||
return o.f.cipher.EncryptedSize(size)
|
return o.f.cipher.EncryptedSize(size)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1184,11 +942,10 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
|
|||||||
// Get the underlying object if there is one
|
// Get the underlying object if there is one
|
||||||
if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
|
if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
|
||||||
// Prefer direct interface assertion
|
// Prefer direct interface assertion
|
||||||
} else if do, ok := o.ObjectInfo.(*fs.OverrideRemote); ok {
|
} else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok {
|
||||||
// Unwrap if it is an operations.OverrideRemote
|
// Otherwise likely is an operations.OverrideRemote
|
||||||
srcObj = do.UnWrap()
|
srcObj = do.UnWrap()
|
||||||
} else {
|
} else {
|
||||||
// Otherwise don't unwrap any further
|
|
||||||
return "", nil
|
return "", nil
|
||||||
}
|
}
|
||||||
// if this is wrapping a local object then we work out the hash
|
// if this is wrapping a local object then we work out the hash
|
||||||
@@ -1200,50 +957,6 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
|
|||||||
return "", nil
|
return "", nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetTier returns storage tier or class of the Object
|
|
||||||
func (o *ObjectInfo) GetTier() string {
|
|
||||||
do, ok := o.ObjectInfo.(fs.GetTierer)
|
|
||||||
if !ok {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return do.GetTier()
|
|
||||||
}
|
|
||||||
|
|
||||||
// ID returns the ID of the Object if known, or "" if not
|
|
||||||
func (o *ObjectInfo) ID() string {
|
|
||||||
do, ok := o.ObjectInfo.(fs.IDer)
|
|
||||||
if !ok {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return do.ID()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Metadata returns metadata for an object
|
|
||||||
//
|
|
||||||
// It should return nil if there is no Metadata
|
|
||||||
func (o *ObjectInfo) Metadata(ctx context.Context) (fs.Metadata, error) {
|
|
||||||
do, ok := o.ObjectInfo.(fs.Metadataer)
|
|
||||||
if !ok {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
return do.Metadata(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MimeType returns the content type of the Object if
|
|
||||||
// known, or "" if not
|
|
||||||
//
|
|
||||||
// This is deliberately unsupported so we don't leak mime type info by
|
|
||||||
// default.
|
|
||||||
func (o *ObjectInfo) MimeType(ctx context.Context) string {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnWrap returns the Object that this Object is wrapping or
|
|
||||||
// nil if it isn't wrapping anything
|
|
||||||
func (o *ObjectInfo) UnWrap() fs.Object {
|
|
||||||
return fs.UnWrapObjectInfo(o.ObjectInfo)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ID returns the ID of the Object if known, or "" if not
|
// ID returns the ID of the Object if known, or "" if not
|
||||||
func (o *Object) ID() string {
|
func (o *Object) ID() string {
|
||||||
do, ok := o.Object.(fs.IDer)
|
do, ok := o.Object.(fs.IDer)
|
||||||
@@ -1272,37 +985,6 @@ func (o *Object) GetTier() string {
|
|||||||
return do.GetTier()
|
return do.GetTier()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Metadata returns metadata for an object
|
|
||||||
//
|
|
||||||
// It should return nil if there is no Metadata
|
|
||||||
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
|
|
||||||
do, ok := o.Object.(fs.Metadataer)
|
|
||||||
if !ok {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
return do.Metadata(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetMetadata sets metadata for an Object
|
|
||||||
//
|
|
||||||
// It should return fs.ErrorNotImplemented if it can't set metadata
|
|
||||||
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
|
|
||||||
do, ok := o.Object.(fs.SetMetadataer)
|
|
||||||
if !ok {
|
|
||||||
return fs.ErrorNotImplemented
|
|
||||||
}
|
|
||||||
return do.SetMetadata(ctx, metadata)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MimeType returns the content type of the Object if
|
|
||||||
// known, or "" if not
|
|
||||||
//
|
|
||||||
// This is deliberately unsupported so we don't leak mime type info by
|
|
||||||
// default.
|
|
||||||
func (o *Object) MimeType(ctx context.Context) string {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check the interfaces are satisfied
|
// Check the interfaces are satisfied
|
||||||
var (
|
var (
|
||||||
_ fs.Fs = (*Fs)(nil)
|
_ fs.Fs = (*Fs)(nil)
|
||||||
@@ -1319,14 +1001,15 @@ var (
|
|||||||
_ fs.Abouter = (*Fs)(nil)
|
_ fs.Abouter = (*Fs)(nil)
|
||||||
_ fs.Wrapper = (*Fs)(nil)
|
_ fs.Wrapper = (*Fs)(nil)
|
||||||
_ fs.MergeDirser = (*Fs)(nil)
|
_ fs.MergeDirser = (*Fs)(nil)
|
||||||
_ fs.DirSetModTimer = (*Fs)(nil)
|
|
||||||
_ fs.MkdirMetadataer = (*Fs)(nil)
|
|
||||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||||
_ fs.ChangeNotifier = (*Fs)(nil)
|
_ fs.ChangeNotifier = (*Fs)(nil)
|
||||||
_ fs.PublicLinker = (*Fs)(nil)
|
_ fs.PublicLinker = (*Fs)(nil)
|
||||||
_ fs.UserInfoer = (*Fs)(nil)
|
_ fs.UserInfoer = (*Fs)(nil)
|
||||||
_ fs.Disconnecter = (*Fs)(nil)
|
_ fs.Disconnecter = (*Fs)(nil)
|
||||||
_ fs.Shutdowner = (*Fs)(nil)
|
_ fs.ObjectInfo = (*ObjectInfo)(nil)
|
||||||
_ fs.FullObjectInfo = (*ObjectInfo)(nil)
|
_ fs.Object = (*Object)(nil)
|
||||||
_ fs.FullObject = (*Object)(nil)
|
_ fs.ObjectUnWrapper = (*Object)(nil)
|
||||||
|
_ fs.IDer = (*Object)(nil)
|
||||||
|
_ fs.SetTierer = (*Object)(nil)
|
||||||
|
_ fs.GetTierer = (*Object)(nil)
|
||||||
)
|
)
|
||||||
|
|||||||
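For reference, the two crypt backend commands documented in the help text above can be run directly from the command line. A minimal, hypothetical invocation (the remote name `crypt:` and the file names are placeholders, not taken from this diff):

```console
rclone backend encode crypt: file1.txt subdir/file2.txt
rclone backend decode crypt: ENCRYPTEDNAME1 ENCRYPTEDNAME2
```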
@@ -17,28 +17,41 @@ import (
    "github.com/stretchr/testify/require"
)

+type testWrapper struct {
+    fs.ObjectInfo
+}
+
+// UnWrap returns the Object that this Object is wrapping or nil if it
+// isn't wrapping anything
+func (o testWrapper) UnWrap() fs.Object {
+    if o, ok := o.ObjectInfo.(fs.Object); ok {
+        return o
+    }
+    return nil
+}
+
// Create a temporary local fs to upload things from
-func makeTempLocalFs(t *testing.T) (localFs fs.Fs) {
-    localFs, err := fs.TemporaryLocalFs(context.Background())
+func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
+    localFs, err := fs.TemporaryLocalFs()
    require.NoError(t, err)
-    t.Cleanup(func() {
+    cleanup = func() {
        require.NoError(t, localFs.Rmdir(context.Background(), ""))
-    })
-    return localFs
+    }
+    return localFs, cleanup
}

// Upload a file to a remote
-func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object) {
+func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object, cleanup func()) {
    inBuf := bytes.NewBufferString(contents)
    t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC)
    upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil)
    obj, err := f.Put(context.Background(), inBuf, upSrc)
    require.NoError(t, err)
-    t.Cleanup(func() {
+    cleanup = func() {
        require.NoError(t, obj.Remove(context.Background()))
-    })
-    return obj
+    }
+    return obj, cleanup
}

// Test the ObjectInfo
@@ -52,9 +65,11 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
        path = "_wrap"
    }

-    localFs := makeTempLocalFs(t)
+    localFs, cleanupLocalFs := makeTempLocalFs(t)
+    defer cleanupLocalFs()

-    obj := uploadFile(t, localFs, path, contents)
+    obj, cleanupObj := uploadFile(t, localFs, path, contents)
+    defer cleanupObj()

    // encrypt the data
    inBuf := bytes.NewBufferString(contents)
@@ -68,17 +83,15 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
    var oi fs.ObjectInfo = obj
    if wrap {
        // wrap the object in an fs.ObjectUnwrapper if required
-        oi = fs.NewOverrideRemote(oi, "new_remote")
+        oi = testWrapper{oi}
    }

    // wrap the object in a crypt for upload using the nonce we
-    // saved from the encrypter
+    // saved from the encryptor
    src := f.newObjectInfo(oi, nonce)

    // Test ObjectInfo methods
-    if !f.opt.NoDataEncryption {
-        assert.Equal(t, int64(outBuf.Len()), src.Size())
-    }
+    assert.Equal(t, int64(outBuf.Len()), src.Size())
    assert.Equal(t, f, src.Fs())
    assert.NotEqual(t, path, src.Remote())

@@ -101,13 +114,16 @@ func testComputeHash(t *testing.T, f *Fs) {
        t.Skipf("%v: does not support hashes", f.Fs)
    }

-    localFs := makeTempLocalFs(t)
+    localFs, cleanupLocalFs := makeTempLocalFs(t)
+    defer cleanupLocalFs()

    // Upload a file to localFs as a test object
-    localObj := uploadFile(t, localFs, path, contents)
+    localObj, cleanupLocalObj := uploadFile(t, localFs, path, contents)
+    defer cleanupLocalObj()

    // Upload the same data to the remote Fs also
-    remoteObj := uploadFile(t, f, path, contents)
+    remoteObj, cleanupRemoteObj := uploadFile(t, f, path, contents)
+    defer cleanupRemoteObj()

    // Calculate the expected Hash of the remote object
    computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType)

@@ -4,7 +4,6 @@ package crypt_test
import (
    "os"
    "path/filepath"
-    "runtime"
    "testing"

    "github.com/rclone/rclone/backend/crypt"
@@ -24,13 +23,13 @@ func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: *fstest.RemoteName,
        NilObject:  (*crypt.Object)(nil),
-        UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+        UnimplementableFsMethods:     []string{"OpenWriterAt"},
        UnimplementableObjectMethods: []string{"MimeType"},
    })
}

// TestStandard runs integration tests against the remote
-func TestStandardBase32(t *testing.T) {
+func TestStandard(t *testing.T) {
    if *fstest.RemoteName != "" {
        t.Skip("Skipping as -remote set")
    }
@@ -45,53 +44,8 @@ func TestStandardBase32(t *testing.T) {
            {Name: name, Key: "password", Value: obscure.MustObscure("potato")},
            {Name: name, Key: "filename_encryption", Value: "standard"},
        },
-        UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+        UnimplementableFsMethods:     []string{"OpenWriterAt"},
        UnimplementableObjectMethods: []string{"MimeType"},
-        QuickTestOK:                  true,
-    })
-}
-
-func TestStandardBase64(t *testing.T) {
-    if *fstest.RemoteName != "" {
-        t.Skip("Skipping as -remote set")
-    }
-    tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
-    name := "TestCrypt"
-    fstests.Run(t, &fstests.Opt{
-        RemoteName: name + ":",
-        NilObject:  (*crypt.Object)(nil),
-        ExtraConfig: []fstests.ExtraConfigItem{
-            {Name: name, Key: "type", Value: "crypt"},
-            {Name: name, Key: "remote", Value: tempdir},
-            {Name: name, Key: "password", Value: obscure.MustObscure("potato")},
-            {Name: name, Key: "filename_encryption", Value: "standard"},
-            {Name: name, Key: "filename_encoding", Value: "base64"},
-        },
-        UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
-        UnimplementableObjectMethods: []string{"MimeType"},
-        QuickTestOK:                  true,
-    })
-}
-
-func TestStandardBase32768(t *testing.T) {
-    if *fstest.RemoteName != "" {
-        t.Skip("Skipping as -remote set")
-    }
-    tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
-    name := "TestCrypt"
-    fstests.Run(t, &fstests.Opt{
-        RemoteName: name + ":",
-        NilObject:  (*crypt.Object)(nil),
-        ExtraConfig: []fstests.ExtraConfigItem{
-            {Name: name, Key: "type", Value: "crypt"},
-            {Name: name, Key: "remote", Value: tempdir},
-            {Name: name, Key: "password", Value: obscure.MustObscure("potato")},
-            {Name: name, Key: "filename_encryption", Value: "standard"},
-            {Name: name, Key: "filename_encoding", Value: "base32768"},
-        },
-        UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
-        UnimplementableObjectMethods: []string{"MimeType"},
-        QuickTestOK:                  true,
    })
}

@@ -111,9 +65,8 @@ func TestOff(t *testing.T) {
            {Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
            {Name: name, Key: "filename_encryption", Value: "off"},
        },
-        UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+        UnimplementableFsMethods:     []string{"OpenWriterAt"},
        UnimplementableObjectMethods: []string{"MimeType"},
-        QuickTestOK:                  true,
    })
}

@@ -122,9 +75,6 @@ func TestObfuscate(t *testing.T) {
    if *fstest.RemoteName != "" {
        t.Skip("Skipping as -remote set")
    }
-    if runtime.GOOS == "darwin" {
-        t.Skip("Skipping on macOS as obfuscating control characters makes filenames macOS can't cope with")
-    }
    tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
    name := "TestCrypt3"
    fstests.Run(t, &fstests.Opt{
@@ -137,35 +87,7 @@ func TestObfuscate(t *testing.T) {
            {Name: name, Key: "filename_encryption", Value: "obfuscate"},
        },
        SkipBadWindowsCharacters: true,
-        UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+        UnimplementableFsMethods:     []string{"OpenWriterAt"},
        UnimplementableObjectMethods: []string{"MimeType"},
-        QuickTestOK:                  true,
-    })
-}
-
-// TestNoDataObfuscate runs integration tests against the remote
-func TestNoDataObfuscate(t *testing.T) {
-    if *fstest.RemoteName != "" {
-        t.Skip("Skipping as -remote set")
-    }
-    if runtime.GOOS == "darwin" {
-        t.Skip("Skipping on macOS as obfuscating control characters makes filenames macOS can't cope with")
-    }
-    tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
-    name := "TestCrypt4"
-    fstests.Run(t, &fstests.Opt{
-        RemoteName: name + ":",
-        NilObject:  (*crypt.Object)(nil),
-        ExtraConfig: []fstests.ExtraConfigItem{
-            {Name: name, Key: "type", Value: "crypt"},
-            {Name: name, Key: "remote", Value: tempdir},
-            {Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
-            {Name: name, Key: "filename_encryption", Value: "obfuscate"},
-            {Name: name, Key: "no_data_encryption", Value: "true"},
-        },
-        SkipBadWindowsCharacters: true,
-        UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
-        UnimplementableObjectMethods: []string{"MimeType"},
-        QuickTestOK:                  true,
    })
}

@@ -4,15 +4,15 @@
// buffers which are a multiple of an underlying crypto block size.
package pkcs7

-import "errors"
+import "github.com/pkg/errors"

// Errors Unpad can return
var (
-    ErrorPaddingNotFound      = errors.New("bad PKCS#7 padding - not padded")
-    ErrorPaddingNotAMultiple  = errors.New("bad PKCS#7 padding - not a multiple of blocksize")
-    ErrorPaddingTooLong       = errors.New("bad PKCS#7 padding - too long")
-    ErrorPaddingTooShort      = errors.New("bad PKCS#7 padding - too short")
-    ErrorPaddingNotAllTheSame = errors.New("bad PKCS#7 padding - not all the same")
+    ErrorPaddingNotFound      = errors.New("Bad PKCS#7 padding - not padded")
+    ErrorPaddingNotAMultiple  = errors.New("Bad PKCS#7 padding - not a multiple of blocksize")
+    ErrorPaddingTooLong       = errors.New("Bad PKCS#7 padding - too long")
+    ErrorPaddingTooShort      = errors.New("Bad PKCS#7 padding - too short")
+    ErrorPaddingNotAllTheSame = errors.New("Bad PKCS#7 padding - not all the same")
)

// Pad buf using PKCS#7 to a multiple of n.
@@ -25,7 +25,7 @@ func Pad(n int, buf []byte) []byte {
    }
    length := len(buf)
    padding := n - (length % n)
-    for range padding {
+    for i := 0; i < padding; i++ {
        buf = append(buf, byte(padding))
    }
    if (len(buf) % n) != 0 {
@@ -54,7 +54,7 @@ func Unpad(n int, buf []byte) ([]byte, error) {
    if padding == 0 {
        return nil, ErrorPaddingTooShort
    }
-    for i := range padding {
+    for i := 0; i < padding; i++ {
        if buf[length-1-i] != byte(padding) {
            return nil, ErrorPaddingNotAllTheSame
        }
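As a side note on the padding scheme that the Pad and Unpad functions above implement: a minimal round-trip sketch, assuming the package lives at github.com/rclone/rclone/backend/crypt/pkcs7 (import path assumed, not shown in this diff).

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/backend/crypt/pkcs7" // assumed import path
)

func main() {
	// Pad 5 bytes of data to the 16-byte AES block size: 11 bytes of
	// value 0x0b are appended, so the result is a multiple of 16.
	padded := pkcs7.Pad(16, []byte("hello"))
	fmt.Println(len(padded)) // 16

	// Unpad reverses the operation and returns one of the errors above
	// if the padding bytes are inconsistent.
	orig, err := pkcs7.Unpad(16, padded)
	fmt.Println(string(orig), err) // hello <nil>
}
```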
@@ -1,38 +0,0 @@
-// Type definitions specific to Dataverse
-
-package api
-
-// DataverseDatasetResponse is returned by the Dataverse dataset API
-type DataverseDatasetResponse struct {
-    Status string           `json:"status"`
-    Data   DataverseDataset `json:"data"`
-}
-
-// DataverseDataset is the representation of a dataset
-type DataverseDataset struct {
-    LatestVersion DataverseDatasetVersion `json:"latestVersion"`
-}
-
-// DataverseDatasetVersion is the representation of a dataset version
-type DataverseDatasetVersion struct {
-    LastUpdateTime string          `json:"lastUpdateTime"`
-    Files          []DataverseFile `json:"files"`
-}
-
-// DataverseFile is the representation of a file found in a dataset
-type DataverseFile struct {
-    DirectoryLabel string            `json:"directoryLabel"`
-    DataFile       DataverseDataFile `json:"dataFile"`
-}
-
-// DataverseDataFile represents file metadata details
-type DataverseDataFile struct {
-    ID                 int64  `json:"id"`
-    Filename           string `json:"filename"`
-    ContentType        string `json:"contentType"`
-    FileSize           int64  `json:"filesize"`
-    OriginalFileFormat string `json:"originalFileFormat"`
-    OriginalFileSize   int64  `json:"originalFileSize"`
-    OriginalFileName   string `json:"originalFileName"`
-    MD5                string `json:"md5"`
-}

@@ -1,33 +0,0 @@
-// Type definitions specific to InvenioRDM
-
-package api
-
-// InvenioRecordResponse is the representation of a record stored in InvenioRDM
-type InvenioRecordResponse struct {
-    Links InvenioRecordResponseLinks `json:"links"`
-}
-
-// InvenioRecordResponseLinks represents a record's links
-type InvenioRecordResponseLinks struct {
-    Self string `json:"self"`
-}
-
-// InvenioFilesResponse is the representation of a record's files
-type InvenioFilesResponse struct {
-    Entries []InvenioFilesResponseEntry `json:"entries"`
-}
-
-// InvenioFilesResponseEntry is the representation of a file entry
-type InvenioFilesResponseEntry struct {
-    Key      string                         `json:"key"`
-    Checksum string                         `json:"checksum"`
-    Size     int64                          `json:"size"`
-    Updated  string                         `json:"updated"`
-    MimeType string                         `json:"mimetype"`
-    Links    InvenioFilesResponseEntryLinks `json:"links"`
-}
-
-// InvenioFilesResponseEntryLinks represents file links details
-type InvenioFilesResponseEntryLinks struct {
-    Content string `json:"content"`
-}

@@ -1,26 +0,0 @@
-// Package api has general type definitions for doi
-package api
-
-// DoiResolverResponse is returned by the DOI resolver API
-//
-// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
-type DoiResolverResponse struct {
-    ResponseCode int                        `json:"responseCode"`
-    Handle       string                     `json:"handle"`
-    Values       []DoiResolverResponseValue `json:"values"`
-}
-
-// DoiResolverResponseValue is a single handle record value
-type DoiResolverResponseValue struct {
-    Index     int                          `json:"index"`
-    Type      string                       `json:"type"`
-    Data      DoiResolverResponseValueData `json:"data"`
-    TTL       int                          `json:"ttl"`
-    Timestamp string                       `json:"timestamp"`
-}
-
-// DoiResolverResponseValueData is the data held in a handle value
-type DoiResolverResponseValueData struct {
-    Format string `json:"format"`
-    Value  any    `json:"value"`
-}
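For orientation, the resolver types above mirror the JSON returned by the doi.org handle API. A minimal, self-contained sketch of decoding such a response follows; the sample JSON and the local type copies are illustrative only, not taken from the diff.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Minimal copies of the api types shown above, so the example compiles on its own.
type valueData struct {
	Format string `json:"format"`
	Value  any    `json:"value"`
}

type value struct {
	Index int       `json:"index"`
	Type  string    `json:"type"`
	Data  valueData `json:"data"`
}

type resolverResponse struct {
	ResponseCode int     `json:"responseCode"`
	Handle       string  `json:"handle"`
	Values       []value `json:"values"`
}

func main() {
	// Illustrative response for a resolved DOI.
	sample := `{"responseCode":1,"handle":"10.1000/182","values":[{"index":1,"type":"URL","data":{"format":"string","value":"https://example.org/dataset"}}]}`

	var r resolverResponse
	if err := json.Unmarshal([]byte(sample), &r); err != nil {
		panic(err)
	}
	// The backend code shown further down looks for a value of type "URL"
	// whose data format is "string" and uses it as the resolved URL.
	for _, v := range r.Values {
		if v.Type == "URL" && v.Data.Format == "string" {
			fmt.Println(v.Data.Value) // https://example.org/dataset
		}
	}
}
```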
@@ -1,112 +0,0 @@
-// Implementation for Dataverse
-
-package doi
-
-import (
-    "context"
-    "fmt"
-    "net/http"
-    "net/url"
-    "path"
-    "strings"
-    "time"
-
-    "github.com/rclone/rclone/backend/doi/api"
-    "github.com/rclone/rclone/fs"
-    "github.com/rclone/rclone/lib/rest"
-)
-
-// Returns true if resolvedURL is likely a DOI hosted on a Dataverse intallation
-func activateDataverse(resolvedURL *url.URL) (isActive bool) {
-    queryValues := resolvedURL.Query()
-    persistentID := queryValues.Get("persistentId")
-    return persistentID != ""
-}
-
-// Resolve the main API endpoint for a DOI hosted on a Dataverse installation
-func resolveDataverseEndpoint(resolvedURL *url.URL) (provider Provider, endpoint *url.URL, err error) {
-    queryValues := resolvedURL.Query()
-    persistentID := queryValues.Get("persistentId")
-
-    query := url.Values{}
-    query.Add("persistentId", persistentID)
-    endpointURL := resolvedURL.ResolveReference(&url.URL{Path: "/api/datasets/:persistentId/", RawQuery: query.Encode()})
-
-    return Dataverse, endpointURL, nil
-}
-
-// dataverseProvider implements the doiProvider interface for Dataverse installations
-type dataverseProvider struct {
-    f *Fs
-}
-
-// ListEntries returns the full list of entries found at the remote, regardless of root
-func (dp *dataverseProvider) ListEntries(ctx context.Context) (entries []*Object, err error) {
-    // Use the cache if populated
-    cachedEntries, found := dp.f.cache.GetMaybe("files")
-    if found {
-        parsedEntries, ok := cachedEntries.([]Object)
-        if ok {
-            for _, entry := range parsedEntries {
-                newEntry := entry
-                entries = append(entries, &newEntry)
-            }
-            return entries, nil
-        }
-    }
-
-    filesURL := dp.f.endpoint
-    var res *http.Response
-    var result api.DataverseDatasetResponse
-    opts := rest.Opts{
-        Method:     "GET",
-        Path:       strings.TrimLeft(filesURL.EscapedPath(), "/"),
-        Parameters: filesURL.Query(),
-    }
-    err = dp.f.pacer.Call(func() (bool, error) {
-        res, err = dp.f.srv.CallJSON(ctx, &opts, nil, &result)
-        return shouldRetry(ctx, res, err)
-    })
-    if err != nil {
-        return nil, fmt.Errorf("readDir failed: %w", err)
-    }
-    modTime, modTimeErr := time.Parse(time.RFC3339, result.Data.LatestVersion.LastUpdateTime)
-    if modTimeErr != nil {
-        fs.Logf(dp.f, "error: could not parse last update time %v", modTimeErr)
-        modTime = timeUnset
-    }
-    for _, file := range result.Data.LatestVersion.Files {
-        contentURLPath := fmt.Sprintf("/api/access/datafile/%d", file.DataFile.ID)
-        query := url.Values{}
-        query.Add("format", "original")
-        contentURL := dp.f.endpoint.ResolveReference(&url.URL{Path: contentURLPath, RawQuery: query.Encode()})
-        entry := &Object{
-            fs:          dp.f,
-            remote:      path.Join(file.DirectoryLabel, file.DataFile.Filename),
-            contentURL:  contentURL.String(),
-            size:        file.DataFile.FileSize,
-            modTime:     modTime,
-            md5:         file.DataFile.MD5,
-            contentType: file.DataFile.ContentType,
-        }
-        if file.DataFile.OriginalFileName != "" {
-            entry.remote = path.Join(file.DirectoryLabel, file.DataFile.OriginalFileName)
-            entry.size = file.DataFile.OriginalFileSize
-            entry.contentType = file.DataFile.OriginalFileFormat
-        }
-        entries = append(entries, entry)
-    }
-    // Populate the cache
-    cacheEntries := []Object{}
-    for _, entry := range entries {
-        cacheEntries = append(cacheEntries, *entry)
-    }
-    dp.f.cache.Put("files", cacheEntries)
-    return entries, nil
-}
-
-func newDataverseProvider(f *Fs) doiProvider {
-    return &dataverseProvider{
-        f: f,
-    }
-}

@@ -1,653 +0,0 @@
// Package doi provides a filesystem interface for digital objects identified by DOIs.
|
|
||||||
//
|
|
||||||
// See: https://www.doi.org/the-identifier/what-is-a-doi/
|
|
||||||
package doi
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"path"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/backend/doi/api"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
|
||||||
"github.com/rclone/rclone/fs/config/configstruct"
|
|
||||||
"github.com/rclone/rclone/fs/fserrors"
|
|
||||||
"github.com/rclone/rclone/fs/fshttp"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
|
||||||
"github.com/rclone/rclone/lib/cache"
|
|
||||||
"github.com/rclone/rclone/lib/pacer"
|
|
||||||
"github.com/rclone/rclone/lib/rest"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// the URL of the DOI resolver
|
|
||||||
//
|
|
||||||
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
|
|
||||||
doiResolverAPIURL = "https://doi.org/api"
|
|
||||||
minSleep = 10 * time.Millisecond
|
|
||||||
maxSleep = 2 * time.Second
|
|
||||||
decayConstant = 2 // bigger for slower decay, exponential
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
errorReadOnly = errors.New("doi remotes are read only")
|
|
||||||
timeUnset = time.Unix(0, 0)
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
fsi := &fs.RegInfo{
|
|
||||||
Name: "doi",
|
|
||||||
Description: "DOI datasets",
|
|
||||||
NewFs: NewFs,
|
|
||||||
CommandHelp: commandHelp,
|
|
||||||
Options: []fs.Option{{
|
|
||||||
Name: "doi",
|
|
||||||
Help: "The DOI or the doi.org URL.",
|
|
||||||
Required: true,
|
|
||||||
}, {
|
|
||||||
Name: fs.ConfigProvider,
|
|
||||||
Help: `DOI provider.
|
|
||||||
|
|
||||||
The DOI provider can be set when rclone does not automatically recognize a supported DOI provider.`,
|
|
||||||
Examples: []fs.OptionExample{
|
|
||||||
{
|
|
||||||
Value: "auto",
|
|
||||||
Help: "Auto-detect provider",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Value: string(Zenodo),
|
|
||||||
Help: "Zenodo",
|
|
||||||
}, {
|
|
||||||
Value: string(Dataverse),
|
|
||||||
Help: "Dataverse",
|
|
||||||
}, {
|
|
||||||
Value: string(Invenio),
|
|
||||||
Help: "Invenio",
|
|
||||||
}},
|
|
||||||
Required: false,
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
|
||||||
Name: "doi_resolver_api_url",
|
|
||||||
Help: `The URL of the DOI resolver API to use.
|
|
||||||
|
|
||||||
The DOI resolver can be set for testing or for cases when the the canonical DOI resolver API cannot be used.
|
|
||||||
|
|
||||||
Defaults to "https://doi.org/api".`,
|
|
||||||
Required: false,
|
|
||||||
Advanced: true,
|
|
||||||
}},
|
|
||||||
}
|
|
||||||
fs.Register(fsi)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Provider defines the type of provider hosting the DOI
|
|
||||||
type Provider string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// Zenodo provider, see https://zenodo.org
|
|
||||||
Zenodo Provider = "zenodo"
|
|
||||||
// Dataverse provider, see https://dataverse.harvard.edu
|
|
||||||
Dataverse Provider = "dataverse"
|
|
||||||
// Invenio provider, see https://inveniordm.docs.cern.ch
|
|
||||||
Invenio Provider = "invenio"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Options defines the configuration for this backend
|
|
||||||
type Options struct {
|
|
||||||
Doi string `config:"doi"` // The DOI, a digital identifier of an object, usually a dataset
|
|
||||||
Provider string `config:"provider"` // The DOI provider
|
|
||||||
DoiResolverAPIURL string `config:"doi_resolver_api_url"` // The URL of the DOI resolver API to use.
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fs stores the interface to the remote HTTP files
|
|
||||||
type Fs struct {
|
|
||||||
name string // name of this remote
|
|
||||||
root string // the path we are working on
|
|
||||||
provider Provider // the DOI provider
|
|
||||||
doiProvider doiProvider // the interface used to interact with the DOI provider
|
|
||||||
features *fs.Features // optional features
|
|
||||||
opt Options // options for this backend
|
|
||||||
ci *fs.ConfigInfo // global config
|
|
||||||
endpoint *url.URL // the main API endpoint for this remote
|
|
||||||
endpointURL string // endpoint as a string
|
|
||||||
srv *rest.Client // the connection to the server
|
|
||||||
pacer *fs.Pacer // pacer for API calls
|
|
||||||
cache *cache.Cache // a cache for the remote metadata
|
|
||||||
}
|
|
||||||
|
|
||||||
// Object is a remote object that has been stat'd (so it exists, but is not necessarily open for reading)
|
|
||||||
type Object struct {
|
|
||||||
fs *Fs // what this object is part of
|
|
||||||
remote string // the remote path
|
|
||||||
contentURL string // the URL where the contents of the file can be downloaded
|
|
||||||
size int64 // size of the object
|
|
||||||
modTime time.Time // modification time of the object
|
|
||||||
contentType string // content type of the object
|
|
||||||
md5 string // MD5 hash of the object content
|
|
||||||
}
|
|
||||||
|
|
||||||
// doiProvider is the interface used to list objects in a DOI
|
|
||||||
type doiProvider interface {
|
|
||||||
// ListEntries returns the full list of entries found at the remote, regardless of root
|
|
||||||
ListEntries(ctx context.Context) (entries []*Object, err error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse the input string as a DOI
|
|
||||||
// Examples:
|
|
||||||
// 10.1000/182 -> 10.1000/182
|
|
||||||
// https://doi.org/10.1000/182 -> 10.1000/182
|
|
||||||
// doi:10.1000/182 -> 10.1000/182
|
|
||||||
func parseDoi(doi string) string {
|
|
||||||
doiURL, err := url.Parse(doi)
|
|
||||||
if err != nil {
|
|
||||||
return doi
|
|
||||||
}
|
|
||||||
if doiURL.Scheme == "doi" {
|
|
||||||
return strings.TrimLeft(strings.TrimPrefix(doi, "doi:"), "/")
|
|
||||||
}
|
|
||||||
if strings.HasSuffix(doiURL.Hostname(), "doi.org") {
|
|
||||||
return strings.TrimLeft(doiURL.Path, "/")
|
|
||||||
}
|
|
||||||
return doi
|
|
||||||
}
|
|
||||||
|
|
||||||
// Resolve a DOI to a URL
|
|
||||||
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
|
|
||||||
func resolveDoiURL(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (doiURL *url.URL, err error) {
|
|
||||||
resolverURL := opt.DoiResolverAPIURL
|
|
||||||
if resolverURL == "" {
|
|
||||||
resolverURL = doiResolverAPIURL
|
|
||||||
}
|
|
||||||
|
|
||||||
var result api.DoiResolverResponse
|
|
||||||
params := url.Values{}
|
|
||||||
params.Add("index", "1")
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "GET",
|
|
||||||
RootURL: resolverURL,
|
|
||||||
Path: "/handles/" + opt.Doi,
|
|
||||||
Parameters: params,
|
|
||||||
}
|
|
||||||
err = pacer.Call(func() (bool, error) {
|
|
||||||
res, err := srv.CallJSON(ctx, &opts, nil, &result)
|
|
||||||
return shouldRetry(ctx, res, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if result.ResponseCode != 1 {
|
|
||||||
return nil, fmt.Errorf("could not resolve DOI (error code %d)", result.ResponseCode)
|
|
||||||
}
|
|
||||||
resolvedURLStr := ""
|
|
||||||
for _, value := range result.Values {
|
|
||||||
if value.Type == "URL" && value.Data.Format == "string" {
|
|
||||||
valueStr, ok := value.Data.Value.(string)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("could not resolve DOI (incorrect response format)")
|
|
||||||
}
|
|
||||||
resolvedURLStr = valueStr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
resolvedURL, err := url.Parse(resolvedURLStr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return resolvedURL, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Resolve the passed configuration into a provider and enpoint
|
|
||||||
func resolveEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (provider Provider, endpoint *url.URL, err error) {
|
|
||||||
resolvedURL, err := resolveDoiURL(ctx, srv, pacer, opt)
|
|
||||||
if err != nil {
|
|
||||||
return "", nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
switch opt.Provider {
|
|
||||||
case string(Dataverse):
|
|
||||||
return resolveDataverseEndpoint(resolvedURL)
|
|
||||||
case string(Invenio):
|
|
||||||
return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
|
|
||||||
case string(Zenodo):
|
|
||||||
return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
|
|
||||||
}
|
|
||||||
|
|
||||||
hostname := strings.ToLower(resolvedURL.Hostname())
|
|
||||||
if hostname == "dataverse.harvard.edu" || activateDataverse(resolvedURL) {
|
|
||||||
return resolveDataverseEndpoint(resolvedURL)
|
|
||||||
}
|
|
||||||
if hostname == "zenodo.org" || strings.HasSuffix(hostname, ".zenodo.org") {
|
|
||||||
return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
|
|
||||||
}
|
|
||||||
if activateInvenio(ctx, srv, pacer, resolvedURL) {
|
|
||||||
return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", nil, fmt.Errorf("provider '%s' is not supported", resolvedURL.Hostname())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make the http connection from the passed options
|
|
||||||
func (f *Fs) httpConnection(ctx context.Context, opt *Options) (isFile bool, err error) {
|
|
||||||
provider, endpoint, err := resolveEndpoint(ctx, f.srv, f.pacer, opt)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update f with the new parameters
|
|
||||||
f.srv.SetRoot(endpoint.ResolveReference(&url.URL{Path: "/"}).String())
|
|
||||||
f.endpoint = endpoint
|
|
||||||
f.endpointURL = endpoint.String()
|
|
||||||
f.provider = provider
|
|
||||||
f.opt.Provider = string(provider)
|
|
||||||
|
|
||||||
switch f.provider {
|
|
||||||
case Dataverse:
|
|
||||||
f.doiProvider = newDataverseProvider(f)
|
|
||||||
case Invenio, Zenodo:
|
|
||||||
f.doiProvider = newInvenioProvider(f)
|
|
||||||
default:
|
|
||||||
return false, fmt.Errorf("provider type '%s' not supported", f.provider)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Determine if the root is a file
|
|
||||||
entries, err := f.doiProvider.ListEntries(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
for _, entry := range entries {
|
|
||||||
if entry.remote == f.root {
|
|
||||||
isFile = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return isFile, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// retryErrorCodes is a slice of error codes that we will retry
|
|
||||||
var retryErrorCodes = []int{
|
|
||||||
429, // Too Many Requests.
|
|
||||||
500, // Internal Server Error
|
|
||||||
502, // Bad Gateway
|
|
||||||
503, // Service Unavailable
|
|
||||||
504, // Gateway Timeout
|
|
||||||
509, // Bandwidth Limit Exceeded
|
|
||||||
}
|
|
||||||
|
|
||||||
// shouldRetry returns a boolean as to whether this res and err
|
|
||||||
// deserve to be retried. It returns the err as a convenience.
|
|
||||||
func shouldRetry(ctx context.Context, res *http.Response, err error) (bool, error) {
|
|
||||||
if fserrors.ContextError(ctx, &err) {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(res, retryErrorCodes), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewFs creates a new Fs object from the name and root. It connects to
|
|
||||||
// the host specified in the config file.
|
|
||||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|
||||||
root = strings.Trim(root, "/")
|
|
||||||
|
|
||||||
// Parse config into Options struct
|
|
||||||
opt := new(Options)
|
|
||||||
err := configstruct.Set(m, opt)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
opt.Doi = parseDoi(opt.Doi)
|
|
||||||
|
|
||||||
client := fshttp.NewClient(ctx)
|
|
||||||
ci := fs.GetConfig(ctx)
|
|
||||||
f := &Fs{
|
|
||||||
name: name,
|
|
||||||
root: root,
|
|
||||||
opt: *opt,
|
|
||||||
ci: ci,
|
|
||||||
srv: rest.NewClient(client),
|
|
||||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
|
||||||
cache: cache.New(),
|
|
||||||
}
|
|
||||||
f.features = (&fs.Features{
|
|
||||||
CanHaveEmptyDirectories: true,
|
|
||||||
}).Fill(ctx, f)
|
|
||||||
|
|
||||||
isFile, err := f.httpConnection(ctx, opt)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if isFile {
|
|
||||||
// return an error with an fs which points to the parent
|
|
||||||
newRoot := path.Dir(f.root)
|
|
||||||
if newRoot == "." {
|
|
||||||
newRoot = ""
|
|
||||||
}
|
|
||||||
f.root = newRoot
|
|
||||||
return f, fs.ErrorIsFile
|
|
||||||
}
|
|
||||||
|
|
||||||
return f, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name returns the configured name of the file system
|
|
||||||
func (f *Fs) Name() string {
|
|
||||||
return f.name
|
|
||||||
}
|
|
||||||
|
|
||||||
// Root returns the root for the filesystem
|
|
||||||
func (f *Fs) Root() string {
|
|
||||||
return f.root
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the URL for the filesystem
|
|
||||||
func (f *Fs) String() string {
|
|
||||||
return fmt.Sprintf("DOI %s", f.opt.Doi)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Features returns the optional features of this Fs
|
|
||||||
func (f *Fs) Features() *fs.Features {
|
|
||||||
return f.features
|
|
||||||
}
|
|
||||||
|
|
||||||
// Precision is the remote http file system's modtime precision, which we have no way of knowing. We estimate at 1s
|
|
||||||
func (f *Fs) Precision() time.Duration {
|
|
||||||
return time.Second
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hashes returns hash.HashNone to indicate remote hashing is unavailable
|
|
||||||
func (f *Fs) Hashes() hash.Set {
|
|
||||||
return hash.Set(hash.MD5)
|
|
||||||
// return hash.Set(hash.None)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mkdir makes the root directory of the Fs object
|
|
||||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
|
||||||
return errorReadOnly
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove a remote http file object
|
|
||||||
func (o *Object) Remove(ctx context.Context) error {
|
|
||||||
return errorReadOnly
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rmdir removes the root directory of the Fs object
|
|
||||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
|
||||||
return errorReadOnly
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewObject creates a new remote http file object
|
|
||||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
|
||||||
entries, err := f.doiProvider.ListEntries(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
remoteFullPath := remote
|
|
||||||
if f.root != "" {
|
|
||||||
remoteFullPath = path.Join(f.root, remote)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, entry := range entries {
|
|
||||||
if entry.Remote() == remoteFullPath {
|
|
||||||
return entry, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, fs.ErrorObjectNotFound
|
|
||||||
}
|
|
||||||
|
|
||||||
// List the objects and directories in dir into entries. The
|
|
||||||
// entries can be returned in any order but should be for a
|
|
||||||
// complete directory.
|
|
||||||
//
|
|
||||||
// dir should be "" to list the root, and should not have
|
|
||||||
// trailing slashes.
|
|
||||||
//
|
|
||||||
// This should return ErrDirNotFound if the directory isn't
|
|
||||||
// found.
|
|
||||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
|
||||||
fileEntries, err := f.doiProvider.ListEntries(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("error listing %q: %w", dir, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
fullDir := path.Join(f.root, dir)
|
|
||||||
if fullDir != "" {
|
|
||||||
fullDir += "/"
|
|
||||||
}
|
|
||||||
|
|
||||||
dirPaths := map[string]bool{}
|
|
||||||
for _, entry := range fileEntries {
|
|
||||||
// First, filter out files not in `fullDir`
|
|
||||||
if !strings.HasPrefix(entry.remote, fullDir) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Then, find entries in subfolers
|
|
||||||
remotePath := entry.remote
|
|
||||||
if fullDir != "" {
|
|
||||||
remotePath = strings.TrimLeft(strings.TrimPrefix(remotePath, fullDir), "/")
|
|
||||||
}
|
|
||||||
parts := strings.SplitN(remotePath, "/", 2)
|
|
||||||
if len(parts) == 1 {
|
|
||||||
newEntry := *entry
|
|
||||||
newEntry.remote = path.Join(dir, remotePath)
|
|
||||||
entries = append(entries, &newEntry)
|
|
||||||
} else {
|
|
||||||
dirPaths[path.Join(dir, parts[0])] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for dirPath := range dirPaths {
|
|
||||||
entry := fs.NewDir(dirPath, time.Time{})
|
|
||||||
entries = append(entries, entry)
|
|
||||||
}
|
|
||||||
|
|
||||||
return entries, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Put in to the remote path with the modTime given of the given size
|
|
||||||
//
|
|
||||||
// May create the object even if it returns an error - if so
|
|
||||||
// will return the object and the error, otherwise will return
|
|
||||||
// nil and the error
|
|
||||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
|
||||||
return nil, errorReadOnly
|
|
||||||
}
|
|
||||||
|
|
||||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
|
||||||
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
|
||||||
return nil, errorReadOnly
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fs is the filesystem this remote http file object is located within
|
|
||||||
func (o *Object) Fs() fs.Info {
|
|
||||||
return o.fs
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the URL to the remote HTTP file
|
|
||||||
func (o *Object) String() string {
|
|
||||||
if o == nil {
|
|
||||||
return "<nil>"
|
|
||||||
}
|
|
||||||
return o.remote
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remote the name of the remote HTTP file, relative to the fs root
|
|
||||||
func (o *Object) Remote() string {
|
|
||||||
return o.remote
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hash returns "" since HTTP (in Go or OpenSSH) doesn't support remote calculation of hashes
|
|
||||||
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
|
||||||
if t != hash.MD5 {
|
|
||||||
return "", hash.ErrUnsupported
|
|
||||||
}
|
|
||||||
return o.md5, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Size returns the size in bytes of the remote http file
|
|
||||||
func (o *Object) Size() int64 {
|
|
||||||
return o.size
|
|
||||||
}
|
|
||||||
|
|
||||||
// ModTime returns the modification time of the remote http file
|
|
||||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
|
||||||
return o.modTime
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetModTime sets the modification and access time to the specified time
|
|
||||||
//
|
|
||||||
// it also updates the info field
|
|
||||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
|
||||||
return errorReadOnly
|
|
||||||
}
|
|
||||||
|
|
||||||
// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
|
|
||||||
func (o *Object) Storable() bool {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open a remote http file object for reading. Seek is supported
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	fs.FixRangeOption(options, o.size)
	opts := rest.Opts{
		Method:  "GET",
		RootURL: o.contentURL,
		Options: options,
	}
	var res *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		res, err = o.fs.srv.Call(ctx, &opts)
		return shouldRetry(ctx, res, err)
	})
	if err != nil {
		return nil, fmt.Errorf("Open failed: %w", err)
	}

	// Handle non-compliant redirects
	if res.Header.Get("Location") != "" {
		newURL, err := res.Location()
		if err == nil {
			opts.RootURL = newURL.String()
			err = o.fs.pacer.Call(func() (bool, error) {
				res, err = o.fs.srv.Call(ctx, &opts)
				return shouldRetry(ctx, res, err)
			})
			if err != nil {
				return nil, fmt.Errorf("Open failed: %w", err)
			}
		}
	}

	return res.Body, nil
}

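For orientation, here is a minimal sketch (not part of the backend) of how a caller might use this Open to fetch only the first kilobyte of an object: fs.FixRangeOption above folds a RangeOption into an HTTP Range header before the GET is issued. The helper name and the generic fs.Fs lookup are illustrative assumptions.

```go
// readFirstKiB is a hypothetical helper showing how a RangeOption is
// passed through to Open (and turned into an HTTP Range request).
func readFirstKiB(ctx context.Context, f fs.Fs, remote string) ([]byte, error) {
	o, err := f.NewObject(ctx, remote)
	if err != nil {
		return nil, err
	}
	rc, err := o.Open(ctx, &fs.RangeOption{Start: 0, End: 1023})
	if err != nil {
		return nil, err
	}
	defer func() { _ = rc.Close() }()
	return io.ReadAll(rc)
}
```
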
// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	return errorReadOnly
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
	return o.contentType
}

var commandHelp = []fs.CommandHelp{{
	Name:  "metadata",
	Short: "Show metadata about the DOI.",
	Long: `This command returns a JSON object with some information about the DOI.

Usage example:

` + "```console" + `
rclone backend metadata doi:
` + "```" + `

It returns a JSON object representing metadata about the DOI.`,
}, {
	Name:  "set",
	Short: "Set command for updating the config parameters.",
	Long: `This set command can be used to update the config parameters
for a running doi backend.

Usage examples:

` + "```console" + `
rclone backend set doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=doi: -o doi=NEW_DOI
` + "```" + `

The option keys are named as they are in the config file.

This rebuilds the connection to the doi backend when it is called with
the new parameters. Only new parameters need be passed as the values
will default to those currently in use.

It doesn't return anything.`,
}}

// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
	switch name {
	case "metadata":
		return f.ShowMetadata(ctx)
	case "set":
		newOpt := f.opt
		err := configstruct.Set(configmap.Simple(opt), &newOpt)
		if err != nil {
			return nil, fmt.Errorf("reading config: %w", err)
		}
		_, err = f.httpConnection(ctx, &newOpt)
		if err != nil {
			return nil, fmt.Errorf("updating session: %w", err)
		}
		f.opt = newOpt
		keys := []string{}
		for k := range opt {
			keys = append(keys, k)
		}
		fs.Logf(f, "Updated config values: %s", strings.Join(keys, ", "))
		return nil, nil
	default:
		return nil, fs.ErrorCommandNotFound
	}
}

// ShowMetadata returns some metadata about the corresponding DOI
func (f *Fs) ShowMetadata(ctx context.Context) (metadata any, err error) {
	doiURL, err := url.Parse("https://doi.org/" + f.opt.Doi)
	if err != nil {
		return nil, err
	}

	info := map[string]any{}
	info["DOI"] = f.opt.Doi
	info["URL"] = doiURL.String()
	info["metadataURL"] = f.endpointURL
	info["provider"] = f.provider
	return info, nil
}

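For reference, the map assembled above is what `rclone backend metadata doi:` serializes to JSON. A hedged illustration of the output shape; the values are placeholders, not captured output:

```json
{
  "DOI": "10.xxxx/example",
  "URL": "https://doi.org/10.xxxx/example",
  "metadataURL": "https://example.org/api/records/123",
  "provider": "zenodo"
}
```
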
// Check the interfaces are satisfied
var (
	_ fs.Fs          = (*Fs)(nil)
	_ fs.PutStreamer = (*Fs)(nil)
	_ fs.Commander   = (*Fs)(nil)
	_ fs.Object      = (*Object)(nil)
	_ fs.MimeTyper   = (*Object)(nil)
)