Mirror of https://github.com/rclone/rclone.git, synced 2025-12-06 00:03:32 +00:00

Compare commits: 1 commit on branch dependabot…fix-union-…

| Author | SHA1 | Date |
|---|---|---|
|  | 1d1d847f18 |  |

.gitattributes (vendored): 4 changes
@@ -1,7 +1,3 @@
-# Go writes go.mod and go.sum with lf even on windows
-go.mod text eol=lf
-go.sum text eol=lf
-
 # Ignore generated files in GitHub language statistics and diffs
 /MANUAL.* linguist-generated=true
 /rclone.1 linguist-generated=true
.github/FUNDING.yml (vendored, new file): 4 changes

@@ -0,0 +1,4 @@
github: [ncw]
patreon: njcw
liberapay: ncw
custom: ["https://rclone.org/donate/"]
.github/dependabot.yml (vendored, file removed): 6 changes

@@ -1,6 +0,0 @@
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "daily"
.github/workflows/build.yml (vendored): 216 changes

@@ -8,33 +8,29 @@ name: build
 on:
   push:
     branches:
-      - '**'
+      - '*'
     tags:
-      - '**'
+      - '*'
   pull_request:
   workflow_dispatch:
     inputs:
       manual:
-        description: Manual run (bypass default conditions)
-        type: boolean
+        required: true
         default: true
 
 jobs:
   build:
-    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
+    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
     timeout-minutes: 60
-    defaults:
-      run:
-        shell: bash
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.24']
+        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.16', 'go1.17']
 
         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '>=1.25.0-rc.1'
+            go: '1.18.x'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -45,14 +41,14 @@ jobs:
 
           - job_name: linux_386
             os: ubuntu-latest
-            go: '>=1.25.0-rc.1'
+            go: '1.18.x'
             goarch: 386
             gotags: cmount
             quicktest: true
 
           - job_name: mac_amd64
-            os: macos-latest
-            go: '>=1.25.0-rc.1'
+            os: macos-11
+            go: '1.18.x'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -60,15 +56,15 @@ jobs:
             deploy: true
 
           - job_name: mac_arm64
-            os: macos-latest
-            go: '>=1.25.0-rc.1'
+            os: macos-11
+            go: '1.18.x'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true
 
           - job_name: windows
             os: windows-latest
-            go: '>=1.25.0-rc.1'
+            go: '1.18.x'
             gotags: cmount
             cgo: '0'
             build_flags: '-include "^windows/"'
@@ -78,14 +74,20 @@ jobs:
 
           - job_name: other_os
             os: ubuntu-latest
-            go: '>=1.25.0-rc.1'
+            go: '1.18.x'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true
 
-          - job_name: go1.24
+          - job_name: go1.16
             os: ubuntu-latest
-            go: '1.24'
+            go: '1.16.x'
+            quicktest: true
+            racequicktest: true
+
+          - job_name: go1.17
+            os: ubuntu-latest
+            go: '1.17.x'
             quicktest: true
             racequicktest: true
 
@@ -95,17 +97,19 @@ jobs:
 
     steps:
       - name: Checkout
-        uses: actions/checkout@v5
+        uses: actions/checkout@v2
        with:
          fetch-depth: 0
 
       - name: Install Go
-        uses: actions/setup-go@v6
+        uses: actions/setup-go@v2
        with:
+          stable: 'false'
          go-version: ${{ matrix.go }}
          check-latest: true
 
       - name: Set environment variables
+        shell: bash
        run: |
          echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
          echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
@@ -114,25 +118,20 @@ jobs:
          if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi
 
       - name: Install Libraries on Linux
+        shell: bash
        run: |
          sudo modprobe fuse
          sudo chmod 666 /dev/fuse
          sudo chown root:$USER /etc/fuse.conf
-          sudo apt-get update
-          sudo apt-get install -y fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
+          sudo apt-get install fuse libfuse-dev rpm pkg-config
        if: matrix.os == 'ubuntu-latest'
 
       - name: Install Libraries on macOS
+        shell: bash
        run: |
-          # https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788
-          # https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008
-          unset HOMEBREW_NO_INSTALL_FROM_API
-          brew untap --force homebrew/core
-          brew untap --force homebrew/cask
          brew update
          brew install --cask macfuse
-          brew install git-annex git-annex-remote-rclone
-        if: matrix.os == 'macos-latest'
+        if: matrix.os == 'macos-11'
 
       - name: Install Libraries on Windows
        shell: powershell
@@ -151,6 +150,7 @@ jobs:
        if: matrix.os == 'windows-latest'
 
       - name: Print Go version and environment
+        shell: bash
        run: |
          printf "Using go at: $(which go)\n"
          printf "Go version: $(go version)\n"
@@ -161,25 +161,38 @@ jobs:
          printf "\n\nSystem environment:\n\n"
          env
 
+      - name: Go module cache
+        uses: actions/cache@v2
+        with:
+          path: ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
+
       - name: Build rclone
+        shell: bash
        run: |
          make
 
       - name: Rclone version
+        shell: bash
        run: |
          rclone version
 
       - name: Run tests
+        shell: bash
        run: |
          make quicktest
        if: matrix.quicktest
 
       - name: Race test
+        shell: bash
        run: |
          make racequicktest
        if: matrix.racequicktest
 
       - name: Run librclone tests
+        shell: bash
        run: |
          make -C librclone/ctest test
          make -C librclone/ctest clean
@@ -187,137 +200,72 @@ jobs:
        if: matrix.librclonetest
 
       - name: Compile all architectures test
+        shell: bash
        run: |
          make
          make compile_all
        if: matrix.compile_all
 
       - name: Deploy built binaries
+        shell: bash
        run: |
          if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi
+          if [[ "${{ matrix.os }}" == "windows-latest" ]]; then make release_dep_windows ; fi
          make ci_beta
        env:
          RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
        # working-directory: '$(modulePath)'
        # Deploy binaries if enabled in config && not a PR && not a fork
-        if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
+        if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
 
   lint:
-    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
+    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
    timeout-minutes: 30
    name: "lint"
    runs-on: ubuntu-latest
 
    steps:
-      - name: Get runner parameters
-        id: get-runner-parameters
-        run: |
-          echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT
-          echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT
-
       - name: Checkout
-        uses: actions/checkout@v5
-        with:
-          fetch-depth: 0
+        uses: actions/checkout@v2
 
-      - name: Install Go
-        id: setup-go
-        uses: actions/setup-go@v6
-        with:
-          go-version: '>=1.24.0-rc.1'
-          check-latest: true
-          cache: false
-
-      - name: Cache
-        uses: actions/cache@v4
-        with:
-          path: |
-            ~/go/pkg/mod
-            ~/.cache/go-build
-            ~/.cache/golangci-lint
-          key: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-${{ hashFiles('go.sum') }}
-          restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-
-
-      - name: Code quality test (Linux)
-        uses: golangci/golangci-lint-action@v9
+      - name: Code quality test
+        uses: golangci/golangci-lint-action@v2
        with:
+          # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
          version: latest
-          skip-cache: true
-
-      - name: Code quality test (Windows)
-        uses: golangci/golangci-lint-action@v9
-        env:
-          GOOS: "windows"
-        with:
-          version: latest
-          skip-cache: true
-
-      - name: Code quality test (macOS)
-        uses: golangci/golangci-lint-action@v9
-        env:
-          GOOS: "darwin"
-        with:
-          version: latest
-          skip-cache: true
-
-      - name: Code quality test (FreeBSD)
-        uses: golangci/golangci-lint-action@v9
-        env:
-          GOOS: "freebsd"
-        with:
-          version: latest
-          skip-cache: true
-
-      - name: Code quality test (OpenBSD)
-        uses: golangci/golangci-lint-action@v9
-        env:
-          GOOS: "openbsd"
-        with:
-          version: latest
-          skip-cache: true
-
-      - name: Install govulncheck
-        run: go install golang.org/x/vuln/cmd/govulncheck@latest
-
-      - name: Scan for vulnerabilities
-        run: govulncheck ./...
-
-      - name: Check Markdown format
-        uses: DavidAnson/markdownlint-cli2-action@v21
-        with:
-          globs: |
-            CONTRIBUTING.md
-            MAINTAINERS.md
-            README.md
-            RELEASE.md
-            CODE_OF_CONDUCT.md
-            librclone\README.md
-            backend\s3\README.md
-            docs/content/{_index,authors,bugs,changelog,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md
-
-      - name: Scan edits of autogenerated files
-        run: bin/check_autogenerated_edits.py 'origin/${{ github.base_ref }}'
-        if: github.event_name == 'pull_request'
 
   android:
-    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
+    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
    timeout-minutes: 30
    name: "android-all"
    runs-on: ubuntu-latest
 
    steps:
       - name: Checkout
-        uses: actions/checkout@v5
+        uses: actions/checkout@v2
        with:
          fetch-depth: 0
 
       # Upgrade together with NDK version
       - name: Set up Go
-        uses: actions/setup-go@v6
+        uses: actions/setup-go@v1
        with:
-          go-version: '>=1.25.0-rc.1'
+          go-version: 1.18.x
 
+      # Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
+      - name: Force NDK version
+        run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;23.1.7779620" | grep -v = || true
+
+      - name: Go module cache
+        uses: actions/cache@v2
+        with:
+          path: ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
+
       - name: Set global environment variables
+        shell: bash
        run: |
          echo "VERSION=$(make version)" >> $GITHUB_ENV
 
@@ -330,27 +278,27 @@ jobs:
          go install golang.org/x/mobile/cmd/gobind@latest
          go install golang.org/x/mobile/cmd/gomobile@latest
          env PATH=$PATH:~/go/bin gomobile init
-          echo "RCLONE_NDK_VERSION=21" >> $GITHUB_ENV
 
       - name: arm-v7a gomobile build
-        run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
+        run: env PATH=$PATH:~/go/bin gomobile bind -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
 
       - name: arm-v7a Set environment variables
+        shell: bash
        run: |
-          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=arm' >> $GITHUB_ENV
          echo 'GOARM=7' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
 
       - name: arm-v7a build
-        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a .
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-armv7a .
 
       - name: arm64-v8a Set environment variables
+        shell: bash
        run: |
-          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=arm64' >> $GITHUB_ENV
@@ -358,11 +306,12 @@ jobs:
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
 
       - name: arm64-v8a build
-        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-armv8a .
 
       - name: x86 Set environment variables
+        shell: bash
        run: |
-          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=386' >> $GITHUB_ENV
@@ -370,11 +319,12 @@ jobs:
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
 
       - name: x86 build
-        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 .
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-x86 .
 
       - name: x64 Set environment variables
+        shell: bash
        run: |
-          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=amd64' >> $GITHUB_ENV
@@ -382,7 +332,7 @@ jobs:
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
 
       - name: x64 build
-        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x64 .
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-x64 .
 
       - name: Upload artifacts
        run: |
@@ -390,4 +340,4 @@ jobs:
        env:
          RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
        # Upload artifacts if not a PR && not a fork
-        if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone'
+        if: github.head_ref == '' && github.repository == 'rclone/rclone'
.github/workflows/build_publish_docker_image.yml (vendored): 312 changes

@@ -1,294 +1,26 @@
---
|
name: Docker beta build
|
||||||
# Github Actions release for rclone
|
|
||||||
# -*- compile-command: "yamllint -f parsable build_publish_docker_image.yml" -*-
|
|
||||||
|
|
||||||
name: Build & Push Docker Images
|
|
||||||
|
|
||||||
# Trigger the workflow on push or pull request
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- '**'
|
- master
|
||||||
tags:
|
|
||||||
- '**'
|
|
||||||
workflow_dispatch:
|
|
||||||
inputs:
|
|
||||||
manual:
|
|
||||||
description: Manual run (bypass default conditions)
|
|
||||||
type: boolean
|
|
||||||
default: true
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build-image:
|
build:
|
||||||
if: inputs.manual || (github.repository == 'rclone/rclone' && github.event_name != 'pull_request')
|
if: github.repository == 'rclone/rclone'
|
||||||
timeout-minutes: 60
|
runs-on: ubuntu-latest
|
||||||
strategy:
|
name: Build image job
|
||||||
fail-fast: false
|
steps:
|
||||||
matrix:
|
- name: Checkout master
|
||||||
include:
|
uses: actions/checkout@v2
|
||||||
- platform: linux/amd64
|
with:
|
||||||
runs-on: ubuntu-24.04
|
fetch-depth: 0
|
||||||
- platform: linux/386
|
- name: Build and publish image
|
||||||
runs-on: ubuntu-24.04
|
uses: ilteoood/docker_buildx@1.1.0
|
||||||
- platform: linux/arm64
|
with:
|
||||||
runs-on: ubuntu-24.04-arm
|
tag: beta
|
||||||
- platform: linux/arm/v7
|
imageName: rclone/rclone
|
||||||
runs-on: ubuntu-24.04-arm
|
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
|
||||||
- platform: linux/arm/v6
|
publish: true
|
||||||
runs-on: ubuntu-24.04-arm
|
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
|
||||||
|
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
|
||||||
name: Build Docker Image for ${{ matrix.platform }}
|
|
||||||
runs-on: ${{ matrix.runs-on }}
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Free Space
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
df -h .
|
|
||||||
# Remove android SDK
|
|
||||||
sudo rm -rf /usr/local/lib/android || true
|
|
||||||
# Remove .net runtime
|
|
||||||
sudo rm -rf /usr/share/dotnet || true
|
|
||||||
df -h .
|
|
||||||
|
|
||||||
- name: Checkout Repository
|
|
||||||
uses: actions/checkout@v5
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Set REPO_NAME Variable
|
|
||||||
run: |
|
|
||||||
echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}
|
|
||||||
|
|
||||||
- name: Set PLATFORM Variable
|
|
||||||
run: |
|
|
||||||
platform=${{ matrix.platform }}
|
|
||||||
echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV
|
|
||||||
|
|
||||||
- name: Set CACHE_NAME Variable
|
|
||||||
shell: python
|
|
||||||
run: |
|
|
||||||
import os, re
|
|
||||||
|
|
||||||
def slugify(input_string, max_length=63):
|
|
||||||
slug = input_string.lower()
|
|
||||||
slug = re.sub(r'[^a-z0-9 -]', ' ', slug)
|
|
||||||
slug = slug.strip()
|
|
||||||
slug = re.sub(r'\s+', '-', slug)
|
|
||||||
slug = re.sub(r'-+', '-', slug)
|
|
||||||
slug = slug[:max_length]
|
|
||||||
slug = re.sub(r'[-]+$', '', slug)
|
|
||||||
return slug
|
|
||||||
|
|
||||||
ref_name_slug = "cache"
|
|
||||||
|
|
||||||
if os.environ.get("GITHUB_REF_NAME") and os.environ['GITHUB_EVENT_NAME'] == "pull_request":
|
|
||||||
ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME'])
|
|
||||||
|
|
||||||
with open(os.environ['GITHUB_ENV'], 'a') as env:
|
|
||||||
env.write(f"CACHE_NAME={ref_name_slug}\n")
|
|
||||||
|
|
||||||
- name: Get ImageOS
|
|
||||||
# There's no way around this, because "ImageOS" is only available to
|
|
||||||
# processes, but the setup-go action uses it in its key.
|
|
||||||
id: imageos
|
|
||||||
uses: actions/github-script@v8
|
|
||||||
with:
|
|
||||||
result-encoding: string
|
|
||||||
script: |
|
|
||||||
return process.env.ImageOS
|
|
||||||
|
|
||||||
- name: Extract Metadata (tags, labels) for Docker
|
|
||||||
id: meta
|
|
||||||
uses: docker/metadata-action@v5
|
|
||||||
env:
|
|
||||||
DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,manifest-descriptor # Important for digest annotation (used by Github packages)
|
|
||||||
with:
|
|
||||||
images: |
|
|
||||||
ghcr.io/${{ env.REPO_NAME }}
|
|
||||||
labels: |
|
|
||||||
org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
|
|
||||||
org.opencontainers.image.vendor=${{ github.repository_owner }}
|
|
||||||
org.opencontainers.image.authors=rclone <https://github.com/rclone>
|
|
||||||
org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
|
|
||||||
org.opencontainers.image.revision=${{ github.sha }}
|
|
||||||
tags: |
|
|
||||||
type=sha
|
|
||||||
type=ref,event=pr
|
|
||||||
type=ref,event=branch
|
|
||||||
type=semver,pattern={{version}}
|
|
||||||
type=semver,pattern={{major}}
|
|
||||||
type=semver,pattern={{major}}.{{minor}}
|
|
||||||
type=raw,value=beta,enable={{is_default_branch}}
|
|
||||||
|
|
||||||
- name: Setup QEMU
|
|
||||||
uses: docker/setup-qemu-action@v3
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v3
|
|
||||||
|
|
||||||
- name: Load Go Build Cache for Docker
|
|
||||||
id: go-cache
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
|
|
||||||
restore-keys: |
|
|
||||||
${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
|
|
||||||
# Cache only the go builds, the module download is cached via the docker layer caching
|
|
||||||
path: |
|
|
||||||
go-build-cache
|
|
||||||
|
|
||||||
- name: Inject Go Build Cache into Docker
|
|
||||||
uses: reproducible-containers/buildkit-cache-dance@v3
|
|
||||||
with:
|
|
||||||
cache-map: |
|
|
||||||
{
|
|
||||||
"go-build-cache": "/root/.cache/go-build"
|
|
||||||
}
|
|
||||||
skip-extraction: ${{ steps.go-cache.outputs.cache-hit }}
|
|
||||||
|
|
||||||
- name: Login to GitHub Container Registry
|
|
||||||
uses: docker/login-action@v3
|
|
||||||
with:
|
|
||||||
registry: ghcr.io
|
|
||||||
# This is the user that triggered the Workflow. In this case, it will
|
|
||||||
# either be the user whom created the Release or manually triggered
|
|
||||||
# the workflow_dispatch.
|
|
||||||
username: ${{ github.actor }}
|
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Build and Publish Image Digest
|
|
||||||
id: build
|
|
||||||
uses: docker/build-push-action@v6
|
|
||||||
with:
|
|
||||||
file: Dockerfile
|
|
||||||
context: .
|
|
||||||
provenance: false
|
|
||||||
# don't specify 'tags' here (error "get can't push tagged ref by digest")
|
|
||||||
# tags: ${{ steps.meta.outputs.tags }}
|
|
||||||
labels: ${{ steps.meta.outputs.labels }}
|
|
||||||
annotations: ${{ steps.meta.outputs.annotations }}
|
|
||||||
platforms: ${{ matrix.platform }}
|
|
||||||
outputs: |
|
|
||||||
type=image,name=ghcr.io/${{ env.REPO_NAME }},push-by-digest=true,name-canonical=true,push=true
|
|
||||||
cache-from: |
|
|
||||||
type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
|
|
||||||
cache-to: |
|
|
||||||
type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }},image-manifest=true,mode=max,compression=zstd
|
|
||||||
|
|
||||||
- name: Export Image Digest
|
|
||||||
run: |
|
|
||||||
mkdir -p /tmp/digests
|
|
||||||
digest="${{ steps.build.outputs.digest }}"
|
|
||||||
touch "/tmp/digests/${digest#sha256:}"
|
|
||||||
|
|
||||||
- name: Upload Image Digest
|
|
||||||
uses: actions/upload-artifact@v5
|
|
||||||
with:
|
|
||||||
name: digests-${{ env.PLATFORM }}
|
|
||||||
path: /tmp/digests/*
|
|
||||||
retention-days: 1
|
|
||||||
if-no-files-found: error
|
|
||||||
|
|
||||||
merge-image:
|
|
||||||
name: Merge & Push Final Docker Image
|
|
||||||
runs-on: ubuntu-24.04
|
|
||||||
needs:
|
|
||||||
- build-image
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Download Image Digests
|
|
||||||
uses: actions/download-artifact@v6
|
|
||||||
with:
|
|
||||||
path: /tmp/digests
|
|
||||||
pattern: digests-*
|
|
||||||
merge-multiple: true
|
|
||||||
|
|
||||||
- name: Set REPO_NAME Variable
|
|
||||||
run: |
|
|
||||||
echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}
|
|
||||||
|
|
||||||
- name: Extract Metadata (tags, labels) for Docker
|
|
||||||
id: meta
|
|
||||||
uses: docker/metadata-action@v5
|
|
||||||
env:
|
|
||||||
DOCKER_METADATA_ANNOTATIONS_LEVELS: index
|
|
||||||
with:
|
|
||||||
images: |
|
|
||||||
${{ env.REPO_NAME }}
|
|
||||||
ghcr.io/${{ env.REPO_NAME }}
|
|
||||||
labels: |
|
|
||||||
org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
|
|
||||||
org.opencontainers.image.vendor=${{ github.repository_owner }}
|
|
||||||
org.opencontainers.image.authors=rclone <https://github.com/rclone>
|
|
||||||
org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
|
|
||||||
org.opencontainers.image.revision=${{ github.sha }}
|
|
||||||
tags: |
|
|
||||||
type=sha
|
|
||||||
type=ref,event=pr
|
|
||||||
type=ref,event=branch
|
|
||||||
type=semver,pattern={{version}}
|
|
||||||
type=semver,pattern={{major}}
|
|
||||||
type=semver,pattern={{major}}.{{minor}}
|
|
||||||
type=raw,value=beta,enable={{is_default_branch}}
|
|
||||||
|
|
||||||
- name: Extract Tags
|
|
||||||
shell: python
|
|
||||||
run: |
|
|
||||||
import json, os
|
|
||||||
|
|
||||||
metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
|
|
||||||
metadata = json.loads(metadata_json)
|
|
||||||
|
|
||||||
tags = [f"--tag '{tag}'" for tag in metadata["tags"]]
|
|
||||||
tags_string = " ".join(tags)
|
|
||||||
|
|
||||||
with open(os.environ['GITHUB_ENV'], 'a') as env:
|
|
||||||
env.write(f"TAGS={tags_string}\n")
|
|
||||||
|
|
||||||
- name: Extract Annotations
|
|
||||||
shell: python
|
|
||||||
run: |
|
|
||||||
import json, os
|
|
||||||
|
|
||||||
metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
|
|
||||||
metadata = json.loads(metadata_json)
|
|
||||||
|
|
||||||
annotations = [f"--annotation '{annotation}'" for annotation in metadata["annotations"]]
|
|
||||||
annotations_string = " ".join(annotations)
|
|
||||||
|
|
||||||
with open(os.environ['GITHUB_ENV'], 'a') as env:
|
|
||||||
env.write(f"ANNOTATIONS={annotations_string}\n")
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v3
|
|
||||||
|
|
||||||
- name: Login to Docker Hub
|
|
||||||
uses: docker/login-action@v3
|
|
||||||
with:
|
|
||||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
|
||||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Login to GitHub Container Registry
|
|
||||||
uses: docker/login-action@v3
|
|
||||||
with:
|
|
||||||
registry: ghcr.io
|
|
||||||
# This is the user that triggered the Workflow. In this case, it will
|
|
||||||
# either be the user whom created the Release or manually triggered
|
|
||||||
# the workflow_dispatch.
|
|
||||||
username: ${{ github.actor }}
|
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Create & Push Manifest List
|
|
||||||
working-directory: /tmp/digests
|
|
||||||
run: |
|
|
||||||
docker buildx imagetools create \
|
|
||||||
${{ env.TAGS }} \
|
|
||||||
${{ env.ANNOTATIONS }} \
|
|
||||||
$(printf 'ghcr.io/${{ env.REPO_NAME }}@sha256:%s ' *)
|
|
||||||
|
|
||||||
- name: Inspect and Run Multi-Platform Image
|
|
||||||
run: |
|
|
||||||
docker buildx imagetools inspect --raw ${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
|
|
||||||
docker buildx imagetools inspect --raw ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
|
|
||||||
docker run --rm ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }} version
|
|
||||||
|
|||||||
build_publish_docker_plugin.yml (file removed): 49 lines

@@ -1,49 +0,0 @@
---
# Github Actions release for rclone
# -*- compile-command: "yamllint -f parsable build_publish_docker_plugin.yml" -*-

name: Release Build for Docker Plugin

on:
  release:
    types: [published]
  workflow_dispatch:
    inputs:
      manual:
        description: Manual run (bypass default conditions)
        type: boolean
        default: true

jobs:
  build_docker_volume_plugin:
    if: inputs.manual || github.repository == 'rclone/rclone'
    name: Build docker plugin job
    runs-on: ubuntu-latest
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v5
        with:
          fetch-depth: 0
      - name: Build and publish docker plugin
        shell: bash
        run: |
          VER=${GITHUB_REF#refs/tags/}
          PLUGIN_USER=rclone
          docker login --username ${{ secrets.DOCKER_HUB_USER }} \
            --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
          for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
            export PLUGIN_USER PLUGIN_ARCH
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
          done
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}
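The plugin build script above derives its Docker plugin tags with the bash parameter expansions `${PLUGIN_ARCH/\//-}` and `${VER#v}`. A standalone sketch of what those expansions do (illustration only, not part of the diff; the values are hypothetical):

```sh
# Illustration of the parameter expansions used by the plugin build script
PLUGIN_ARCH=arm/v7
VER=v1.2.3                     # hypothetical release tag
echo "${PLUGIN_ARCH/\//-}"     # replaces the first "/" with "-": prints "arm-v7"
echo "${VER#v}"                # strips the leading "v": prints "1.2.3"
```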
.github/workflows/build_publish_release_docker_image.yml (vendored, new file): 59 lines

@@ -0,0 +1,59 @@
name: Docker release build

on:
  release:
    types: [published]

jobs:
  build:
    if: github.repository == 'rclone/rclone'
    runs-on: ubuntu-latest
    name: Build image job
    steps:
      - name: Checkout master
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: Get actual patch version
        id: actual_patch_version
        run: echo ::set-output name=ACTUAL_PATCH_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g')
      - name: Get actual minor version
        id: actual_minor_version
        run: echo ::set-output name=ACTUAL_MINOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1,2)
      - name: Get actual major version
        id: actual_major_version
        run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
      - name: Build and publish image
        uses: ilteoood/docker_buildx@1.1.0
        with:
          tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
          imageName: rclone/rclone
          platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
          publish: true
          dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
          dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}

  build_docker_volume_plugin:
    if: github.repository == 'rclone/rclone'
    needs: build
    runs-on: ubuntu-latest
    name: Build docker plugin job
    steps:
      - name: Checkout master
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: Build and publish docker plugin
        shell: bash
        run: |
          VER=${GITHUB_REF#refs/tags/}
          PLUGIN_USER=rclone
          docker login --username ${{ secrets.DOCKER_HUB_USER }} \
            --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
          for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
            export PLUGIN_USER PLUGIN_ARCH
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
          done
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}
.github/workflows/notify.yml (vendored, file removed): 15 lines

@@ -1,15 +0,0 @@
name: Notify users based on issue labels

on:
  issues:
    types: [labeled]

jobs:
  notify:
    runs-on: ubuntu-latest
    steps:
      - uses: jenschelkopf/issue-label-notification-action@1.3
        with:
          token: ${{ secrets.NOTIFY_ACTION_TOKEN }}
          recipients: |
            Support Contract=@rclone/support
.github/workflows/winget.yml (vendored, file removed): 14 lines

@@ -1,14 +0,0 @@
name: Publish to Winget
on:
  release:
    types: [released]

jobs:
  publish:
    runs-on: ubuntu-latest
    steps:
      - uses: vedantmgoyal2009/winget-releaser@v2
        with:
          identifier: Rclone.Rclone
          installers-regex: '-windows-\w+\.zip$'
          token: ${{ secrets.WINGET_TOKEN }}
.gitignore (vendored): 9 changes

@@ -3,20 +3,15 @@ _junk/
 rclone
 rclone.exe
 build
-/docs/public/
-/docs/.hugo_build.lock
-/docs/static/img/logos/
+docs/public
 rclone.iml
 .idea
 .history
-.vscode
 *.test
+*.log
 *.iml
 fuzz-build.zip
 *.orig
 *.rej
 Thumbs.db
 __pycache__
-.DS_Store
-resource_windows_*.syso
-.devcontainer
.golangci.yml: 155 changes

@@ -1,151 +1,30 @@
-version: "2"
+# golangci-lint configuration options
 
 linters:
-  # Configure the linter set. To avoid unexpected results the implicit default
-  # set is ignored and all the ones to use are explicitly enabled.
-  default: none
   enable:
-    # Default
+    - deadcode
     - errcheck
-    - govet
-    - ineffassign
-    - staticcheck
-    - unused
-    # Additional
-    - gocritic
-    - misspell
-    #- prealloc # TODO
-    - revive
-    - unconvert
-  # Configure checks. Mostly using defaults but with some commented exceptions.
-  settings:
-    govet:
-      enable-all: true
-      disable:
-        - fieldalignment
-        - shadow
-    staticcheck:
-      # With staticcheck there is only one setting, so to extend the implicit
-      # default value it must be explicitly included.
-      checks:
-        # Default
-        - all
-        - -ST1000
-        - -ST1003
-        - -ST1016
-        - -ST1020
-        - -ST1021
-        - -ST1022
-        # Disable quickfix checks
-        - -QF*
-    gocritic:
-      # With gocritic there are different settings, but since enabled-checks
-      # and disabled-checks cannot both be set, for full customization the
-      # alternative is to disable all defaults and explicitly enable the ones
-      # to use.
-      disable-all: true
-      enabled-checks:
-        #- appendAssign # Skip default
-        - argOrder
-        - assignOp
-        - badCall
-        - badCond
-        #- captLocal # Skip default
-        - caseOrder
-        - codegenComment
-        #- commentFormatting # Skip default
-        - defaultCaseOrder
-        - deprecatedComment
-        - dupArg
-        - dupBranchBody
-        - dupCase
-        - dupSubExpr
-        - elseif
-        #- exitAfterDefer # Skip default
-        - flagDeref
-        - flagName
-        #- ifElseChain # Skip default
-        - mapKey
-        - newDeref
-        - offBy1
-        - regexpMust
-        - ruleguard # Enable additional check that are not enabled by default
-        #- singleCaseSwitch # Skip default
-        - sloppyLen
-        - sloppyTypeAssert
-        - switchTrue
-        - typeSwitchVar
-        - underef
-        - unlambda
-        - unslice
-        - valSwap
-        - wrapperFunc
-      settings:
-        ruleguard:
-          rules: ${base-path}/bin/rules.go
-    revive:
-      # With revive there is in reality only one setting, and when at least one
-      # rule are specified then only these rules will be considered, defaults
-      # and all others are then implicitly disabled, so must explicitly enable
-      # all rules to be used.
-      rules:
-        - name: blank-imports
-          disabled: false
-        - name: context-as-argument
-          disabled: false
-        - name: context-keys-type
-          disabled: false
-        - name: dot-imports
-          disabled: false
-        #- name: empty-block # Skip default
-        #  disabled: true
-        - name: error-naming
-          disabled: false
-        - name: error-return
-          disabled: false
-        - name: error-strings
-          disabled: false
-        - name: errorf
-          disabled: false
-        - name: exported
-          disabled: false
-        #- name: increment-decrement # Skip default
-        #  disabled: true
-        - name: indent-error-flow
-          disabled: false
-        - name: package-comments
-          disabled: false
-        - name: range
-          disabled: false
-        - name: receiver-naming
-          disabled: false
-        #- name: redefines-builtin-id # Skip default
-        #  disabled: true
-        #- name: superfluous-else # Skip default
-        #  disabled: true
-        - name: time-naming
-          disabled: false
-        - name: unexported-return
-          disabled: false
-        #- name: unreachable-code # Skip default
-        #  disabled: true
-        #- name: unused-parameter # Skip default
-        #  disabled: true
-        - name: var-declaration
-          disabled: false
-        - name: var-naming
-          disabled: false
 
-formatters:
-  enable:
     - goimports
+    #- revive
+    - ineffassign
+    - structcheck
+    - varcheck
+    - govet
+    - unconvert
+    #- prealloc
+    #- maligned
+  disable-all: true
 
 issues:
+  # Enable some lints excluded by default
+  exclude-use-default: false
+
   # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
-  max-issues-per-linter: 0
+  max-per-linter: 0
 
   # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
   max-same-issues: 0
 
 run:
-  # Timeout for total work, e.g. 30s, 5m, 5m30s. Default is 0 (disabled).
+  # timeout for analysis, e.g. 30s, 5m, default is 1m
   timeout: 10m
@@ -1,72 +0,0 @@
|
|||||||
default: true
|
|
||||||
|
|
||||||
# Use specific styles, to be consistent accross all documents.
|
|
||||||
# Default is to accept any as long as it is consistent within the same document.
|
|
||||||
heading-style: # MD003
|
|
||||||
style: atx
|
|
||||||
ul-style: # MD004
|
|
||||||
style: dash
|
|
||||||
hr-style: # MD035
|
|
||||||
style: ---
|
|
||||||
code-block-style: # MD046
|
|
||||||
style: fenced
|
|
||||||
code-fence-style: # MD048
|
|
||||||
style: backtick
|
|
||||||
emphasis-style: # MD049
|
|
||||||
style: asterisk
|
|
||||||
strong-style: # MD050
|
|
||||||
style: asterisk
|
|
||||||
|
|
||||||
# Allow multiple headers with same text as long as they are not siblings.
|
|
||||||
no-duplicate-heading: # MD024
|
|
||||||
siblings_only: true
|
|
||||||
|
|
||||||
# Allow long lines in code blocks and tables.
|
|
||||||
line-length: # MD013
|
|
||||||
code_blocks: false
|
|
||||||
tables: false
|
|
||||||
|
|
||||||
# The Markdown files used to generated docs with Hugo contain a top level
|
|
||||||
# header, even though the YAML front matter has a title property (which is
|
|
||||||
# used for the HTML document title only). Suppress Markdownlint warning:
|
|
||||||
# Multiple top-level headings in the same document.
|
|
||||||
single-title: # MD025
|
|
||||||
level: 1
|
|
||||||
front_matter_title:
|
|
||||||
|
|
||||||
# The HTML docs generated by Hugo from Markdown files may have slightly
|
|
||||||
# different header anchors than GitHub rendered Markdown, e.g. Hugo trims
|
|
||||||
# leading dashes so "--config string" becomes "#config-string" while it is
|
|
||||||
# "#--config-string" in GitHub preview. When writing links to headers in the
|
|
||||||
# Markdown files we must use whatever works in the final HTML generated docs.
|
|
||||||
# Suppress Markdownlint warning: Link fragments should be valid.
|
|
||||||
link-fragments: false # MD051
|
|
||||||
|
|
||||||
# Restrict the languages and language identifiers to use for code blocks.
|
|
||||||
# We only want those supported by both Hugo and GitHub. These are documented
|
|
||||||
# here:
|
|
||||||
# https://gohugo.io/content-management/syntax-highlighting/#languages
|
|
||||||
# https://docs.github.com//get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks#syntax-highlighting
|
|
||||||
# In addition, we only want to allow identifiers (aliases) that correspond to
|
|
||||||
# the same language in Hugo and GitHub, and preferrably also VSCode and other
|
|
||||||
# commonly used tools, to avoid confusion. An example of this is that "shell"
|
|
||||||
# by some are considered an identifier for shell scripts, i.e. an alias for
|
|
||||||
# "sh", while others consider it an identifier for shell sessions, i.e. an
|
|
||||||
# alias for "console". Although Hugo and GitHub in this case are consistent and
|
|
||||||
# have choosen the former, using "sh" instead, and not allowing use of "shell",
|
|
||||||
# avoids the confusion entirely.
|
|
||||||
fenced-code-language: # MD040
|
|
||||||
allowed_languages:
|
|
||||||
- text
|
|
||||||
- console
|
|
||||||
- sh
|
|
||||||
- bat
|
|
||||||
- ini
|
|
||||||
- json
|
|
||||||
- yaml
|
|
||||||
- go
|
|
||||||
- python
|
|
||||||
- c++
|
|
||||||
- c#
|
|
||||||
- java
|
|
||||||
- powershell
|
|
||||||
Removed file (Rclone Code of Conduct): 80 lines

@@ -1,80 +0,0 @@

# Rclone Code of Conduct

Like the technical community as a whole, the Rclone team and community is made up of a mixture of professionals and volunteers from all over the world, working on every aspect of the mission - including mentorship, teaching, and connecting people.

Diversity is one of our huge strengths, but it can also lead to communication issues and unhappiness. To that end, we have a few ground rules that we ask people to adhere to. This code applies equally to founders, mentors and those seeking help and guidance.

This isn't an exhaustive list of things that you can't do. Rather, take it in the spirit in which it's intended - a guide to make it easier to enrich all of us and the technical communities in which we participate.

This code of conduct applies to all spaces managed by the Rclone project or Rclone Services Ltd. This includes the issue tracker, the forum, the GitHub site, the wiki, any other online services or in-person events. In addition, violations of this code outside these spaces may affect a person's ability to participate within them.

- **Be friendly and patient.**
- **Be welcoming.** We strive to be a community that welcomes and supports people of all backgrounds and identities. This includes, but is not limited to members of any race, ethnicity, culture, national origin, colour, immigration status, social and economic class, educational level, sex, sexual orientation, gender identity and expression, age, size, family status, political belief, religion, and mental and physical ability.
- **Be considerate.** Your work will be used by other people, and you in turn will depend on the work of others. Any decision you take will affect users and colleagues, and you should take those consequences into account when making decisions. Remember that we're a world-wide community, so you might not be communicating in someone else's primary language.
- **Be respectful.** Not all of us will agree all the time, but disagreement is no excuse for poor behavior and poor manners. We might all experience some frustration now and then, but we cannot allow that frustration to turn into a personal attack. It's important to remember that a community where people feel uncomfortable or threatened is not a productive one. Members of the Rclone community should be respectful when dealing with other members as well as with people outside the Rclone community.
- **Be careful in the words that you choose.** We are a community of professionals, and we conduct ourselves professionally. Be kind to others. Do not insult or put down other participants. Harassment and other exclusionary behavior aren't acceptable. This includes, but is not limited to:
  - Violent threats or language directed against another person.
  - Discriminatory jokes and language.
  - Posting sexually explicit or violent material.
  - Posting (or threatening to post) other people's personally identifying information ("doxing").
  - Personal insults, especially those using racist or sexist terms.
  - Unwelcome sexual attention.
  - Advocating for, or encouraging, any of the above behavior.
  - Repeated harassment of others. In general, if someone asks you to stop, then stop.
- **When we disagree, try to understand why.** Disagreements, both social and technical, happen all the time and Rclone is no exception. It is important that we resolve disagreements and differing views constructively. Remember that we're different. The strength of Rclone comes from its varied community, people from a wide range of backgrounds. Different people have different perspectives on issues. Being unable to understand why someone holds a viewpoint doesn't mean that they're wrong. Don't forget that it is human to err and blaming each other doesn't get us anywhere. Instead, focus on helping to resolve issues and learning from mistakes.

If you believe someone is violating the code of conduct, we ask that you report it by emailing [info@rclone.com](mailto:info@rclone.com).

Original text courtesy of the [Speak Up! project](http://web.archive.org/web/20141109123859/http://speakup.io/coc.html).

## Questions?

If you have questions, please feel free to [contact us](mailto:info@rclone.com).
CONTRIBUTING.md: 662 changes

@@ -1,8 +1,8 @@
-# Contributing to rclone
+# Contributing to rclone #
 
 This is a short guide on how to contribute things to rclone.
 
-## Reporting a bug
+## Reporting a bug ##
 
 If you've just got a question or aren't sure if you've found a bug
 then please use the [rclone forum](https://forum.rclone.org/) instead
@@ -12,227 +12,163 @@ When filing an issue, please include the following information if
|
|||||||
possible as well as a description of the problem. Make sure you test
|
possible as well as a description of the problem. Make sure you test
|
||||||
with the [latest beta of rclone](https://beta.rclone.org/):
|
with the [latest beta of rclone](https://beta.rclone.org/):
|
||||||
|
|
||||||
- Rclone version (e.g. output from `rclone version`)
|
* Rclone version (e.g. output from `rclone version`)
|
||||||
- Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
|
* Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
|
||||||
- The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
|
* The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
|
||||||
- A log of the command with the `-vv` flag (e.g. output from
|
* A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
|
||||||
`rclone -vv copy /tmp remote:tmp`)
|
* if the log contains secrets then edit the file with a text editor first to obscure them
|
||||||
- if the log contains secrets then edit the file with a text editor first to
|
|
||||||
obscure them
|
|
||||||
|
|
||||||
## Submitting a new feature or bug fix

If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via GitHub.

If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues)
first so it can be discussed.

To prepare your pull request first press the fork button on [rclone's GitHub
page](https://github.com/rclone/rclone).

Then [install Git](https://git-scm.com/downloads) and set your public contribution
[name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git)
and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).

Next open your terminal, change directory to your preferred folder and initialise
your local rclone project:

```console
git clone https://github.com/rclone/rclone.git
cd rclone
git remote rename origin upstream
# if you have SSH keys setup in your GitHub account:
git remote add origin git@github.com:YOURUSER/rclone.git
# otherwise:
git remote add origin https://github.com/YOURUSER/rclone.git
```

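If you want to double-check the result, `git remote -v` should list both remotes,
roughly like this (the exact URLs depend on which form you added above):

```console
git remote -v
# origin    git@github.com:YOURUSER/rclone.git (fetch)
# origin    git@github.com:YOURUSER/rclone.git (push)
# upstream  https://github.com/rclone/rclone.git (fetch)
# upstream  https://github.com/rclone/rclone.git (push)
```
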
Note that most of the terminal commands in the rest of this guide must be
executed from the rclone folder created above.

Now [install Go](https://golang.org/doc/install) and verify your installation:

```console
go version
```

Great, you can now compile and execute your own version of rclone:

```console
go build
./rclone version
```

(Note that you can also replace `go build` with `make`, which will include a
more accurate version number in the executable as well as enable you to specify
more build options.) Finally make a branch to add your new feature

```console
git checkout -b my-new-feature
```

And get hacking.

You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins)
and a quick view on the rclone [code organisation](#code-organisation).

When ready - test the affected functionality and run the unit tests for the
code you changed

```console
cd folder/with/changed/files
go test -v
```

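While iterating it can help to run just one test rather than the whole package.
The standard `go test -run` flag works for this; the test name below is only a
placeholder, substitute one from the package you changed:

```console
cd folder/with/changed/files
go test -v -run TestSomething
```
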
Note that you may need to make a test remote, e.g. `TestSwift` for some
of the unit tests.

This is typically enough if you made a simple bug fix, otherwise please read
the rclone [testing](#testing) section too.

Make sure you

- Add [unit tests](#testing) for a new feature.
- Add [documentation](#writing-documentation) for a new feature.
- [Commit your changes](#committing-your-changes) using the [commit message guidelines](#commit-messages).

When you are done with that push your changes to GitHub:

```console
git push -u origin my-new-feature
```

and open the GitHub website to [create your pull
request](https://help.github.com/articles/creating-a-pull-request/).

Your changes will then get reviewed and you might get asked to fix some stuff.
If so, then make the changes in the same branch, commit and push your updates to
GitHub.

You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master)
or [squash your commits](#squashing-your-commits).

## Using Git and GitHub

### Committing your changes

Follow the guideline for [commit messages](#commit-messages) and then:

```console
git checkout my-new-feature      # To switch to your branch
git status                       # To see the new and changed files
git add FILENAME                 # To select FILENAME for the commit
git status                       # To verify the changes to be committed
git commit                       # To do the commit
git log                          # To verify the commit. Use q to quit the log
```

You can modify the message or changes in the latest commit using:

```console
git commit --amend
```

If you amend to commits that have been pushed to GitHub, then you will have to
[replace your previously pushed commits](#replacing-your-previously-pushed-commits).

### Replacing your previously pushed commits

Note that you are about to rewrite the GitHub history of your branch. It is good
practice to involve your collaborators before modifying commits that have been
pushed to GitHub.

Your previously pushed commits are replaced by:

```console
git push --force origin my-new-feature
```

### Basing your changes on the latest master

To base your changes on the latest version of the
[rclone master](https://github.com/rclone/rclone/tree/master) (upstream):

```console
git checkout master
git fetch upstream
git merge --ff-only
git push origin --follow-tags    # optional update of your fork in GitHub
git checkout my-new-feature
git rebase master
```

If you rebase commits that have been pushed to GitHub, then you will have to
[replace your previously pushed commits](#replacing-your-previously-pushed-commits).

### Squashing your commits

To combine your commits into one commit:

```console
git log                          # To count the commits to squash, e.g. the last 2
git reset --soft HEAD~2          # To undo the 2 latest commits
git status                       # To check everything is as expected
```

If everything is fine, then make the new combined commit:

```console
git commit                       # To commit the undone commits as one
```

otherwise, you may roll back using:

```console
git reflog                       # To check that HEAD@{1} is your previous state
git reset --soft 'HEAD@{1}'      # To roll back to your previous state
```

If you squash commits that have been pushed to GitHub, then you will have to
[replace your previously pushed commits](#replacing-your-previously-pushed-commits).

Tip: You may like to use `git rebase -i master` if you are experienced or have a
more complex situation.

### GitHub Continuous Integration

rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions)
to build and test the project, which should be automatically available for your
fork too from the `Actions` tab in your repository.

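If you have the [GitHub CLI](https://cli.github.com/) installed you can also kick
off the build workflow on your branch by hand rather than waiting for a push - a
sketch only, assuming the workflow accepts manual dispatch in your fork:

```console
gh workflow run build.yml --ref my-new-feature
```
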
## Testing

### Code quality tests

If you install [golangci-lint](https://github.com/golangci/golangci-lint) then
you can run the same checks that get run in the CI, which can be very helpful.

You can run them with `make check` or with `golangci-lint run ./...`.

Using these tests ensures that the rclone codebase all uses the same coding
standards. These tests also check for easy mistakes to make (like forgetting
to check an error return).

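A minimal sketch of getting the linter locally and running it - the `go install`
path below is one common way to install it, see the golangci-lint docs for the
recommended method for your platform:

```console
go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
golangci-lint run ./...
```
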
### Quick testing

rclone's tests are run from the go testing framework, so at the top
level you can run this to run all the tests.

```console
go test -v ./...
```

You can also use `make`, if supported by your platform

```console
make quicktest
```

The quicktest is [automatically run by GitHub](#github-continuous-integration)
when you push your branch to GitHub.

### Backend testing

rclone contains a mixture of unit tests and integration tests.
Because it is difficult (and in some respects pointless) to test cloud

@@ -246,216 +182,142 @@ need to make a remote called `TestDrive`.

You can then run the unit tests in the drive directory. These tests
are skipped if `TestDrive:` isn't defined.

```console
cd backend/drive
go test -v
```

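If you don't have a `TestDrive:` remote yet, one way to create it is with
`rclone config create` - a sketch only, since most backends (including drive)
will prompt for, or need, backend-specific options and credentials:

```console
rclone config create TestDrive drive
rclone lsd TestDrive:
```
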
You can then run the integration tests which test all of rclone's
operations. Normally these get run against the local file system,
but they can be run against any of the remotes.

```console
cd fs/sync
go test -v -remote TestDrive:
go test -v -remote TestDrive: -fast-list

cd fs/operations
go test -v -remote TestDrive:
```

If you want to use the integration test framework to run these tests
altogether with an HTML report and test retries then from the
project root:

```console
go run ./fstest/test_all -backends drive
```

### Full integration testing

If you want to run all the integration tests against all the remotes,
then change into the project root and run

```console
make check
make test
```

The commands may require some extra go packages which you can install with

```console
make build_dep
```

The full integration tests are run daily on the integration test server. You can
find the results at <https://integration.rclone.org>

## Code Organisation

Rclone code is organised into a small number of top level directories
with modules beneath.

- backend - the rclone backends for interfacing to cloud providers -
  - all - import this to load all the cloud providers
  - ...providers
- bin - scripts for use while building or maintaining rclone
- cmd - the rclone commands
  - all - import this to load all the commands
  - ...commands
- cmdtest - end-to-end tests of commands, flags, environment variables,...
- docs - the documentation and website
  - content - adjust these docs only, except those marked autogenerated
    or portions marked autogenerated where the corresponding .go file must be
    edited instead, and everything else is autogenerated
  - commands - these are auto-generated, edit the corresponding .go file
- fs - main rclone definitions - minimal amount of code
  - accounting - bandwidth limiting and statistics
  - asyncreader - an io.Reader which reads ahead
  - config - manage the config file and flags
  - driveletter - detect if a name is a drive letter
  - filter - implements include/exclude filtering
  - fserrors - rclone specific error handling
  - fshttp - http handling for rclone
  - fspath - path handling for rclone
  - hash - defines rclone's hash types and functions
  - list - list a remote
  - log - logging facilities
  - march - iterates directories in lock step
  - object - in memory Fs objects
  - operations - primitives for sync, e.g. Copy, Move
  - sync - sync directories
  - walk - walk a directory
- fstest - provides integration test framework
  - fstests - integration tests for the backends
  - mockdir - mocks an fs.Directory
  - mockobject - mocks an fs.Object
  - test_all - Runs integration tests for everything
- graphics - the images used in the website, etc.
- lib - libraries used by the backend
  - atexit - register functions to run when rclone exits
  - dircache - directory ID to name caching
  - oauthutil - helpers for using oauth
  - pacer - retries with backoff and paces operations
  - readers - a selection of useful io.Readers
  - rest - a thin abstraction over net/http for REST
- librclone - in memory interface to rclone's API for embedding rclone
- vfs - Virtual FileSystem layer for implementing rclone mount and similar

## Writing Documentation

If you are adding a new feature then please update the documentation.

The documentation sources are generally in Markdown format, in conformance
with the CommonMark specification and compatible with GitHub Flavored
Markdown (GFM). The markdown format and style is checked as part of the lint
operation that runs automatically on pull requests, to enforce standards and
consistency. This is based on the [markdownlint](https://github.com/DavidAnson/markdownlint)
tool by David Anson, which can also be integrated into editors so you can
perform the same checks while writing. It generally follows Ciro Santilli's
[Markdown Style Guide](https://cirosantilli.com/markdown-style-guide), which
is a good source if you want to know more.

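If you want to run the same Markdown checks locally before pushing, one option
is the `markdownlint-cli` npm wrapper (an assumption here - any markdownlint
frontend should give equivalent results):

```console
npx markdownlint-cli docs/content/docs.md
```
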
HTML pages, served as website <rclone.org>, are generated from the Markdown,
using [Hugo](https://gohugo.io). Note that when generating the HTML pages,
a different algorithm is currently used for generating header anchors than
the one GitHub uses for its Markdown rendering. For example, in the HTML docs
generated by Hugo any leading `-` characters are ignored, which means when
linking to a header with text `--config string` we therefore need to use the
link `#config-string` in our Markdown source, which will not work in GitHub's
preview where `#--config-string` would be the correct link.

Most of the documentation is written directly in text files with extension
`.md`, mainly within folder `docs/content`. Note that several of these files
are autogenerated (e.g. the command documentation under `docs/content/commands`,
and `docs/content/flags.md`), or contain autogenerated portions (e.g. the
backend documentation). These are marked with an `autogenerated` comment.
The sources of the autogenerated text are usually Markdown formatted text
embedded as string values in the Go source code, so you need to locate these
and edit the `.go` file instead. The `MANUAL.*`, `rclone.1` and other text
files in the root of the repository are also autogenerated. The autogeneration
of files, and the website, will be done during the release process. See the
`make doc` and `make website` targets in the Makefile if you are interested in
how. You don't need to run these when adding a feature.

If you add a new general flag (not for a backend), then document it in
`docs/content/docs.md` - the flags there are supposed to be in
alphabetical order.

If you add a new backend option/flag, then it should be documented in
the source file in the `Help:` field:

- Start with the most important information about the option,
  as a single sentence on a single line.
  - This text will be used for the command-line flag help.
  - It will be combined with other information, such as any default value,
    and the result will look odd if not written as a single sentence.
  - It should end with a period/full stop character, which will be shown
    in docs but automatically removed when producing the flag help.
  - Try to keep it below 80 characters, to reduce text wrapping in the terminal.
- More details can be added in a new paragraph, after an empty line (`"\n\n"`).
  - Like with docs generated from Markdown, a single line break is ignored
    and two line breaks creates a new paragraph.
  - This text will be shown to the user in `rclone config`
    and in the docs (where it will be added by `make backenddocs`,
    normally run some time before next release).
- To create options of enumeration type use the `Examples:` field.
  - Each example value has its own `Help:` field, but they are treated
    a bit differently than the main option help text. They will be shown
    as an unordered list, therefore a single line break is enough to
    create a new list item. Also, for enumeration texts like name of
    countries, it looks better without an ending period/full stop character.
- You can run `make backenddocs` to verify the resulting Markdown.
  - This will update the autogenerated sections of the backend docs Markdown
    files under `docs/content`.
  - It requires you to have [Python](https://www.python.org) installed.
  - The `backenddocs` make target runs the Python script `bin/make_backend_docs.py`,
    and you can also run this directly, optionally with the name of a backend
    as argument to only update the docs for a specific backend.
  - **Do not** commit the updated Markdown files. This operation is run as part of
    the release process. Since any manual changes in the autogenerated sections
    of the Markdown files will then be lost, we have a pull request check that
    reports an error for any changes within the autogenerated sections. Should you
    have done manual changes outside of the autogenerated sections they must be
    committed, of course.
- You can run `make serve` to verify the resulting website.
  - This will build the website and serve it locally, so you can open it in
    your web browser and verify that the end result looks OK. Check specifically
    any added links, also in light of the note above regarding different algorithms
    for generated header anchors.
  - It requires you to have the [Hugo](https://gohugo.io) tool available.
  - The `serve` make target depends on the `website` target, which runs the
    `hugo` command from the `docs` directory to build the website, and then
    it serves the website locally with an embedded web server using a command
    `hugo server --logLevel info -w --disableFastRender --ignoreCache`, so you
    can run similar Hugo commands directly as well.

When writing documentation for an entirely new backend,
see [backend documentation](#backend-documentation).

If you are updating documentation for a command, you must do that in the
command source code, e.g. `cmd/ls/ls.go`. Write flag help strings as a single
sentence on a single line, without a period/full stop character at the end,
as it will be combined unmodified with other information (such as any default
value).

Note that you can use
[GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
for small changes in the docs which makes it very easy. Just remember the
caveat when linking to header anchors, noted above, which means that GitHub's
Markdown preview may not be an entirely reliable verification of the results.

After your changes have been merged, you can verify them on
[tip.rclone.org](https://tip.rclone.org). This site is updated daily with the
current state of the master branch at 07:00 UTC. The changes will be on the main
[rclone.org](https://rclone.org) site once they have been included in a release.

## Making a release

There are separate instructions for making a release in the RELEASE.md
file.

## Commit messages

Please make the first line of your commit message a summary of the
change that a user (not a developer) of rclone would like to read, and

@@ -479,13 +341,13 @@ change will get linked into the issue.

Here is an example of a short commit message:

```text
drive: add team drive support - fixes #885
```

And here is an example of a longer one:

```text
mount: fix hang on errored upload

In certain circumstances, if an upload failed then the mount could hang
@@ -496,7 +358,7 @@ error fixing the hang.
Fixes #1498
```

## Adding a dependency

rclone uses the [go
modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)

@@ -508,9 +370,7 @@ To add a dependency `github.com/ncw/new_dependency` see the

instructions below. These will fetch the dependency and add it to
`go.mod` and `go.sum`.

```console
go get github.com/ncw/new_dependency
```

You can add constraints on that package when doing `go get` (see the
go docs linked above), but don't unless you really need to.

@@ -518,17 +378,15 @@ go docs linked above), but don't unless you really need to.

Please check in the changes generated by `go mod` including `go.mod`
and `go.sum` in the same commit as your other changes.

## Updating a dependency

If you need to update a dependency then run

```console
go get golang.org/x/crypto
```

Check in a single commit as above.

## Updating all the dependencies

In order to update all the dependencies then run `make update`. This
just uses the go modules to update all the modules to their latest

@@ -537,7 +395,7 @@ stable release. Check in the changes in a single commit as above.

This should be done early in the release cycle to pick up new versions
of packages in time for them to get some testing.

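In practice that looks something like the following (the commit message is just
an example):

```console
make update
git add go.mod go.sum
git commit -m "build: update all dependencies"
```
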
## Updating a backend

If you update a backend then please run the unit tests and the
integration tests for that backend.

@@ -552,153 +410,105 @@ integration tests.

The next section goes into more detail about the tests.

## Writing a new backend

Choose a name. The docs here will use `remote` as an example.

Note that in rclone terminology a file system backend is called a
remote or an fs.

### Research

- Look at the interfaces defined in `fs/types.go`
- Study one or more of the existing remotes

### Getting going

- Create `backend/remote/remote.go` (copy this from a similar remote)
  - box is a good one to start from if you have a directory-based remote (and
    shows how to use the directory cache)
  - b2 is a good one to start from if you have a bucket-based remote
- Add your remote to the imports in `backend/all/all.go`
- HTTP based remotes are easiest to maintain if they use rclone's
  [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but
  if there is a really good Go SDK from the provider then use that instead.
- Try to implement as many optional methods as possible as it makes the remote
  more usable.
- Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to
  make sure we can encode any path name and `rclone info` to help determine the
  encodings needed
  - `rclone purge -v TestRemote:rclone-info`
  - `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
  - `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
  - open `remote.csv` in a spreadsheet and examine

### Guidelines for a speedy merge

- **Do** use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest)
  if you are implementing a REST like backend and parsing XML/JSON in the backend.
- **Do** use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp)
  if your backend is HTTP based - this adds features like `--dump bodies`,
  `--tpslimit`, `--user-agent` without you having to code anything!
- **Do** follow your example backend exactly - use the same code order, function
  names, layout, structure. **Don't** move stuff around and **Don't** delete the
  comments.
- **Do not** split your backend up into `fs.go` and `object.go` (there are a few
  backends like that - don't follow them!)
- **Do** put your API type definitions in a separate file - by preference `api/types.go`
- **Remember** we have >50 backends to maintain so keeping them as similar as
  possible to each other is a high priority!

### Unit tests

- Create a config entry called `TestRemote` for the unit tests to use
  (see the example after this list)
- Create a `backend/remote/remote_test.go` - copy and adjust your example remote
- Make sure all tests pass with `go test -v`

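A minimal sketch of those steps for a hypothetical backend called `remote` (most
real backends will need extra options or credentials when creating the config
entry):

```console
rclone config create TestRemote remote
cd backend/remote
go test -v
```
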
### Integration tests

- Add your backend to `fstest/test_all/config.yaml`
- Once you've done that then you can use the integration test framework from
  the project root:
  - `go run ./fstest/test_all -backends remote`

Or if you want to run the integration tests manually:

- Make sure integration tests pass with
  - `cd fs/operations`
  - `go test -v -remote TestRemote:`
  - `cd fs/sync`
  - `go test -v -remote TestRemote:`
- If your remote defines `ListR` check with this also
  - `go test -v -remote TestRemote: -fast-list`

See the [testing](#testing) section for more information on integration tests.

### Backend documentation

Add your backend to the docs - you'll need to pick an icon for it from
[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
alphabetical order of full name of remote (e.g. `drive` is ordered as
`Google Drive`) but with the local file system last.

- `README.md` - main GitHub page
- `docs/content/remote.md` - main docs page (note the backend options are
  automatically added to this file with `make backenddocs`)
  - make sure this has the `autogenerated options` comments in (see your
    reference backend docs)
  - update them in your backend with `bin/make_backend_docs.py remote`
- `docs/content/overview.md` - overview docs - add an entry into the Features
  table and the Optional Features table.
- `docs/content/docs.md` - list of remotes in config section
- `docs/content/_index.md` - front page of rclone.org
- `docs/layouts/chrome/navbar.html` - add it to the website navigation
- `bin/make_manual.py` - add the page to the `docs` constant

Once you've written the docs, run `make serve` and check they look OK
in the web browser and the links (internal and external) all work.

## Adding a new s3 provider

[Please see the guide in the S3 backend directory](backend/s3/README.md).

## Writing a plugin

New features (backends, commands) can also be added "out-of-tree", through Go
plugins. Changes will be kept in a dynamically loaded file instead of being
compiled into the main binary. This is useful if you can't merge your changes
upstream or don't want to maintain a fork of rclone.

### Usage

- Naming
  - Plugin names must have the pattern `librcloneplugin_KIND_NAME.so`.
  - `KIND` should be one of `backend`, `command` or `bundle`.
  - Example: A plugin with backend support for PiFS would be called
    `librcloneplugin_backend_pifs.so`.
- Loading
  - Supported on macOS & Linux as of now. ([Go issue for Windows support](https://github.com/golang/go/issues/19282))
  - Supported on rclone v1.50 or greater.
  - All plugins in the folder specified by variable `$RCLONE_PLUGIN_PATH` are loaded.
  - If this variable doesn't exist, plugin support is disabled.
  - Plugins must be compiled against the exact version of rclone to work.
    (The rclone used during building the plugin must be the same as the source
    of rclone)

### Building

To turn your existing additions into a Go plugin, move them to an external repository
and change the top-level package name to `main`.

Check `rclone --version` and make sure that the plugin's rclone dependency and
host Go version match.

Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.

[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)

[Minimal example](https://gist.github.com/terorie/21b517ee347828e899e1913efc1d684f)

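Putting the naming and loading rules above together, a build-and-load sketch
might look like this - the plugin name reuses the hypothetical PiFS example, and
the plugin folder is an arbitrary choice:

```console
# in the plugin's own repository (top-level package main)
go build -buildmode=plugin -o librcloneplugin_backend_pifs.so .

# make the plugin visible to rclone
mkdir -p ~/rclone-plugins
mv librcloneplugin_backend_pifs.so ~/rclone-plugins/
export RCLONE_PLUGIN_PATH=~/rclone-plugins
rclone version
```
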
## Keeping a backend or command out of tree

Rclone was designed to be modular so it is very easy to keep a backend
or a command out of the main rclone source tree.

So for example if you had a backend which accessed your proprietary
systems or a command which was specialised for your needs you could
add them out of tree.

This may be easier than using a plugin and is supported on all
platforms not just macOS and Linux.

This is explained further in <https://github.com/rclone/rclone_out_of_tree_example>
which has an example of an out of tree backend `ram` (which is a
renamed version of the `memory` backend).

45 Dockerfile
@@ -1,47 +1,18 @@

FROM golang:alpine AS builder

ARG CGO_ENABLED=0

WORKDIR /go/src/github.com/rclone/rclone/

RUN echo "**** Set Go Environment Variables ****" && \
  go env -w GOCACHE=/root/.cache/go-build

RUN echo "**** Install Dependencies ****" && \
  apk add --no-cache \
    make \
    bash \
    gawk \
    git

COPY go.mod .
COPY go.sum .

RUN echo "**** Download Go Dependencies ****" && \
  go mod download -x

RUN echo "**** Verify Go Dependencies ****" && \
  go mod verify

COPY . .

RUN --mount=type=cache,target=/root/.cache/go-build,sharing=locked \
  echo "**** Build Binary ****" && \
  make

RUN echo "**** Print Version Binary ****" && \
  ./rclone version

# Begin final image
FROM alpine:latest

RUN echo "**** Install Dependencies ****" && \
  apk add --no-cache \
    ca-certificates \
    fuse3 \
    tzdata && \
  echo "Enable user_allow_other in fuse" && \
  echo "user_allow_other" >> /etc/fuse.conf

COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/

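To try this Dockerfile out locally you can build and run it along these lines
(the tag is only an example; BuildKit is needed for the `--mount=type=cache`
step):

```console
DOCKER_BUILDKIT=1 docker build -t rclone-dev .
docker run --rm --entrypoint rclone rclone-dev version
```
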
125 MAINTAINERS.md
@@ -1,4 +1,4 @@
|
|||||||
# Maintainers guide for rclone
|
# Maintainers guide for rclone #
|
||||||
|
|
||||||
Current active maintainers of rclone are:
|
Current active maintainers of rclone are:
|
||||||
|
|
||||||
@@ -16,116 +16,81 @@ Current active maintainers of rclone are:
|
|||||||
| Max Sum | @Max-Sum | union backend |
|
| Max Sum | @Max-Sum | union backend |
|
||||||
| Fred | @creativeprojects | seafile backend |
|
| Fred | @creativeprojects | seafile backend |
|
||||||
| Caleb Case | @calebcase | storj backend |
|
| Caleb Case | @calebcase | storj backend |
|
||||||
| wiserain | @wiserain | pikpak backend |
|
|
||||||
| albertony | @albertony | |
|
|
||||||
| Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
|
|
||||||
| Hideo Aoyama | @boukendesho | snap packaging |
|
|
||||||
| nielash | @nielash | bisync |
|
|
||||||
| Dan McArdle | @dmcardle | gitannex |
|
|
||||||
| Sam Harrison | @childish-sambino | filescom |
|
|
||||||
|
|
||||||
## This is a work in progress draft
|
**This is a work in progress Draft**
|
||||||
|
|
||||||
This is a guide for how to be an rclone maintainer. This is mostly a write-up
|
This is a guide for how to be an rclone maintainer. This is mostly a write-up of what I (@ncw) attempt to do.
|
||||||
of what I (@ncw) attempt to do.
|
|
||||||
|
|
||||||
## Triaging Tickets
|
## Triaging Tickets ##
|
||||||
|
|
||||||
When a ticket comes in it should be triaged. This means it should be classified
|
When a ticket comes in it should be triaged. This means it should be classified by adding labels and placed into a milestone. Quite a lot of tickets need a bit of back and forth to determine whether it is a valid ticket so tickets may remain without labels or milestone for a while.
|
||||||
by adding labels and placed into a milestone. Quite a lot of tickets need a bit
|
|
||||||
of back and forth to determine whether it is a valid ticket so tickets may
|
|
||||||
remain without labels or milestone for a while.
|
|
||||||
|
|
||||||
Rclone uses the labels like this:
|
Rclone uses the labels like this:
|
||||||
|
|
||||||
- `bug` - a definitely verified bug
|
* `bug` - a definitely verified bug
|
||||||
- `can't reproduce` - a problem which we can't reproduce
|
* `can't reproduce` - a problem which we can't reproduce
|
||||||
- `doc fix` - a bug in the documentation - if users need help understanding the
|
* `doc fix` - a bug in the documentation - if users need help understanding the docs add this label
|
||||||
docs add this label
|
* `duplicate` - normally close these and ask the user to subscribe to the original
|
||||||
- `duplicate` - normally close these and ask the user to subscribe to the original
|
* `enhancement: new remote` - a new rclone backend
|
||||||
- `enhancement: new remote` - a new rclone backend
|
* `enhancement` - a new feature
|
||||||
- `enhancement` - a new feature
|
* `FUSE` - to do with `rclone mount` command
|
||||||
- `FUSE` - to do with `rclone mount` command
|
* `good first issue` - mark these if you find a small self-contained issue - these get shown to new visitors to the project
|
||||||
- `good first issue` - mark these if you find a small self-contained issue -
|
* `help` wanted - mark these if you find a self-contained issue - these get shown to new visitors to the project
|
||||||
these get shown to new visitors to the project
|
* `IMPORTANT` - note to maintainers not to forget to fix this for the release
|
||||||
- `help` wanted - mark these if you find a self-contained issue - these get
|
* `maintenance` - internal enhancement, code re-organisation, etc.
|
||||||
shown to new visitors to the project
|
* `Needs Go 1.XX` - waiting for that version of Go to be released
|
||||||
- `IMPORTANT` - note to maintainers not to forget to fix this for the release
|
* `question` - not a `bug` or `enhancement` - direct to the forum for next time
|
||||||
- `maintenance` - internal enhancement, code re-organisation, etc.
|
* `Remote: XXX` - which rclone backend this affects
|
||||||
- `Needs Go 1.XX` - waiting for that version of Go to be released
|
* `thinking` - not decided on the course of action yet
|
||||||
- `question` - not a `bug` or `enhancement` - direct to the forum for next time
|
|
||||||
- `Remote: XXX` - which rclone backend this affects
|
|
||||||
- `thinking` - not decided on the course of action yet
|
|
||||||
|
|
||||||
If it turns out to be a bug or an enhancement it should be tagged as such, with
the appropriate other tags. Don't forget the "good first issue" tag to give new
contributors something easy to do to get going.

When a ticket is tagged it should be added to a milestone, either the next
release, the one after, Soon or Help Wanted. Bugs can be added to the
"Known Bugs" milestone if they aren't planned to be fixed or need to wait for
something (e.g. the next go release).

The milestones have these meanings:

- v1.XX - stuff we would like to fit into this release
- v1.XX+1 - stuff we are leaving until the next release
- Soon - stuff we think is a good idea - waiting to be scheduled for a release
- Help wanted - blue sky stuff that might get moved up, or someone could help with
- Known bugs - bugs waiting on external factors or we aren't going to fix for
  the moment

Tickets [with no milestone](https://github.com/rclone/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile)
are good candidates for ones that have slipped between the gaps and need
following up.
## Closing Tickets

Close tickets as soon as you can - make sure they are tagged with a release.
Post a link to a beta in the ticket with the fix in, asking for feedback.

## Pull requests

Try to process pull requests promptly!

Merging pull requests on GitHub itself works quite well nowadays so you can
squash and rebase or rebase pull requests. rclone doesn't use merge commits.
Use the squash and rebase option if you need to edit the commit message.

After merging the commit, in your local master branch, do `git pull` then run
`bin/update-authors.py` to update the authors file then `git push`, as shown
in the sketch below.

Sometimes pull requests need to be left open for a while - this is especially
true of contributions of new backends which take a long time to get right.
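A minimal sketch of that post-merge tidy-up, assuming the merge has already
happened on GitHub and your local checkout tracks `origin/master`:

```console
git checkout master
git pull                  # pick up the commit you just merged on GitHub
bin/update-authors.py     # refresh the authors file
git push
```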
## Merges

If you are merging a branch locally then do `git merge --ff-only branch-name` to
avoid a merge commit. You'll need to rebase the branch if it doesn't merge cleanly.
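A minimal sketch of such a local merge, assuming the branch has already been
rebased onto the current master (`branch-name` is a placeholder):

```console
git checkout master
git merge --ff-only branch-name   # fails rather than creating a merge commit
git push origin master
```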
## Release cycle

Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer
if there is something big to merge that didn't stabilize properly or for
personal reasons.

High impact regressions should be fixed before the next release.

Near the start of the release cycle, the dependencies should be updated with
`make update` to give time for bugs to surface (see the sketch after this
section).

Towards the end of the release cycle try not to merge anything too big to let
things settle down.

Follow the instructions in RELEASE.md for making the release. Note that the
testing part is the most time-consuming, often needing several rounds of test
and fix depending on exactly how many new features rclone has gained.
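A minimal sketch of that early-cycle dependency refresh, using targets from the
Makefile (run it well before the release so problems have time to surface):

```console
make update      # update direct and indirect dependencies and test dependencies
make quicktest   # confirm rclone still builds and the quick tests still pass
```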
## Mailing list

There is now an invite-only mailing list for rclone developers `rclone-dev` on
google groups.

## TODO

I should probably make a <dev@rclone.org> to register with cloud providers.
61764  MANUAL.html (generated) - file diff suppressed because it is too large
36084  MANUAL.txt (generated) - file diff suppressed because it is too large
74     Makefile
@@ -30,37 +30,29 @@ ifdef RELEASE_TAG
 TAG := $(RELEASE_TAG)
 endif
 GO_VERSION := $(shell go version)
-GO_OS := $(shell go env GOOS)
 ifdef BETA_SUBDIR
 	BETA_SUBDIR := /$(BETA_SUBDIR)
 endif
 BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR)
 BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
-BETA_UPLOAD_ROOT := beta.rclone.org:
+BETA_UPLOAD_ROOT := memstore:beta-rclone-org
 BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
 # Pass in GOTAGS=xyz on the make command line to set build tags
 ifdef GOTAGS
 BUILDTAGS=-tags "$(GOTAGS)"
 LINTTAGS=--build-tags "$(GOTAGS)"
 endif
-LDFLAGS=--ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)"
 
 .PHONY: rclone test_all vars version
 
 rclone:
-ifeq ($(GO_OS),windows)
-	go run bin/resource_windows.go -version $(TAG) -syso resource_windows_`go env GOARCH`.syso
-endif
-	go build -v $(LDFLAGS) $(BUILDTAGS) $(BUILD_ARGS)
-ifeq ($(GO_OS),windows)
-	rm resource_windows_`go env GOARCH`.syso
-endif
+	go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS)
 	mkdir -p `go env GOPATH`/bin/
 	cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
 	mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
 
 test_all:
-	go install $(LDFLAGS) $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all
+	go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all
 
 vars:
 	@echo SHELL="'$(SHELL)'"
@@ -74,10 +66,6 @@ btest:
 	@echo "[$(TAG)]($(BETA_URL)) on branch [$(BRANCH)](https://github.com/rclone/rclone/tree/$(BRANCH)) (uploaded in 15-30 mins)" | xclip -r -sel clip
 	@echo "Copied markdown of beta release to clip board"
 
-btesth:
-	@echo "<a href="$(BETA_URL)">$(TAG)</a> on branch <a href="https://github.com/rclone/rclone/tree/$(BRANCH)">$(BRANCH)</a> (uploaded in 15-30 mins)" | xclip -r -sel clip -t text/html
-	@echo "Copied beta release in HTML to clip board"
-
 version:
 	@echo '$(TAG)'
 
@@ -88,47 +76,47 @@ test: rclone test_all
 
 # Quick test
 quicktest:
-	RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) ./...
+	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) ./...
 
 racequicktest:
-	RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -cpu=2 -race ./...
+	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...
 
-compiletest:
-	RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -run XXX ./...
-
 # Do source code quality checks
 check: rclone
 	@echo "-- START CODE QUALITY REPORT -------------------------------"
 	@golangci-lint run $(LINTTAGS) ./...
-	@bin/markdown-lint
 	@echo "-- END CODE QUALITY REPORT ---------------------------------"
 
 # Get the build dependencies
 build_dep:
-	go run bin/get-github-release.go -use-api -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
+	go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
 
 # Get the release dependencies we only install on linux
 release_dep_linux:
-	go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest
+	go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64\.tar\.gz'
 
+# Get the release dependencies we only install on Windows
+release_dep_windows:
+	GOOS="" GOARCH="" go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@latest
+
 # Update dependencies
 showupdates:
 	@echo "*** Direct dependencies that could be updated ***"
-	@go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null
+	@GO111MODULE=on go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null
 
 # Update direct dependencies only
 updatedirect:
-	go get $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
-	go mod tidy
+	GO111MODULE=on go get -d $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
+	GO111MODULE=on go mod tidy
 
 # Update direct and indirect dependencies and test dependencies
 update:
-	go get -u -t ./...
-	go mod tidy
+	GO111MODULE=on go get -d -u -t ./...
+	GO111MODULE=on go mod tidy
 
 # Tidy the module dependencies
 tidy:
-	go mod tidy
+	GO111MODULE=on go mod tidy
 
 doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
 
@@ -145,23 +133,17 @@ MANUAL.txt: MANUAL.md
 	pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt
 
 commanddocs: rclone
-	go generate ./lib/transform
-	-@rmdir -p '$$HOME/.config/rclone'
-	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
-	@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
-	go run bin/make_bisync_docs.go ./docs/content/
+	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/
 
 backenddocs: rclone bin/make_backend_docs.py
-	-@rmdir -p '$$HOME/.config/rclone'
 	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
-	@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
 
 rcdocs: rclone
 	bin/make_rc_docs.sh
 
 install: rclone
 	install -d ${DESTDIR}/usr/bin
-	install ${GOPATH}/bin/rclone ${DESTDIR}/usr/bin
+	install -t ${DESTDIR}/usr/bin ${GOPATH}/bin/rclone
 
 clean:
 	go clean ./...
@@ -175,7 +157,7 @@ website:
 	@if grep -R "raw HTML omitted" docs/public ; then echo "ERROR: found unescaped HTML - fix the markdown source" ; fi
 
 upload_website: website
-	rclone -v sync docs/public www.rclone.org:
+	rclone -v sync docs/public memstore:www-rclone-org
 
 upload_test_website: website
 	rclone -P sync docs/public test-rclone-org:
@@ -202,8 +184,8 @@ check_sign:
 	cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c
 
 upload:
-	rclone -P copy build/ downloads.rclone.org:/$(TAG)
-	rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "downloads.rclone.org:/$(TAG)/$$i" "downloads.rclone.org:/$$j"'
+	rclone -P copy build/ memstore:downloads-rclone-org/$(TAG)
+	rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "memstore:downloads-rclone-org/$(TAG)/$$i" "memstore:downloads-rclone-org/$$j"'
 
 upload_github:
 	./bin/upload-github $(TAG)
@@ -213,7 +195,7 @@ cross: doc
 
 beta:
 	go run bin/cross-compile.go $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
-	rclone -v copy build/ pub.rclone.org:/$(TAG)
+	rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
 	@echo Beta release ready at https://pub.rclone.org/$(TAG)/
 
 log_since_last_release:
@@ -226,18 +208,18 @@ ci_upload:
 	sudo chown -R $$USER build
 	find build -type l -delete
 	gzip -r9v build
-	./rclone --no-check-dest --config bin/ci.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
+	./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
 ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
-	./rclone --no-check-dest --config bin/ci.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
+	./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
endif
 	@echo Beta release ready at $(BETA_URL)/testbuilds
 
 ci_beta:
 	git log $(LAST_TAG).. > /tmp/git-log.txt
 	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
-	rclone --no-check-dest --config bin/ci.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
+	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
-	rclone --no-check-dest --config bin/ci.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
+	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
endif
 	@echo Beta release ready at $(BETA_URL)
 
@@ -246,7 +228,7 @@ fetch_binaries:
 	rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/
 
 serve: website
-	cd docs && hugo server --logLevel info -w --disableFastRender --ignoreCache
+	cd docs && hugo server -v -w --disableFastRender
 
 tag: retag doc
 	bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new
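For local development the targets above are normally driven straight from make.
A minimal sketch (the target names come from the Makefile shown here; exactly
what each one passes to go is defined inside the Makefile itself):

```console
make build_dep   # fetch golangci-lint used by the check target
make rclone      # build the binary and install it into $GOPATH/bin
make quicktest   # run the quick test suite with RCLONE_CONFIG=/notfound
make check       # run the source code quality checks
```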
235  README.md

<!-- markdownlint-disable-next-line first-line-heading no-inline-html -->
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
<!-- markdownlint-disable-next-line no-inline-html -->
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)

[Website](https://rclone.org) |

# Rclone

Rclone *("rsync for cloud storage")* is a command-line program to sync files and
directories to and from different cloud storage providers.

## Storage providers

- 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
- Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
- Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
- Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
- ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
- Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
- Box [:page_facing_up:](https://rclone.org/box/)
- Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
- China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
- Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
- Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
- Cubbit DS3 [:page_facing_up:](https://rclone.org/s3/#Cubbit)
- DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
- Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
- Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
- Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
- Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
- Exaba [:page_facing_up:](https://rclone.org/s3/#exaba)
- Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
- FileLu [:page_facing_up:](https://rclone.org/filelu/)
- Files.com [:page_facing_up:](https://rclone.org/filescom/)
- FlashBlade [:page_facing_up:](https://rclone.org/s3/#pure-storage-flashblade)
- FTP [:page_facing_up:](https://rclone.org/ftp/)
- GoFile [:page_facing_up:](https://rclone.org/gofile/)
- Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
- Google Drive [:page_facing_up:](https://rclone.org/drive/)
- Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
- HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
- Hetzner Object Storage [:page_facing_up:](https://rclone.org/s3/#hetzner)
- Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
- HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
- HTTP [:page_facing_up:](https://rclone.org/http/)
- Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
- iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/)
- ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
- Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
- Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
- IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
- Intercolo Object Storage [:page_facing_up:](https://rclone.org/s3/#intercolo)
- IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
- Koofr [:page_facing_up:](https://rclone.org/koofr/)
- Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
- Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
- Linkbox [:page_facing_up:](https://rclone.org/linkbox)
- Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode)
- Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu)
- Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
- Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
- MEGA [:page_facing_up:](https://rclone.org/mega/)
- MEGA S4 Object Storage [:page_facing_up:](https://rclone.org/s3/#mega)
- Memory [:page_facing_up:](https://rclone.org/memory/)
- Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
- Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/)
- Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
- Minio [:page_facing_up:](https://rclone.org/s3/#minio)
- Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
- Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
- OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
- OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
- Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
- Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
- Outscale [:page_facing_up:](https://rclone.org/s3/#outscale)
- OVHcloud Object Storage (Swift) [:page_facing_up:](https://rclone.org/swift/)
- OVHcloud Object Storage (S3-compatible) [:page_facing_up:](https://rclone.org/s3/#ovhcloud)
- ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
- pCloud [:page_facing_up:](https://rclone.org/pcloud/)
- Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
- PikPak [:page_facing_up:](https://rclone.org/pikpak/)
- Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/)
- premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
- put.io [:page_facing_up:](https://rclone.org/putio/)
- Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
- QingStor [:page_facing_up:](https://rclone.org/qingstor/)
- Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
- Rabata Cloud Storage [:page_facing_up:](https://rclone.org/s3/#Rabata)
- Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
- Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
- RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
- rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
- Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
- Seafile [:page_facing_up:](https://rclone.org/seafile/)
- Seagate Lyve Cloud [:page_facing_up:](https://rclone.org/s3/#lyve)
- SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
- Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
- Servercore Object Storage [:page_facing_up:](https://rclone.org/s3/#servercore)
- SFTP [:page_facing_up:](https://rclone.org/sftp/)
- SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
- Spectra Logic [:page_facing_up:](https://rclone.org/s3/#spectralogic)
- StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
- Storj [:page_facing_up:](https://rclone.org/storj/)
- SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
- Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
- Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
- Uloz.to [:page_facing_up:](https://rclone.org/ulozto/)
- Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
- WebDAV [:page_facing_up:](https://rclone.org/webdav/)
- Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
- Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
- Zata.ai [:page_facing_up:](https://rclone.org/s3/#Zata)
- The local filesystem [:page_facing_up:](https://rclone.org/local/)

Please see [the full list of all storage providers and their features](https://rclone.org/overview/)

These backends adapt or modify other storage providers

- Alias: rename existing remotes [:page_facing_up:](https://rclone.org/alias/)
- Archive: read archive files [:page_facing_up:](https://rclone.org/archive/)
- Cache: cache remotes (DEPRECATED) [:page_facing_up:](https://rclone.org/cache/)
- Chunker: split large files [:page_facing_up:](https://rclone.org/chunker/)
- Combine: combine multiple remotes into a directory tree [:page_facing_up:](https://rclone.org/combine/)
- Compress: compress files [:page_facing_up:](https://rclone.org/compress/)
- Crypt: encrypt files [:page_facing_up:](https://rclone.org/crypt/)
- Hasher: hash files [:page_facing_up:](https://rclone.org/hasher/)
- Union: join multiple remotes to work together [:page_facing_up:](https://rclone.org/union/)

## Features

- MD5/SHA-1 hashes checked at all times for file integrity
- Timestamps preserved on files
- Partial syncs supported on a whole file basis
- [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
- [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
- [Bisync](https://rclone.org/bisync/) (two way) to keep two directories in sync bidirectionally
- [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
- Can sync to and from network, e.g. two different cloud accounts
- Optional large file chunking ([Chunker](https://rclone.org/chunker/))
- Optional transparent compression ([Compress](https://rclone.org/compress/))
- Optional encryption ([Crypt](https://rclone.org/crypt/))
- Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
- Multi-threaded downloads to local disk
- Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDAV/FTP/SFTP/DLNA

## Installation & documentation

Please see the [rclone website](https://rclone.org/) for:

- [Installation](https://rclone.org/install/)
- [Documentation & configuration](https://rclone.org/docs/)
- [Changelog](https://rclone.org/changelog/)
- [FAQ](https://rclone.org/faq/)
- [Storage providers](https://rclone.org/overview/)
- [Forum](https://forum.rclone.org/)
- ...and more

## Downloads

- <https://rclone.org/downloads/>

## License

This is free software under the terms of the MIT license (check the
[COPYING file](/COPYING) included in this package).
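The copy, sync and check modes listed above map directly onto rclone
subcommands. A minimal sketch (the remote name `remote:` is a placeholder for
whatever was set up with `rclone config`):

```console
rclone copy  /home/me/photos remote:photos   # copy new/changed files only
rclone sync  /home/me/photos remote:photos   # make the destination identical (one way)
rclone check /home/me/photos remote:photos   # compare sizes and hashes
```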
240  RELEASE.md

## Extra required software for making a release

- [gh the github cli](https://github.com/cli/cli) for uploading packages
- pandoc for making the html and man pages

## Making a release

- git checkout master # see below for stable branch
- git pull # IMPORTANT
- git status - make sure everything is checked in
- Check GitHub actions build for master is Green
- make test # see integration test server or run locally
- make tag
- edit docs/content/changelog.md # make sure to remove duplicate logs from point releases
- make tidy
- make doc
- git status - to check for new man pages - git add them
- git commit -a -v -m "Version v1.XX.0"
- make retag
- git push origin # without --follow-tags so it doesn't push the tag if it fails
- git push --follow-tags origin
- \# Wait for the GitHub builds to complete then...
- make fetch_binaries
- make tarball
- make vendorball
- make sign_upload
- make check_sign
- make upload
- make upload_website
- make upload_github
- make startdev # make startstable for stable branch
- \# announce with forum post, twitter post, patreon post

## Update dependencies

Early in the next release cycle update the dependencies.

- Review any pinned packages in go.mod and remove if possible
- `make updatedirect`
- `make GOTAGS=cmount`
- `make compiletest`
- Fix anything which doesn't compile at this point and commit changes here
- `git commit -a -v -m "build: update all dependencies"`

If the `make updatedirect` upgrades the version of go in the `go.mod`

```text
go 1.22.0
```

then go to manual mode. `go1.22` here is the lowest supported version
in the `go.mod`.

If `make updatedirect` added a `toolchain` directive then remove it.
We don't want to force a toolchain on our users. Linux packagers are
often using a version of Go that is a few versions out of date.

```console
go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
go get -d $(cat /tmp/potential-upgrades)
go mod tidy -go=1.22 -compat=1.22
```

If the `go mod tidy` fails use the output from it to remove the
package which can't be upgraded from `/tmp/potential-upgrades` when
done

```console
git co go.mod go.sum
```

And try again.

Optionally upgrade the direct and indirect dependencies. This is very
likely to fail if the manual method was used above - in that case
ignore it as it is too time consuming to fix.

- `make update`
- `make GOTAGS=cmount`
- `make compiletest`
- roll back any updates which didn't compile
- `git commit -a -v --amend`
- **NB** watch out for this changing the default go version in `go.mod`

Note that `make update` updates all direct and indirect dependencies
and there can occasionally be forwards compatibility problems with
doing that so it may be necessary to roll back dependencies to the
version specified by `make updatedirect` in order to get rclone to
build.

Once it compiles locally, push it on a test branch and commit fixes
until the tests pass.

### Major versions

The above procedure will not upgrade major versions, so v2 to v3.
However this tool can show which major versions might need to be
upgraded:

```console
go run github.com/icholy/gomajor@latest list -major
```

Expect API breakage when updating major versions.

## Tidy beta

At some point after the release run

```console
bin/tidy-beta v1.55
```

where the version number is that of a couple ago to remove old beta binaries.

## Making a point release

If rclone needs a point release due to some horrendous bug:

Set vars

- BASE_TAG=v1.XX # e.g. v1.52
- NEW_TAG=${BASE_TAG}.Y # e.g. v1.52.1
- echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1

First make the release branch. If this is a second point release then
this will be done already.

- git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
- make startstable

Now

- git co ${BASE_TAG}-stable
- git cherry-pick any fixes
- make startstable
- Do the steps as above
- git co master
- `#` cherry pick the changes to the changelog - check the diff to make sure it is correct
- git checkout ${BASE_TAG}-stable docs/content/changelog.md
- git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
- git push

## Sponsor logos

If updating the website note that the sponsor logos have been moved out of the
main repository.

You will need to checkout `/docs/static/img/logos` from <https://github.com/rclone/third-party-logos>
which is a private repo containing artwork from sponsors.

## Update the website between releases

Create an update website branch based off the last release

```console
git co -b update-website
```

If the branch already exists, double check there are no commits that need saving.

Now reset the branch to the last release

```console
git reset --hard v1.64.0
```

Create the changes, check them in, test with `make serve` then

```console
make upload_test_website
```

Check out <https://test.rclone.org> and when happy

```console
make upload_website
```

Cherry pick any changes back to master and the stable branch if it is active.

## Making a manual build of docker

To do a basic build of rclone's docker image to debug builds locally:

```console
docker buildx build --load -t rclone/rclone:testing --progress=plain .
docker run --rm rclone/rclone:testing version
```

To test the multiplatform build

```console
docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 .
```

To make a full build then set the tags correctly and add `--push`

Note that you can't only build one architecture - you need to build them all.

```console
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
```
@@ -1,4 +1,3 @@
-// Package alias implements a virtual provider to rename existing remotes.
 package alias
 
 import (
@@ -23,8 +23,8 @@ func prepare(t *testing.T, root string) {
 	configfile.Install()
 
 	// Configure the remote
-	config.FileSetValue(remoteName, "type", "alias")
-	config.FileSetValue(remoteName, "remote", root)
+	config.FileSet(remoteName, "type", "alias")
+	config.FileSet(remoteName, "remote", root)
 }
 
 func TestNewFS(t *testing.T) {
@@ -81,12 +81,10 @@ func TestNewFS(t *testing.T) {
 	for i, gotEntry := range gotEntries {
 		what := fmt.Sprintf("%s, entry=%d", what, i)
 		wantEntry := test.entries[i]
-		_, isDir := gotEntry.(fs.Directory)
 
 		require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
-		if !isDir {
-			require.Equal(t, wantEntry.size, gotEntry.Size(), what)
-		}
+		require.Equal(t, wantEntry.size, gotEntry.Size(), what)
+		_, isDir := gotEntry.(fs.Directory)
 		require.Equal(t, wantEntry.isDir, isDir, what)
 	}
 }
@@ -1,41 +1,32 @@
-// Package all imports all the backends
 package all
 
 import (
 	// Active file systems
 	_ "github.com/rclone/rclone/backend/alias"
-	_ "github.com/rclone/rclone/backend/archive"
+	_ "github.com/rclone/rclone/backend/amazonclouddrive"
 	_ "github.com/rclone/rclone/backend/azureblob"
-	_ "github.com/rclone/rclone/backend/azurefiles"
 	_ "github.com/rclone/rclone/backend/b2"
 	_ "github.com/rclone/rclone/backend/box"
 	_ "github.com/rclone/rclone/backend/cache"
 	_ "github.com/rclone/rclone/backend/chunker"
-	_ "github.com/rclone/rclone/backend/cloudinary"
 	_ "github.com/rclone/rclone/backend/combine"
 	_ "github.com/rclone/rclone/backend/compress"
 	_ "github.com/rclone/rclone/backend/crypt"
-	_ "github.com/rclone/rclone/backend/doi"
 	_ "github.com/rclone/rclone/backend/drive"
 	_ "github.com/rclone/rclone/backend/dropbox"
 	_ "github.com/rclone/rclone/backend/fichier"
 	_ "github.com/rclone/rclone/backend/filefabric"
-	_ "github.com/rclone/rclone/backend/filelu"
-	_ "github.com/rclone/rclone/backend/filescom"
 	_ "github.com/rclone/rclone/backend/ftp"
-	_ "github.com/rclone/rclone/backend/gofile"
 	_ "github.com/rclone/rclone/backend/googlecloudstorage"
 	_ "github.com/rclone/rclone/backend/googlephotos"
 	_ "github.com/rclone/rclone/backend/hasher"
 	_ "github.com/rclone/rclone/backend/hdfs"
 	_ "github.com/rclone/rclone/backend/hidrive"
 	_ "github.com/rclone/rclone/backend/http"
-	_ "github.com/rclone/rclone/backend/iclouddrive"
-	_ "github.com/rclone/rclone/backend/imagekit"
+	_ "github.com/rclone/rclone/backend/hubic"
 	_ "github.com/rclone/rclone/backend/internetarchive"
 	_ "github.com/rclone/rclone/backend/jottacloud"
 	_ "github.com/rclone/rclone/backend/koofr"
-	_ "github.com/rclone/rclone/backend/linkbox"
 	_ "github.com/rclone/rclone/backend/local"
 	_ "github.com/rclone/rclone/backend/mailru"
 	_ "github.com/rclone/rclone/backend/mega"
@@ -43,25 +34,18 @@ import (
 	_ "github.com/rclone/rclone/backend/netstorage"
 	_ "github.com/rclone/rclone/backend/onedrive"
 	_ "github.com/rclone/rclone/backend/opendrive"
-	_ "github.com/rclone/rclone/backend/oracleobjectstorage"
 	_ "github.com/rclone/rclone/backend/pcloud"
-	_ "github.com/rclone/rclone/backend/pikpak"
-	_ "github.com/rclone/rclone/backend/pixeldrain"
 	_ "github.com/rclone/rclone/backend/premiumizeme"
-	_ "github.com/rclone/rclone/backend/protondrive"
 	_ "github.com/rclone/rclone/backend/putio"
 	_ "github.com/rclone/rclone/backend/qingstor"
-	_ "github.com/rclone/rclone/backend/quatrix"
 	_ "github.com/rclone/rclone/backend/s3"
 	_ "github.com/rclone/rclone/backend/seafile"
 	_ "github.com/rclone/rclone/backend/sftp"
 	_ "github.com/rclone/rclone/backend/sharefile"
 	_ "github.com/rclone/rclone/backend/sia"
-	_ "github.com/rclone/rclone/backend/smb"
 	_ "github.com/rclone/rclone/backend/storj"
 	_ "github.com/rclone/rclone/backend/sugarsync"
 	_ "github.com/rclone/rclone/backend/swift"
-	_ "github.com/rclone/rclone/backend/ulozto"
 	_ "github.com/rclone/rclone/backend/union"
 	_ "github.com/rclone/rclone/backend/uptobox"
 	_ "github.com/rclone/rclone/backend/webdav"
1370	backend/amazonclouddrive/amazonclouddrive.go	Normal file	(file diff suppressed because it is too large)
21	backend/amazonclouddrive/amazonclouddrive_test.go	Normal file
@@ -0,0 +1,21 @@
// Test AmazonCloudDrive filesystem interface

//go:build acd
// +build acd

package amazonclouddrive_test

import (
	"testing"

	"github.com/rclone/rclone/backend/amazonclouddrive"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
	fstests.RemoteName = "TestAmazonCloudDrive:"
	fstests.Run(t)
}
@@ -1,679 +0,0 @@
//go:build !plan9

// Package archive implements a backend to access archive files in a remote
package archive

// FIXME factor common code between backends out - eg VFS initialization

// FIXME can we generalize the VFS handle caching and use it in zip backend

// Factor more stuff out if possible

// Odd stats which are probably coming from the VFS
// * tensorflow.sqfs: 0% /3.074Gi, 204.426Ki/s, 4h22m46s

// FIXME this will perform poorly for unpacking as the VFS Reader is bad
// at multiple streams - need cache mode setting?

import (
	"context"
	"errors"
	"fmt"
	"io"
	"path"
	"strings"
	"sync"
	"time"

	// Import all the required archivers here
	_ "github.com/rclone/rclone/backend/archive/squashfs"
	_ "github.com/rclone/rclone/backend/archive/zip"

	"github.com/rclone/rclone/backend/archive/archiver"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/cache"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fspath"
	"github.com/rclone/rclone/fs/hash"
)

// Register with Fs
func init() {
	fsi := &fs.RegInfo{
		Name:        "archive",
		Description: "Read archives",
		NewFs:       NewFs,
		MetadataInfo: &fs.MetadataInfo{
			Help: `Any metadata supported by the underlying remote is read and written.`,
		},
		Options: []fs.Option{{
			Name: "remote",
			Help: `Remote to wrap to read archives from.

Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
"myremote:bucket" or "myremote:".

If this is left empty, then the archive backend will use the root as
the remote.

This means that you can use :archive:remote:path and it will be
equivalent to setting remote="remote:path".
`,
			Required: false,
		}},
	}
	fs.Register(fsi)
}

// Options defines the configuration for this backend
type Options struct {
	Remote string `config:"remote"`
}

// Fs represents an archive of upstreams
type Fs struct {
	name     string       // name of this remote
	features *fs.Features // optional features
	opt      Options      // options for this Fs
	root     string       // the path we are working on
	f        fs.Fs        // remote we are wrapping
	wrapper  fs.Fs        // fs that wraps us

	mu       sync.Mutex          // protects the below
	archives map[string]*archive // the archives we have, by path
}

// A single open archive
type archive struct {
	archiver archiver.Archiver // archiver responsible
	remote   string            // path to the archive
	prefix   string            // prefix to add on to listings
	root     string            // root of the archive to remove from listings
	mu       sync.Mutex        // protects the following variables
	f        fs.Fs             // the archive Fs, may be nil
}

// If remote is an archive then return it otherwise return nil
func findArchive(remote string) *archive {
	// FIXME use something faster than linear search?
	for _, archiver := range archiver.Archivers {
		if strings.HasSuffix(remote, archiver.Extension) {
			return &archive{
				archiver: archiver,
				remote:   remote,
				prefix:   remote,
				root:     "",
			}
		}
	}
	return nil
}

// Find an archive buried in remote
func subArchive(remote string) *archive {
	archive := findArchive(remote)
	if archive != nil {
		return archive
	}
	parent := path.Dir(remote)
	if parent == "/" || parent == "." {
		return nil
	}
	return subArchive(parent)
}

// If remote is an archive then return it otherwise return nil
func (f *Fs) findArchive(remote string) (archive *archive) {
	archive = findArchive(remote)
	if archive != nil {
		f.mu.Lock()
		f.archives[remote] = archive
		f.mu.Unlock()
	}
	return archive
}

// Instantiate archive if it hasn't been instantiated yet
//
// This is done lazily so that we can list a directory full of
// archives without opening them all.
func (a *archive) init(ctx context.Context, f fs.Fs) (fs.Fs, error) {
	a.mu.Lock()
	defer a.mu.Unlock()
	if a.f != nil {
		return a.f, nil
	}
	newFs, err := a.archiver.New(ctx, f, a.remote, a.prefix, a.root)
	if err != nil && err != fs.ErrorIsFile {
		return nil, fmt.Errorf("failed to create archive %q: %w", a.remote, err)
	}
	a.f = newFs
	return a.f, nil
}

// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs.Fs, err error) {
	// defer log.Trace(nil, "name=%q, root=%q, m=%v", name, root, m)("f=%+v, err=%v", &outFs, &err)
	// Parse config into Options struct
	opt := new(Options)
	err = configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	remote := opt.Remote
	origRoot := root

	// If remote is empty, use the root instead
	if remote == "" {
		remote = root
		root = ""
	}
	isDirectory := strings.HasSuffix(remote, "/")
	remote = strings.TrimRight(remote, "/")
	if remote == "" {
		remote = "/"
	}
	if strings.HasPrefix(remote, name+":") {
		return nil, errors.New("can't point archive remote at itself - check the value of the upstreams setting")
	}

	_ = isDirectory

	foundArchive := subArchive(remote)
	if foundArchive != nil {
		fs.Debugf(nil, "Found archiver for %q remote %q", foundArchive.archiver.Extension, foundArchive.remote)
		// Archive path
		foundArchive.root = strings.Trim(remote[len(foundArchive.remote):], "/")
		// Path to the archive
		archiveRemote := remote[:len(foundArchive.remote)]
		// Remote is archive leaf name
		foundArchive.remote = path.Base(archiveRemote)
		foundArchive.prefix = ""
		// Point remote to archive file
		remote = archiveRemote
	}

	// Make sure to remove trailing . referring to the current dir
	if path.Base(root) == "." {
		root = strings.TrimSuffix(root, ".")
	}
	remotePath := fspath.JoinRootPath(remote, root)
	wrappedFs, err := cache.Get(ctx, remotePath)
	if err != fs.ErrorIsFile && err != nil {
		return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remote, err)
	}

	f := &Fs{
		name: name,
		//root: path.Join(remotePath, root),
		root:     origRoot,
		opt:      *opt,
		f:        wrappedFs,
		archives: make(map[string]*archive),
	}
	cache.PinUntilFinalized(f.f, f)
	// the features here are ones we could support, and they are
	// ANDed with the ones from wrappedFs
	f.features = (&fs.Features{
		CaseInsensitive:         true,
		DuplicateFiles:          false,
		ReadMimeType:            true,
		WriteMimeType:           true,
		CanHaveEmptyDirectories: true,
		BucketBased:             true,
		SetTier:                 true,
		GetTier:                 true,
		ReadMetadata:            true,
		WriteMetadata:           true,
		UserMetadata:            true,
		PartialUploads:          true,
	}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)

	if foundArchive != nil {
		fs.Debugf(f, "Root is an archive")
		if err != fs.ErrorIsFile {
			return nil, fmt.Errorf("expecting to find a file at %q", remote)
		}
		return foundArchive.init(ctx, f.f)
	}
	// Correct root if definitely pointing to a file
	if err == fs.ErrorIsFile {
		f.root = path.Dir(f.root)
		if f.root == "." || f.root == "/" {
			f.root = ""
		}
	}
	return f, err
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	return fmt.Sprintf("archive root '%s'", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return f.f.Rmdir(ctx, dir)
}

// Hashes returns the hash sets supported by the wrapped remote
func (f *Fs) Hashes() hash.Set {
	return f.f.Hashes()
}

// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	return f.f.Mkdir(ctx, dir)
}

// Purge all files in the directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
	do := f.f.Features().Purge
	if do == nil {
		return fs.ErrorCantPurge
	}
	return do(ctx, dir)
}

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	do := f.f.Features().Copy
	if do == nil {
		return nil, fs.ErrorCantCopy
	}
	// FIXME
	// o, ok := src.(*Object)
	// if !ok {
	// 	return nil, fs.ErrorCantCopy
	// }
	return do(ctx, src, remote)
}

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	do := f.f.Features().Move
	if do == nil {
		return nil, fs.ErrorCantMove
	}
	// FIXME
	// o, ok := src.(*Object)
	// if !ok {
	// 	return nil, fs.ErrorCantMove
	// }
	return do(ctx, src, remote)
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
	do := f.f.Features().DirMove
	if do == nil {
		return fs.ErrorCantDirMove
	}
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
	return do(ctx, srcFs.f, srcRemote, dstRemote)
}

// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
// At least one value will be written to the channel,
// specifying the initial value and updated values might
// follow. A 0 Duration should pause the polling.
// The ChangeNotify implementation must empty the channel
// regularly. When the channel gets closed, the implementation
// should stop polling and release resources.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), ch <-chan time.Duration) {
	do := f.f.Features().ChangeNotify
	if do == nil {
		return
	}
	wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
		// fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
		notifyFunc(path, entryType)
	}
	do(ctx, wrappedNotifyFunc, ch)
}

// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
	do := f.f.Features().DirCacheFlush
	if do != nil {
		do()
	}
}

func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) {
	var o fs.Object
	var err error
	if stream {
		o, err = f.f.Features().PutStream(ctx, in, src, options...)
	} else {
		o, err = f.f.Put(ctx, in, src, options...)
	}
	if err != nil {
		return nil, err
	}
	return o, nil
}

// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	o, err := f.NewObject(ctx, src.Remote())
	switch err {
	case nil:
		return o, o.Update(ctx, in, src, options...)
	case fs.ErrorObjectNotFound:
		return f.put(ctx, in, src, false, options...)
	default:
		return nil, err
	}
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	o, err := f.NewObject(ctx, src.Remote())
	switch err {
	case nil:
		return o, o.Update(ctx, in, src, options...)
	case fs.ErrorObjectNotFound:
		return f.put(ctx, in, src, true, options...)
	default:
		return nil, err
	}
}

// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	do := f.f.Features().About
	if do == nil {
		return nil, errors.New("not supported by underlying remote")
	}
	return do(ctx)
}

// Find the Fs for the directory
func (f *Fs) findFs(ctx context.Context, dir string) (subFs fs.Fs, err error) {
	f.mu.Lock()
	defer f.mu.Unlock()

	subFs = f.f

	// FIXME should do this with a better datastructure like a prefix tree
	// FIXME want to find the longest first otherwise nesting won't work
	dirSlash := dir + "/"
	for archiverRemote, archive := range f.archives {
		subRemote := archiverRemote + "/"
		if strings.HasPrefix(dirSlash, subRemote) {
			subFs, err = archive.init(ctx, f.f)
			if err != nil {
				return nil, err
			}
			break
		}
	}

	return subFs, nil
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)

	subFs, err := f.findFs(ctx, dir)
	if err != nil {
		return nil, err
	}

	entries, err = subFs.List(ctx, dir)
	if err != nil {
		return nil, err
	}
	for i, entry := range entries {
		// Can only unarchive files
		if o, ok := entry.(fs.Object); ok {
			remote := o.Remote()
			archive := f.findArchive(remote)
			if archive != nil {
				// Overwrite entry with directory
				entries[i] = fs.NewDir(remote, o.ModTime(ctx))
			}
		}
	}
	return entries, nil
}

// NewObject creates a new remote archive file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	dir := path.Dir(remote)
	if dir == "/" || dir == "." {
		dir = ""
	}

	subFs, err := f.findFs(ctx, dir)
	if err != nil {
		return nil, err
	}

	o, err := subFs.NewObject(ctx, remote)
	if err != nil {
		return nil, err
	}
	return o, nil
}

// Precision is the greatest precision of all the archivers
func (f *Fs) Precision() time.Duration {
	return time.Second
}

// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
	if do := f.f.Features().Shutdown; do != nil {
		return do(ctx)
	}
	return nil
}

// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
	do := f.f.Features().PublicLink
	if do == nil {
		return "", errors.New("PublicLink not supported")
	}
	return do(ctx, remote, expire, unlink)
}

// PutUnchecked in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
//
// May create duplicates or return errors if src already
// exists.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	do := f.f.Features().PutUnchecked
	if do == nil {
		return nil, errors.New("can't PutUnchecked")
	}
	o, err := do(ctx, in, src, options...)
	if err != nil {
		return nil, err
	}
	return o, nil
}

// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
	if len(dirs) == 0 {
		return nil
	}
	do := f.f.Features().MergeDirs
	if do == nil {
		return errors.New("MergeDirs not supported")
	}
	return do(ctx, dirs)
}

// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
func (f *Fs) CleanUp(ctx context.Context) error {
	do := f.f.Features().CleanUp
	if do == nil {
		return errors.New("not supported by underlying remote")
	}
	return do(ctx)
}

// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
	do := f.f.Features().OpenWriterAt
	if do == nil {
		return nil, fs.ErrorNotImplemented
	}
	return do(ctx, remote, size)
}

// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
	return f.f
}

// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
	return f.wrapper
}

// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
	f.wrapper = wrapper
}

// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
	do := f.f.Features().OpenChunkWriter
	if do == nil {
		return info, nil, fs.ErrorNotImplemented
	}
	return do(ctx, remote, src, options...)
}

// UserInfo returns info about the connected user
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
	do := f.f.Features().UserInfo
	if do == nil {
		return nil, fs.ErrorNotImplemented
	}
	return do(ctx)
}

// Disconnect the current user
func (f *Fs) Disconnect(ctx context.Context) error {
	do := f.f.Features().Disconnect
	if do == nil {
		return fs.ErrorNotImplemented
	}
	return do(ctx)
}

// Check the interfaces are satisfied
var (
	_ fs.Fs              = (*Fs)(nil)
	_ fs.Purger          = (*Fs)(nil)
	_ fs.PutStreamer     = (*Fs)(nil)
	_ fs.Copier          = (*Fs)(nil)
	_ fs.Mover           = (*Fs)(nil)
	_ fs.DirMover        = (*Fs)(nil)
	_ fs.DirCacheFlusher = (*Fs)(nil)
	_ fs.ChangeNotifier  = (*Fs)(nil)
	_ fs.Abouter         = (*Fs)(nil)
	_ fs.Shutdowner      = (*Fs)(nil)
	_ fs.PublicLinker    = (*Fs)(nil)
	_ fs.PutUncheckeder  = (*Fs)(nil)
	_ fs.MergeDirser     = (*Fs)(nil)
	_ fs.CleanUpper      = (*Fs)(nil)
	_ fs.OpenWriterAter  = (*Fs)(nil)
	_ fs.OpenChunkWriter = (*Fs)(nil)
	_ fs.UserInfoer      = (*Fs)(nil)
	_ fs.Disconnecter    = (*Fs)(nil)
	// FIXME _ fs.FullObject = (*Object)(nil)
)
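[Editorial illustration, not part of the diff] To make the path handling in NewFs and subArchive above concrete, here is a minimal, self-contained sketch of how a remote is split into the archive file and the path inside it. The extension list and helper name below are illustrative only; the real backend takes its extensions from the registered archivers.

package main

import (
	"fmt"
	"path"
	"strings"
)

// Hypothetical extension list - the real code consults archiver.Archivers.
var extensions = []string{".zip", ".sqfs"}

// splitArchivePath walks up the path until a component with a known
// archive extension is found, mirroring subArchive/findArchive above.
func splitArchivePath(remote string) (archiveFile, inside string, ok bool) {
	p := remote
	for {
		for _, ext := range extensions {
			if strings.HasSuffix(p, ext) {
				return p, strings.Trim(remote[len(p):], "/"), true
			}
		}
		parent := path.Dir(p)
		if parent == "/" || parent == "." {
			return "", "", false
		}
		p = parent
	}
}

func main() {
	file, inside, ok := splitArchivePath("backups/2020/photos.zip/holiday/img1.jpg")
	fmt.Println(file, inside, ok)
	// Output: backups/2020/photos.zip holiday/img1.jpg true
}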
@@ -1,221 +0,0 @@
//go:build !plan9

package archive

import (
	"bytes"
	"context"
	"fmt"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"strconv"
	"strings"
	"testing"

	_ "github.com/rclone/rclone/backend/local"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/cache"
	"github.com/rclone/rclone/fs/filter"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// FIXME need to test Open with seek

// run - run a shell command
func run(t *testing.T, args ...string) {
	cmd := exec.Command(args[0], args[1:]...)
	fs.Debugf(nil, "run args = %v", args)
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf(`
----------------------------
Failed to run %v: %v
Command output was:
%s
----------------------------
`, args, err, out)
	}
}

// check the dst and src are identical
func checkTree(ctx context.Context, name string, t *testing.T, dstArchive, src string, expectedCount int) {
	t.Run(name, func(t *testing.T) {
		fs.Debugf(nil, "check %q vs %q", dstArchive, src)
		Farchive, err := cache.Get(ctx, dstArchive)
		if err != fs.ErrorIsFile {
			require.NoError(t, err)
		}
		Fsrc, err := cache.Get(ctx, src)
		if err != fs.ErrorIsFile {
			require.NoError(t, err)
		}

		var matches bytes.Buffer
		opt := operations.CheckOpt{
			Fdst:  Farchive,
			Fsrc:  Fsrc,
			Match: &matches,
		}

		for _, action := range []string{"Check", "Download"} {
			t.Run(action, func(t *testing.T) {
				matches.Reset()
				if action == "Download" {
					assert.NoError(t, operations.CheckDownload(ctx, &opt))
				} else {
					assert.NoError(t, operations.Check(ctx, &opt))
				}
				if expectedCount > 0 {
					assert.Equal(t, expectedCount, strings.Count(matches.String(), "\n"))
				}
			})
		}

		t.Run("NewObject", func(t *testing.T) {
			// Check we can run NewObject on all files and read them
			assert.NoError(t, operations.ListFn(ctx, Fsrc, func(srcObj fs.Object) {
				if t.Failed() {
					return
				}
				remote := srcObj.Remote()
				archiveObj, err := Farchive.NewObject(ctx, remote)
				require.NoError(t, err, remote)
				assert.Equal(t, remote, archiveObj.Remote(), remote)

				// Test that the contents are the same
				archiveBuf := fstests.ReadObject(ctx, t, archiveObj, -1)
				srcBuf := fstests.ReadObject(ctx, t, srcObj, -1)
				assert.Equal(t, srcBuf, archiveBuf)

				if len(srcBuf) < 81 {
					return
				}

				// Tests that Open works with SeekOption
				assert.Equal(t, srcBuf[50:], fstests.ReadObject(ctx, t, archiveObj, -1, &fs.SeekOption{Offset: 50}), "contents differ after seek")

				// Tests that Open works with RangeOption
				for _, test := range []struct {
					ro                 fs.RangeOption
					wantStart, wantEnd int
				}{
					{fs.RangeOption{Start: 5, End: 15}, 5, 16},
					{fs.RangeOption{Start: 80, End: -1}, 80, len(srcBuf)},
					{fs.RangeOption{Start: 81, End: 100000}, 81, len(srcBuf)},
					{fs.RangeOption{Start: -1, End: 20}, len(srcBuf) - 20, len(srcBuf)}, // if start is omitted this means get the final bytes
					// {fs.RangeOption{Start: -1, End: -1}, 0, len(srcBuf)}, - this seems to work but the RFC doesn't define it
				} {
					got := fstests.ReadObject(ctx, t, archiveObj, -1, &test.ro)
					foundAt := strings.Index(srcBuf, got)
					help := fmt.Sprintf("%#v failed want [%d:%d] got [%d:%d]", test.ro, test.wantStart, test.wantEnd, foundAt, foundAt+len(got))
					assert.Equal(t, srcBuf[test.wantStart:test.wantEnd], got, help)
				}

				// Test that the modtimes are correct
				fstest.AssertTimeEqualWithPrecision(t, remote, srcObj.ModTime(ctx), archiveObj.ModTime(ctx), Farchive.Precision())

				// Test that the sizes are correct
				assert.Equal(t, srcObj.Size(), archiveObj.Size())

				// Test that Strings are OK
				assert.Equal(t, srcObj.String(), archiveObj.String())
			}))
		})

		// t.Logf("Fdst ------------- %v", Fdst)
		// operations.List(ctx, Fdst, os.Stdout)
		// t.Logf("Fsrc ------------- %v", Fsrc)
		// operations.List(ctx, Fsrc, os.Stdout)
	})
}

// test creating and reading back some archives
//
// Note that this uses rclone and zip as external binaries.
func testArchive(t *testing.T, archiveName string, archiveFn func(t *testing.T, output, input string)) {
	ctx := context.Background()
	checkFiles := 1000

	// create random test input files
	inputRoot := t.TempDir()
	input := filepath.Join(inputRoot, archiveName)
	require.NoError(t, os.Mkdir(input, 0777))
	run(t, "rclone", "test", "makefiles", "--files", strconv.Itoa(checkFiles), "--ascii", input)

	// Create the archive
	output := t.TempDir()
	zipFile := path.Join(output, archiveName)
	archiveFn(t, zipFile, input)

	// Check the archive itself
	checkTree(ctx, "Archive", t, ":archive:"+zipFile, input, checkFiles)

	// Now check a subdirectory
	fis, err := os.ReadDir(input)
	require.NoError(t, err)
	subDir := "NOT FOUND"
	aFile := "NOT FOUND"
	for _, fi := range fis {
		if fi.IsDir() {
			subDir = fi.Name()
		} else {
			aFile = fi.Name()
		}
	}
	checkTree(ctx, "SubDir", t, ":archive:"+zipFile+"/"+subDir, filepath.Join(input, subDir), 0)

	// Now check a single file
	fiCtx, fi := filter.AddConfig(ctx)
	require.NoError(t, fi.AddRule("+ "+aFile))
	require.NoError(t, fi.AddRule("- *"))
	checkTree(fiCtx, "SingleFile", t, ":archive:"+zipFile+"/"+aFile, filepath.Join(input, aFile), 0)

	// Now check the level above
	checkTree(ctx, "Root", t, ":archive:"+output, inputRoot, checkFiles)
	// run(t, "cp", "-a", inputRoot, output, "/tmp/test-"+archiveName)
}

// Make sure we have the executable named
func skipIfNoExe(t *testing.T, exeName string) {
	_, err := exec.LookPath(exeName)
	if err != nil {
		t.Skipf("%s executable not installed", exeName)
	}
}

// Test creating and reading back some archives
//
// Note that this uses rclone and zip as external binaries.
func TestArchiveZip(t *testing.T) {
	fstest.Initialise()
	skipIfNoExe(t, "zip")
	skipIfNoExe(t, "rclone")
	testArchive(t, "test.zip", func(t *testing.T, output, input string) {
		oldcwd, err := os.Getwd()
		require.NoError(t, err)
		require.NoError(t, os.Chdir(input))
		defer func() {
			require.NoError(t, os.Chdir(oldcwd))
		}()
		run(t, "zip", "-9r", output, ".")
	})
}

// Test creating and reading back some archives
//
// Note that this uses rclone and squashfs as external binaries.
func TestArchiveSquashfs(t *testing.T) {
	fstest.Initialise()
	skipIfNoExe(t, "mksquashfs")
	skipIfNoExe(t, "rclone")
	testArchive(t, "test.sqfs", func(t *testing.T, output, input string) {
		run(t, "mksquashfs", input, output)
	})
}
@@ -1,67 +0,0 @@
//go:build !plan9

// Test Archive filesystem interface
package archive_test

import (
	"testing"

	_ "github.com/rclone/rclone/backend/local"
	_ "github.com/rclone/rclone/backend/memory"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
)

var (
	unimplementableFsMethods = []string{"ListR", "ListP", "MkdirMetadata", "DirSetModTime"}
	// In these tests we receive objects from the underlying remote which don't implement these methods
	unimplementableObjectMethods = []string{"GetTier", "ID", "Metadata", "MimeType", "SetTier", "UnWrap", "SetMetadata"}
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	if *fstest.RemoteName == "" {
		t.Skip("Skipping as -remote not set")
	}
	fstests.Run(t, &fstests.Opt{
		RemoteName:                   *fstest.RemoteName,
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
	})
}

func TestLocal(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	remote := t.TempDir()
	name := "TestArchiveLocal"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":",
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "archive"},
			{Name: name, Key: "remote", Value: remote},
		},
		QuickTestOK:                  true,
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
	})
}

func TestMemory(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	remote := ":memory:"
	name := "TestArchiveMemory"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":",
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "archive"},
			{Name: name, Key: "remote", Value: remote},
		},
		QuickTestOK:                  true,
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
	})
}
@@ -1,7 +0,0 @@
// Build for archive for unsupported platforms to stop go complaining
// about "no buildable Go source files "

//go:build plan9

// Package archive implements a backend to access archive files in a remote
package archive
@@ -1,24 +0,0 @@
// Package archiver registers all the archivers
package archiver

import (
	"context"

	"github.com/rclone/rclone/fs"
)

// Archiver describes an archive package
type Archiver struct {
	// New constructs an Fs from the (wrappedFs, remote) with the objects
	// prefix with prefix and rooted at root
	New       func(ctx context.Context, f fs.Fs, remote, prefix, root string) (fs.Fs, error)
	Extension string
}

// Archivers is a slice of all registered archivers
var Archivers []Archiver

// Register adds the archivers provided to the list of known archivers
func Register(as ...Archiver) {
	Archivers = append(Archivers, as...)
}
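[Editorial illustration, not part of the diff] The Archiver struct and Register function above form a small plug-in registry: each format package registers a constructor keyed by file extension, and the archive backend's findArchive loop matches on that extension. As a hedged sketch (the real registrations for zip and squashfs live in their own packages; the squashfs one appears later in this diff), a hypothetical new format would wire itself in like this:

package myformat // hypothetical archiver package, for illustration only

import (
	"context"

	"github.com/rclone/rclone/backend/archive/archiver"
	"github.com/rclone/rclone/fs"
)

// New would construct a read-only fs.Fs presenting the contents of the
// .myfmt archive at remote on wrappedFs (real implementation not shown).
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) {
	// ... open the archive via the VFS and return an fs.Fs view of it ...
	return nil, fs.ErrorNotImplemented
}

func init() {
	// Make the extension known to the archive backend.
	archiver.Register(archiver.Archiver{
		New:       New,
		Extension: ".myfmt",
	})
}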
@@ -1,233 +0,0 @@
// Package base is a base archive Fs
package base

import (
	"context"
	"errors"
	"fmt"
	"io"
	"path"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/vfs"
)

// Fs represents a wrapped fs.Fs
type Fs struct {
	f           fs.Fs
	wrapper     fs.Fs
	name        string
	features    *fs.Features // optional features
	vfs         *vfs.VFS
	node        vfs.Node // archive object
	remote      string   // remote of the archive object
	prefix      string   // position for objects
	prefixSlash string   // position for objects with a slash on
	root        string   // position to read from within the archive
}

var errNotImplemented = errors.New("internal error: method not implemented in archiver")

// New constructs an Fs from the (wrappedFs, remote) with the objects
// prefix with prefix and rooted at root
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (*Fs, error) {
	// FIXME vfs cache?
	// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
	fs.Debugf(nil, "New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
	VFS := vfs.New(wrappedFs, nil)
	node, err := VFS.Stat(remote)
	if err != nil {
		return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
	}

	f := &Fs{
		f:           wrappedFs,
		name:        path.Join(fs.ConfigString(wrappedFs), remote),
		vfs:         VFS,
		node:        node,
		remote:      remote,
		root:        root,
		prefix:      prefix,
		prefixSlash: prefix + "/",
	}

	// FIXME
	// the features here are ones we could support, and they are
	// ANDed with the ones from wrappedFs
	//
	// FIXME some of these need to be forced on - CanHaveEmptyDirectories
	f.features = (&fs.Features{
		CaseInsensitive:         false,
		DuplicateFiles:          false,
		ReadMimeType:            false, // MimeTypes not supported with gzip
		WriteMimeType:           false,
		BucketBased:             false,
		CanHaveEmptyDirectories: true,
	}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)

	return f, nil
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// String returns a description of the FS
func (f *Fs) String() string {
	return f.name
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	return nil, errNotImplemented
}

// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
	return nil, errNotImplemented
}

// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
	return time.Second
}

// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	return vfs.EROFS
}

// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return vfs.EROFS
}

// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
	return nil, vfs.EROFS
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.None)
}

// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
	return f.f
}

// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
	return f.wrapper
}

// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
	f.wrapper = wrapper
}

// Object describes an object to be read from the raw zip file
type Object struct {
	f      *Fs
	remote string
}

// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
	return o.f
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.Remote()
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Size returns the size of the file
func (o *Object) Size() int64 {
	return -1
}

// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
	return time.Now()
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	return vfs.EROFS
}

// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
	return true
}

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
	return "", hash.ErrUnsupported
}

// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
	return nil, errNotImplemented
}

// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	return vfs.EROFS
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	return vfs.EROFS
}

// Check the interfaces are satisfied
var (
	_ fs.Fs        = (*Fs)(nil)
	_ fs.UnWrapper = (*Fs)(nil)
	_ fs.Wrapper   = (*Fs)(nil)
	_ fs.Object    = (*Object)(nil)
)
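[Editorial illustration, not part of the diff] The base package above supplies the read-only plumbing (VFS setup, feature masking, EROFS write stubs) and leaves List, NewObject and Object.Open returning errNotImplemented for a concrete archiver to supply. As a hedged sketch of calling its constructor directly - the import path and local paths below are assumptions for illustration, not taken from the diff:

package main

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/backend/archive/base" // assumed package path
	_ "github.com/rclone/rclone/backend/local"
	"github.com/rclone/rclone/fs/cache"
)

func main() {
	ctx := context.Background()
	// Hypothetical local remote containing archive.zip.
	wrapped, err := cache.Get(ctx, "/tmp/data")
	if err != nil {
		panic(err)
	}
	// Stat the archive file and build the read-only skeleton Fs.
	f, err := base.New(ctx, wrapped, "archive.zip", "archive.zip", "")
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Name(), f.Root())
}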
@@ -1,165 +0,0 @@
package squashfs

// Could just be using bare object Open with RangeRequest which
// would transfer the minimum amount of data but may be slower.

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
	"sync"

	"github.com/diskfs/go-diskfs/backend"
	"github.com/rclone/rclone/vfs"
)

// Cache file handles for accessing the file
type cache struct {
	node  vfs.Node
	fhsMu sync.Mutex
	fhs   []cacheHandle
}

// A cached file handle
type cacheHandle struct {
	offset int64
	fh     vfs.Handle
}

// Make a new cache
func newCache(node vfs.Node) *cache {
	return &cache{
		node: node,
	}
}

// Get a vfs.Handle from the pool or open one
//
// This tries to find an open file handle which doesn't require seeking.
func (c *cache) open(off int64) (fh vfs.Handle, err error) {
	c.fhsMu.Lock()
	defer c.fhsMu.Unlock()

	if len(c.fhs) > 0 {
		// Look for exact match first
		for i, cfh := range c.fhs {
			if cfh.offset == off {
				// fs.Debugf(nil, "CACHE MATCH")
				c.fhs = append(c.fhs[:i], c.fhs[i+1:]...)
				return cfh.fh, nil
			}
		}
		// fs.Debugf(nil, "CACHE MISS")
		// Just take the first one if not found
		cfh := c.fhs[0]
		c.fhs = c.fhs[1:]
		return cfh.fh, nil
	}

	fh, err = c.node.Open(os.O_RDONLY)
	if err != nil {
		return nil, fmt.Errorf("failed to open squashfs archive: %w", err)
	}

	return fh, nil
}

// Close a vfs.Handle or return it to the pool
//
// off should be the offset the file handle would read from without seeking
func (c *cache) close(fh vfs.Handle, off int64) {
	c.fhsMu.Lock()
	defer c.fhsMu.Unlock()

	c.fhs = append(c.fhs, cacheHandle{
		offset: off,
		fh:     fh,
	})
}

// ReadAt reads len(p) bytes into p starting at offset off in the underlying
// input source. It returns the number of bytes read (0 <= n <= len(p)) and any
// error encountered.
//
// When ReadAt returns n < len(p), it returns a non-nil error explaining why
// more bytes were not returned. In this respect, ReadAt is stricter than Read.
//
// Even if ReadAt returns n < len(p), it may use all of p as scratch
// space during the call. If some data is available but not len(p) bytes,
// ReadAt blocks until either all the data is available or an error occurs.
// In this respect ReadAt is different from Read.
//
// If the n = len(p) bytes returned by ReadAt are at the end of the input
// source, ReadAt may return either err == EOF or err == nil.
//
// If ReadAt is reading from an input source with a seek offset, ReadAt should
// not affect nor be affected by the underlying seek offset.
//
// Clients of ReadAt can execute parallel ReadAt calls on the same input
// source.
//
// Implementations must not retain p.
func (c *cache) ReadAt(p []byte, off int64) (n int, err error) {
	fh, err := c.open(off)
	if err != nil {
		return n, err
	}
	defer func() {
		c.close(fh, off+int64(len(p)))
	}()
	// fs.Debugf(nil, "ReadAt(p[%d], off=%d, fh=%p)", len(p), off, fh)
	return fh.ReadAt(p, off)
}

var errCacheNotImplemented = errors.New("internal error: squashfs cache doesn't implement method")

// WriteAt method dummy stub to satisfy interface
func (c *cache) WriteAt(p []byte, off int64) (n int, err error) {
	return 0, errCacheNotImplemented
}

// Seek method dummy stub to satisfy interface
func (c *cache) Seek(offset int64, whence int) (int64, error) {
	return 0, errCacheNotImplemented
}

// Read method dummy stub to satisfy interface
func (c *cache) Read(p []byte) (n int, err error) {
	return 0, errCacheNotImplemented
}

func (c *cache) Stat() (fs.FileInfo, error) {
	return nil, errCacheNotImplemented
}

// Close the file
func (c *cache) Close() (err error) {
	c.fhsMu.Lock()
	defer c.fhsMu.Unlock()

	// Close any open file handles
	for i := range c.fhs {
		fh := &c.fhs[i]
		newErr := fh.fh.Close()
		if err == nil {
			err = newErr
		}
	}
	c.fhs = nil
	return err
}

// Sys returns OS-specific file for ioctl calls via fd
func (c *cache) Sys() (*os.File, error) {
	return nil, errCacheNotImplemented
}

// Writable returns file for read-write operations
func (c *cache) Writable() (backend.WritableFile, error) {
	return nil, errCacheNotImplemented
}

// check interfaces
var _ backend.Storage = (*cache)(nil)
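[Editorial illustration, not part of the diff] The handle pool above exists so that the mostly-sequential reads go-diskfs issues reuse a handle that is already positioned at the right offset, instead of opening or seeking for every ReadAt. The policy can be shown with a tiny stand-in type - the pool below is a simplification for illustration, not the real vfs.Handle machinery:

package main

import "fmt"

// pool mimics the reuse policy of cache above: close() records the offset
// the handle would read from next, and open() hands that handle back to
// any caller starting exactly there, so sequential reads never seek.
type pool struct {
	free map[int64]int // next read offset -> handle id
	next int
}

func (p *pool) open(off int64) int {
	if id, ok := p.free[off]; ok {
		delete(p.free, off)
		return id // sequential read: reuse without seeking
	}
	p.next++
	return p.next // would open a fresh file handle
}

func (p *pool) close(id int, off int64) { p.free[off] = id }

func main() {
	p := &pool{free: map[int64]int{}}
	off := int64(0)
	for i := 0; i < 3; i++ {
		id := p.open(off)
		fmt.Printf("read 4096 bytes at %d with handle %d\n", off, id)
		off += 4096
		p.close(id, off)
	}
	// All three reads use handle 1 because each starts where the last ended.
}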
@@ -1,446 +0,0 @@
|
|||||||
// Package squashfs implements a squashfs archiver for the archive backend
|
|
||||||
package squashfs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"path"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/diskfs/go-diskfs/filesystem/squashfs"
|
|
||||||
"github.com/rclone/rclone/backend/archive/archiver"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
|
||||||
"github.com/rclone/rclone/fs/log"
|
|
||||||
"github.com/rclone/rclone/lib/readers"
|
|
||||||
"github.com/rclone/rclone/vfs"
|
|
||||||
"github.com/rclone/rclone/vfs/vfscommon"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
archiver.Register(archiver.Archiver{
|
|
||||||
New: New,
|
|
||||||
Extension: ".sqfs",
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fs represents a wrapped fs.Fs
|
|
||||||
type Fs struct {
|
|
||||||
f fs.Fs
|
|
||||||
wrapper fs.Fs
|
|
||||||
name string
|
|
||||||
features *fs.Features // optional features
|
|
||||||
vfs *vfs.VFS
|
|
||||||
sqfs *squashfs.FileSystem // interface to the squashfs
|
|
||||||
c *cache
|
|
||||||
node vfs.Node // squashfs file object - set if reading
|
|
||||||
remote string // remote of the squashfs file object
|
|
||||||
prefix string // position for objects
|
|
||||||
prefixSlash string // position for objects with a slash on
|
|
||||||
root string // position to read from within the archive
|
|
||||||
}
|
|
||||||
|
|
||||||
// New constructs an Fs from the (wrappedFs, remote) with the objects
|
|
||||||
// prefix with prefix and rooted at root
|
|
||||||
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) {
|
|
||||||
// FIXME vfs cache?
|
|
||||||
// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
|
|
||||||
fs.Debugf(nil, "Squashfs: New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
|
|
||||||
vfsOpt := vfscommon.Opt
|
|
||||||
vfsOpt.ReadWait = 0
|
|
||||||
VFS := vfs.New(wrappedFs, &vfsOpt)
|
|
||||||
node, err := VFS.Stat(remote)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
c := newCache(node)
|
|
||||||
|
|
||||||
// FIXME blocksize
|
|
||||||
sqfs, err := squashfs.Read(c, node.Size(), 0, 1024*1024)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to read squashfs: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
f := &Fs{
|
|
||||||
f: wrappedFs,
|
|
||||||
name: path.Join(fs.ConfigString(wrappedFs), remote),
|
|
||||||
vfs: VFS,
|
|
||||||
node: node,
|
|
||||||
sqfs: sqfs,
|
|
||||||
c: c,
|
|
||||||
remote: remote,
|
|
||||||
root: strings.Trim(root, "/"),
|
|
||||||
prefix: prefix,
|
|
||||||
prefixSlash: prefix + "/",
|
|
||||||
}
|
|
||||||
if prefix == "" {
|
|
||||||
f.prefixSlash = ""
|
|
||||||
}
|
|
||||||
|
|
||||||
singleObject := false
|
|
||||||
|
|
||||||
// Find the directory the root points to
|
|
||||||
if f.root != "" && !strings.HasSuffix(root, "/") {
|
|
||||||
native, err := f.toNative("")
|
|
||||||
if err == nil {
|
|
||||||
native = strings.TrimRight(native, "/")
|
|
||||||
_, err := f.newObjectNative(native)
|
|
||||||
if err == nil {
|
|
||||||
// If it pointed to a file, find the directory above
|
|
||||||
f.root = path.Dir(f.root)
|
|
||||||
if f.root == "." || f.root == "/" {
|
|
||||||
f.root = ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// FIXME
|
|
||||||
// the features here are ones we could support, and they are
|
|
||||||
// ANDed with the ones from wrappedFs
|
|
||||||
//
|
|
||||||
// FIXME some of these need to be forced on - CanHaveEmptyDirectories
|
|
||||||
f.features = (&fs.Features{
|
|
||||||
CaseInsensitive: false,
|
|
||||||
DuplicateFiles: false,
|
|
||||||
ReadMimeType: false, // MimeTypes not supported with squashfs
|
|
||||||
WriteMimeType: false,
|
|
||||||
BucketBased: false,
|
|
||||||
CanHaveEmptyDirectories: true,
|
|
||||||
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
|
||||||
|
|
||||||
if singleObject {
|
|
||||||
return f, fs.ErrorIsFile
|
|
||||||
}
|
|
||||||
return f, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name of the remote (as passed into NewFs)
|
|
||||||
func (f *Fs) Name() string {
|
|
||||||
return f.name
|
|
||||||
}
|
|
||||||
|
|
||||||
// Root of the remote (as passed into NewFs)
|
|
||||||
func (f *Fs) Root() string {
|
|
||||||
return f.root
|
|
||||||
}
|
|
||||||
|
|
||||||
// Features returns the optional features of this Fs
|
|
||||||
func (f *Fs) Features() *fs.Features {
|
|
||||||
return f.features
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns a description of the FS
|
|
||||||
func (f *Fs) String() string {
|
|
||||||
return fmt.Sprintf("Squashfs %q", f.name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// This turns a remote into a native path in the squashfs starting with a /
|
|
||||||
func (f *Fs) toNative(remote string) (string, error) {
|
|
||||||
native := strings.Trim(remote, "/")
|
|
||||||
if f.prefix == "" {
|
|
||||||
native = "/" + native
|
|
||||||
} else if native == f.prefix {
|
|
||||||
native = "/"
|
|
||||||
} else if !strings.HasPrefix(native, f.prefixSlash) {
|
|
||||||
return "", fmt.Errorf("internal error: %q doesn't start with prefix %q", native, f.prefixSlash)
|
|
||||||
} else {
|
|
||||||
native = native[len(f.prefix):]
|
|
||||||
}
|
|
||||||
if f.root != "" {
|
|
||||||
native = "/" + f.root + native
|
|
||||||
}
|
|
||||||
return native, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Turn a (nativeDir, leaf) into a remote
|
|
||||||
func (f *Fs) fromNative(nativeDir string, leaf string) string {
|
|
||||||
// fs.Debugf(nil, "nativeDir = %q, leaf = %q, root=%q", nativeDir, leaf, f.root)
|
|
||||||
dir := nativeDir
|
|
||||||
if f.root != "" {
|
|
||||||
dir = strings.TrimPrefix(dir, "/"+f.root)
|
|
||||||
}
|
|
||||||
remote := f.prefixSlash + strings.Trim(path.Join(dir, leaf), "/")
|
|
||||||
// fs.Debugf(nil, "dir = %q, remote=%q", dir, remote)
|
|
||||||
return remote
|
|
||||||
}
|
|
||||||
|
|
||||||
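// A minimal sketch (not from the original file): an in-package test with
// hypothetical values (prefix "file.sqfs", root "data") showing the round
// trip that toNative and fromNative above perform. It assumes the standard
// "testing" import and constructs Fs directly, which only works inside this
// package.
func TestPathMappingSketch(t *testing.T) {
	f := &Fs{prefix: "file.sqfs", prefixSlash: "file.sqfs/", root: "data"}
	native, err := f.toNative("file.sqfs/a/b.txt")
	if err != nil || native != "/data/a/b.txt" {
		t.Fatalf("toNative: got %q, %v", native, err)
	}
	if got := f.fromNative("/data/a", "b.txt"); got != "file.sqfs/a/b.txt" {
		t.Fatalf("fromNative: got %q", got)
	}
}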
// Convert a FileInfo into an Object from native dir
|
|
||||||
func (f *Fs) objectFromFileInfo(nativeDir string, item squashfs.FileStat) *Object {
|
|
||||||
return &Object{
|
|
||||||
fs: f,
|
|
||||||
remote: f.fromNative(nativeDir, item.Name()),
|
|
||||||
size: item.Size(),
|
|
||||||
modTime: item.ModTime(),
|
|
||||||
item: item,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// List the objects and directories in dir into entries. The
|
|
||||||
// entries can be returned in any order but should be for a
|
|
||||||
// complete directory.
|
|
||||||
//
|
|
||||||
// dir should be "" to list the root, and should not have
|
|
||||||
// trailing slashes.
|
|
||||||
//
|
|
||||||
// This should return ErrDirNotFound if the directory isn't
|
|
||||||
// found.
|
|
||||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
|
||||||
defer log.Trace(f, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
|
|
||||||
|
|
||||||
nativeDir, err := f.toNative(dir)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
items, err := f.sqfs.ReadDir(nativeDir)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("read squashfs: couldn't read directory: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
entries = make(fs.DirEntries, 0, len(items))
|
|
||||||
for _, fi := range items {
|
|
||||||
item, ok := fi.(squashfs.FileStat)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("internal error: unexpected type for %q: %T", fi.Name(), fi)
|
|
||||||
}
|
|
||||||
// fs.Debugf(item.Name(), "entry = %#v", item)
|
|
||||||
var entry fs.DirEntry
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("error reading item %q: %q", item.Name(), err)
|
|
||||||
}
|
|
||||||
if item.IsDir() {
|
|
||||||
var remote = f.fromNative(nativeDir, item.Name())
|
|
||||||
entry = fs.NewDir(remote, item.ModTime())
|
|
||||||
} else {
|
|
||||||
if item.Mode().IsRegular() {
|
|
||||||
entry = f.objectFromFileInfo(nativeDir, item)
|
|
||||||
} else {
|
|
||||||
fs.Debugf(item.Name(), "FIXME Not regular file - skipping")
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
entries = append(entries, entry)
|
|
||||||
}
|
|
||||||
|
|
||||||
// fs.Debugf(f, "dir=%q, entries=%v", dir, entries)
|
|
||||||
return entries, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// newObjectNative finds the object at the native path passed in
|
|
||||||
func (f *Fs) newObjectNative(nativePath string) (o fs.Object, err error) {
|
|
||||||
// get the path and filename
|
|
||||||
dir, leaf := path.Split(nativePath)
|
|
||||||
dir = strings.TrimRight(dir, "/")
|
|
||||||
leaf = strings.Trim(leaf, "/")
|
|
||||||
|
|
||||||
// FIXME need to detect directory not found
|
|
||||||
fis, err := f.sqfs.ReadDir(dir)
|
|
||||||
if err != nil {
|
|
||||||
|
|
||||||
return nil, fs.ErrorObjectNotFound
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, fi := range fis {
|
|
||||||
if fi.Name() == leaf {
|
|
||||||
if fi.IsDir() {
|
|
||||||
return nil, fs.ErrorNotAFile
|
|
||||||
}
|
|
||||||
item, ok := fi.(squashfs.FileStat)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("internal error: unexpected type for %q: %T", fi.Name(), fi)
|
|
||||||
}
|
|
||||||
o = f.objectFromFileInfo(dir, item)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if o == nil {
|
|
||||||
return nil, fs.ErrorObjectNotFound
|
|
||||||
}
|
|
||||||
return o, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewObject finds the Object at remote.
|
|
||||||
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
|
|
||||||
defer log.Trace(f, "remote=%q", remote)("obj=%v, err=%v", &o, &err)
|
|
||||||
|
|
||||||
nativePath, err := f.toNative(remote)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return f.newObjectNative(nativePath)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Precision of the ModTimes in this Fs
|
|
||||||
func (f *Fs) Precision() time.Duration {
|
|
||||||
return time.Second
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mkdir makes the directory (container, bucket)
|
|
||||||
//
|
|
||||||
// Shouldn't return an error if it already exists
|
|
||||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
|
||||||
return vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rmdir removes the directory (container, bucket) if empty
|
|
||||||
//
|
|
||||||
// Return an error if it doesn't exist or isn't empty
|
|
||||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
|
||||||
return vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Put in to the remote path with the modTime given of the given size
|
|
||||||
//
|
|
||||||
// May create the object even if it returns an error - if so
|
|
||||||
// will return the object and the error, otherwise will return
|
|
||||||
// nil and the error
|
|
||||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
|
|
||||||
return nil, vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hashes returns the supported hash sets.
|
|
||||||
func (f *Fs) Hashes() hash.Set {
|
|
||||||
return hash.Set(hash.None)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnWrap returns the Fs that this Fs is wrapping
|
|
||||||
func (f *Fs) UnWrap() fs.Fs {
|
|
||||||
return f.f
|
|
||||||
}
|
|
||||||
|
|
||||||
// WrapFs returns the Fs that is wrapping this Fs
|
|
||||||
func (f *Fs) WrapFs() fs.Fs {
|
|
||||||
return f.wrapper
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetWrapper sets the Fs that is wrapping this Fs
|
|
||||||
func (f *Fs) SetWrapper(wrapper fs.Fs) {
|
|
||||||
f.wrapper = wrapper
|
|
||||||
}
|
|
||||||
|
|
||||||
// Object describes an object to be read from the raw squashfs file
|
|
||||||
type Object struct {
|
|
||||||
fs *Fs
|
|
||||||
remote string
|
|
||||||
size int64
|
|
||||||
modTime time.Time
|
|
||||||
item squashfs.FileStat
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fs returns read only access to the Fs that this object is part of
|
|
||||||
func (o *Object) Fs() fs.Info {
|
|
||||||
return o.fs
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return a string version
|
|
||||||
func (o *Object) String() string {
|
|
||||||
if o == nil {
|
|
||||||
return "<nil>"
|
|
||||||
}
|
|
||||||
return o.Remote()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Turn a squashfs path into a full path for the parent Fs
|
|
||||||
// func (o *Object) path(remote string) string {
|
|
||||||
// return path.Join(o.fs.prefix, remote)
|
|
||||||
// }
|
|
||||||
|
|
||||||
// Remote returns the remote path
|
|
||||||
func (o *Object) Remote() string {
|
|
||||||
return o.remote
|
|
||||||
}
|
|
||||||
|
|
||||||
// Size returns the size of the file
|
|
||||||
func (o *Object) Size() int64 {
|
|
||||||
return o.size
|
|
||||||
}
|
|
||||||
|
|
||||||
// ModTime returns the modification time of the object
|
|
||||||
//
|
|
||||||
// It attempts to read the objects mtime and if that isn't present the
|
|
||||||
// LastModified returned in the http headers
|
|
||||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
|
||||||
return o.modTime
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetModTime sets the modification time of the local fs object
|
|
||||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
|
||||||
return vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Storable returns a boolean indicating if this object is storable
|
|
||||||
func (o *Object) Storable() bool {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hash returns the selected checksum of the file
|
|
||||||
// If no checksum is available it returns ""
|
|
||||||
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
|
|
||||||
return "", hash.ErrUnsupported
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
|
||||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
|
||||||
var offset, limit int64 = 0, -1
|
|
||||||
for _, option := range options {
|
|
||||||
switch x := option.(type) {
|
|
||||||
case *fs.SeekOption:
|
|
||||||
offset = x.Offset
|
|
||||||
case *fs.RangeOption:
|
|
||||||
offset, limit = x.Decode(o.Size())
|
|
||||||
default:
|
|
||||||
if option.Mandatory() {
|
|
||||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
remote, err := o.fs.toNative(o.remote)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
fs.Debugf(o, "Opening %q", remote)
|
|
||||||
//fh, err := o.fs.sqfs.OpenFile(remote, os.O_RDONLY)
|
|
||||||
fh, err := o.item.Open()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// discard data from start as necessary
|
|
||||||
if offset > 0 {
|
|
||||||
_, err = fh.Seek(offset, io.SeekStart)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// If limited then don't return everything
|
|
||||||
if limit >= 0 {
|
|
||||||
fs.Debugf(nil, "limit=%d, offset=%d, options=%v", limit, offset, options)
|
|
||||||
return readers.NewLimitedReadCloser(fh, limit), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fh, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
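// A minimal usage sketch (not from the original file): how a caller exercises
// the fs.RangeOption handling in Open above to read the first KiB of an
// object. Assumes the "context", "io" and rclone "fs" imports; obj would be
// an fs.Object returned by NewObject.
func readFirstKiBSketch(ctx context.Context, obj fs.Object) ([]byte, error) {
	rc, err := obj.Open(ctx, &fs.RangeOption{Start: 0, End: 1023})
	if err != nil {
		return nil, err
	}
	defer func() { _ = rc.Close() }()
	return io.ReadAll(rc) // Open has already applied the offset and limit
}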
// Update in to the object with the modTime given of the given size
|
|
||||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
|
||||||
return vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove an object
|
|
||||||
func (o *Object) Remove(ctx context.Context) error {
|
|
||||||
return vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check the interfaces are satisfied
|
|
||||||
var (
|
|
||||||
_ fs.Fs = (*Fs)(nil)
|
|
||||||
_ fs.UnWrapper = (*Fs)(nil)
|
|
||||||
_ fs.Wrapper = (*Fs)(nil)
|
|
||||||
_ fs.Object = (*Object)(nil)
|
|
||||||
)
|
|
||||||
@@ -1,385 +0,0 @@
|
|||||||
// Package zip implements a zip archiver for the archive backend
|
|
||||||
package zip
|
|
||||||
|
|
||||||
import (
|
|
||||||
"archive/zip"
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/backend/archive/archiver"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/dirtree"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
|
||||||
"github.com/rclone/rclone/fs/log"
|
|
||||||
"github.com/rclone/rclone/lib/readers"
|
|
||||||
"github.com/rclone/rclone/vfs"
|
|
||||||
"github.com/rclone/rclone/vfs/vfscommon"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
archiver.Register(archiver.Archiver{
|
|
||||||
New: New,
|
|
||||||
Extension: ".zip",
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fs represents a wrapped fs.Fs
|
|
||||||
type Fs struct {
|
|
||||||
f fs.Fs
|
|
||||||
wrapper fs.Fs
|
|
||||||
name string
|
|
||||||
features *fs.Features // optional features
|
|
||||||
vfs *vfs.VFS
|
|
||||||
node vfs.Node // zip file object - set if reading
|
|
||||||
remote string // remote of the zip file object
|
|
||||||
prefix string // position for objects
|
|
||||||
prefixSlash string // position for objects with a slash on
|
|
||||||
root string // position to read from within the archive
|
|
||||||
dt dirtree.DirTree // read from zipfile
|
|
||||||
}
|
|
||||||
|
|
||||||
// New constructs an Fs from the (wrappedFs, remote) with the objects
|
|
||||||
// prefix with prefix and rooted at root
|
|
||||||
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) {
|
|
||||||
// FIXME vfs cache?
|
|
||||||
// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
|
|
||||||
fs.Debugf(nil, "Zip: New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
|
|
||||||
vfsOpt := vfscommon.Opt
|
|
||||||
vfsOpt.ReadWait = 0
|
|
||||||
VFS := vfs.New(wrappedFs, &vfsOpt)
|
|
||||||
node, err := VFS.Stat(remote)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
f := &Fs{
|
|
||||||
f: wrappedFs,
|
|
||||||
name: path.Join(fs.ConfigString(wrappedFs), remote),
|
|
||||||
vfs: VFS,
|
|
||||||
node: node,
|
|
||||||
remote: remote,
|
|
||||||
root: root,
|
|
||||||
prefix: prefix,
|
|
||||||
prefixSlash: prefix + "/",
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read the contents of the zip file
|
|
||||||
singleObject, err := f.readZip()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to open zip file: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// FIXME
|
|
||||||
// the features here are ones we could support, and they are
|
|
||||||
// ANDed with the ones from wrappedFs
|
|
||||||
//
|
|
||||||
// FIXME some of these need to be forced on - CanHaveEmptyDirectories
|
|
||||||
f.features = (&fs.Features{
|
|
||||||
CaseInsensitive: false,
|
|
||||||
DuplicateFiles: false,
|
|
||||||
ReadMimeType: false, // MimeTypes not supported with zip
|
|
||||||
WriteMimeType: false,
|
|
||||||
BucketBased: false,
|
|
||||||
CanHaveEmptyDirectories: true,
|
|
||||||
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
|
||||||
|
|
||||||
if singleObject {
|
|
||||||
return f, fs.ErrorIsFile
|
|
||||||
}
|
|
||||||
return f, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name of the remote (as passed into NewFs)
|
|
||||||
func (f *Fs) Name() string {
|
|
||||||
return f.name
|
|
||||||
}
|
|
||||||
|
|
||||||
// Root of the remote (as passed into NewFs)
|
|
||||||
func (f *Fs) Root() string {
|
|
||||||
return f.root
|
|
||||||
}
|
|
||||||
|
|
||||||
// Features returns the optional features of this Fs
|
|
||||||
func (f *Fs) Features() *fs.Features {
|
|
||||||
return f.features
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns a description of the FS
|
|
||||||
func (f *Fs) String() string {
|
|
||||||
return fmt.Sprintf("Zip %q", f.name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// readZip the zip file into f
|
|
||||||
//
|
|
||||||
// Returns singleObject=true if f.root points to a file
|
|
||||||
func (f *Fs) readZip() (singleObject bool, err error) {
|
|
||||||
if f.node == nil {
|
|
||||||
return singleObject, fs.ErrorDirNotFound
|
|
||||||
}
|
|
||||||
size := f.node.Size()
|
|
||||||
if size < 0 {
|
|
||||||
return singleObject, errors.New("can't read from zip file with unknown size")
|
|
||||||
}
|
|
||||||
r, err := f.node.Open(os.O_RDONLY)
|
|
||||||
if err != nil {
|
|
||||||
return singleObject, fmt.Errorf("failed to open zip file: %w", err)
|
|
||||||
}
|
|
||||||
zr, err := zip.NewReader(r, size)
|
|
||||||
if err != nil {
|
|
||||||
return singleObject, fmt.Errorf("failed to read zip file: %w", err)
|
|
||||||
}
|
|
||||||
dt := dirtree.New()
|
|
||||||
for _, file := range zr.File {
|
|
||||||
remote := strings.Trim(path.Clean(file.Name), "/")
|
|
||||||
if remote == "." {
|
|
||||||
remote = ""
|
|
||||||
}
|
|
||||||
remote = path.Join(f.prefix, remote)
|
|
||||||
if f.root != "" {
|
|
||||||
// Ignore all files outside the root
|
|
||||||
if !strings.HasPrefix(remote, f.root) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if remote == f.root {
|
|
||||||
remote = ""
|
|
||||||
} else {
|
|
||||||
remote = strings.TrimPrefix(remote, f.root+"/")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if strings.HasSuffix(file.Name, "/") {
|
|
||||||
dir := fs.NewDir(remote, file.Modified)
|
|
||||||
dt.AddDir(dir)
|
|
||||||
} else {
|
|
||||||
if remote == "" {
|
|
||||||
remote = path.Base(f.root)
|
|
||||||
singleObject = true
|
|
||||||
dt = dirtree.New()
|
|
||||||
}
|
|
||||||
o := &Object{
|
|
||||||
f: f,
|
|
||||||
remote: remote,
|
|
||||||
fh: &file.FileHeader,
|
|
||||||
file: file,
|
|
||||||
}
|
|
||||||
dt.Add(o)
|
|
||||||
if singleObject {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
dt.CheckParents("")
|
|
||||||
dt.Sort()
|
|
||||||
f.dt = dt
|
|
||||||
//fs.Debugf(nil, "dt = %v", dt)
|
|
||||||
return singleObject, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
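// A minimal sketch (not from the original file) of the same pattern readZip
// uses above: zip.NewReader only needs an io.ReaderAt plus the archive size,
// so any seekable handle works. "example.zip" is a hypothetical local file
// standing in for the VFS handle; only the "archive/zip", "fmt" and "os"
// imports already present in this file are needed.
func listZipSketch() error {
	f, err := os.Open("example.zip")
	if err != nil {
		return err
	}
	defer func() { _ = f.Close() }()
	fi, err := f.Stat()
	if err != nil {
		return err
	}
	zr, err := zip.NewReader(f, fi.Size()) // the same call readZip makes on the VFS handle
	if err != nil {
		return err
	}
	for _, file := range zr.File {
		fmt.Println(file.Name, file.UncompressedSize64)
	}
	return nil
}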
// List the objects and directories in dir into entries. The
|
|
||||||
// entries can be returned in any order but should be for a
|
|
||||||
// complete directory.
|
|
||||||
//
|
|
||||||
// dir should be "" to list the root, and should not have
|
|
||||||
// trailing slashes.
|
|
||||||
//
|
|
||||||
// This should return ErrDirNotFound if the directory isn't
|
|
||||||
// found.
|
|
||||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
|
||||||
defer log.Trace(f, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
|
|
||||||
// _, err = f.strip(dir)
|
|
||||||
// if err != nil {
|
|
||||||
// return nil, err
|
|
||||||
// }
|
|
||||||
entries, ok := f.dt[dir]
|
|
||||||
if !ok {
|
|
||||||
return nil, fs.ErrorDirNotFound
|
|
||||||
}
|
|
||||||
fs.Debugf(f, "dir=%q, entries=%v", dir, entries)
|
|
||||||
return entries, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewObject finds the Object at remote.
|
|
||||||
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
|
|
||||||
defer log.Trace(f, "remote=%q", remote)("obj=%v, err=%v", &o, &err)
|
|
||||||
if f.dt == nil {
|
|
||||||
return nil, fs.ErrorObjectNotFound
|
|
||||||
}
|
|
||||||
_, entry := f.dt.Find(remote)
|
|
||||||
if entry == nil {
|
|
||||||
return nil, fs.ErrorObjectNotFound
|
|
||||||
}
|
|
||||||
o, ok := entry.(*Object)
|
|
||||||
if !ok {
|
|
||||||
return nil, fs.ErrorNotAFile
|
|
||||||
}
|
|
||||||
return o, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Precision of the ModTimes in this Fs
|
|
||||||
func (f *Fs) Precision() time.Duration {
|
|
||||||
return time.Second
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mkdir makes the directory (container, bucket)
|
|
||||||
//
|
|
||||||
// Shouldn't return an error if it already exists
|
|
||||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
|
||||||
return vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rmdir removes the directory (container, bucket) if empty
|
|
||||||
//
|
|
||||||
// Return an error if it doesn't exist or isn't empty
|
|
||||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
|
||||||
return vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Put in to the remote path with the modTime given of the given size
|
|
||||||
//
|
|
||||||
// May create the object even if it returns an error - if so
|
|
||||||
// will return the object and the error, otherwise will return
|
|
||||||
// nil and the error
|
|
||||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
|
|
||||||
return nil, vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hashes returns the supported hash sets.
|
|
||||||
func (f *Fs) Hashes() hash.Set {
|
|
||||||
return hash.Set(hash.CRC32)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnWrap returns the Fs that this Fs is wrapping
|
|
||||||
func (f *Fs) UnWrap() fs.Fs {
|
|
||||||
return f.f
|
|
||||||
}
|
|
||||||
|
|
||||||
// WrapFs returns the Fs that is wrapping this Fs
|
|
||||||
func (f *Fs) WrapFs() fs.Fs {
|
|
||||||
return f.wrapper
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetWrapper sets the Fs that is wrapping this Fs
|
|
||||||
func (f *Fs) SetWrapper(wrapper fs.Fs) {
|
|
||||||
f.wrapper = wrapper
|
|
||||||
}
|
|
||||||
|
|
||||||
// Object describes an object to be read from the raw zip file
|
|
||||||
type Object struct {
|
|
||||||
f *Fs
|
|
||||||
remote string
|
|
||||||
fh *zip.FileHeader
|
|
||||||
file *zip.File
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fs returns read only access to the Fs that this object is part of
|
|
||||||
func (o *Object) Fs() fs.Info {
|
|
||||||
return o.f
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return a string version
|
|
||||||
func (o *Object) String() string {
|
|
||||||
if o == nil {
|
|
||||||
return "<nil>"
|
|
||||||
}
|
|
||||||
return o.Remote()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remote returns the remote path
|
|
||||||
func (o *Object) Remote() string {
|
|
||||||
return o.remote
|
|
||||||
}
|
|
||||||
|
|
||||||
// Size returns the size of the file
|
|
||||||
func (o *Object) Size() int64 {
|
|
||||||
return int64(o.fh.UncompressedSize64)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ModTime returns the modification time of the object
|
|
||||||
//
|
|
||||||
// It attempts to read the objects mtime and if that isn't present the
|
|
||||||
// LastModified returned in the http headers
|
|
||||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
|
||||||
return o.fh.Modified
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetModTime sets the modification time of the local fs object
|
|
||||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
|
||||||
return vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Storable returns a boolean indicating if this object is storable
|
|
||||||
func (o *Object) Storable() bool {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hash returns the selected checksum of the file
|
|
||||||
// If no checksum is available it returns ""
|
|
||||||
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
|
|
||||||
if ht == hash.CRC32 {
|
|
||||||
// FIXME return empty CRC if writing
|
|
||||||
if o.f.dt == nil {
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%08x", o.fh.CRC32), nil
|
|
||||||
}
|
|
||||||
return "", hash.ErrUnsupported
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
|
||||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
|
||||||
var offset, limit int64 = 0, -1
|
|
||||||
for _, option := range options {
|
|
||||||
switch x := option.(type) {
|
|
||||||
case *fs.SeekOption:
|
|
||||||
offset = x.Offset
|
|
||||||
case *fs.RangeOption:
|
|
||||||
offset, limit = x.Decode(o.Size())
|
|
||||||
default:
|
|
||||||
if option.Mandatory() {
|
|
||||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
rc, err = o.file.Open()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// discard data from start as necessary
|
|
||||||
if offset > 0 {
|
|
||||||
_, err = io.CopyN(io.Discard, rc, offset)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// If limited then don't return everything
|
|
||||||
if limit >= 0 {
|
|
||||||
return readers.NewLimitedReadCloser(rc, limit), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return rc, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
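// A minimal, self-contained sketch (not from the original file) of the
// offset/limit technique Open uses above: a zip entry reader cannot seek, so
// the offset is honoured by discarding bytes with io.CopyN and the limit by
// wrapping the rest. Uses only the "io", "fmt" and "strings" imports; the
// string literal is hypothetical sample data.
func rangeOverReaderSketch() {
	r := io.Reader(strings.NewReader("hello, zip entry contents"))
	offset, limit := int64(7), int64(3)
	if _, err := io.CopyN(io.Discard, r, offset); err != nil { // skip to the offset
		panic(err)
	}
	b, err := io.ReadAll(io.LimitReader(r, limit)) // cap at the limit
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", b) // prints "zip"
}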
// Update in to the object with the modTime given of the given size
|
|
||||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
|
||||||
return vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove an object
|
|
||||||
func (o *Object) Remove(ctx context.Context) error {
|
|
||||||
return vfs.EROFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check the interfaces are satisfied
|
|
||||||
var (
|
|
||||||
_ fs.Fs = (*Fs)(nil)
|
|
||||||
_ fs.UnWrapper = (*Fs)(nil)
|
|
||||||
_ fs.Wrapper = (*Fs)(nil)
|
|
||||||
_ fs.Object = (*Object)(nil)
|
|
||||||
)
|
|
||||||
@@ -1,151 +1,36 @@
|
|||||||
//go:build !plan9 && !solaris && !js
|
//go:build !plan9 && !solaris && !js
|
||||||
|
// +build !plan9,!solaris,!js
|
||||||
|
|
||||||
package azureblob
|
package azureblob
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"encoding/base64"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fstest"
|
|
||||||
"github.com/rclone/rclone/fstest/fstests"
|
|
||||||
"github.com/rclone/rclone/lib/random"
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestBlockIDCreator(t *testing.T) {
|
func (f *Fs) InternalTest(t *testing.T) {
|
||||||
// Check creation and random number
|
// Check first feature flags are set on this
|
||||||
bic, err := newBlockIDCreator()
|
// remote
|
||||||
require.NoError(t, err)
|
|
||||||
bic2, err := newBlockIDCreator()
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.NotEqual(t, bic.random, bic2.random)
|
|
||||||
assert.NotEqual(t, bic.random, [8]byte{})
|
|
||||||
|
|
||||||
// Set random to known value for tests
|
|
||||||
bic.random = [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
|
|
||||||
chunkNumber := uint64(0xFEDCBA9876543210)
|
|
||||||
|
|
||||||
// Check creation of ID
|
|
||||||
want := base64.StdEncoding.EncodeToString([]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10, 1, 2, 3, 4, 5, 6, 7, 8})
|
|
||||||
assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", want)
|
|
||||||
got := bic.newBlockID(chunkNumber)
|
|
||||||
assert.Equal(t, want, got)
|
|
||||||
assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", got)
|
|
||||||
|
|
||||||
// Test checkID is working
|
|
||||||
assert.NoError(t, bic.checkID(chunkNumber, got))
|
|
||||||
assert.ErrorContains(t, bic.checkID(chunkNumber, "$"+got), "illegal base64")
|
|
||||||
assert.ErrorContains(t, bic.checkID(chunkNumber, "AAAA"+got), "bad block ID length")
|
|
||||||
assert.ErrorContains(t, bic.checkID(chunkNumber+1, got), "expecting decoded")
|
|
||||||
assert.ErrorContains(t, bic2.checkID(chunkNumber, got), "random bytes")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) testFeatures(t *testing.T) {
|
|
||||||
// Check first feature flags are set on this remote
|
|
||||||
enabled := f.Features().SetTier
|
enabled := f.Features().SetTier
|
||||||
assert.True(t, enabled)
|
assert.True(t, enabled)
|
||||||
enabled = f.Features().GetTier
|
enabled = f.Features().GetTier
|
||||||
assert.True(t, enabled)
|
assert.True(t, enabled)
|
||||||
}
|
}
|
||||||
|
|
||||||
type ReadSeekCloser struct {
|
func TestIncrement(t *testing.T) {
|
||||||
*strings.Reader
|
for _, test := range []struct {
|
||||||
}
|
in []byte
|
||||||
|
want []byte
|
||||||
func (r *ReadSeekCloser) Close() error {
|
}{
|
||||||
return nil
|
{[]byte{0, 0, 0, 0}, []byte{1, 0, 0, 0}},
|
||||||
}
|
{[]byte{0xFE, 0, 0, 0}, []byte{0xFF, 0, 0, 0}},
|
||||||
|
{[]byte{0xFF, 0, 0, 0}, []byte{0, 1, 0, 0}},
|
||||||
// Stage a block at remote but don't commit it
|
{[]byte{0, 1, 0, 0}, []byte{1, 1, 0, 0}},
|
||||||
func (f *Fs) stageBlockWithoutCommit(ctx context.Context, t *testing.T, remote string) {
|
{[]byte{0xFF, 0xFF, 0xFF, 0xFE}, []byte{0, 0, 0, 0xFF}},
|
||||||
var (
|
{[]byte{0xFF, 0xFF, 0xFF, 0xFF}, []byte{0, 0, 0, 0}},
|
||||||
containerName, blobPath = f.split(remote)
|
} {
|
||||||
containerClient = f.cntSVC(containerName)
|
increment(test.in)
|
||||||
blobClient = containerClient.NewBlockBlobClient(blobPath)
|
assert.Equal(t, test.want, test.in)
|
||||||
data = "uncommitted data"
|
|
||||||
blockID = "1"
|
|
||||||
blockIDBase64 = base64.StdEncoding.EncodeToString([]byte(blockID))
|
|
||||||
)
|
|
||||||
r := &ReadSeekCloser{strings.NewReader(data)}
|
|
||||||
_, err := blobClient.StageBlock(ctx, blockIDBase64, r, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Verify the block is staged but not committed
|
|
||||||
blockList, err := blobClient.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
found := false
|
|
||||||
for _, block := range blockList.UncommittedBlocks {
|
|
||||||
if *block.Name == blockIDBase64 {
|
|
||||||
found = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
require.True(t, found, "Block ID not found in uncommitted blocks")
|
|
||||||
}
|
|
||||||
|
|
||||||
// This tests uploading a blob where it has uncommitted blocks with a different ID size.
|
|
||||||
//
|
|
||||||
// https://gauravmantri.com/2013/05/18/windows-azure-blob-storage-dealing-with-the-specified-blob-or-block-content-is-invalid-error/
|
|
||||||
//
|
|
||||||
// TestIntegration/FsMkdir/FsPutFiles/Internal/WriteUncommittedBlocks
|
|
||||||
func (f *Fs) testWriteUncommittedBlocks(t *testing.T) {
|
|
||||||
var (
|
|
||||||
ctx = context.Background()
|
|
||||||
remote = "testBlob"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Multipart copy the blob please
|
|
||||||
oldUseCopyBlob, oldCopyCutoff := f.opt.UseCopyBlob, f.opt.CopyCutoff
|
|
||||||
f.opt.UseCopyBlob = false
|
|
||||||
f.opt.CopyCutoff = f.opt.ChunkSize
|
|
||||||
defer func() {
|
|
||||||
f.opt.UseCopyBlob, f.opt.CopyCutoff = oldUseCopyBlob, oldCopyCutoff
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Create a blob with uncommitted blocks
|
|
||||||
f.stageBlockWithoutCommit(ctx, t, remote)
|
|
||||||
|
|
||||||
// Now attempt to overwrite the block with a different sized block ID to provoke this error
|
|
||||||
|
|
||||||
// Check the object does not exist
|
|
||||||
_, err := f.NewObject(ctx, remote)
|
|
||||||
require.Equal(t, fs.ErrorObjectNotFound, err)
|
|
||||||
|
|
||||||
// Upload a multipart file over the block with uncommitted chunks of a different ID size
|
|
||||||
size := 4*int(f.opt.ChunkSize) - 1
|
|
||||||
contents := random.String(size)
|
|
||||||
item := fstest.NewItem(remote, contents, fstest.Time("2001-05-06T04:05:06.499Z"))
|
|
||||||
o := fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
|
||||||
|
|
||||||
// Check size
|
|
||||||
assert.Equal(t, int64(size), o.Size())
|
|
||||||
|
|
||||||
// Create a new blob with uncommitted blocks
|
|
||||||
newRemote := "testBlob2"
|
|
||||||
f.stageBlockWithoutCommit(ctx, t, newRemote)
|
|
||||||
|
|
||||||
// Copy over that block
|
|
||||||
dst, err := f.Copy(ctx, o, newRemote)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Check basics
|
|
||||||
assert.Equal(t, int64(size), dst.Size())
|
|
||||||
assert.Equal(t, newRemote, dst.Remote())
|
|
||||||
|
|
||||||
// Check contents
|
|
||||||
gotContents := fstests.ReadObject(ctx, t, dst, -1)
|
|
||||||
assert.Equal(t, contents, gotContents)
|
|
||||||
|
|
||||||
// Remove the object
|
|
||||||
require.NoError(t, dst.Remove(ctx))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) InternalTest(t *testing.T) {
|
|
||||||
t.Run("Features", f.testFeatures)
|
|
||||||
t.Run("WriteUncommittedBlocks", f.testWriteUncommittedBlocks)
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,51 +1,26 @@
|
|||||||
// Test AzureBlob filesystem interface
|
// Test AzureBlob filesystem interface
|
||||||
|
|
||||||
//go:build !plan9 && !solaris && !js
|
//go:build !plan9 && !solaris && !js
|
||||||
|
// +build !plan9,!solaris,!js
|
||||||
|
|
||||||
package azureblob
|
package azureblob
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fstest"
|
|
||||||
"github.com/rclone/rclone/fstest/fstests"
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestIntegration runs integration tests against the remote
|
// TestIntegration runs integration tests against the remote
|
||||||
func TestIntegration(t *testing.T) {
|
func TestIntegration(t *testing.T) {
|
||||||
name := "TestAzureBlob"
|
|
||||||
fstests.Run(t, &fstests.Opt{
|
fstests.Run(t, &fstests.Opt{
|
||||||
RemoteName: name + ":",
|
RemoteName: "TestAzureBlob:",
|
||||||
NilObject: (*Object)(nil),
|
NilObject: (*Object)(nil),
|
||||||
TiersToTest: []string{"Hot", "Cool", "Cold"},
|
TiersToTest: []string{"Hot", "Cool"},
|
||||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
ChunkedUpload: fstests.ChunkedUploadConfig{},
|
||||||
MinChunkSize: defaultChunkSize,
|
|
||||||
},
|
|
||||||
ExtraConfig: []fstests.ExtraConfigItem{
|
|
||||||
{Name: name, Key: "use_copy_blob", Value: "false"},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestIntegration2 runs integration tests against the remote
|
|
||||||
func TestIntegration2(t *testing.T) {
|
|
||||||
if *fstest.RemoteName != "" {
|
|
||||||
t.Skip("Skipping as -remote set")
|
|
||||||
}
|
|
||||||
name := "TestAzureBlob"
|
|
||||||
fstests.Run(t, &fstests.Opt{
|
|
||||||
RemoteName: name + ":",
|
|
||||||
NilObject: (*Object)(nil),
|
|
||||||
TiersToTest: []string{"Hot", "Cool", "Cold"},
|
|
||||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
|
||||||
MinChunkSize: defaultChunkSize,
|
|
||||||
},
|
|
||||||
ExtraConfig: []fstests.ExtraConfigItem{
|
|
||||||
{Name: name, Key: "directory_markers", Value: "true"},
|
|
||||||
{Name: name, Key: "use_copy_blob", Value: "false"},
|
|
||||||
},
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -53,15 +28,40 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
|||||||
return f.setUploadChunkSize(cs)
|
return f.setUploadChunkSize(cs)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
|
||||||
return f.setCopyCutoff(cs)
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
var (
|
||||||
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||||
_ fstests.SetCopyCutoffer = (*Fs)(nil)
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// TestServicePrincipalFileSuccess checks that, given a proper JSON file, we can create a token.
|
||||||
|
func TestServicePrincipalFileSuccess(t *testing.T) {
|
||||||
|
ctx := context.TODO()
|
||||||
|
credentials := `
|
||||||
|
{
|
||||||
|
"appId": "my application (client) ID",
|
||||||
|
"password": "my secret",
|
||||||
|
"tenant": "my active directory tenant ID"
|
||||||
|
}
|
||||||
|
`
|
||||||
|
tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
|
||||||
|
if assert.NoError(t, err) {
|
||||||
|
assert.NotNil(t, tokenRefresher)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestServicePrincipalFileFailure checks that, given a JSON file with a missing secret, it returns an error.
|
||||||
|
func TestServicePrincipalFileFailure(t *testing.T) {
|
||||||
|
ctx := context.TODO()
|
||||||
|
credentials := `
|
||||||
|
{
|
||||||
|
"appId": "my application (client) ID",
|
||||||
|
"tenant": "my active directory tenant ID"
|
||||||
|
}
|
||||||
|
`
|
||||||
|
_, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
|
||||||
|
assert.Error(t, err)
|
||||||
|
assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
|
||||||
|
}
|
||||||
|
|
||||||
func TestValidateAccessTier(t *testing.T) {
|
func TestValidateAccessTier(t *testing.T) {
|
||||||
tests := map[string]struct {
|
tests := map[string]struct {
|
||||||
accessTier string
|
accessTier string
|
||||||
@@ -71,7 +71,6 @@ func TestValidateAccessTier(t *testing.T) {
|
|||||||
"HOT": {"HOT", true},
|
"HOT": {"HOT", true},
|
||||||
"Hot": {"Hot", true},
|
"Hot": {"Hot", true},
|
||||||
"cool": {"cool", true},
|
"cool": {"cool", true},
|
||||||
"cold": {"cold", true},
|
|
||||||
"archive": {"archive", true},
|
"archive": {"archive", true},
|
||||||
"empty": {"", false},
|
"empty": {"", false},
|
||||||
"unknown": {"unknown", false},
|
"unknown": {"unknown", false},
|
||||||
|
|||||||
@@ -2,6 +2,6 @@
|
|||||||
// about "no buildable Go source files "
|
// about "no buildable Go source files "
|
||||||
|
|
||||||
//go:build plan9 || solaris || js
|
//go:build plan9 || solaris || js
|
||||||
|
// +build plan9 solaris js
|
||||||
|
|
||||||
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
|
|
||||||
package azureblob
|
package azureblob
|
||||||
|
|||||||
137
backend/azureblob/imds.go
Normal file
@@ -0,0 +1,137 @@
|
|||||||
|
//go:build !plan9 && !solaris && !js
|
||||||
|
// +build !plan9,!solaris,!js
|
||||||
|
|
||||||
|
package azureblob
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
"github.com/Azure/go-autorest/autorest/adal"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/fshttp"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
azureResource = "https://storage.azure.com"
|
||||||
|
imdsAPIVersion = "2018-02-01"
|
||||||
|
msiEndpointDefault = "http://169.254.169.254/metadata/identity/oauth2/token"
|
||||||
|
)
|
||||||
|
|
||||||
|
// This custom type is used to add the port the test server has bound to
|
||||||
|
// to the request context.
|
||||||
|
type testPortKey string
|
||||||
|
|
||||||
|
type msiIdentifierType int
|
||||||
|
|
||||||
|
const (
|
||||||
|
msiClientID msiIdentifierType = iota
|
||||||
|
msiObjectID
|
||||||
|
msiResourceID
|
||||||
|
)
|
||||||
|
|
||||||
|
type userMSI struct {
|
||||||
|
Type msiIdentifierType
|
||||||
|
Value string
|
||||||
|
}
|
||||||
|
|
||||||
|
type httpError struct {
|
||||||
|
Response *http.Response
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e httpError) Error() string {
|
||||||
|
return fmt.Sprintf("HTTP error %v (%v)", e.Response.StatusCode, e.Response.Status)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetMSIToken attempts to obtain an MSI token from the Azure Instance
|
||||||
|
// Metadata Service.
|
||||||
|
func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
|
||||||
|
// Attempt to get an MSI token; silently continue if unsuccessful.
|
||||||
|
// This code has been lovingly stolen from azcopy's OAuthTokenManager.
|
||||||
|
result := adal.Token{}
|
||||||
|
req, err := http.NewRequestWithContext(ctx, "GET", msiEndpointDefault, nil)
|
||||||
|
if err != nil {
|
||||||
|
fs.Debugf(nil, "Failed to create request: %v", err)
|
||||||
|
return result, err
|
||||||
|
}
|
||||||
|
params := req.URL.Query()
|
||||||
|
params.Set("resource", azureResource)
|
||||||
|
params.Set("api-version", imdsAPIVersion)
|
||||||
|
|
||||||
|
// Specify user-assigned identity if requested.
|
||||||
|
if identity != nil {
|
||||||
|
switch identity.Type {
|
||||||
|
case msiClientID:
|
||||||
|
params.Set("client_id", identity.Value)
|
||||||
|
case msiObjectID:
|
||||||
|
params.Set("object_id", identity.Value)
|
||||||
|
case msiResourceID:
|
||||||
|
params.Set("mi_res_id", identity.Value)
|
||||||
|
default:
|
||||||
|
// If this happens, the calling function and this one don't agree on
|
||||||
|
// what valid ID types exist.
|
||||||
|
return result, fmt.Errorf("unknown MSI identity type specified")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
req.URL.RawQuery = params.Encode()
|
||||||
|
|
||||||
|
// The Metadata header is required by all calls to IMDS.
|
||||||
|
req.Header.Set("Metadata", "true")
|
||||||
|
|
||||||
|
// If this function is run in a test, query the test server instead of IMDS.
|
||||||
|
testPort, isTest := ctx.Value(testPortKey("testPort")).(int)
|
||||||
|
if isTest {
|
||||||
|
req.URL.Host = fmt.Sprintf("localhost:%d", testPort)
|
||||||
|
req.Host = req.URL.Host
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send request
|
||||||
|
httpClient := fshttp.NewClient(ctx)
|
||||||
|
resp, err := httpClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return result, fmt.Errorf("MSI is not enabled on this VM: %w", err)
|
||||||
|
}
|
||||||
|
defer func() { // resp and Body should not be nil
|
||||||
|
_, err = io.Copy(ioutil.Discard, resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
fs.Debugf(nil, "Unable to drain IMDS response: %v", err)
|
||||||
|
}
|
||||||
|
err = resp.Body.Close()
|
||||||
|
if err != nil {
|
||||||
|
fs.Debugf(nil, "Unable to close IMDS response: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
// Check if the status code indicates success
|
||||||
|
// The request returns 200 currently, add 201 and 202 as well for possible extension.
|
||||||
|
switch resp.StatusCode {
|
||||||
|
case 200, 201, 202:
|
||||||
|
break
|
||||||
|
default:
|
||||||
|
body, _ := ioutil.ReadAll(resp.Body)
|
||||||
|
fs.Errorf(nil, "Couldn't obtain OAuth token from IMDS; server returned status code %d and body: %v", resp.StatusCode, string(body))
|
||||||
|
return result, httpError{Response: resp}
|
||||||
|
}
|
||||||
|
|
||||||
|
b, err := ioutil.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return result, fmt.Errorf("couldn't read IMDS response: %w", err)
|
||||||
|
}
|
||||||
|
// Remove BOM, if any. azcopy does this so I'm following along.
|
||||||
|
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
|
||||||
|
|
||||||
|
// This would be a good place to persist the token if a large number of rclone
|
||||||
|
// invocations are being made in a short amount of time. If the token is
|
||||||
|
// persisted, the azureblob code will need to check for expiry before every
|
||||||
|
// storage API call.
|
||||||
|
err = json.Unmarshal(b, &result)
|
||||||
|
if err != nil {
|
||||||
|
return result, fmt.Errorf("couldn't unmarshal IMDS response: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
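// A minimal usage sketch (not from the original file): requesting a token for
// a user-assigned identity via the helper above. The client ID is a
// hypothetical placeholder; the "context" and adal imports are already
// present in this file.
func exampleMSITokenSketch(ctx context.Context) (adal.Token, error) {
	id := &userMSI{Type: msiClientID, Value: "00000000-0000-0000-0000-000000000000"}
	return GetMSIToken(ctx, id)
}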
118
backend/azureblob/imds_test.go
Normal file
@@ -0,0 +1,118 @@
|
|||||||
|
//go:build !plan9 && !solaris && !js
|
||||||
|
// +build !plan9,!solaris,!js
|
||||||
|
|
||||||
|
package azureblob
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/Azure/go-autorest/autorest/adal"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func handler(t *testing.T, actual *map[string]string) http.HandlerFunc {
|
||||||
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
err := r.ParseForm()
|
||||||
|
require.NoError(t, err)
|
||||||
|
parameters := r.URL.Query()
|
||||||
|
(*actual)["path"] = r.URL.Path
|
||||||
|
(*actual)["Metadata"] = r.Header.Get("Metadata")
|
||||||
|
(*actual)["method"] = r.Method
|
||||||
|
for paramName := range parameters {
|
||||||
|
(*actual)[paramName] = parameters.Get(paramName)
|
||||||
|
}
|
||||||
|
// Make response.
|
||||||
|
response := adal.Token{}
|
||||||
|
responseBytes, err := json.Marshal(response)
|
||||||
|
require.NoError(t, err)
|
||||||
|
_, err = w.Write(responseBytes)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestManagedIdentity(t *testing.T) {
|
||||||
|
// test user-assigned identity specifiers to use
|
||||||
|
testMSIClientID := "d859b29f-5c9c-42f8-a327-ec1bc6408d79"
|
||||||
|
testMSIObjectID := "9ffeb650-3ca0-4278-962b-5a38d520591a"
|
||||||
|
testMSIResourceID := "/subscriptions/fe714c49-b8a4-4d49-9388-96a20daa318f/resourceGroups/somerg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/someidentity"
|
||||||
|
tests := []struct {
|
||||||
|
identity *userMSI
|
||||||
|
identityParameterName string
|
||||||
|
expectedAbsent []string
|
||||||
|
}{
|
||||||
|
{&userMSI{msiClientID, testMSIClientID}, "client_id", []string{"object_id", "mi_res_id"}},
|
||||||
|
{&userMSI{msiObjectID, testMSIObjectID}, "object_id", []string{"client_id", "mi_res_id"}},
|
||||||
|
{&userMSI{msiResourceID, testMSIResourceID}, "mi_res_id", []string{"object_id", "client_id"}},
|
||||||
|
{nil, "(default)", []string{"object_id", "client_id", "mi_res_id"}},
|
||||||
|
}
|
||||||
|
alwaysExpected := map[string]string{
|
||||||
|
"path": "/metadata/identity/oauth2/token",
|
||||||
|
"resource": "https://storage.azure.com",
|
||||||
|
"Metadata": "true",
|
||||||
|
"api-version": "2018-02-01",
|
||||||
|
"method": "GET",
|
||||||
|
}
|
||||||
|
for _, test := range tests {
|
||||||
|
actual := make(map[string]string, 10)
|
||||||
|
testServer := httptest.NewServer(handler(t, &actual))
|
||||||
|
defer testServer.Close()
|
||||||
|
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
|
||||||
|
require.NoError(t, err)
|
||||||
|
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
|
||||||
|
_, err = GetMSIToken(ctx, test.identity)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Validate expected query parameters present
|
||||||
|
expected := make(map[string]string)
|
||||||
|
for k, v := range alwaysExpected {
|
||||||
|
expected[k] = v
|
||||||
|
}
|
||||||
|
if test.identity != nil {
|
||||||
|
expected[test.identityParameterName] = test.identity.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
for key := range expected {
|
||||||
|
value, exists := actual[key]
|
||||||
|
if assert.Truef(t, exists, "test of %s: query parameter %s was not passed",
|
||||||
|
test.identityParameterName, key) {
|
||||||
|
assert.Equalf(t, expected[key], value,
|
||||||
|
"test of %s: parameter %s has incorrect value", test.identityParameterName, key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate unexpected query parameters absent
|
||||||
|
for _, key := range test.expectedAbsent {
|
||||||
|
_, exists := actual[key]
|
||||||
|
assert.Falsef(t, exists, "query parameter %s was unexpectedly passed", key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func errorHandler(resultCode int) http.HandlerFunc {
|
||||||
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
http.Error(w, "Test error generated", resultCode)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIMDSErrors(t *testing.T) {
|
||||||
|
errorCodes := []int{404, 429, 500}
|
||||||
|
for _, code := range errorCodes {
|
||||||
|
testServer := httptest.NewServer(errorHandler(code))
|
||||||
|
defer testServer.Close()
|
||||||
|
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
|
||||||
|
require.NoError(t, err)
|
||||||
|
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
|
||||||
|
_, err = GetMSIToken(ctx, nil)
|
||||||
|
require.Error(t, err)
|
||||||
|
httpErr, ok := err.(httpError)
|
||||||
|
require.Truef(t, ok, "HTTP error %d did not result in an httpError object", code)
|
||||||
|
assert.Equalf(t, httpErr.Response.StatusCode, code, "desired error %d but didn't get it", code)
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,69 +0,0 @@
|
|||||||
//go:build !plan9 && !js
|
|
||||||
|
|
||||||
package azurefiles
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"math/rand"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/fstest/fstests"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (f *Fs) InternalTest(t *testing.T) {
|
|
||||||
t.Run("Authentication", f.InternalTestAuth)
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ fstests.InternalTester = (*Fs)(nil)
|
|
||||||
|
|
||||||
func (f *Fs) InternalTestAuth(t *testing.T) {
|
|
||||||
t.Skip("skipping since this requires authentication credentials which are not part of repo")
|
|
||||||
shareName := "test-rclone-oct-2023"
|
|
||||||
testCases := []struct {
|
|
||||||
name string
|
|
||||||
options *Options
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "ConnectionString",
|
|
||||||
options: &Options{
|
|
||||||
ShareName: shareName,
|
|
||||||
ConnectionString: "",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "AccountAndKey",
|
|
||||||
options: &Options{
|
|
||||||
ShareName: shareName,
|
|
||||||
Account: "",
|
|
||||||
Key: "",
|
|
||||||
}},
|
|
||||||
{
|
|
||||||
name: "SASUrl",
|
|
||||||
options: &Options{
|
|
||||||
ShareName: shareName,
|
|
||||||
SASURL: "",
|
|
||||||
}},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range testCases {
|
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
|
||||||
fs, err := newFsFromOptions(context.TODO(), "TestAzureFiles", "", tc.options)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
dirName := randomString(10)
|
|
||||||
assert.NoError(t, fs.Mkdir(context.TODO(), dirName))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const chars = "abcdefghijklmnopqrstuvwzyxABCDEFGHIJKLMNOPQRSTUVWZYX"
|
|
||||||
|
|
||||||
func randomString(charCount int) string {
|
|
||||||
strBldr := strings.Builder{}
|
|
||||||
for range charCount {
|
|
||||||
randPos := rand.Int63n(52)
|
|
||||||
strBldr.WriteByte(chars[randPos])
|
|
||||||
}
|
|
||||||
return strBldr.String()
|
|
||||||
}
|
|
||||||
@@ -1,17 +0,0 @@
|
|||||||
//go:build !plan9 && !js
|
|
||||||
|
|
||||||
package azurefiles
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/fstest/fstests"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestIntegration(t *testing.T) {
|
|
||||||
var objPtr *Object
|
|
||||||
fstests.Run(t, &fstests.Opt{
|
|
||||||
RemoteName: "TestAzureFiles:",
|
|
||||||
NilObject: objPtr,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
// Build for azurefiles for unsupported platforms to stop go complaining
|
|
||||||
// about "no buildable Go source files "
|
|
||||||
|
|
||||||
//go:build plan9 || js
|
|
||||||
|
|
||||||
// Package azurefiles provides an interface to Microsoft Azure Files
|
|
||||||
package azurefiles
|
|
||||||
@@ -1,4 +1,3 @@
|
|||||||
// Package api provides types used by the Backblaze B2 API.
|
|
||||||
package api
|
package api
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -33,27 +32,10 @@ var _ fserrors.Fataler = (*Error)(nil)
|
|||||||
|
|
||||||
// Bucket describes a B2 bucket
|
// Bucket describes a B2 bucket
|
||||||
type Bucket struct {
|
type Bucket struct {
|
||||||
ID string `json:"bucketId"`
|
ID string `json:"bucketId"`
|
||||||
AccountID string `json:"accountId"`
|
AccountID string `json:"accountId"`
|
||||||
Name string `json:"bucketName"`
|
Name string `json:"bucketName"`
|
||||||
Type string `json:"bucketType"`
|
Type string `json:"bucketType"`
|
||||||
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// LifecycleRule is a single lifecycle rule
|
|
||||||
type LifecycleRule struct {
|
|
||||||
DaysFromHidingToDeleting *int `json:"daysFromHidingToDeleting"`
|
|
||||||
DaysFromUploadingToHiding *int `json:"daysFromUploadingToHiding"`
|
|
||||||
DaysFromStartingToCancelingUnfinishedLargeFiles *int `json:"daysFromStartingToCancelingUnfinishedLargeFiles"`
|
|
||||||
FileNamePrefix string `json:"fileNamePrefix"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServerSideEncryption is a configuration object for B2 Server-Side Encryption
|
|
||||||
type ServerSideEncryption struct {
|
|
||||||
Mode string `json:"mode"`
|
|
||||||
Algorithm string `json:"algorithm"` // Encryption algorithm to use
|
|
||||||
CustomerKey string `json:"customerKey"` // User provided Base64 encoded key that is used by the server to encrypt files
|
|
||||||
CustomerKeyMd5 string `json:"customerKeyMd5"` // An MD5 hash of the decoded key
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Timestamp is a UTC time when this file was uploaded. It is a base
|
// Timestamp is a UTC time when this file was uploaded. It is a base
|
||||||
@@ -138,10 +120,10 @@ type AuthorizeAccountResponse struct {
AbsoluteMinimumPartSize int `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
AccountID string `json:"accountId"` // The identifier for the account.
Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
- NamePrefix any `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
+ NamePrefix interface{} `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
} `json:"allowed"`
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
@@ -223,10 +205,9 @@ type FileInfo struct {

// CreateBucketRequest is used to create a bucket
type CreateBucketRequest struct {
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
- LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
}

// DeleteBucketRequest is used to create a bucket
@@ -257,7 +238,7 @@ type GetFileInfoRequest struct {
// If the original source of the file being uploaded has a last
// modified time concept, Backblaze recommends using
// src_last_modified_millis as the name, and a string holding the base
- // 10 number of milliseconds since midnight, January 1, 1970
+ // 10 number number of milliseconds since midnight, January 1, 1970
// UTC. This fits in a 64 bit integer such as the type "long" in the
// programming language Java. It is intended to be compatible with
// Java's time long. For example, it can be passed directly into the
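The convention spelled out in this comment, a base-10 string holding milliseconds since the Unix epoch, is easy to produce and parse with the standard library. The helper below is a minimal sketch of that conversion only; it is not the backend's own timeString helper.

    package main

    import (
        "fmt"
        "strconv"
        "time"
    )

    // toMillisString renders a time the way the comment above describes:
    // a base-10 number of milliseconds since midnight, January 1, 1970 UTC.
    func toMillisString(t time.Time) string {
        return strconv.FormatInt(t.UnixMilli(), 10)
    }

    // fromMillisString reverses the conversion.
    func fromMillisString(s string) (time.Time, error) {
        ms, err := strconv.ParseInt(s, 10, 64)
        if err != nil {
            return time.Time{}, err
        }
        return time.UnixMilli(ms).UTC(), nil
    }

    func main() {
        t := time.Date(2016, time.January, 14, 21, 0, 3, 0, time.UTC)
        s := toMillisString(t)
        back, _ := fromMillisString(s)
        fmt.Println(s, back.Equal(t)) // prints the millisecond string and true
    }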
@@ -269,22 +250,21 @@ type GetFileInfoRequest struct {
//
// Example: { "src_last_modified_millis" : "1452802803026", "large_file_sha1" : "a3195dc1e7b46a2ff5da4b3c179175b75671e80d", "color": "blue" }
type StartLargeFileRequest struct {
- BucketID string `json:"bucketId"` // The ID of the bucket that the file will go in.
+ BucketID string `json:"bucketId"` //The ID of the bucket that the file will go in.
Name string `json:"fileName"` // The name of the file. See Files for requirements on file names.
ContentType string `json:"contentType"` // The MIME type of the content of the file, which will be returned in the Content-Type header when downloading the file. Use the Content-Type b2/x-auto to automatically set the stored Content-Type post upload. In the case where a file extension is absent or the lookup fails, the Content-Type is set to application/octet-stream.
Info map[string]string `json:"fileInfo"` // A JSON object holding the name/value pairs for the custom file info.
- ServerSideEncryption *ServerSideEncryption `json:"serverSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption
}

// StartLargeFileResponse is the response to StartLargeFileRequest
type StartLargeFileResponse struct {
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
AccountID string `json:"accountId"` // The identifier for the account.
BucketID string `json:"bucketId"` // The unique ID of the bucket.
ContentType string `json:"contentType"` // The MIME type of the file.
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
- UploadTimestamp Timestamp `json:"uploadTimestamp,omitempty"` // This is a UTC time when this file was uploaded.
+ UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
}

// GetUploadPartURLRequest is passed to b2_get_upload_part_url
@@ -334,31 +314,19 @@ type CancelLargeFileResponse struct {

// CopyFileRequest is as passed to b2_copy_file
type CopyFileRequest struct {
SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
Name string `json:"fileName"` // The name of the new file being created.
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
MetadataDirective string `json:"metadataDirective,omitempty"` // The strategy for how to populate metadata for the new file: COPY or REPLACE
ContentType string `json:"contentType,omitempty"` // The MIME type of the content of the file (REPLACE only)
Info map[string]string `json:"fileInfo,omitempty"` // This field stores the metadata that will be stored with the file. (REPLACE only)
DestBucketID string `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used
- SourceServerSideEncryption *ServerSideEncryption `json:"sourceServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the source file
- DestinationServerSideEncryption *ServerSideEncryption `json:"destinationServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the destination file
}

// CopyPartRequest is the request for b2_copy_part - the response is UploadPartResponse
type CopyPartRequest struct {
SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
LargeFileID string `json:"largeFileId"` // The ID of the large file the part will belong to, as returned by b2_start_large_file.
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
- SourceServerSideEncryption *ServerSideEncryption `json:"sourceServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the source file
- DestinationServerSideEncryption *ServerSideEncryption `json:"destinationServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the destination file
- }
-
- // UpdateBucketRequest describes a request to modify a B2 bucket
- type UpdateBucketRequest struct {
- ID string `json:"bucketId"`
- AccountID string `json:"accountId"`
- Type string `json:"bucketType,omitempty"`
- LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
}
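To make the tags above concrete, the sketch below marshals a CopyPartRequest of the kind POSTed to /b2_copy_part later in this change set. The two IDs and the byte range are made-up example values, and the struct is a trimmed local copy rather than the api package's own type.

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Trimmed local copy of CopyPartRequest, using the same JSON tags as above.
    type CopyPartRequest struct {
        SourceID    string `json:"sourceFileId"`
        LargeFileID string `json:"largeFileId"`
        PartNumber  int64  `json:"partNumber"`
        Range       string `json:"range,omitempty"`
    }

    func main() {
        req := CopyPartRequest{
            SourceID:    "exampleSourceFileID", // invented ID
            LargeFileID: "exampleLargeFileID",  // invented ID
            PartNumber:  1,                     // parts are numbered from 1
            Range:       "bytes=0-104857599",   // first 100 MiB of the source
        }
        body, _ := json.Marshal(req)
        fmt.Println(string(body))
        // {"sourceFileId":"exampleSourceFileID","largeFileId":"exampleLargeFileID","partNumber":1,"range":"bytes=0-104857599"}
    }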
@@ -42,11 +42,11 @@ func TestTimestampIsZero(t *testing.T) {
}

func TestTimestampEqual(t *testing.T) {
- assert.False(t, emptyT.Equal(emptyT)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
+ assert.False(t, emptyT.Equal(emptyT))
assert.False(t, t0.Equal(emptyT))
assert.False(t, emptyT.Equal(t0))
assert.False(t, t0.Equal(t1))
assert.False(t, t1.Equal(t0))
- assert.True(t, t0.Equal(t0)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
+ assert.True(t, t0.Equal(t0))
- assert.True(t, t1.Equal(t1)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
+ assert.True(t, t1.Equal(t1))
}
backend/b2/b2.go: 828 lines changed (file diff suppressed because it is too large)
@@ -1,31 +1,14 @@
package b2

import (
- "context"
- "crypto/sha1"
- "fmt"
- "path"
- "sort"
- "strings"
"testing"
"time"

- "github.com/rclone/rclone/backend/b2/api"
- "github.com/rclone/rclone/fs"
- "github.com/rclone/rclone/fs/cache"
- "github.com/rclone/rclone/fs/hash"
- "github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
- "github.com/rclone/rclone/fstest/fstests"
- "github.com/rclone/rclone/lib/bucket"
- "github.com/rclone/rclone/lib/random"
- "github.com/rclone/rclone/lib/version"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
)

// Test b2 string encoding
- // https://www.backblaze.com/docs/cloud-storage-native-api-string-encoding
+ // https://www.backblaze.com/b2/docs/string_encoding.html

var encodeTest = []struct {
fullyEncoded string
@@ -185,435 +168,3 @@ func TestParseTimeString(t *testing.T) {
}

}
-
- // Return a map of the headers in the options with keys stripped of the "x-bz-info-" prefix
- func OpenOptionToMetaData(options []fs.OpenOption) map[string]string {
- var headers = make(map[string]string)
- for _, option := range options {
- k, v := option.Header()
- k = strings.ToLower(k)
- if strings.HasPrefix(k, headerPrefix) {
- headers[k[len(headerPrefix):]] = v
- }
- }
-
- return headers
- }
-
- func (f *Fs) internalTestMetadata(t *testing.T, size string, uploadCutoff string, chunkSize string) {
- what := fmt.Sprintf("Size%s/UploadCutoff%s/ChunkSize%s", size, uploadCutoff, chunkSize)
- t.Run(what, func(t *testing.T) {
- ctx := context.Background()
-
- ss := fs.SizeSuffix(0)
- err := ss.Set(size)
- require.NoError(t, err)
- original := random.String(int(ss))
-
- contents := fstest.Gz(t, original)
- mimeType := "text/html"
-
- if chunkSize != "" {
- ss := fs.SizeSuffix(0)
- err := ss.Set(chunkSize)
- require.NoError(t, err)
- _, err = f.SetUploadChunkSize(ss)
- require.NoError(t, err)
- }
-
- if uploadCutoff != "" {
- ss := fs.SizeSuffix(0)
- err := ss.Set(uploadCutoff)
- require.NoError(t, err)
- _, err = f.SetUploadCutoff(ss)
- require.NoError(t, err)
- }
-
- item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499Z"))
- btime := time.Now()
- metadata := fs.Metadata{
- // Just mtime for now - limit to milliseconds since x-bz-info-src_last_modified_millis can't support any
-
- "mtime": "2009-05-06T04:05:06.499Z",
- }
-
- // Need to specify HTTP options with the header prefix since they are passed as-is
- options := []fs.OpenOption{
- &fs.HTTPOption{Key: "X-Bz-Info-a", Value: "1"},
- &fs.HTTPOption{Key: "X-Bz-Info-b", Value: "2"},
- }
-
- obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, mimeType, metadata, options...)
- defer func() {
- assert.NoError(t, obj.Remove(ctx))
- }()
- o := obj.(*Object)
- gotMetadata, err := o.getMetaData(ctx)
- require.NoError(t, err)
-
- // X-Bz-Info-a & X-Bz-Info-b
- optMetadata := OpenOptionToMetaData(options)
- for k, v := range optMetadata {
- got := gotMetadata.Info[k]
- assert.Equal(t, v, got, k)
- }
-
- assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")
-
- // Modification time from the x-bz-info-src_last_modified_millis header
- var mtime api.Timestamp
- err = mtime.UnmarshalJSON([]byte(gotMetadata.Info[timeKey]))
- if err != nil {
- fs.Debugf(o, "Bad "+timeHeader+" header: %v", err)
- }
- assert.Equal(t, item.ModTime, time.Time(mtime), "Modification time")
-
- // Upload time
- gotBtime := time.Time(gotMetadata.UploadTimestamp)
- dt := gotBtime.Sub(btime)
- assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("btime more than 1 minute out want %v got %v delta %v", btime, gotBtime, dt))
-
- t.Run("GzipEncoding", func(t *testing.T) {
- // Test that the gzipped file we uploaded can be
- // downloaded
- checkDownload := func(wantContents string, wantSize int64, wantHash string) {
- gotContents := fstests.ReadObject(ctx, t, o, -1)
- assert.Equal(t, wantContents, gotContents)
- assert.Equal(t, wantSize, o.Size())
- gotHash, err := o.Hash(ctx, hash.SHA1)
- require.NoError(t, err)
- assert.Equal(t, wantHash, gotHash)
- }
-
- t.Run("NoDecompress", func(t *testing.T) {
- checkDownload(contents, int64(len(contents)), sha1Sum(t, contents))
- })
- })
- })
- }
-
- func (f *Fs) InternalTestMetadata(t *testing.T) {
- // 1 kB regular file
- f.internalTestMetadata(t, "1kiB", "", "")
-
- // 10 MiB large file
- f.internalTestMetadata(t, "10MiB", "6MiB", "6MiB")
- }
-
- func sha1Sum(t *testing.T, s string) string {
- hash := sha1.Sum([]byte(s))
- return fmt.Sprintf("%x", hash)
- }
-
- // This is adapted from the s3 equivalent.
- func (f *Fs) InternalTestVersions(t *testing.T) {
- ctx := context.Background()
-
- // Small pause to make the LastModified different since AWS
- // only seems to track them to 1 second granularity
- time.Sleep(2 * time.Second)
-
- // Create an object
- const dirName = "versions"
- const fileName = dirName + "/" + "test-versions.txt"
- contents := random.String(100)
- item := fstest.NewItem(fileName, contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
- obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
- defer func() {
- assert.NoError(t, obj.Remove(ctx))
- }()
- objMetadata, err := obj.(*Object).getMetaData(ctx)
- require.NoError(t, err)
-
- // Small pause
- time.Sleep(2 * time.Second)
-
- // Remove it
- assert.NoError(t, obj.Remove(ctx))
-
- // Small pause to make the LastModified different since AWS only seems to track them to 1 second granularity
- time.Sleep(2 * time.Second)
-
- // And create it with different size and contents
- newContents := random.String(101)
- newItem := fstest.NewItem(fileName, newContents, fstest.Time("2002-05-06T04:05:06.499999999Z"))
- newObj := fstests.PutTestContents(ctx, t, f, &newItem, newContents, true)
- newObjMetadata, err := newObj.(*Object).getMetaData(ctx)
- require.NoError(t, err)
-
- t.Run("Versions", func(t *testing.T) {
- // Set --b2-versions for this test
- f.opt.Versions = true
- defer func() {
- f.opt.Versions = false
- }()
-
- // Read the contents
- entries, err := f.List(ctx, dirName)
- require.NoError(t, err)
- tests := 0
- var fileNameVersion string
- for _, entry := range entries {
- t.Log(entry)
- remote := entry.Remote()
- if remote == fileName {
- t.Run("ReadCurrent", func(t *testing.T) {
- assert.Equal(t, newContents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1))
- })
- tests++
- } else if versionTime, p := version.Remove(remote); !versionTime.IsZero() && p == fileName {
- t.Run("ReadVersion", func(t *testing.T) {
- assert.Equal(t, contents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1))
- })
- assert.WithinDuration(t, time.Time(objMetadata.UploadTimestamp), versionTime, time.Second, "object time must be with 1 second of version time")
- fileNameVersion = remote
- tests++
- }
- }
- assert.Equal(t, 2, tests, "object missing from listing")
-
- // Check we can read the object with a version suffix
- t.Run("NewObject", func(t *testing.T) {
- o, err := f.NewObject(ctx, fileNameVersion)
- require.NoError(t, err)
- require.NotNil(t, o)
- assert.Equal(t, int64(100), o.Size(), o.Remote())
- })
-
- // Check we can make a NewFs from that object with a version suffix
- t.Run("NewFs", func(t *testing.T) {
- newPath := bucket.Join(fs.ConfigStringFull(f), fileNameVersion)
- // Make sure --b2-versions is set in the config of the new remote
- fs.Debugf(nil, "oldPath = %q", newPath)
- lastColon := strings.LastIndex(newPath, ":")
- require.True(t, lastColon >= 0)
- newPath = newPath[:lastColon] + ",versions" + newPath[lastColon:]
- fs.Debugf(nil, "newPath = %q", newPath)
- fNew, err := cache.Get(ctx, newPath)
- // This should return pointing to a file
- require.Equal(t, fs.ErrorIsFile, err)
- require.NotNil(t, fNew)
- // With the directory above
- assert.Equal(t, dirName, path.Base(fs.ConfigStringFull(fNew)))
- })
- })
-
- t.Run("VersionAt", func(t *testing.T) {
- // We set --b2-version-at for this test so make sure we reset it at the end
- defer func() {
- f.opt.VersionAt = fs.Time{}
- }()
-
- var (
- firstObjectTime = time.Time(objMetadata.UploadTimestamp)
- secondObjectTime = time.Time(newObjMetadata.UploadTimestamp)
- )
-
- for _, test := range []struct {
- what string
- at time.Time
- want []fstest.Item
- wantErr error
- wantSize int64
- }{
- {
- what: "Before",
- at: firstObjectTime.Add(-time.Second),
- want: fstests.InternalTestFiles,
- wantErr: fs.ErrorObjectNotFound,
- },
- {
- what: "AfterOne",
- at: firstObjectTime.Add(time.Second),
- want: append([]fstest.Item{item}, fstests.InternalTestFiles...),
- wantSize: 100,
- },
- {
- what: "AfterDelete",
- at: secondObjectTime.Add(-time.Second),
- want: fstests.InternalTestFiles,
- wantErr: fs.ErrorObjectNotFound,
- },
- {
- what: "AfterTwo",
- at: secondObjectTime.Add(time.Second),
- want: append([]fstest.Item{newItem}, fstests.InternalTestFiles...),
- wantSize: 101,
- },
- } {
- t.Run(test.what, func(t *testing.T) {
- f.opt.VersionAt = fs.Time(test.at)
- t.Run("List", func(t *testing.T) {
- fstest.CheckListing(t, f, test.want)
- })
-
- t.Run("NewObject", func(t *testing.T) {
- gotObj, gotErr := f.NewObject(ctx, fileName)
- assert.Equal(t, test.wantErr, gotErr)
- if gotErr == nil {
- assert.Equal(t, test.wantSize, gotObj.Size())
- }
- })
- })
- }
- })
-
- t.Run("Cleanup", func(t *testing.T) {
- t.Run("DryRun", func(t *testing.T) {
- f.opt.Versions = true
- defer func() {
- f.opt.Versions = false
- }()
- // Listing should be unchanged after dry run
- before := listAllFiles(ctx, t, f, dirName)
- ctx, ci := fs.AddConfig(ctx)
- ci.DryRun = true
- require.NoError(t, f.cleanUp(ctx, true, false, 0))
- after := listAllFiles(ctx, t, f, dirName)
- assert.Equal(t, before, after)
- })
-
- t.Run("RealThing", func(t *testing.T) {
- f.opt.Versions = true
- defer func() {
- f.opt.Versions = false
- }()
- // Listing should reflect current state after cleanup
- require.NoError(t, f.cleanUp(ctx, true, false, 0))
- items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
- fstest.CheckListing(t, f, items)
- })
- })
-
- // Purge gets tested later
- }
-
- func (f *Fs) InternalTestCleanupUnfinished(t *testing.T) {
- ctx := context.Background()
-
- // B2CleanupHidden tests cleaning up hidden files
- t.Run("CleanupUnfinished", func(t *testing.T) {
- dirName := "unfinished"
- fileCount := 5
- expectedFiles := []string{}
- for i := 1; i < fileCount; i++ {
- fileName := fmt.Sprintf("%s/unfinished-%d", dirName, i)
- expectedFiles = append(expectedFiles, fileName)
- obj := &Object{
- fs: f,
- remote: fileName,
- }
- objInfo := object.NewStaticObjectInfo(fileName, fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil)
- _, err := f.newLargeUpload(ctx, obj, nil, objInfo, f.opt.ChunkSize, false, nil)
- require.NoError(t, err)
- }
- checkListing(ctx, t, f, dirName, expectedFiles)
-
- t.Run("DryRun", func(t *testing.T) {
- // Listing should not change after dry run
- ctx, ci := fs.AddConfig(ctx)
- ci.DryRun = true
- require.NoError(t, f.cleanUp(ctx, false, true, 0))
- checkListing(ctx, t, f, dirName, expectedFiles)
- })
-
- t.Run("RealThing", func(t *testing.T) {
- // Listing should be empty after real cleanup
- require.NoError(t, f.cleanUp(ctx, false, true, 0))
- checkListing(ctx, t, f, dirName, []string{})
- })
- })
- }
-
- func listAllFiles(ctx context.Context, t *testing.T, f *Fs, dirName string) []string {
- bucket, directory := f.split(dirName)
- foundFiles := []string{}
- require.NoError(t, f.list(ctx, bucket, directory, "", false, true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
- if !isDirectory {
- foundFiles = append(foundFiles, object.Name)
- }
- return nil
- }))
- sort.Strings(foundFiles)
- return foundFiles
- }
-
- func checkListing(ctx context.Context, t *testing.T, f *Fs, dirName string, expectedFiles []string) {
- foundFiles := listAllFiles(ctx, t, f, dirName)
- sort.Strings(expectedFiles)
- assert.Equal(t, expectedFiles, foundFiles)
- }
-
- func (f *Fs) InternalTestLifecycleRules(t *testing.T) {
- ctx := context.Background()
-
- opt := map[string]string{}
-
- t.Run("InitState", func(t *testing.T) {
- // There should be no lifecycle rules at the outset
- lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
- lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
- require.NoError(t, err)
- assert.Equal(t, 0, len(lifecycleRules))
- })
-
- t.Run("DryRun", func(t *testing.T) {
- // There should still be no lifecycle rules after each dry run operation
- ctx, ci := fs.AddConfig(ctx)
- ci.DryRun = true
-
- opt["daysFromHidingToDeleting"] = "30"
- lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
- lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
- require.NoError(t, err)
- assert.Equal(t, 0, len(lifecycleRules))
-
- delete(opt, "daysFromHidingToDeleting")
- opt["daysFromUploadingToHiding"] = "40"
- lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
- lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
- require.NoError(t, err)
- assert.Equal(t, 0, len(lifecycleRules))
-
- opt["daysFromHidingToDeleting"] = "30"
- lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
- lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
- require.NoError(t, err)
- assert.Equal(t, 0, len(lifecycleRules))
- })
-
- t.Run("RealThing", func(t *testing.T) {
- opt["daysFromHidingToDeleting"] = "30"
- lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
- lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
- require.NoError(t, err)
- assert.Equal(t, 1, len(lifecycleRules))
- assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
-
- delete(opt, "daysFromHidingToDeleting")
- opt["daysFromUploadingToHiding"] = "40"
- lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
- lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
- require.NoError(t, err)
- assert.Equal(t, 1, len(lifecycleRules))
- assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
-
- opt["daysFromHidingToDeleting"] = "30"
- lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
- lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
- require.NoError(t, err)
- assert.Equal(t, 1, len(lifecycleRules))
- assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
- assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
- })
- }
-
- // -run TestIntegration/FsMkdir/FsPutFiles/Internal
- func (f *Fs) InternalTest(t *testing.T) {
- t.Run("Metadata", f.InternalTestMetadata)
- t.Run("Versions", f.InternalTestVersions)
- t.Run("CleanupUnfinished", f.InternalTestCleanupUnfinished)
- t.Run("LifecycleRules", f.InternalTestLifecycleRules)
- }
-
- var _ fstests.InternalTester = (*Fs)(nil)
@@ -28,12 +28,7 @@ func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}

- func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
- return f.setCopyCutoff(cs)
- }
-
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
- _ fstests.SetCopyCutoffer = (*Fs)(nil)
)
@@ -1,10 +1,11 @@
// Upload large files for b2
//
- // Docs - https://www.backblaze.com/docs/cloud-storage-large-files
+ // Docs - https://www.backblaze.com/b2/docs/large_files.html

package b2

import (
+ "bytes"
"context"
"crypto/sha1"
"encoding/hex"
@@ -20,7 +21,6 @@ import (
"github.com/rclone/rclone/fs/chunksize"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
- "github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/sync/errgroup"
)
@@ -78,31 +78,36 @@ type largeUpload struct {
wrap accounting.WrapFn // account parts being transferred
id string // ID of the file being uploaded
size int64 // total size
- parts int // calculated number of parts, if known
+ parts int64 // calculated number of parts, if known
- sha1smu sync.Mutex // mutex to protect sha1s
sha1s []string // slice of SHA1s for each part
uploadMu sync.Mutex // lock for upload variable
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
chunkSize int64 // chunk size to use
src *Object // if copying, object we are reading from
- info *api.FileInfo // final response with info about the object
}

// newLargeUpload starts an upload of object o from in with metadata in src
//
// If newInfo is set then metadata from that will be used instead of reading it from src
- func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File, options ...fs.OpenOption) (up *largeUpload, err error) {
+ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
size := src.Size()
- parts := 0
+ parts := int64(0)
+ sha1SliceSize := int64(maxParts)
chunkSize := defaultChunkSize
if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
} else {
- chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
+ chunkSize = chunksize.Calculator(src, maxParts, defaultChunkSize)
- parts = int(size / int64(chunkSize))
+ parts = size / int64(chunkSize)
if size%int64(chunkSize) != 0 {
parts++
}
+ sha1SliceSize = parts
+ }
+
+ opts := rest.Opts{
+ Method: "POST",
+ Path: "/b2_start_large_file",
}
bucket, bucketPath := o.split()
bucketID, err := f.getBucketID(ctx, bucket)
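To make the part arithmetic in newLargeUpload concrete: the number of parts is the size divided by the chunk size, rounded up, and the two variants above only differ in whether the result is an int or an int64. A minimal sketch with example numbers follows; the 96 MiB and 10 GiB figures are illustrative, not defaults taken from the code.

    package main

    import "fmt"

    // partsNeeded mirrors the rounding-up logic above: size/chunkSize,
    // plus one extra part when there is a remainder.
    func partsNeeded(size, chunkSize int64) int64 {
        parts := size / chunkSize
        if size%chunkSize != 0 {
            parts++
        }
        return parts
    }

    func main() {
        const chunkSize = 96 * 1024 * 1024   // example chunk size (96 MiB)
        const size = 10 * 1024 * 1024 * 1024 // example object size (10 GiB)
        fmt.Println(partsNeeded(size, chunkSize)) // 107: 106 full chunks plus a short final one
    }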
@@ -113,27 +118,12 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
|
|||||||
BucketID: bucketID,
|
BucketID: bucketID,
|
||||||
Name: f.opt.Enc.FromStandardPath(bucketPath),
|
Name: f.opt.Enc.FromStandardPath(bucketPath),
|
||||||
}
|
}
|
||||||
optionsToSend := make([]fs.OpenOption, 0, len(options))
|
|
||||||
if newInfo == nil {
|
if newInfo == nil {
|
||||||
modTime, err := o.getModTime(ctx, src, options)
|
modTime := src.ModTime(ctx)
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
request.ContentType = fs.MimeType(ctx, src)
|
request.ContentType = fs.MimeType(ctx, src)
|
||||||
request.Info = map[string]string{
|
request.Info = map[string]string{
|
||||||
timeKey: timeString(modTime),
|
timeKey: timeString(modTime),
|
||||||
}
|
}
|
||||||
// Custom upload headers - remove header prefix since they are sent in the body
|
|
||||||
for _, option := range options {
|
|
||||||
k, v := option.Header()
|
|
||||||
k = strings.ToLower(k)
|
|
||||||
if strings.HasPrefix(k, headerPrefix) {
|
|
||||||
request.Info[k[len(headerPrefix):]] = v
|
|
||||||
} else {
|
|
||||||
optionsToSend = append(optionsToSend, option)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Set the SHA1 if known
|
// Set the SHA1 if known
|
||||||
if !o.fs.opt.DisableCheckSum || doCopy {
|
if !o.fs.opt.DisableCheckSum || doCopy {
|
||||||
if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
|
if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
|
||||||
@@ -144,19 +134,6 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
|
|||||||
request.ContentType = newInfo.ContentType
|
request.ContentType = newInfo.ContentType
|
||||||
request.Info = newInfo.Info
|
request.Info = newInfo.Info
|
||||||
}
|
}
|
||||||
if o.fs.opt.SSECustomerKey != "" && o.fs.opt.SSECustomerKeyMD5 != "" {
|
|
||||||
request.ServerSideEncryption = &api.ServerSideEncryption{
|
|
||||||
Mode: "SSE-C",
|
|
||||||
Algorithm: o.fs.opt.SSECustomerAlgorithm,
|
|
||||||
CustomerKey: o.fs.opt.SSECustomerKeyBase64,
|
|
||||||
CustomerKeyMd5: o.fs.opt.SSECustomerKeyMD5,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "POST",
|
|
||||||
Path: "/b2_start_large_file",
|
|
||||||
Options: optionsToSend,
|
|
||||||
}
|
|
||||||
var response api.StartLargeFileResponse
|
var response api.StartLargeFileResponse
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
|
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
|
||||||
@@ -173,7 +150,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
|
|||||||
id: response.ID,
|
id: response.ID,
|
||||||
size: size,
|
size: size,
|
||||||
parts: parts,
|
parts: parts,
|
||||||
sha1s: make([]string, 0, 16),
|
sha1s: make([]string, sha1SliceSize),
|
||||||
chunkSize: int64(chunkSize),
|
chunkSize: int64(chunkSize),
|
||||||
}
|
}
|
||||||
// unwrap the accounting from the input, we use wrap to put it
|
// unwrap the accounting from the input, we use wrap to put it
|
||||||
@@ -192,26 +169,24 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
|
|||||||
// This should be returned with returnUploadURL when finished
|
// This should be returned with returnUploadURL when finished
|
||||||
func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
|
func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
|
||||||
up.uploadMu.Lock()
|
up.uploadMu.Lock()
|
||||||
if len(up.uploads) > 0 {
|
defer up.uploadMu.Unlock()
|
||||||
|
if len(up.uploads) == 0 {
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/b2_get_upload_part_url",
|
||||||
|
}
|
||||||
|
var request = api.GetUploadPartURLRequest{
|
||||||
|
ID: up.id,
|
||||||
|
}
|
||||||
|
err := up.f.pacer.Call(func() (bool, error) {
|
||||||
|
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
|
||||||
|
return up.f.shouldRetry(ctx, resp, err)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to get upload URL: %w", err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
upload, up.uploads = up.uploads[0], up.uploads[1:]
|
upload, up.uploads = up.uploads[0], up.uploads[1:]
|
||||||
up.uploadMu.Unlock()
|
|
||||||
return upload, nil
|
|
||||||
}
|
|
||||||
up.uploadMu.Unlock()
|
|
||||||
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "POST",
|
|
||||||
Path: "/b2_get_upload_part_url",
|
|
||||||
}
|
|
||||||
var request = api.GetUploadPartURLRequest{
|
|
||||||
ID: up.id,
|
|
||||||
}
|
|
||||||
err = up.f.pacer.Call(func() (bool, error) {
|
|
||||||
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
|
|
||||||
return up.f.shouldRetry(ctx, resp, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to get upload URL: %w", err)
|
|
||||||
}
|
}
|
||||||
return upload, nil
|
return upload, nil
|
||||||
}
|
}
|
||||||
@@ -226,39 +201,10 @@ func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
|
|||||||
up.uploadMu.Unlock()
|
up.uploadMu.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add an sha1 to the being built up sha1s
|
// Transfer a chunk
|
||||||
func (up *largeUpload) addSha1(chunkNumber int, sha1 string) {
|
func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
|
||||||
up.sha1smu.Lock()
|
err := up.f.pacer.Call(func() (bool, error) {
|
||||||
defer up.sha1smu.Unlock()
|
fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
|
||||||
if len(up.sha1s) < chunkNumber+1 {
|
|
||||||
up.sha1s = append(up.sha1s, make([]string, chunkNumber+1-len(up.sha1s))...)
|
|
||||||
}
|
|
||||||
up.sha1s[chunkNumber] = sha1
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
|
|
||||||
func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (size int64, err error) {
|
|
||||||
// Only account after the checksum reads have been done
|
|
||||||
if do, ok := reader.(pool.DelayAccountinger); ok {
|
|
||||||
// To figure out this number, do a transfer and if the accounted size is 0 or a
|
|
||||||
// multiple of what it should be, increase or decrease this number.
|
|
||||||
do.DelayAccounting(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = up.f.pacer.Call(func() (bool, error) {
|
|
||||||
// Discover the size by seeking to the end
|
|
||||||
size, err = reader.Seek(0, io.SeekEnd)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// rewind the reader on retry and after reading size
|
|
||||||
_, err = reader.Seek(0, io.SeekStart)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
fs.Debugf(up.o, "Sending chunk %d length %d", chunkNumber, size)
|
|
||||||
|
|
||||||
// Get upload URL
|
// Get upload URL
|
||||||
upload, err := up.getUploadURL(ctx)
|
upload, err := up.getUploadURL(ctx)
|
||||||
@@ -266,8 +212,8 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
|
|||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
in := newHashAppendingReader(reader, sha1.New())
|
in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
|
||||||
sizeWithHash := size + int64(in.AdditionalLength())
|
size := int64(len(body)) + int64(in.AdditionalLength())
|
||||||
|
|
||||||
// Authorization
|
// Authorization
|
||||||
//
|
//
|
||||||
@@ -297,16 +243,10 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
|
|||||||
Body: up.wrap(in),
|
Body: up.wrap(in),
|
||||||
ExtraHeaders: map[string]string{
|
ExtraHeaders: map[string]string{
|
||||||
"Authorization": upload.AuthorizationToken,
|
"Authorization": upload.AuthorizationToken,
|
||||||
"X-Bz-Part-Number": fmt.Sprintf("%d", chunkNumber+1),
|
"X-Bz-Part-Number": fmt.Sprintf("%d", part),
|
||||||
sha1Header: "hex_digits_at_end",
|
sha1Header: "hex_digits_at_end",
|
||||||
},
|
},
|
||||||
ContentLength: &sizeWithHash,
|
ContentLength: &size,
|
||||||
}
|
|
||||||
|
|
||||||
if up.o.fs.opt.SSECustomerKey != "" && up.o.fs.opt.SSECustomerKeyMD5 != "" {
|
|
||||||
opts.ExtraHeaders[sseAlgorithmHeader] = up.o.fs.opt.SSECustomerAlgorithm
|
|
||||||
opts.ExtraHeaders[sseKeyHeader] = up.o.fs.opt.SSECustomerKeyBase64
|
|
||||||
opts.ExtraHeaders[sseMd5Header] = up.o.fs.opt.SSECustomerKeyMD5
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var response api.UploadPartResponse
|
var response api.UploadPartResponse
|
||||||
@@ -314,7 +254,7 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
|
|||||||
resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
|
resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
|
||||||
retry, err := up.f.shouldRetry(ctx, resp, err)
|
retry, err := up.f.shouldRetry(ctx, resp, err)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", chunkNumber, retry, err, err)
|
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
|
||||||
}
|
}
|
||||||
// On retryable error clear PartUploadURL
|
// On retryable error clear PartUploadURL
|
||||||
if retry {
|
if retry {
|
||||||
@@ -322,50 +262,39 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
|
|||||||
upload = nil
|
upload = nil
|
||||||
}
|
}
|
||||||
up.returnUploadURL(upload)
|
up.returnUploadURL(upload)
|
||||||
up.addSha1(chunkNumber, in.HexSum())
|
up.sha1s[part-1] = in.HexSum()
|
||||||
return retry, err
|
return retry, err
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Debugf(up.o, "Error sending chunk %d: %v", chunkNumber, err)
|
fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
|
||||||
} else {
|
} else {
|
||||||
fs.Debugf(up.o, "Done sending chunk %d", chunkNumber)
|
fs.Debugf(up.o, "Done sending chunk %d", part)
|
||||||
}
|
}
|
||||||
return size, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copy a chunk
|
// Copy a chunk
|
||||||
func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64) error {
|
func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64) error {
|
||||||
err := up.f.pacer.Call(func() (bool, error) {
|
err := up.f.pacer.Call(func() (bool, error) {
|
||||||
fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize)
|
fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize)
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "POST",
|
Method: "POST",
|
||||||
Path: "/b2_copy_part",
|
Path: "/b2_copy_part",
|
||||||
}
|
}
|
||||||
offset := int64(part) * up.chunkSize // where we are in the source file
|
offset := (part - 1) * up.chunkSize // where we are in the source file
|
||||||
var request = api.CopyPartRequest{
|
var request = api.CopyPartRequest{
|
||||||
SourceID: up.src.id,
|
SourceID: up.src.id,
|
||||||
LargeFileID: up.id,
|
LargeFileID: up.id,
|
||||||
PartNumber: int64(part + 1),
|
PartNumber: part,
|
||||||
Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
|
Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
|
||||||
}
|
}
|
||||||
|
|
||||||
if up.o.fs.opt.SSECustomerKey != "" && up.o.fs.opt.SSECustomerKeyMD5 != "" {
|
|
||||||
serverSideEncryptionConfig := api.ServerSideEncryption{
|
|
||||||
Mode: "SSE-C",
|
|
||||||
Algorithm: up.o.fs.opt.SSECustomerAlgorithm,
|
|
||||||
CustomerKey: up.o.fs.opt.SSECustomerKeyBase64,
|
|
||||||
CustomerKeyMd5: up.o.fs.opt.SSECustomerKeyMD5,
|
|
||||||
}
|
|
||||||
request.SourceServerSideEncryption = &serverSideEncryptionConfig
|
|
||||||
request.DestinationServerSideEncryption = &serverSideEncryptionConfig
|
|
||||||
}
|
|
||||||
var response api.UploadPartResponse
|
var response api.UploadPartResponse
|
||||||
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
|
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
|
||||||
retry, err := up.f.shouldRetry(ctx, resp, err)
|
retry, err := up.f.shouldRetry(ctx, resp, err)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err)
|
fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err)
|
||||||
}
|
}
|
||||||
up.addSha1(part, response.SHA1)
|
up.sha1s[part-1] = response.SHA1
|
||||||
return retry, err
|
return retry, err
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -376,8 +305,8 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Close closes off the large upload
|
// finish closes off the large upload
|
||||||
func (up *largeUpload) Close(ctx context.Context) error {
|
func (up *largeUpload) finish(ctx context.Context) error {
|
||||||
fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts)
|
fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts)
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "POST",
|
Method: "POST",
|
||||||
@@ -395,12 +324,11 @@ func (up *largeUpload) Close(ctx context.Context) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
up.info = &response
|
return up.o.decodeMetaDataFileInfo(&response)
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Abort aborts the large upload
|
// cancel aborts the large upload
|
||||||
func (up *largeUpload) Abort(ctx context.Context) error {
|
func (up *largeUpload) cancel(ctx context.Context) error {
|
||||||
fs.Debugf(up.o, "Cancelling large file %s", up.what)
|
fs.Debugf(up.o, "Cancelling large file %s", up.what)
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "POST",
|
Method: "POST",
|
||||||
@@ -425,102 +353,128 @@ func (up *largeUpload) Abort(ctx context.Context) error {
|
|||||||
// reaches EOF.
|
// reaches EOF.
|
||||||
//
|
//
|
||||||
// Note that initialUploadBlock must be returned to f.putBuf()
|
// Note that initialUploadBlock must be returned to f.putBuf()
|
||||||
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock *pool.RW) (err error) {
|
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
|
||||||
defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
|
defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
|
||||||
fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
|
fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
|
||||||
var (
|
var (
|
||||||
g, gCtx = errgroup.WithContext(ctx)
|
g, gCtx = errgroup.WithContext(ctx)
|
||||||
hasMoreParts = true
|
hasMoreParts = true
|
||||||
)
|
)
|
||||||
up.size = initialUploadBlock.Size()
|
up.size = int64(len(initialUploadBlock))
|
||||||
up.parts = 0
|
g.Go(func() error {
|
||||||
for part := 0; hasMoreParts; part++ {
|
for part := int64(1); hasMoreParts; part++ {
|
||||||
// Get a block of memory from the pool and token which limits concurrency.
|
// Get a block of memory from the pool and token which limits concurrency.
|
||||||
var rw *pool.RW
|
var buf []byte
|
||||||
if part == 0 {
|
if part == 1 {
|
||||||
rw = initialUploadBlock
|
buf = initialUploadBlock
|
||||||
} else {
|
} else {
|
||||||
rw = up.f.getRW(false)
|
buf = up.f.getBuf(false)
|
||||||
}
|
|
||||||
|
|
||||||
// Fail fast, in case an errgroup managed function returns an error
|
|
||||||
// gCtx is cancelled. There is no point in uploading all the other parts.
|
|
||||||
if gCtx.Err() != nil {
|
|
||||||
up.f.putRW(rw)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read the chunk
|
|
||||||
var n int64
|
|
||||||
if part == 0 {
|
|
||||||
n = rw.Size()
|
|
||||||
} else {
|
|
||||||
n, err = io.CopyN(rw, up.in, up.chunkSize)
|
|
||||||
if err == io.EOF {
|
|
||||||
if n == 0 {
|
|
||||||
fs.Debugf(up.o, "Not sending empty chunk after EOF - ending.")
|
|
||||||
up.f.putRW(rw)
|
|
||||||
break
|
|
||||||
} else {
|
|
||||||
fs.Debugf(up.o, "Read less than a full chunk %d, making this the last one.", n)
|
|
||||||
}
|
|
||||||
hasMoreParts = false
|
|
||||||
} else if err != nil {
|
|
||||||
// other kinds of errors indicate failure
|
|
||||||
up.f.putRW(rw)
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
// Keep stats up to date
|
// Fail fast, in case an errgroup managed function returns an error
|
||||||
up.parts += 1
|
// gCtx is cancelled. There is no point in uploading all the other parts.
|
||||||
up.size += n
|
if gCtx.Err() != nil {
|
||||||
if part > maxParts {
|
up.f.putBuf(buf, false)
|
||||||
up.f.putRW(rw)
|
return nil
|
||||||
return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
|
}
|
||||||
}
|
|
||||||
|
|
||||||
part := part // for the closure
|
// Read the chunk
|
||||||
g.Go(func() (err error) {
|
var n int
|
||||||
defer up.f.putRW(rw)
|
if part == 1 {
|
||||||
_, err = up.WriteChunk(gCtx, part, rw)
|
n = len(buf)
|
||||||
return err
|
} else {
|
||||||
})
|
n, err = io.ReadFull(up.in, buf)
|
||||||
}
|
if err == io.ErrUnexpectedEOF {
|
||||||
|
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
|
||||||
|
buf = buf[:n]
|
||||||
|
hasMoreParts = false
|
||||||
|
} else if err == io.EOF {
|
||||||
|
fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
|
||||||
|
+					up.f.putBuf(buf, false)
+					return nil
+				} else if err != nil {
+					// other kinds of errors indicate failure
+					up.f.putBuf(buf, false)
+					return err
+				}
+			}
+
+			// Keep stats up to date
+			up.parts = part
+			up.size += int64(n)
+			if part > maxParts {
+				up.f.putBuf(buf, false)
+				return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
+			}
+
+			part := part // for the closure
+			g.Go(func() (err error) {
+				defer up.f.putBuf(buf, false)
+				return up.transferChunk(gCtx, part, buf)
+			})
+		}
+		return nil
+	})
 	err = g.Wait()
 	if err != nil {
 		return err
 	}
-	return up.Close(ctx)
+	up.sha1s = up.sha1s[:up.parts]
+	return up.finish(ctx)
 }
 
-// Copy the chunks from the source to the destination
-func (up *largeUpload) Copy(ctx context.Context) (err error) {
-	defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
+// Upload uploads the chunks from the input
+func (up *largeUpload) Upload(ctx context.Context) (err error) {
+	defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
 	fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
 	var (
 		g, gCtx   = errgroup.WithContext(ctx)
 		remaining = up.size
 	)
-	g.SetLimit(up.f.opt.UploadConcurrency)
-	for part := range up.parts {
-		// Fail fast, in case an errgroup managed function returns an error
-		// gCtx is cancelled. There is no point in copying all the other parts.
-		if gCtx.Err() != nil {
-			break
-		}
-		reqSize := min(remaining, up.chunkSize)
-
-		part := part // for the closure
-		g.Go(func() (err error) {
-			return up.copyChunk(gCtx, part, reqSize)
-		})
-		remaining -= reqSize
-	}
+	g.Go(func() error {
+		for part := int64(1); part <= up.parts; part++ {
+			// Get a block of memory from the pool and token which limits concurrency.
+			buf := up.f.getBuf(up.doCopy)
+
+			// Fail fast, in case an errgroup managed function returns an error
+			// gCtx is cancelled. There is no point in uploading all the other parts.
+			if gCtx.Err() != nil {
+				up.f.putBuf(buf, up.doCopy)
+				return nil
+			}
+
+			reqSize := remaining
+			if reqSize >= up.chunkSize {
+				reqSize = up.chunkSize
+			}
+
+			if !up.doCopy {
+				// Read the chunk
+				buf = buf[:reqSize]
+				_, err = io.ReadFull(up.in, buf)
+				if err != nil {
+					up.f.putBuf(buf, up.doCopy)
+					return err
+				}
+			}
+
+			part := part // for the closure
+			g.Go(func() (err error) {
+				defer up.f.putBuf(buf, up.doCopy)
+				if !up.doCopy {
+					err = up.transferChunk(gCtx, part, buf)
+				} else {
+					err = up.copyChunk(gCtx, part, reqSize)
+				}
+				return err
+			})
+			remaining -= reqSize
+		}
+		return nil
+	})
 	err = g.Wait()
 	if err != nil {
 		return err
 	}
-	return up.Close(ctx)
+	return up.finish(ctx)
 }
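Editorial note: both sides of the b2 chunk-upload diff above use the same pattern of fanning chunks out through a bounded errgroup. The following is a minimal, self-contained sketch of that pattern, not rclone code; uploadChunk, uploadAll and all sizes are hypothetical stand-ins.

// Sketch only: bounded-concurrency chunk fan-out with golang.org/x/sync/errgroup.
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// uploadChunk stands in for transferChunk/copyChunk in the diff above.
func uploadChunk(ctx context.Context, part int, size int64) error {
	fmt.Printf("part %d: %d bytes\n", part, size)
	return nil
}

func uploadAll(ctx context.Context, total, chunkSize int64, concurrency int) error {
	g, gCtx := errgroup.WithContext(ctx)
	g.SetLimit(concurrency) // at most `concurrency` chunks in flight at once
	remaining := total
	for part := 0; remaining > 0; part++ {
		if gCtx.Err() != nil {
			break // fail fast once any chunk has errored
		}
		reqSize := remaining
		if reqSize > chunkSize {
			reqSize = chunkSize
		}
		part, reqSize := part, reqSize // capture loop variables for the closure
		g.Go(func() error { return uploadChunk(gCtx, part, reqSize) })
		remaining -= reqSize
	}
	return g.Wait()
}

func main() {
	_ = uploadAll(context.Background(), 25<<20, 8<<20, 4)
}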
@@ -14,7 +14,7 @@ const (
 	timeFormat = `"` + time.RFC3339 + `"`
 )
 
-// Time represents date and time information for the
+// Time represents represents date and time information for the
 // box API, by using RFC3339
 type Time time.Time
 
@@ -52,7 +52,7 @@ func (e *Error) Error() string {
 		out += ": " + e.Message
 	}
 	if e.ContextInfo != nil {
-		out += fmt.Sprintf(" (%s)", string(e.ContextInfo))
+		out += fmt.Sprintf(" (%+v)", e.ContextInfo)
 	}
 	return out
 }
@@ -63,7 +63,7 @@ var _ error = (*Error)(nil)
 // ItemFields are the fields needed for FileInfo
 var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by"
 
-// Types of things in Item/ItemMini
+// Types of things in Item
 const (
 	ItemTypeFolder = "folder"
 	ItemTypeFile   = "file"
@@ -72,31 +72,20 @@ const (
 	ItemStatusDeleted = "deleted"
 )
 
-// ItemMini is a subset of the elements in a full Item returned by some API calls
-type ItemMini struct {
-	Type       string `json:"type"`
-	ID         string `json:"id"`
-	SequenceID int64  `json:"sequence_id,string"`
-	Etag       string `json:"etag"`
-	SHA1       string `json:"sha1"`
-	Name       string `json:"name"`
-}
-
 // Item describes a folder or a file as returned by Get Folder Items and others
 type Item struct {
 	Type              string  `json:"type"`
 	ID                string  `json:"id"`
-	SequenceID        int64   `json:"sequence_id,string"`
+	SequenceID        string  `json:"sequence_id"`
 	Etag              string  `json:"etag"`
 	SHA1              string  `json:"sha1"`
 	Name              string  `json:"name"`
 	Size              float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
 	CreatedAt         Time    `json:"created_at"`
 	ModifiedAt        Time    `json:"modified_at"`
 	ContentCreatedAt  Time    `json:"content_created_at"`
 	ContentModifiedAt Time    `json:"content_modified_at"`
 	ItemStatus        string  `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
-	Parent            ItemMini `json:"parent"`
 	SharedLink        struct {
 		URL    string `json:"url,omitempty"`
 		Access string `json:"access,omitempty"`
@@ -125,21 +114,10 @@ type FolderItems struct {
 	Offset     int     `json:"offset"`
 	Limit      int     `json:"limit"`
 	NextMarker *string `json:"next_marker,omitempty"`
-	// There is some confusion about how this is actually
-	// returned. The []struct has worked for many years, but in
-	// https://github.com/rclone/rclone/issues/8776 box was
-	// returning it returned not as a list. We don't actually use
-	// this so comment it out.
-	//
-	// Order struct {
-	// 	By        string `json:"by"`
-	// 	Direction string `json:"direction"`
-	// } `json:"order"`
-	//
-	// Order []struct {
-	// 	By        string `json:"by"`
-	// 	Direction string `json:"direction"`
-	// } `json:"order"`
+	Order      []struct {
+		By        string `json:"by"`
+		Direction string `json:"direction"`
+	} `json:"order"`
 }
 
 // Parent defined the ID of the parent directory
@@ -178,7 +156,19 @@ type PreUploadCheckResponse struct {
 // PreUploadCheckConflict is returned in the ContextInfo error field
 // from PreUploadCheck when the error code is "item_name_in_use"
 type PreUploadCheckConflict struct {
-	Conflicts ItemMini `json:"conflicts"`
+	Conflicts struct {
+		Type        string `json:"type"`
+		ID          string `json:"id"`
+		FileVersion struct {
+			Type string `json:"type"`
+			ID   string `json:"id"`
+			Sha1 string `json:"sha1"`
+		} `json:"file_version"`
+		SequenceID string `json:"sequence_id"`
+		Etag       string `json:"etag"`
+		Sha1       string `json:"sha1"`
+		Name       string `json:"name"`
+	} `json:"conflicts"`
 }
 
 // UpdateFileModTime is used in Update File Info
@@ -282,39 +272,12 @@ type User struct {
 	ModifiedAt    time.Time `json:"modified_at"`
 	Language      string    `json:"language"`
 	Timezone      string    `json:"timezone"`
-	SpaceAmount   float64   `json:"space_amount"`
-	SpaceUsed     float64   `json:"space_used"`
-	MaxUploadSize float64   `json:"max_upload_size"`
+	SpaceAmount   int64     `json:"space_amount"`
+	SpaceUsed     int64     `json:"space_used"`
+	MaxUploadSize int64     `json:"max_upload_size"`
 	Status        string    `json:"status"`
 	JobTitle      string    `json:"job_title"`
 	Phone         string    `json:"phone"`
 	Address       string    `json:"address"`
 	AvatarURL     string    `json:"avatar_url"`
 }
-
-// FileTreeChangeEventTypes are the events that can require cache invalidation
-var FileTreeChangeEventTypes = map[string]struct{}{
-	"ITEM_COPY":                 {},
-	"ITEM_CREATE":               {},
-	"ITEM_MAKE_CURRENT_VERSION": {},
-	"ITEM_MODIFY":               {},
-	"ITEM_MOVE":                 {},
-	"ITEM_RENAME":               {},
-	"ITEM_TRASH":                {},
-	"ITEM_UNDELETE_VIA_TRASH":   {},
-	"ITEM_UPLOAD":               {},
-}
-
-// Event is an array element in the response returned from /events
-type Event struct {
-	EventType string `json:"event_type"`
-	EventID   string `json:"event_id"`
-	Source    Item   `json:"source"`
-}
-
-// Events is returned from /events
-type Events struct {
-	ChunkSize          int64   `json:"chunk_size"`
-	Entries            []Event `json:"entries"`
-	NextStreamPosition int64   `json:"next_stream_position"`
-}
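Editorial note: the SequenceID field removed above is declared as int64 with the `json:"sequence_id,string"` tag, which is what lets it accept Box's string-encoded numbers. A small standalone sketch (not part of the diff; the item type and sample JSON are invented for illustration) of how that tag option behaves:

package main

import (
	"encoding/json"
	"fmt"
)

type item struct {
	ID         string `json:"id"`
	SequenceID int64  `json:"sequence_id,string"` // decodes a quoted number into an int64
}

func main() {
	var it item
	// Box-style payload: sequence_id arrives as a JSON string.
	if err := json.Unmarshal([]byte(`{"id":"123","sequence_id":"7"}`), &it); err != nil {
		panic(err)
	}
	fmt.Println(it.SequenceID + 1) // 8 - it is a real integer after decoding

	out, _ := json.Marshal(it) // re-encodes SequenceID back as a quoted string
	fmt.Println(string(out))
}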
@@ -17,9 +17,9 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"io/ioutil"
 	"net/http"
 	"net/url"
-	"os"
 	"path"
 	"strconv"
 	"strings"
@@ -27,7 +27,6 @@ import (
 	"sync/atomic"
 	"time"
 
-	"github.com/golang-jwt/jwt/v4"
 	"github.com/rclone/rclone/backend/box/api"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
@@ -37,16 +36,16 @@ import (
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/fs/list"
 	"github.com/rclone/rclone/lib/dircache"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/env"
 	"github.com/rclone/rclone/lib/jwtutil"
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
-	"github.com/rclone/rclone/lib/random"
 	"github.com/rclone/rclone/lib/rest"
 	"github.com/youmark/pkcs8"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/jws"
 )
 
 const (
@@ -65,21 +64,18 @@ const (
 // Globals
 var (
 	// Description of how to auth for this app
-	oauthConfig = &oauthutil.Config{
+	oauthConfig = &oauth2.Config{
 		Scopes: nil,
-		AuthURL:  "https://app.box.com/api/oauth2/authorize",
-		TokenURL: "https://app.box.com/api/oauth2/token",
+		Endpoint: oauth2.Endpoint{
+			AuthURL:  "https://app.box.com/api/oauth2/authorize",
+			TokenURL: "https://app.box.com/api/oauth2/token",
+		},
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectURL,
 	}
 )
 
-type boxCustomClaims struct {
-	jwt.StandardClaims
-	BoxSubType string `json:"box_sub_type,omitempty"`
-}
-
 // Register with Fs
 func init() {
 	fs.Register(&fs.RegInfo{
@@ -106,18 +102,16 @@ func init() {
 			return nil, nil
 		},
 		Options: append(oauthutil.SharedOptions, []fs.Option{{
 			Name:     "root_folder_id",
 			Help:     "Fill in for rclone to use a non root folder as its starting point.",
 			Default:  "0",
 			Advanced: true,
-			Sensitive: true,
 		}, {
 			Name: "box_config_file",
 			Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
 		}, {
 			Name: "access_token",
 			Help: "Box App Primary Access Token\n\nLeave blank normally.",
-			Sensitive: true,
 		}, {
 			Name:    "box_sub_type",
 			Default: "user",
@@ -148,23 +142,6 @@ func init() {
 			Default:  "",
 			Help:     "Only show items owned by the login (email address) passed in.",
 			Advanced: true,
-		}, {
-			Name:    "impersonate",
-			Default: "",
-			Help: `Impersonate this user ID when using a service account.
-
-Setting this flag allows rclone, when using a JWT service account, to
-act on behalf of another user by setting the as-user header.
-
-The user ID is the Box identifier for a user. User IDs can be found for
-any user via the GET /users endpoint, which is only available to
-admins, or by calling the GET /users/me endpoint with an authenticated
-user session.
-
-See: https://developer.box.com/guides/authentication/jwt/as-user/
-`,
-			Advanced:  true,
-			Sensitive: true,
 		}, {
 			Name: config.ConfigEncoding,
 			Help: config.ConfigEncodingHelp,
@@ -201,12 +178,12 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
 	signingHeaders := getSigningHeaders(boxConfig)
 	queryParams := getQueryParams(boxConfig)
 	client := fshttp.NewClient(ctx)
-	err = jwtutil.Config("box", name, tokenURL, *claims, signingHeaders, queryParams, privateKey, m, client)
+	err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
 	return err
 }
 
 func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
-	file, err := os.ReadFile(configFile)
+	file, err := ioutil.ReadFile(configFile)
 	if err != nil {
 		return nil, fmt.Errorf("box: failed to read Box config: %w", err)
 	}
@@ -217,31 +194,34 @@ func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
 	return boxConfig, nil
 }
 
-func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomClaims, err error) {
+func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
 	val, err := jwtutil.RandomHex(20)
 	if err != nil {
 		return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err)
 	}
 
-	claims = &boxCustomClaims{
-		//lint:ignore SA1019 since we need to use jwt.StandardClaims even if deprecated in jwt-go v4 until a more permanent solution is ready in time before jwt-go v5 where it is removed entirely
-		//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1019
-		StandardClaims: jwt.StandardClaims{
-			Id:        val,
-			Issuer:    boxConfig.BoxAppSettings.ClientID,
-			Subject:   boxConfig.EnterpriseID,
-			Audience:  tokenURL,
-			ExpiresAt: time.Now().Add(time.Second * 45).Unix(),
+	claims = &jws.ClaimSet{
+		Iss: boxConfig.BoxAppSettings.ClientID,
+		Sub: boxConfig.EnterpriseID,
+		Aud: tokenURL,
+		Exp: time.Now().Add(time.Second * 45).Unix(),
+		PrivateClaims: map[string]interface{}{
+			"box_sub_type": boxSubType,
+			"aud":          tokenURL,
+			"jti":          val,
 		},
-		BoxSubType: boxSubType,
 	}
 
 	return claims, nil
 }
 
-func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]any {
-	signingHeaders := map[string]any{
-		"kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
+func getSigningHeaders(boxConfig *api.ConfigJSON) *jws.Header {
+	signingHeaders := &jws.Header{
+		Algorithm: "RS256",
+		Typ:       "JWT",
+		KeyID:     boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
 	}
 
 	return signingHeaders
 }
 
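Editorial note: the hunk above swaps a claims struct built on jwt.StandardClaims (github.com/golang-jwt/jwt/v4) for the older golang.org/x/oauth2/jws ClaimSet. The following is a minimal, self-contained sketch of signing such claims as an RS256 JWT with golang-jwt v4; it is not rclone code, and every value (jti, client ID, enterprise ID, audience, kid) is a placeholder.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

// customClaims mirrors the shape of the removed boxCustomClaims: the standard
// claim set plus one private claim.
type customClaims struct {
	jwt.StandardClaims
	BoxSubType string `json:"box_sub_type,omitempty"`
}

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048) // stand-in for the app's configured private key
	if err != nil {
		panic(err)
	}
	claims := &customClaims{
		StandardClaims: jwt.StandardClaims{
			Id:        "random-jti",
			Issuer:    "client-id",
			Subject:   "enterprise-id",
			Audience:  "https://example.com/oauth2/token",
			ExpiresAt: time.Now().Add(45 * time.Second).Unix(), // short-lived, as in the diff
		},
		BoxSubType: "enterprise",
	}
	token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
	token.Header["kid"] = "public-key-id" // plays the role of the "kid" signing header above
	signed, err := token.SignedString(key)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(signed), "byte JWT produced")
}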
@@ -255,10 +235,8 @@ func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
|
func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
|
||||||
|
|
||||||
block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
|
block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
|
||||||
if block == nil {
|
|
||||||
return nil, errors.New("box: failed to PEM decode private key")
|
|
||||||
}
|
|
||||||
if len(rest) > 0 {
|
if len(rest) > 0 {
|
||||||
return nil, fmt.Errorf("box: extra data included in private key: %w", err)
|
return nil, fmt.Errorf("box: extra data included in private key: %w", err)
|
||||||
}
|
}
|
||||||
@@ -280,29 +258,19 @@ type Options struct {
|
|||||||
AccessToken string `config:"access_token"`
|
AccessToken string `config:"access_token"`
|
||||||
ListChunk int `config:"list_chunk"`
|
ListChunk int `config:"list_chunk"`
|
||||||
OwnedBy string `config:"owned_by"`
|
OwnedBy string `config:"owned_by"`
|
||||||
Impersonate string `config:"impersonate"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ItemMeta defines metadata we cache for each Item ID
|
|
||||||
type ItemMeta struct {
|
|
||||||
SequenceID int64 // the most recent event processed for this item
|
|
||||||
ParentID string // ID of the parent directory of this item
|
|
||||||
Name string // leaf name of this item
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fs represents a remote box
|
// Fs represents a remote box
|
||||||
type Fs struct {
|
type Fs struct {
|
||||||
name string // name of this remote
|
name string // name of this remote
|
||||||
root string // the path we are working on
|
root string // the path we are working on
|
||||||
opt Options // parsed options
|
opt Options // parsed options
|
||||||
features *fs.Features // optional features
|
features *fs.Features // optional features
|
||||||
srv *rest.Client // the connection to the server
|
srv *rest.Client // the connection to the one drive server
|
||||||
dirCache *dircache.DirCache // Map of directory path to directory id
|
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||||
pacer *fs.Pacer // pacer for API calls
|
pacer *fs.Pacer // pacer for API calls
|
||||||
tokenRenewer *oauthutil.Renew // renew the token on expiry
|
tokenRenewer *oauthutil.Renew // renew the token on expiry
|
||||||
uploadToken *pacer.TokenDispenser // control concurrency
|
uploadToken *pacer.TokenDispenser // control concurrency
|
||||||
itemMetaCacheMu *sync.Mutex // protects itemMetaCache
|
|
||||||
itemMetaCache map[string]ItemMeta // map of Item ID to selected metadata
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Object describes a box object
|
// Object describes a box object
|
||||||
@@ -381,7 +349,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
|
|||||||
|
|
||||||
// readMetaDataForPath reads the metadata from the path
|
// readMetaDataForPath reads the metadata from the path
|
||||||
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
|
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
|
||||||
// defer log.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
|
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
|
||||||
leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false)
|
leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == fs.ErrorDirNotFound {
|
if err == fs.ErrorDirNotFound {
|
||||||
@@ -390,30 +358,20 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Use preupload to find the ID
|
found, err := f.listAll(ctx, directoryID, false, true, true, func(item *api.Item) bool {
|
||||||
itemMini, err := f.preUploadCheck(ctx, leaf, directoryID, -1)
|
if strings.EqualFold(item.Name, leaf) {
|
||||||
if err != nil {
|
info = item
|
||||||
return nil, err
|
return true
|
||||||
}
|
}
|
||||||
if itemMini == nil {
|
return false
|
||||||
return nil, fs.ErrorObjectNotFound
|
|
||||||
}
|
|
||||||
|
|
||||||
// Now we have the ID we can look up the object proper
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "GET",
|
|
||||||
Path: "/files/" + itemMini.ID,
|
|
||||||
Parameters: fieldsValue(),
|
|
||||||
}
|
|
||||||
var item api.Item
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
resp, err := f.srv.CallJSON(ctx, &opts, nil, &item)
|
|
||||||
return shouldRetry(ctx, resp, err)
|
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return &item, nil
|
if !found {
|
||||||
|
return nil, fs.ErrorObjectNotFound
|
||||||
|
}
|
||||||
|
return info, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// errorHandler parses a non 2xx error response into an error
|
// errorHandler parses a non 2xx error response into an error
|
||||||
@@ -460,14 +418,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
|
|
||||||
ci := fs.GetConfig(ctx)
|
ci := fs.GetConfig(ctx)
|
||||||
f := &Fs{
|
f := &Fs{
|
||||||
name: name,
|
name: name,
|
||||||
root: root,
|
root: root,
|
||||||
opt: *opt,
|
opt: *opt,
|
||||||
srv: rest.NewClient(client).SetRoot(rootURL),
|
srv: rest.NewClient(client).SetRoot(rootURL),
|
||||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||||
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
|
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
|
||||||
itemMetaCacheMu: new(sync.Mutex),
|
|
||||||
itemMetaCache: make(map[string]ItemMeta),
|
|
||||||
}
|
}
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
CaseInsensitive: true,
|
CaseInsensitive: true,
|
||||||
@@ -480,11 +436,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken)
|
f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken)
|
||||||
}
|
}
|
||||||
|
|
||||||
// If using impersonate set an as-user header
|
|
||||||
if f.opt.Impersonate != "" {
|
|
||||||
f.srv.SetHeader("as-user", f.opt.Impersonate)
|
|
||||||
}
|
|
||||||
|
|
||||||
jsonFile, ok := m.Get("box_config_file")
|
jsonFile, ok := m.Get("box_config_file")
|
||||||
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
|
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
|
||||||
|
|
||||||
@@ -620,7 +571,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
|
|||||||
return shouldRetry(ctx, resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// fmt.Printf("...Error %v\n", err)
|
//fmt.Printf("...Error %v\n", err)
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
// fmt.Printf("...Id %q\n", *info.Id)
|
// fmt.Printf("...Id %q\n", *info.Id)
|
||||||
@@ -706,27 +657,9 @@ OUTER:
|
|||||||
// This should return ErrDirNotFound if the directory isn't
|
// This should return ErrDirNotFound if the directory isn't
|
||||||
// found.
|
// found.
|
||||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
return list.WithListP(ctx, dir, f)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListP lists the objects and directories of the Fs starting
|
|
||||||
// from dir non recursively into out.
|
|
||||||
//
|
|
||||||
// dir should be "" to start from the root, and should not
|
|
||||||
// have trailing slashes.
|
|
||||||
//
|
|
||||||
// This should return ErrDirNotFound if the directory isn't
|
|
||||||
// found.
|
|
||||||
//
|
|
||||||
// It should call callback for each tranche of entries read.
|
|
||||||
// These need not be returned in any particular order. If
|
|
||||||
// callback returns an error then the listing will stop
|
|
||||||
// immediately.
|
|
||||||
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
|
|
||||||
list := list.NewHelper(callback)
|
|
||||||
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
|
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return nil, err
|
||||||
}
|
}
|
||||||
var iErr error
|
var iErr error
|
||||||
_, err = f.listAll(ctx, directoryID, false, false, true, func(info *api.Item) bool {
|
_, err = f.listAll(ctx, directoryID, false, false, true, func(info *api.Item) bool {
|
||||||
@@ -736,49 +669,30 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) e
|
|||||||
f.dirCache.Put(remote, info.ID)
|
f.dirCache.Put(remote, info.ID)
|
||||||
d := fs.NewDir(remote, info.ModTime()).SetID(info.ID)
|
d := fs.NewDir(remote, info.ModTime()).SetID(info.ID)
|
||||||
// FIXME more info from dir?
|
// FIXME more info from dir?
|
||||||
err = list.Add(d)
|
entries = append(entries, d)
|
||||||
if err != nil {
|
|
||||||
iErr = err
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
} else if info.Type == api.ItemTypeFile {
|
} else if info.Type == api.ItemTypeFile {
|
||||||
o, err := f.newObjectWithInfo(ctx, remote, info)
|
o, err := f.newObjectWithInfo(ctx, remote, info)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
iErr = err
|
iErr = err
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
err = list.Add(o)
|
entries = append(entries, o)
|
||||||
if err != nil {
|
|
||||||
iErr = err
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Cache some metadata for this Item to help us process events later
|
|
||||||
// on. In particular, the box event API does not provide the old path
|
|
||||||
// of the Item when it is renamed/deleted/moved/etc.
|
|
||||||
f.itemMetaCacheMu.Lock()
|
|
||||||
cachedItemMeta, found := f.itemMetaCache[info.ID]
|
|
||||||
if !found || cachedItemMeta.SequenceID < info.SequenceID {
|
|
||||||
f.itemMetaCache[info.ID] = ItemMeta{SequenceID: info.SequenceID, ParentID: directoryID, Name: info.Name}
|
|
||||||
}
|
|
||||||
f.itemMetaCacheMu.Unlock()
|
|
||||||
|
|
||||||
return false
|
return false
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return nil, err
|
||||||
}
|
}
|
||||||
if iErr != nil {
|
if iErr != nil {
|
||||||
return iErr
|
return nil, iErr
|
||||||
}
|
}
|
||||||
return list.Flush()
|
return entries, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Creates from the parameters passed in a half finished Object which
|
// Creates from the parameters passed in a half finished Object which
|
||||||
// must have setMetaData called on it
|
// must have setMetaData called on it
|
||||||
//
|
//
|
||||||
// Returns the object, leaf, directoryID and error.
|
// Returns the object, leaf, directoryID and error
|
||||||
//
|
//
|
||||||
// Used to create new objects
|
// Used to create new objects
|
||||||
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
|
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
|
||||||
@@ -799,7 +713,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
|
|||||||
//
|
//
|
||||||
// It returns "", nil if the file is good to go
|
// It returns "", nil if the file is good to go
|
||||||
// It returns "ID", nil if the file must be updated
|
// It returns "ID", nil if the file must be updated
|
||||||
func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (item *api.ItemMini, err error) {
|
func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (ID string, err error) {
|
||||||
check := api.PreUploadCheck{
|
check := api.PreUploadCheck{
|
||||||
Name: f.opt.Enc.FromStandardName(leaf),
|
Name: f.opt.Enc.FromStandardName(leaf),
|
||||||
Parent: api.Parent{
|
Parent: api.Parent{
|
||||||
@@ -824,21 +738,21 @@ func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size
|
|||||||
var conflict api.PreUploadCheckConflict
|
var conflict api.PreUploadCheckConflict
|
||||||
err = json.Unmarshal(apiErr.ContextInfo, &conflict)
|
err = json.Unmarshal(apiErr.ContextInfo, &conflict)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("pre-upload check: JSON decode failed: %w", err)
|
return "", fmt.Errorf("pre-upload check: JSON decode failed: %w", err)
|
||||||
}
|
}
|
||||||
if conflict.Conflicts.Type != api.ItemTypeFile {
|
if conflict.Conflicts.Type != api.ItemTypeFile {
|
||||||
return nil, fs.ErrorIsDir
|
return "", fmt.Errorf("pre-upload check: can't overwrite non file with file: %w", err)
|
||||||
}
|
}
|
||||||
return &conflict.Conflicts, nil
|
return conflict.Conflicts.ID, nil
|
||||||
}
|
}
|
||||||
return nil, fmt.Errorf("pre-upload check: %w", err)
|
return "", fmt.Errorf("pre-upload check: %w", err)
|
||||||
}
|
}
|
||||||
return nil, nil
|
return "", nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Put the object
|
// Put the object
|
||||||
//
|
//
|
||||||
// Copy the reader in to the new object which is returned.
|
// Copy the reader in to the new object which is returned
|
||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
@@ -854,11 +768,11 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
|||||||
|
|
||||||
// Preflight check the upload, which returns the ID if the
|
// Preflight check the upload, which returns the ID if the
|
||||||
// object already exists
|
// object already exists
|
||||||
item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
|
ID, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if item == nil {
|
if ID == "" {
|
||||||
return f.PutUnchecked(ctx, in, src, options...)
|
return f.PutUnchecked(ctx, in, src, options...)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -866,7 +780,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
|||||||
o := &Object{
|
o := &Object{
|
||||||
fs: f,
|
fs: f,
|
||||||
remote: remote,
|
remote: remote,
|
||||||
id: item.ID,
|
id: ID,
|
||||||
}
|
}
|
||||||
return o, o.Update(ctx, in, src, options...)
|
return o, o.Update(ctx, in, src, options...)
|
||||||
}
|
}
|
||||||
@@ -878,9 +792,9 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
|
|||||||
|
|
||||||
// PutUnchecked the object into the container
|
// PutUnchecked the object into the container
|
||||||
//
|
//
|
||||||
// This will produce an error if the object already exists.
|
// This will produce an error if the object already exists
|
||||||
//
|
//
|
||||||
// Copy the reader in to the new object which is returned.
|
// Copy the reader in to the new object which is returned
|
||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
@@ -963,9 +877,9 @@ func (f *Fs) Precision() time.Duration {
|
|||||||
|
|
||||||
// Copy src to this remote using server-side copy operations.
|
// Copy src to this remote using server-side copy operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given.
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
// It returns the destination Object and a possible error.
|
// It returns the destination Object and a possible error
|
||||||
//
|
//
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
@@ -993,26 +907,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// check if dest already exists
|
|
||||||
item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if item != nil { // dest already exists, need to copy to temp name and then move
|
|
||||||
tempSuffix := "-rclone-copy-" + random.String(8)
|
|
||||||
fs.Debugf(remote, "dst already exists, copying to temp name %v", remote+tempSuffix)
|
|
||||||
tempObj, err := f.Copy(ctx, src, remote+tempSuffix)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
fs.Debugf(remote+tempSuffix, "moving to real name %v", remote)
|
|
||||||
err = f.deleteObject(ctx, item.ID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return f.Move(ctx, tempObj, remote)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Copy the object
|
// Copy the object
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "POST",
|
Method: "POST",
|
||||||
@@ -1101,9 +995,9 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
|||||||
|
|
||||||
// Move src to this remote using server-side move operations.
|
// Move src to this remote using server-side move operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given.
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
// It returns the destination Object and a possible error.
|
// It returns the destination Object and a possible error
|
||||||
//
|
//
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
@@ -1223,7 +1117,7 @@ func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
|
|||||||
// CleanUp empties the trash
|
// CleanUp empties the trash
|
||||||
func (f *Fs) CleanUp(ctx context.Context) (err error) {
|
func (f *Fs) CleanUp(ctx context.Context) (err error) {
|
||||||
var (
|
var (
|
||||||
deleteErrors atomic.Uint64
|
deleteErrors = int64(0)
|
||||||
concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers)
|
concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers)
|
||||||
wg sync.WaitGroup
|
wg sync.WaitGroup
|
||||||
)
|
)
|
||||||
@@ -1239,7 +1133,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
|
|||||||
err := f.deletePermanently(ctx, item.Type, item.ID)
|
err := f.deletePermanently(ctx, item.Type, item.ID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err)
|
fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err)
|
||||||
deleteErrors.Add(1)
|
atomic.AddInt64(&deleteErrors, 1)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
} else {
|
} else {
|
||||||
@@ -1248,279 +1142,12 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
|
|||||||
return false
|
return false
|
||||||
})
|
})
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
if deleteErrors.Load() != 0 {
|
if deleteErrors != 0 {
|
||||||
return fmt.Errorf("failed to delete %d trash items", deleteErrors.Load())
|
return fmt.Errorf("failed to delete %d trash items", deleteErrors)
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Shutdown shutdown the fs
|
|
||||||
func (f *Fs) Shutdown(ctx context.Context) error {
|
|
||||||
f.tokenRenewer.Shutdown()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ChangeNotify calls the passed function with a path that has had changes.
|
|
||||||
// If the implementation uses polling, it should adhere to the given interval.
|
|
||||||
//
|
|
||||||
// Automatically restarts itself in case of unexpected behavior of the remote.
|
|
||||||
//
|
|
||||||
// Close the returned channel to stop being notified.
|
|
||||||
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
|
|
||||||
go func() {
|
|
||||||
// get the `stream_position` early so all changes from now on get processed
|
|
||||||
streamPosition, err := f.changeNotifyStreamPosition(ctx)
|
|
||||||
if err != nil {
|
|
||||||
fs.Infof(f, "Failed to get StreamPosition: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// box can send duplicate Event IDs. Use this map to track and filter
|
|
||||||
// the ones we've already processed.
|
|
||||||
processedEventIDs := make(map[string]time.Time)
|
|
||||||
|
|
||||||
var ticker *time.Ticker
|
|
||||||
var tickerC <-chan time.Time
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case pollInterval, ok := <-pollIntervalChan:
|
|
||||||
if !ok {
|
|
||||||
if ticker != nil {
|
|
||||||
ticker.Stop()
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if ticker != nil {
|
|
||||||
ticker.Stop()
|
|
||||||
ticker, tickerC = nil, nil
|
|
||||||
}
|
|
||||||
if pollInterval != 0 {
|
|
||||||
ticker = time.NewTicker(pollInterval)
|
|
||||||
tickerC = ticker.C
|
|
||||||
}
|
|
||||||
case <-tickerC:
|
|
||||||
if streamPosition == "" {
|
|
||||||
streamPosition, err = f.changeNotifyStreamPosition(ctx)
|
|
||||||
if err != nil {
|
|
||||||
fs.Infof(f, "Failed to get StreamPosition: %s", err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Garbage collect EventIDs older than 1 minute
|
|
||||||
for eventID, timestamp := range processedEventIDs {
|
|
||||||
if time.Since(timestamp) > time.Minute {
|
|
||||||
delete(processedEventIDs, eventID)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
streamPosition, err = f.changeNotifyRunner(ctx, notifyFunc, streamPosition, processedEventIDs)
|
|
||||||
if err != nil {
|
|
||||||
fs.Infof(f, "Change notify listener failure: %s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) changeNotifyStreamPosition(ctx context.Context) (streamPosition string, err error) {
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "GET",
|
|
||||||
Path: "/events",
|
|
||||||
Parameters: fieldsValue(),
|
|
||||||
}
|
|
||||||
opts.Parameters.Set("stream_position", "now")
|
|
||||||
opts.Parameters.Set("stream_type", "changes")
|
|
||||||
|
|
||||||
var result api.Events
|
|
||||||
var resp *http.Response
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
|
||||||
return shouldRetry(ctx, resp, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
return strconv.FormatInt(result.NextStreamPosition, 10), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Attempts to construct the full path for an object, given the ID of its
|
|
||||||
// parent directory and the name of the object.
|
|
||||||
//
|
|
||||||
// Can return "" if the parentID is not currently in the directory cache.
|
|
||||||
func (f *Fs) getFullPath(parentID string, childName string) (fullPath string) {
|
|
||||||
fullPath = ""
|
|
||||||
name := f.opt.Enc.ToStandardName(childName)
|
|
||||||
if parentID != "" {
|
|
||||||
if parentDir, ok := f.dirCache.GetInv(parentID); ok {
|
|
||||||
if len(parentDir) > 0 {
|
|
||||||
fullPath = parentDir + "/" + name
|
|
||||||
} else {
|
|
||||||
fullPath = name
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// No parent, this object is at the root
|
|
||||||
fullPath = name
|
|
||||||
}
|
|
||||||
return fullPath
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), streamPosition string, processedEventIDs map[string]time.Time) (nextStreamPosition string, err error) {
|
|
||||||
nextStreamPosition = streamPosition
|
|
||||||
|
|
||||||
for {
|
|
||||||
// box only allows a max of 500 events
|
|
||||||
limit := min(f.opt.ListChunk, 500)
|
|
||||||
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "GET",
|
|
||||||
Path: "/events",
|
|
||||||
Parameters: fieldsValue(),
|
|
||||||
}
|
|
||||||
opts.Parameters.Set("stream_position", nextStreamPosition)
|
|
||||||
opts.Parameters.Set("stream_type", "changes")
|
|
||||||
opts.Parameters.Set("limit", strconv.Itoa(limit))
|
|
||||||
|
|
||||||
var result api.Events
|
|
||||||
var resp *http.Response
|
|
||||||
fs.Debugf(f, "Checking for changes on remote (next_stream_position: %q)", nextStreamPosition)
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
|
||||||
return shouldRetry(ctx, resp, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
if result.ChunkSize != int64(len(result.Entries)) {
|
|
||||||
return "", fmt.Errorf("invalid response to event request, chunk_size (%v) not equal to number of entries (%v)", result.ChunkSize, len(result.Entries))
|
|
||||||
}
|
|
||||||
|
|
||||||
nextStreamPosition = strconv.FormatInt(result.NextStreamPosition, 10)
|
|
||||||
if result.ChunkSize == 0 {
|
|
||||||
return nextStreamPosition, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type pathToClear struct {
|
|
||||||
path string
|
|
||||||
entryType fs.EntryType
|
|
||||||
}
|
|
||||||
var pathsToClear []pathToClear
|
|
||||||
newEventIDs := 0
|
|
||||||
for _, entry := range result.Entries {
|
|
||||||
eventDetails := fmt.Sprintf("[%q(%d)|%s|%s|%s|%s]", entry.Source.Name, entry.Source.SequenceID,
|
|
||||||
entry.Source.Type, entry.EventType, entry.Source.ID, entry.EventID)
|
|
||||||
|
|
||||||
if entry.EventID == "" {
|
|
||||||
fs.Debugf(f, "%s ignored due to missing EventID", eventDetails)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if _, ok := processedEventIDs[entry.EventID]; ok {
|
|
||||||
fs.Debugf(f, "%s ignored due to duplicate EventID", eventDetails)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
processedEventIDs[entry.EventID] = time.Now()
|
|
||||||
newEventIDs++
|
|
||||||
|
|
||||||
if entry.Source.ID == "" { // missing File or Folder ID
|
|
||||||
fs.Debugf(f, "%s ignored due to missing SourceID", eventDetails)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if entry.Source.Type != api.ItemTypeFile && entry.Source.Type != api.ItemTypeFolder { // event is not for a file or folder
|
|
||||||
fs.Debugf(f, "%s ignored due to unsupported SourceType", eventDetails)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Only interested in event types that result in a file tree change
|
|
||||||
if _, found := api.FileTreeChangeEventTypes[entry.EventType]; !found {
|
|
||||||
fs.Debugf(f, "%s ignored due to unsupported EventType", eventDetails)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
f.itemMetaCacheMu.Lock()
|
|
||||||
itemMeta, cachedItemMetaFound := f.itemMetaCache[entry.Source.ID]
|
|
||||||
if cachedItemMetaFound {
|
|
||||||
if itemMeta.SequenceID >= entry.Source.SequenceID {
|
|
||||||
// Item in the cache has the same or newer SequenceID than
|
|
||||||
// this event. Ignore this event, it must be old.
|
|
||||||
f.itemMetaCacheMu.Unlock()
|
|
||||||
fs.Debugf(f, "%s ignored due to old SequenceID (%q)", eventDetails, itemMeta.SequenceID)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// This event is newer. Delete its entry from the cache,
|
|
||||||
// we'll notify about its change below, then it's up to a
|
|
||||||
// future list operation to repopulate the cache.
|
|
||||||
delete(f.itemMetaCache, entry.Source.ID)
|
|
||||||
}
|
|
||||||
f.itemMetaCacheMu.Unlock()
|
|
||||||
|
|
||||||
entryType := fs.EntryDirectory
|
|
||||||
if entry.Source.Type == api.ItemTypeFile {
|
|
||||||
entryType = fs.EntryObject
|
|
||||||
}
|
|
||||||
|
|
||||||
// The box event only includes the new path for the object (e.g.
|
|
||||||
// the path after the object was moved). If there was an old path
|
|
||||||
// saved in our cache, it must be cleared.
|
|
||||||
if cachedItemMetaFound {
|
|
||||||
path := f.getFullPath(itemMeta.ParentID, itemMeta.Name)
|
|
||||||
if path != "" {
|
|
||||||
fs.Debugf(f, "%s added old path (%q) for notify", eventDetails, path)
|
|
||||||
pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
|
|
||||||
} else {
|
|
||||||
fs.Debugf(f, "%s old parent not cached", eventDetails)
|
|
||||||
}
|
|
||||||
|
|
||||||
// If this is a directory, also delete it from the dir cache.
|
|
||||||
// This will effectively invalidate the item metadata cache
|
|
||||||
// entries for all descendents of this directory, since we
|
|
||||||
// will no longer be able to construct a full path for them.
|
|
||||||
// This is exactly what we want, since we don't want to notify
|
|
||||||
// on the paths of these descendents if one of their ancestors
|
|
||||||
// has been renamed/deleted.
|
|
||||||
if entry.Source.Type == api.ItemTypeFolder {
|
|
||||||
f.dirCache.FlushDir(path)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the item is "active", then it is not trashed or deleted, so
|
|
||||||
// it potentially has a valid parent.
|
|
||||||
//
|
|
||||||
// Construct the new path of the object, based on the Parent ID
|
|
||||||
// and its name. If we get an empty result, it means we don't
|
|
||||||
// currently know about this object so notification is unnecessary.
|
|
||||||
if entry.Source.ItemStatus == api.ItemStatusActive {
|
|
||||||
path := f.getFullPath(entry.Source.Parent.ID, entry.Source.Name)
|
|
||||||
if path != "" {
|
|
||||||
fs.Debugf(f, "%s added new path (%q) for notify", eventDetails, path)
|
|
||||||
pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
|
|
||||||
} else {
|
|
||||||
fs.Debugf(f, "%s new parent not found", eventDetails)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// box can sometimes repeatedly return the same Event IDs within a
|
|
||||||
// short period of time. If it stops giving us new ones, treat it
|
|
||||||
// the same as if it returned us none at all.
|
|
||||||
if newEventIDs == 0 {
|
|
||||||
return nextStreamPosition, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
notifiedPaths := make(map[string]bool)
|
|
||||||
for _, p := range pathsToClear {
|
|
||||||
if _, ok := notifiedPaths[p.path]; ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
notifiedPaths[p.path] = true
|
|
||||||
notifyFunc(p.path, p.entryType)
|
|
||||||
}
|
|
||||||
fs.Debugf(f, "Received %v events, resulting in %v paths and %v notifications", len(result.Entries), len(pathsToClear), len(notifiedPaths))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DirCacheFlush resets the directory cache - used in testing as an
|
// DirCacheFlush resets the directory cache - used in testing as an
|
||||||
// optional interface
|
// optional interface
|
||||||
func (f *Fs) DirCacheFlush() {
|
func (f *Fs) DirCacheFlush() {
|
||||||
@@ -1608,6 +1235,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 
 // ModTime returns the modification time of the object
 //
+//
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
 func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -1718,9 +1346,9 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
 
 // Update the object with the contents of the io.Reader, modTime and size
 //
-// If existing is set then it updates the object rather than creating a new one.
+// If existing is set then it updates the object rather than creating a new one
 //
-// The new object may have been created if an error is returned.
+// The new object may have been created if an error is returned
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
 	if o.fs.tokenRenewer != nil {
 		o.fs.tokenRenewer.Start()
@@ -1768,8 +1396,6 @@ var (
 	_ fs.DirCacheFlusher = (*Fs)(nil)
 	_ fs.PublicLinker    = (*Fs)(nil)
 	_ fs.CleanUpper      = (*Fs)(nil)
-	_ fs.ListPer         = (*Fs)(nil)
-	_ fs.Shutdowner      = (*Fs)(nil)
 	_ fs.Object          = (*Object)(nil)
 	_ fs.IDer            = (*Object)(nil)
 )
@@ -105,7 +105,7 @@ func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api
 	const defaultDelay = 10
 	var tries int
 outer:
-	for tries = range maxTries {
+	for tries = 0; tries < maxTries; tries++ {
 		err = o.fs.pacer.Call(func() (bool, error) {
 			resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
 			if err != nil {
@@ -203,7 +203,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
 	errs := make(chan error, 1)
 	var wg sync.WaitGroup
 outer:
-	for part := range session.TotalParts {
+	for part := 0; part < session.TotalParts; part++ {
 		// Check any errors
 		select {
 		case err = <-errs:
@@ -211,7 +211,10 @@ outer:
 		default:
 		}
 
-		reqSize := min(remaining, chunkSize)
+		reqSize := remaining
+		if reqSize >= chunkSize {
+			reqSize = chunkSize
+		}
 
 		// Make a block of memory
 		buf := make([]byte, reqSize)
57 backend/cache/cache.go vendored
@@ -1,6 +1,6 @@
 //go:build !plan9 && !js
+// +build !plan9,!js
 
-// Package cache implements a virtual provider to cache existing remotes.
 package cache
 
 import (
@@ -29,7 +29,6 @@ import (
 	"github.com/rclone/rclone/fs/config/obscure"
 	"github.com/rclone/rclone/fs/fspath"
 	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/fs/list"
 	"github.com/rclone/rclone/fs/rc"
 	"github.com/rclone/rclone/fs/walk"
 	"github.com/rclone/rclone/lib/atexit"
@@ -76,19 +75,17 @@ func init() {
 			Name: "plex_url",
 			Help: "The URL of the Plex server.",
 		}, {
 			Name: "plex_username",
 			Help: "The username of the Plex user.",
-			Sensitive: true,
 		}, {
 			Name:       "plex_password",
 			Help:       "The password of the Plex user.",
 			IsPassword: true,
 		}, {
 			Name:     "plex_token",
 			Help:     "The plex token for authentication - auto set normally.",
 			Hide:     fs.OptionHideBoth,
 			Advanced: true,
-			Sensitive: true,
 		}, {
 			Name: "plex_insecure",
 			Help: "Skip all certificate verification when connecting to the Plex server.",
@@ -410,16 +407,18 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 			if err != nil {
 				return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
 			}
-		} else if opt.PlexPassword != "" && opt.PlexUsername != "" {
-			decPass, err := obscure.Reveal(opt.PlexPassword)
-			if err != nil {
-				decPass = opt.PlexPassword
-			}
-			f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
-				m.Set("plex_token", token)
-			})
-			if err != nil {
-				return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
+		} else {
+			if opt.PlexPassword != "" && opt.PlexUsername != "" {
+				decPass, err := obscure.Reveal(opt.PlexPassword)
+				if err != nil {
+					decPass = opt.PlexPassword
+				}
+				f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
+					m.Set("plex_token", token)
+				})
+				if err != nil {
+					return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
+				}
 			}
 		}
 	}
@@ -684,7 +683,7 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
 		start, end int64
 	}
 	parseChunks := func(ranges string) (crs []chunkRange, err error) {
-		for part := range strings.SplitSeq(ranges, ",") {
+		for _, part := range strings.Split(ranges, ",") {
 			var start, end int64 = 0, math.MaxInt64
 			switch ints := strings.Split(part, ":"); len(ints) {
 			case 1:
@@ -1038,7 +1037,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		}
 		fs.Debugf(dir, "list: remove entry: %v", entryRemote)
 	}
-	entries = nil //nolint:ineffassign
+	entries = nil
 
 	// and then iterate over the ones from source (temp Objects will override source ones)
 	var batchDirectories []*Directory
@@ -1087,13 +1086,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	return cachedEntries, nil
 }
 
-func (f *Fs) recurse(ctx context.Context, dir string, list *list.Helper) error {
+func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error {
 	entries, err := f.List(ctx, dir)
 	if err != nil {
 		return err
 	}
 
-	for i := range entries {
+	for i := 0; i < len(entries); i++ {
 		innerDir, ok := entries[i].(fs.Directory)
 		if ok {
 			err := f.recurse(ctx, innerDir.Remote(), list)
@@ -1139,7 +1138,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 	}
 
 	// if we're here, we're gonna do a standard recursive traversal and cache everything
-	list := list.NewHelper(callback)
+	list := walk.NewListRHelper(callback)
 	err = f.recurse(ctx, dir, list)
 	if err != nil {
 		return err
@@ -1429,7 +1428,7 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i
 	}()
 
 	// wait until both are done
-	for range 2 {
+	for c := 0; c < 2; c++ {
 		<-done
 	}
 }
@@ -1754,7 +1753,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 }
 
 // Stats returns stats about the cache storage
-func (f *Fs) Stats() (map[string]map[string]any, error) {
+func (f *Fs) Stats() (map[string]map[string]interface{}, error) {
 	return f.cache.Stats()
 }
 
@@ -1787,7 +1786,7 @@ func (f *Fs) CleanUpCache(ignoreLastTs bool) {
 	}
 }
 
-// StopBackgroundRunners will signal all the runners to stop their work
+// StopBackgroundRunners will signall all the runners to stop their work
 // can be triggered from a terminate signal or from testing between runs
 func (f *Fs) StopBackgroundRunners() {
 	f.cleanupChan <- false
@@ -1934,7 +1933,7 @@ var commandHelp = []fs.CommandHelp{
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (any, error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
|
||||||
switch name {
|
switch name {
|
||||||
case "stats":
|
case "stats":
|
||||||
return f.Stats()
|
return f.Stats()
|
||||||
|
|||||||
199 backend/cache/cache_internal_test.go vendored
@@ -1,4 +1,5 @@
 //go:build !plan9 && !js && !race
+// +build !plan9,!js,!race

 package cache_test

@@ -10,6 +11,8 @@ import (
 goflag "flag"
 "fmt"
 "io"
+"io/ioutil"
+"log"
 "math/rand"
 "os"
 "path"
@@ -28,11 +31,10 @@ import (
 "github.com/rclone/rclone/fs/config"
 "github.com/rclone/rclone/fs/config/configmap"
 "github.com/rclone/rclone/fs/object"
-"github.com/rclone/rclone/fs/operations"
 "github.com/rclone/rclone/fstest"
 "github.com/rclone/rclone/fstest/testy"
 "github.com/rclone/rclone/lib/random"
-"github.com/rclone/rclone/vfs/vfscommon"
+"github.com/rclone/rclone/vfs/vfsflags"
 "github.com/stretchr/testify/require"
 )

@@ -92,7 +94,7 @@ func TestMain(m *testing.M) {
 goflag.Parse()
 var rc int

-fs.Logf(nil, "Running with the following params: \n remote: %v", remoteName)
+log.Printf("Running with the following params: \n remote: %v", remoteName)
 runInstance = newRun()
 rc = m.Run()
 os.Exit(rc)
@@ -100,12 +102,14 @@ func TestMain(m *testing.M) {

 func TestInternalListRootAndInnerRemotes(t *testing.T) {
 id := fmt.Sprintf("tilrair%v", time.Now().Unix())
-rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 // Instantiate inner fs
 innerFolder := "inner"
 runInstance.mkdir(t, rootFs, innerFolder)
-rootFs2, _ := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil)
+rootFs2, boltDb2 := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil, nil)
+defer runInstance.cleanupFs(t, rootFs2, boltDb2)

 runInstance.writeObjectString(t, rootFs2, "one", "content")
 listRoot, err := runInstance.list(t, rootFs, "")
@@ -122,10 +126,10 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) {

 /* TODO: is this testing something?
 func TestInternalVfsCache(t *testing.T) {
-vfscommon.Opt.DirCacheTime = time.Second * 30
+vfsflags.Opt.DirCacheTime = time.Second * 30
 testSize := int64(524288000)

-vfscommon.Opt.CacheMode = vfs.CacheModeWrites
+vfsflags.Opt.CacheMode = vfs.CacheModeWrites
 id := "tiuufo"
 rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
 defer runInstance.cleanupFs(t, rootFs, boltDb)
@@ -163,7 +167,7 @@ func TestInternalVfsCache(t *testing.T) {
 li2 := [2]string{path.Join("test", "one"), path.Join("test", "second")}
 for _, r := range li2 {
 var err error
-ci, err := os.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
+ci, err := ioutil.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
 if err != nil || len(ci) == 0 {
 log.Printf("========== '%v' not in cache", r)
 } else {
@@ -222,7 +226,8 @@ func TestInternalVfsCache(t *testing.T) {

 func TestInternalObjWrapFsFound(t *testing.T) {
 id := fmt.Sprintf("tiowff%v", time.Now().Unix())
-rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 cfs, err := runInstance.getCacheFs(rootFs)
 require.NoError(t, err)
@@ -254,7 +259,8 @@ func TestInternalObjWrapFsFound(t *testing.T) {

 func TestInternalObjNotFound(t *testing.T) {
 id := fmt.Sprintf("tionf%v", time.Now().Unix())
-rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 obj, err := rootFs.NewObject(context.Background(), "404")
 require.Error(t, err)
@@ -264,7 +270,8 @@ func TestInternalObjNotFound(t *testing.T) {
 func TestInternalCachedWrittenContentMatches(t *testing.T) {
 testy.SkipUnreliable(t)
 id := fmt.Sprintf("ticwcm%v", time.Now().Unix())
-rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 cfs, err := runInstance.getCacheFs(rootFs)
 require.NoError(t, err)
@@ -291,7 +298,8 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
 t.Skip("Skip test on windows/386")
 }
 id := fmt.Sprintf("tidwcm%v", time.Now().Unix())
-rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 // write the object
 runInstance.writeRemoteString(t, rootFs, "one", "one content")
@@ -309,7 +317,8 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
 func TestInternalCachedUpdatedContentMatches(t *testing.T) {
 testy.SkipUnreliable(t)
 id := fmt.Sprintf("ticucm%v", time.Now().Unix())
-rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+defer runInstance.cleanupFs(t, rootFs, boltDb)
 var err error

 // create some rand test data
@@ -337,8 +346,9 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {

 func TestInternalWrappedWrittenContentMatches(t *testing.T) {
 id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
-vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
-rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
+vfsflags.Opt.DirCacheTime = time.Second
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
+defer runInstance.cleanupFs(t, rootFs, boltDb)
 if runInstance.rootIsCrypt {
 t.Skip("test skipped with crypt remote")
 }
@@ -360,15 +370,16 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
 require.NoError(t, err)
 require.Equal(t, int64(len(checkSample)), o.Size())

-for i := range checkSample {
+for i := 0; i < len(checkSample); i++ {
 require.Equal(t, testData[i], checkSample[i])
 }
 }

 func TestInternalLargeWrittenContentMatches(t *testing.T) {
 id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
-vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
-rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
+vfsflags.Opt.DirCacheTime = time.Second
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
+defer runInstance.cleanupFs(t, rootFs, boltDb)
 if runInstance.rootIsCrypt {
 t.Skip("test skipped with crypt remote")
 }
@@ -387,14 +398,15 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {

 readData, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
 require.NoError(t, err)
-for i := range readData {
+for i := 0; i < len(readData); i++ {
 require.Equalf(t, testData[i], readData[i], "at byte %v", i)
 }
 }

 func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 id := fmt.Sprintf("tiwfcns%v", time.Now().Unix())
-rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 cfs, err := runInstance.getCacheFs(rootFs)
 require.NoError(t, err)
@@ -407,7 +419,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 // update in the wrapped fs
 originalSize, err := runInstance.size(t, rootFs, "data.bin")
 require.NoError(t, err)
-fs.Logf(nil, "original size: %v", originalSize)
+log.Printf("original size: %v", originalSize)

 o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
 require.NoError(t, err)
@@ -416,7 +428,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 if runInstance.rootIsCrypt {
 data2, err = base64.StdEncoding.DecodeString(cryptedText3Base64)
 require.NoError(t, err)
-expectedSize++ // FIXME newline gets in, likely test data issue
+expectedSize = expectedSize + 1 // FIXME newline gets in, likely test data issue
 } else {
 data2 = []byte("test content")
 }
@@ -424,7 +436,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 err = o.Update(context.Background(), bytes.NewReader(data2), objInfo)
 require.NoError(t, err)
 require.Equal(t, int64(len(data2)), o.Size())
-fs.Logf(nil, "updated size: %v", len(data2))
+log.Printf("updated size: %v", len(data2))

 // get a new instance from the cache
 if runInstance.wrappedIsExternal {
@@ -448,7 +460,8 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {

 func TestInternalMoveWithNotify(t *testing.T) {
 id := fmt.Sprintf("timwn%v", time.Now().Unix())
-rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+defer runInstance.cleanupFs(t, rootFs, boltDb)
 if !runInstance.wrappedIsExternal {
 t.Skipf("Not external")
 }
@@ -484,49 +497,49 @@ func TestInternalMoveWithNotify(t *testing.T) {
 err = runInstance.retryBlock(func() error {
 li, err := runInstance.list(t, rootFs, "test")
 if err != nil {
-fs.Logf(nil, "err: %v", err)
+log.Printf("err: %v", err)
 return err
 }
 if len(li) != 2 {
-fs.Logf(nil, "not expected listing /test: %v", li)
+log.Printf("not expected listing /test: %v", li)
 return fmt.Errorf("not expected listing /test: %v", li)
 }

 li, err = runInstance.list(t, rootFs, "test/one")
 if err != nil {
-fs.Logf(nil, "err: %v", err)
+log.Printf("err: %v", err)
 return err
 }
 if len(li) != 0 {
-fs.Logf(nil, "not expected listing /test/one: %v", li)
+log.Printf("not expected listing /test/one: %v", li)
 return fmt.Errorf("not expected listing /test/one: %v", li)
 }

 li, err = runInstance.list(t, rootFs, "test/second")
 if err != nil {
-fs.Logf(nil, "err: %v", err)
+log.Printf("err: %v", err)
 return err
 }
 if len(li) != 1 {
-fs.Logf(nil, "not expected listing /test/second: %v", li)
+log.Printf("not expected listing /test/second: %v", li)
 return fmt.Errorf("not expected listing /test/second: %v", li)
 }
 if fi, ok := li[0].(os.FileInfo); ok {
 if fi.Name() != "data.bin" {
-fs.Logf(nil, "not expected name: %v", fi.Name())
+log.Printf("not expected name: %v", fi.Name())
 return fmt.Errorf("not expected name: %v", fi.Name())
 }
 } else if di, ok := li[0].(fs.DirEntry); ok {
 if di.Remote() != "test/second/data.bin" {
-fs.Logf(nil, "not expected remote: %v", di.Remote())
+log.Printf("not expected remote: %v", di.Remote())
 return fmt.Errorf("not expected remote: %v", di.Remote())
 }
 } else {
-fs.Logf(nil, "unexpected listing: %v", li)
+log.Printf("unexpected listing: %v", li)
 return fmt.Errorf("unexpected listing: %v", li)
 }

-fs.Logf(nil, "complete listing: %v", li)
+log.Printf("complete listing: %v", li)
 return nil
 }, 12, time.Second*10)
 require.NoError(t, err)
@@ -534,7 +547,8 @@ func TestInternalMoveWithNotify(t *testing.T) {

 func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
 id := fmt.Sprintf("tincep%v", time.Now().Unix())
-rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+defer runInstance.cleanupFs(t, rootFs, boltDb)
 if !runInstance.wrappedIsExternal {
 t.Skipf("Not external")
 }
@@ -576,43 +590,43 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
 err = runInstance.retryBlock(func() error {
 found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
 if !found {
-fs.Logf(nil, "not found /test")
+log.Printf("not found /test")
 return fmt.Errorf("not found /test")
 }
 found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
 if !found {
-fs.Logf(nil, "not found /test/one")
+log.Printf("not found /test/one")
 return fmt.Errorf("not found /test/one")
 }
 found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
 if !found {
-fs.Logf(nil, "not found /test/one/test2")
+log.Printf("not found /test/one/test2")
 return fmt.Errorf("not found /test/one/test2")
 }
 li, err := runInstance.list(t, rootFs, "test/one")
 if err != nil {
-fs.Logf(nil, "err: %v", err)
+log.Printf("err: %v", err)
 return err
 }
 if len(li) != 1 {
-fs.Logf(nil, "not expected listing /test/one: %v", li)
+log.Printf("not expected listing /test/one: %v", li)
 return fmt.Errorf("not expected listing /test/one: %v", li)
 }
 if fi, ok := li[0].(os.FileInfo); ok {
 if fi.Name() != "test2" {
-fs.Logf(nil, "not expected name: %v", fi.Name())
+log.Printf("not expected name: %v", fi.Name())
 return fmt.Errorf("not expected name: %v", fi.Name())
 }
 } else if di, ok := li[0].(fs.DirEntry); ok {
 if di.Remote() != "test/one/test2" {
-fs.Logf(nil, "not expected remote: %v", di.Remote())
+log.Printf("not expected remote: %v", di.Remote())
 return fmt.Errorf("not expected remote: %v", di.Remote())
 }
 } else {
-fs.Logf(nil, "unexpected listing: %v", li)
+log.Printf("unexpected listing: %v", li)
 return fmt.Errorf("unexpected listing: %v", li)
 }
-fs.Logf(nil, "complete listing /test/one/test2")
+log.Printf("complete listing /test/one/test2")
 return nil
 }, 12, time.Second*10)
 require.NoError(t, err)
@@ -620,7 +634,8 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {

 func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
 id := fmt.Sprintf("ticsadcf%v", time.Now().Unix())
-rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 cfs, err := runInstance.getCacheFs(rootFs)
 require.NoError(t, err)
@@ -652,7 +667,8 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {

 func TestInternalCacheWrites(t *testing.T) {
 id := "ticw"
-rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"writes": "true"})
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 cfs, err := runInstance.getCacheFs(rootFs)
 require.NoError(t, err)
@@ -673,7 +689,8 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
 t.Skip("Skip test on windows/386")
 }
 id := fmt.Sprintf("timcsr%v", time.Now().Unix())
-rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"workers": "1"})
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 cfs, err := runInstance.getCacheFs(rootFs)
 require.NoError(t, err)
@@ -688,7 +705,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
 co, ok := o.(*cache.Object)
 require.True(t, ok)

-for i := range 4 { // read first 4
+for i := 0; i < 4; i++ { // read first 4
 _ = runInstance.readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false)
 }
 cfs.CleanUpCache(true)
@@ -707,8 +724,9 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {

 func TestInternalExpiredEntriesRemoved(t *testing.T) {
 id := fmt.Sprintf("tieer%v", time.Now().Unix())
-vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 4) // needs to be lower than the defined
-rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
+vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, map[string]string{"info_age": "5s"}, nil)
+defer runInstance.cleanupFs(t, rootFs, boltDb)
 cfs, err := runInstance.getCacheFs(rootFs)
 require.NoError(t, err)

@@ -742,10 +760,12 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
 }

 func TestInternalBug2117(t *testing.T) {
-vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 10)
+vfsflags.Opt.DirCacheTime = time.Second * 10

 id := fmt.Sprintf("tib2117%v", time.Now().Unix())
-rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil,
+map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 if runInstance.rootIsCrypt {
 t.Skipf("skipping crypt")
@@ -770,24 +790,24 @@ func TestInternalBug2117(t *testing.T) {

 di, err := runInstance.list(t, rootFs, "test/dir1/dir2")
 require.NoError(t, err)
-fs.Logf(nil, "len: %v", len(di))
+log.Printf("len: %v", len(di))
 require.Len(t, di, 1)

 time.Sleep(time.Second * 30)

 di, err = runInstance.list(t, rootFs, "test/dir1/dir2")
 require.NoError(t, err)
-fs.Logf(nil, "len: %v", len(di))
+log.Printf("len: %v", len(di))
 require.Len(t, di, 1)

 di, err = runInstance.list(t, rootFs, "test/dir1")
 require.NoError(t, err)
-fs.Logf(nil, "len: %v", len(di))
+log.Printf("len: %v", len(di))
 require.Len(t, di, 4)

 di, err = runInstance.list(t, rootFs, "test")
 require.NoError(t, err)
-fs.Logf(nil, "len: %v", len(di))
+log.Printf("len: %v", len(di))
 require.Len(t, di, 4)
 }

@@ -821,14 +841,14 @@ func newRun() *run {
 }

 if uploadDir == "" {
-r.tmpUploadDir, err = os.MkdirTemp("", "rclonecache-tmp")
+r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
 if err != nil {
 panic(fmt.Sprintf("Failed to create temp dir: %v", err))
 }
 } else {
 r.tmpUploadDir = uploadDir
 }
-fs.Logf(nil, "Temp Upload Dir: %v", r.tmpUploadDir)
+log.Printf("Temp Upload Dir: %v", r.tmpUploadDir)

 return r
 }
@@ -846,11 +866,11 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
 return enc
 }

-func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
+func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, cfg map[string]string, flags map[string]string) (fs.Fs, *cache.Persistent) {
 fstest.Initialise()
 remoteExists := false
-for _, s := range config.GetRemotes() {
-if s.Name == remote {
+for _, s := range config.FileSections() {
+if s == remote {
 remoteExists = true
 }
 }
@@ -874,12 +894,12 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 cacheRemote := remote
 if !remoteExists {
 localRemote := remote + "-local"
-config.FileSetValue(localRemote, "type", "local")
-config.FileSetValue(localRemote, "nounc", "true")
+config.FileSet(localRemote, "type", "local")
+config.FileSet(localRemote, "nounc", "true")
 m.Set("type", "cache")
 m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
 } else {
-remoteType := config.GetValue(remote, "type")
+remoteType := config.FileGet(remote, "type")
 if remoteType == "" {
 t.Skipf("skipped due to invalid remote type for %v", remote)
 return nil, nil
@@ -890,14 +910,14 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 m.Set("password", cryptPassword1)
 m.Set("password2", cryptPassword2)
 }
-remoteRemote := config.GetValue(remote, "remote")
+remoteRemote := config.FileGet(remote, "remote")
 if remoteRemote == "" {
 t.Skipf("skipped due to invalid remote wrapper for %v", remote)
 return nil, nil
 }
 remoteRemoteParts := strings.Split(remoteRemote, ":")
 remoteWrapping := remoteRemoteParts[0]
-remoteType := config.GetValue(remoteWrapping, "type")
+remoteType := config.FileGet(remoteWrapping, "type")
 if remoteType != "cache" {
 t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
 return nil, nil
@@ -934,20 +954,16 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 }

 if purge {
-_ = operations.Purge(context.Background(), f, "")
+_ = f.Features().Purge(context.Background(), "")
+require.NoError(t, err)
 }
 err = f.Mkdir(context.Background(), "")
 require.NoError(t, err)

-t.Cleanup(func() {
-runInstance.cleanupFs(t, f)
-})
-
 return f, boltDb
 }

-func (r *run) cleanupFs(t *testing.T, f fs.Fs) {
-err := operations.Purge(context.Background(), f, "")
+func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
+err := f.Features().Purge(context.Background(), "")
 require.NoError(t, err)
 cfs, err := r.getCacheFs(f)
 require.NoError(t, err)
@@ -968,10 +984,10 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
 chunk := int64(1024)
 cnt := size / chunk
 left := size % chunk
-f, err := os.CreateTemp("", "rclonecache-tempfile")
+f, err := ioutil.TempFile("", "rclonecache-tempfile")
 require.NoError(t, err)

-for range int(cnt) {
+for i := 0; i < int(cnt); i++ {
 data := randStringBytes(int(chunk))
 _, _ = f.Write(data)
 }
@@ -1085,9 +1101,9 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
 return err
 }

-func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]any, error) {
+func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error) {
 var err error
-var l []any
+var l []interface{}
 var list fs.DirEntries
 list, err = f.List(context.Background(), remote)
 for _, ll := range list {
@@ -1096,6 +1112,27 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]any, error) {
 return l, err
 }

+func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
+in, err := os.Open(src)
+if err != nil {
+return err
+}
+defer func() {
+_ = in.Close()
+}()
+
+out, err := os.Create(dst)
+if err != nil {
+return err
+}
+defer func() {
+_ = out.Close()
+}()
+
+_, err = io.Copy(out, in)
+return err
+}
+
 func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
 var err error

@@ -1191,7 +1228,7 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
 func (r *run) cleanSize(t *testing.T, size int64) int64 {
 if r.rootIsCrypt {
 denominator := int64(65536 + 16)
-size -= 32
+size = size - 32
 quotient := size / denominator
 remainder := size % denominator
 return (quotient*65536 + remainder - 16)
@@ -1215,7 +1252,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
 var err error
 var state cache.BackgroundUploadState

-for range 2 {
+for i := 0; i < 2; i++ {
 select {
 case state = <-buCh:
 // continue
@@ -1293,7 +1330,7 @@ func (r *run) completeAllBackgroundUploads(t *testing.T, f fs.Fs, lastRemote str

 func (r *run) retryBlock(block func() error, maxRetries int, rate time.Duration) error {
 var err error
-for range maxRetries {
+for i := 0; i < maxRetries; i++ {
 err = block()
 if err == nil {
 return nil
12 backend/cache/cache_test.go vendored
@@ -1,6 +1,7 @@
 // Test Cache filesystem interface

 //go:build !plan9 && !js && !race
+// +build !plan9,!js,!race

 package cache_test

@@ -15,11 +16,10 @@ import (
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 fstests.Run(t, &fstests.Opt{
 RemoteName: "TestCache:",
 NilObject: (*cache.Object)(nil),
-UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata", "ListP"},
-UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata", "SetMetadata"},
-UnimplementableDirectoryMethods: []string{"Metadata", "SetMetadata", "SetModTime"},
-SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
+UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt"},
+UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
+SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
 })
 }
2 backend/cache/cache_unsupported.go vendored
@@ -2,6 +2,6 @@
 // about "no buildable Go source files "

 //go:build plan9 || js
+// +build plan9 js

-// Package cache implements a virtual provider to cache existing remotes.
 package cache
31 backend/cache/cache_upload_test.go vendored
@@ -1,4 +1,5 @@
 //go:build !plan9 && !js && !race
+// +build !plan9,!js,!race

 package cache_test

@@ -20,8 +21,10 @@ import (

 func TestInternalUploadTempDirCreated(t *testing.T) {
 id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
-runInstance.newCacheFs(t, remoteName, id, false, true,
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
+nil,
 map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)})
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 _, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
 require.NoError(t, err)
@@ -60,7 +63,9 @@ func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltD
 func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
 id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
 rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+nil,
 map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"})
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
 }
@@ -68,15 +73,19 @@ func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
 func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
 id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
 rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+nil,
 map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"})
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
 }

 func TestInternalUploadMoveExistingFile(t *testing.T) {
 id := fmt.Sprintf("tiumef%v", time.Now().Unix())
-rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+nil,
 map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 err := rootFs.Mkdir(context.Background(), "one")
 require.NoError(t, err)
@@ -110,8 +119,10 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {

 func TestInternalUploadTempPathCleaned(t *testing.T) {
 id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
-rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+nil,
 map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 err := rootFs.Mkdir(context.Background(), "one")
 require.NoError(t, err)
@@ -151,19 +162,21 @@ func TestInternalUploadTempPathCleaned(t *testing.T) {

 func TestInternalUploadQueueMoreFiles(t *testing.T) {
 id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
-rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+nil,
 map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 err := rootFs.Mkdir(context.Background(), "test")
 require.NoError(t, err)
 minSize := 5242880
 maxSize := 10485760
 totalFiles := 10
-randInstance := rand.New(rand.NewSource(time.Now().Unix()))
+rand.Seed(time.Now().Unix())

 lastFile := ""
-for i := range totalFiles {
-size := int64(randInstance.Intn(maxSize-minSize) + minSize)
+for i := 0; i < totalFiles; i++ {
+size := int64(rand.Intn(maxSize-minSize) + minSize)
 testReader := runInstance.randomReader(t, size)
 remote := "test/" + strconv.Itoa(i) + ".bin"
 runInstance.writeRemoteReader(t, rootFs, remote, testReader)
@@ -200,7 +213,9 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
 func TestInternalUploadTempFileOperations(t *testing.T) {
 id := "tiutfo"
 rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+nil,
 map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 boltDb.PurgeTempUploads()

@@ -328,7 +343,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 func TestInternalUploadUploadingFileOperations(t *testing.T) {
 id := "tiuufo"
 rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+nil,
 map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 boltDb.PurgeTempUploads()

1 backend/cache/directory.go vendored
@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js

 package cache

17 backend/cache/handle.go vendored
@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js

 package cache

@@ -118,7 +119,7 @@ func (r *Handle) startReadWorkers() {
 r.scaleWorkers(totalWorkers)
 }

-// scaleWorkers will increase the worker pool count by the provided amount
+// scaleOutWorkers will increase the worker pool count by the provided amount
 func (r *Handle) scaleWorkers(desired int) {
 current := r.workers
 if current == desired {
@@ -182,7 +183,7 @@ func (r *Handle) queueOffset(offset int64) {
 }
 }

-for i := range r.workers {
+for i := 0; i < r.workers; i++ {
 o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
 if o < 0 || o >= r.cachedObject.Size() {
 continue
@@ -208,7 +209,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
 offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)

 // we align the start offset of the first chunk to a likely chunk in the storage
-chunkStart -= offset
+chunkStart = chunkStart - offset
 r.queueOffset(chunkStart)
 found := false

@@ -222,7 +223,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
 if !found {
 // we're gonna give the workers a chance to pickup the chunk
 // and retry a couple of times
-for i := range r.cacheFs().opt.ReadRetries * 8 {
+for i := 0; i < r.cacheFs().opt.ReadRetries*8; i++ {
 data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
 if err == nil {
 found = true
@@ -327,7 +328,7 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {

 chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
 if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
-chunkStart -= int64(r.cacheFs().opt.ChunkSize)
+chunkStart = chunkStart - int64(r.cacheFs().opt.ChunkSize)
 }
 r.queueOffset(chunkStart)

@@ -415,8 +416,10 @@ func (w *worker) run() {
 continue
 }
 }
-} else if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
-continue
+} else {
+if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
+continue
+}
 }

 chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)
1 backend/cache/object.go vendored
@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js

 package cache

13
backend/cache/plex.go
vendored
13
backend/cache/plex.go
vendored
@@ -1,4 +1,5 @@
|
|||||||
//go:build !plan9 && !js
|
//go:build !plan9 && !js
|
||||||
|
// +build !plan9,!js
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
@@ -7,7 +8,7 @@ import (
|
|||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io/ioutil"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"strings"
|
"strings"
|
||||||
@@ -166,7 +167,7 @@ func (p *plexConnector) listenWebsocket() {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
var data []byte
|
var data []byte
|
||||||
data, err = io.ReadAll(resp.Body)
|
data, err = ioutil.ReadAll(resp.Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -209,7 +210,7 @@ func (p *plexConnector) authenticate() error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
var data map[string]any
|
var data map[string]interface{}
|
||||||
err = json.NewDecoder(resp.Body).Decode(&data)
|
err = json.NewDecoder(resp.Body).Decode(&data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to obtain token: %w", err)
|
return fmt.Errorf("failed to obtain token: %w", err)
|
||||||
@@ -273,11 +274,11 @@ func (p *plexConnector) isPlaying(co *Object) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// adapted from: https://stackoverflow.com/a/28878037 (credit)
|
// adapted from: https://stackoverflow.com/a/28878037 (credit)
|
||||||
func get(m any, path ...any) (any, bool) {
|
func get(m interface{}, path ...interface{}) (interface{}, bool) {
|
||||||
for _, p := range path {
|
for _, p := range path {
|
||||||
switch idx := p.(type) {
|
switch idx := p.(type) {
|
||||||
case string:
|
case string:
|
||||||
if mm, ok := m.(map[string]any); ok {
|
if mm, ok := m.(map[string]interface{}); ok {
|
||||||
if val, found := mm[idx]; found {
|
if val, found := mm[idx]; found {
|
||||||
m = val
|
m = val
|
||||||
continue
|
continue
|
||||||
@@ -285,7 +286,7 @@ func get(m any, path ...any) (any, bool) {
|
|||||||
}
|
}
|
||||||
return nil, false
|
return nil, false
|
||||||
case int:
|
case int:
|
||||||
if mm, ok := m.([]any); ok {
|
if mm, ok := m.([]interface{}); ok {
|
||||||
if len(mm) > idx {
|
if len(mm) > idx {
|
||||||
m = mm[idx]
|
m = mm[idx]
|
||||||
continue
|
continue
|
||||||
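Two modernisations are being rolled back in the plex.go hunks: `io.ReadAll` (the Go 1.16 replacement for the deprecated `ioutil.ReadAll`) and the `any` alias for `interface{}` (Go 1.18). Both pairs behave identically at runtime; a small reference sketch, not taken from rclone:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// io.ReadAll supersedes ioutil.ReadAll with the same behaviour.
	data, err := io.ReadAll(strings.NewReader(`{"ok":true}`))
	if err != nil {
		fmt.Println("read error:", err)
		return
	}

	// `any` is an alias for interface{}, so these declarations have
	// exactly the same type.
	var a any = string(data)
	var b interface{} = string(data)
	fmt.Println(a == b)
}
```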
1  backend/cache/storage_memory.go  vendored
@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js

 package cache
17  backend/cache/storage_persistent.go  vendored
@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js

 package cache

@@ -8,6 +9,7 @@ import (
 	"encoding/binary"
 	"encoding/json"
 	"fmt"
+	"io/ioutil"
 	"os"
 	"path"
 	"strconv"
@@ -18,7 +20,6 @@ import (
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/walk"
 	bolt "go.etcd.io/bbolt"
-	"go.etcd.io/bbolt/errors"
 )

 // Constants
@@ -472,7 +473,7 @@ func (b *Persistent) GetChunk(cachedObject *Object, offset int64) ([]byte, error
 	var data []byte

 	fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10))
-	data, err := os.ReadFile(fp)
+	data, err := ioutil.ReadFile(fp)
 	if err != nil {
 		return nil, err
 	}
@@ -485,7 +486,7 @@ func (b *Persistent) AddChunk(fp string, data []byte, offset int64) error {
 	_ = os.MkdirAll(path.Join(b.dataPath, fp), os.ModePerm)

 	filePath := path.Join(b.dataPath, fp, strconv.FormatInt(offset, 10))
-	err := os.WriteFile(filePath, data, os.ModePerm)
+	err := ioutil.WriteFile(filePath, data, os.ModePerm)
 	if err != nil {
 		return err
 	}
@@ -598,7 +599,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
 	})

 	if err != nil {
-		if err == errors.ErrDatabaseNotOpen {
+		if err == bolt.ErrDatabaseNotOpen {
 			// we're likely a late janitor and we need to end quietly as there's no guarantee of what exists anymore
 			return
 		}
@@ -607,16 +608,16 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
 }

 // Stats returns a go map with the stats key values
-func (b *Persistent) Stats() (map[string]map[string]any, error) {
-	r := make(map[string]map[string]any)
-	r["data"] = make(map[string]any)
+func (b *Persistent) Stats() (map[string]map[string]interface{}, error) {
+	r := make(map[string]map[string]interface{})
+	r["data"] = make(map[string]interface{})
 	r["data"]["oldest-ts"] = time.Now()
 	r["data"]["oldest-file"] = ""
 	r["data"]["newest-ts"] = time.Now()
 	r["data"]["newest-file"] = ""
 	r["data"]["total-chunks"] = 0
 	r["data"]["total-size"] = int64(0)
-	r["files"] = make(map[string]any)
+	r["files"] = make(map[string]interface{})
 	r["files"]["oldest-ts"] = time.Now()
 	r["files"]["oldest-name"] = ""
 	r["files"]["newest-ts"] = time.Now()
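The same rollback pattern appears for the file helpers: `os.ReadFile` and `os.WriteFile` took over from `ioutil.ReadFile`/`ioutil.WriteFile` in Go 1.16 with identical signatures. A minimal sketch of the chunk read/write round trip (a temporary path, not the cache backend's real layout):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Hypothetical chunk file name: <dir>/<offset>.
	fp := filepath.Join(os.TempDir(), "12345")

	// os.WriteFile replaces ioutil.WriteFile (same signature).
	if err := os.WriteFile(fp, []byte("chunk data"), 0o644); err != nil {
		fmt.Println("write:", err)
		return
	}

	// os.ReadFile replaces ioutil.ReadFile (same signature).
	data, err := os.ReadFile(fp)
	if err != nil {
		fmt.Println("read:", err)
		return
	}
	fmt.Printf("read %d bytes back\n", len(data))
	_ = os.Remove(fp)
}
```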
2  backend/cache/utils_test.go  vendored
@@ -1,5 +1,3 @@
-//go:build !plan9 && !js
-
 package cache

 import bolt "go.etcd.io/bbolt"
@@ -12,6 +12,7 @@ import (
 	"fmt"
 	gohash "hash"
 	"io"
+	"io/ioutil"
 	"math/rand"
 	"path"
 	"regexp"
@@ -29,9 +30,9 @@ import (
 	"github.com/rclone/rclone/fs/fspath"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/operations"
-	"github.com/rclone/rclone/lib/encoder"
 )

+//
 // Chunker's composite files have one or more chunks
 // and optional metadata object. If it's present,
 // meta object is named after the original file.
@@ -64,7 +65,7 @@ import (
 // length of 13 decimals it makes a 7-digit base-36 number.
 //
 // When transactions is set to the norename style, data chunks will
-// keep their temporary chunk names (with the transaction identifier
+// keep their temporary chunk names (with the transacion identifier
 // suffix). To distinguish them from temporary chunks, the txn field
 // of the metadata file is set to match the transaction identifier of
 // the data chunks.
@@ -78,6 +79,7 @@ import (
 // Metadata format v1 does not define any control chunk types,
 // they are currently ignored aka reserved.
 // In future they can be used to implement resumable uploads etc.
+//
 const (
 	ctrlTypeRegStr   = `[a-z][a-z0-9]{2,6}`
 	tempSuffixFormat = `_%04s`
@@ -102,10 +104,8 @@ var (
 //
 // And still chunker's primary function is to chunk large files
 // rather than serve as a generic metadata container.
-const (
-	maxMetadataSize        = 1023
-	maxMetadataSizeWritten = 255
-)
+const maxMetadataSize = 1023
+const maxMetadataSizeWritten = 255

 // Current/highest supported metadata format.
 const metadataVersion = 2
@@ -308,6 +308,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 		root: rpath,
 		opt:  *opt,
 	}
+	cache.PinUntilFinalized(f.base, f)
 	f.dirSort = true // processEntries requires that meta Objects prerun data chunks atm.

 	if err := f.configure(opt.NameFormat, opt.MetaFormat, opt.HashType, opt.Transactions); err != nil {
@@ -319,45 +320,29 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 	// i.e. `rpath` does not exist in the wrapped remote, but chunker
 	// detects a composite file because it finds the first chunk!
 	// (yet can't satisfy fstest.CheckListing, will ignore)
-	if err == nil && !f.useMeta {
+	if err == nil && !f.useMeta && strings.Contains(rpath, "/") {
 		firstChunkPath := f.makeChunkName(remotePath, 0, "", "")
-		newBase, testErr := cache.Get(ctx, baseName+firstChunkPath)
+		_, testErr := cache.Get(ctx, baseName+firstChunkPath)
 		if testErr == fs.ErrorIsFile {
-			f.base = newBase
 			err = testErr
 		}
 	}
-	cache.PinUntilFinalized(f.base, f)
-
-	// Correct root if definitely pointing to a file
-	if err == fs.ErrorIsFile {
-		f.root = path.Dir(f.root)
-		if f.root == "." || f.root == "/" {
-			f.root = ""
-		}
-	}

 	// Note 1: the features here are ones we could support, and they are
 	// ANDed with the ones from wrappedFs.
 	// Note 2: features.Fill() points features.PutStream to our PutStream,
 	// but features.Mask() will nullify it if wrappedFs does not have it.
 	f.features = (&fs.Features{
 		CaseInsensitive:         true,
 		DuplicateFiles:          true,
 		ReadMimeType:            false, // Object.MimeType not supported
 		WriteMimeType:           true,
 		BucketBased:             true,
 		CanHaveEmptyDirectories: true,
 		ServerSideAcrossConfigs: true,
-		ReadDirMetadata:          true,
-		WriteDirMetadata:         true,
-		WriteDirSetModTime:       true,
-		UserDirMetadata:          true,
-		DirModTimeUpdatesOnWrite: true,
 	}).Fill(ctx, f).Mask(ctx, baseFs).WrapsFs(f, baseFs)

-	f.features.ListR = nil // Recursive listing may cause chunker skip files
-	f.features.ListP = nil // ListP not supported yet
+	f.features.Disable("ListR") // Recursive listing may cause chunker skip files

 	return f, err
 }
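The removed `// Correct root if definitely pointing to a file` block trims the file name off the configured root when NewFs discovers the root is actually a file. A standalone sketch of that correction using only the standard library (variable names are illustrative):

```go
package main

import (
	"fmt"
	"path"
)

// correctRoot mirrors the removed logic: drop the last path element and
// normalise "." or "/" to the empty root.
func correctRoot(root string) string {
	root = path.Dir(root)
	if root == "." || root == "/" {
		root = ""
	}
	return root
}

func main() {
	fmt.Println(correctRoot("bucket/dir/file.bin")) // bucket/dir
	fmt.Println(correctRoot("file.bin"))            // "" (path.Dir returns ".")
}
```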
@@ -557,6 +542,7 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
|
|||||||
//
|
//
|
||||||
// xactID is a transaction identifier. Empty xactID denotes active chunk,
|
// xactID is a transaction identifier. Empty xactID denotes active chunk,
|
||||||
// otherwise temporary chunk name is produced.
|
// otherwise temporary chunk name is produced.
|
||||||
|
//
|
||||||
func (f *Fs) makeChunkName(filePath string, chunkNo int, ctrlType, xactID string) string {
|
func (f *Fs) makeChunkName(filePath string, chunkNo int, ctrlType, xactID string) string {
|
||||||
dir, parentName := path.Split(filePath)
|
dir, parentName := path.Split(filePath)
|
||||||
var name, tempSuffix string
|
var name, tempSuffix string
|
||||||
@@ -633,7 +619,7 @@ func (f *Fs) parseChunkName(filePath string) (parentPath string, chunkNo int, ct
|
|||||||
|
|
||||||
// forbidChunk prints error message or raises error if file is chunk.
|
// forbidChunk prints error message or raises error if file is chunk.
|
||||||
// First argument sets log prefix, use `false` to suppress message.
|
// First argument sets log prefix, use `false` to suppress message.
|
||||||
func (f *Fs) forbidChunk(o any, filePath string) error {
|
func (f *Fs) forbidChunk(o interface{}, filePath string) error {
|
||||||
if parentPath, _, _, _ := f.parseChunkName(filePath); parentPath != "" {
|
if parentPath, _, _, _ := f.parseChunkName(filePath); parentPath != "" {
|
||||||
if f.opt.FailHard {
|
if f.opt.FailHard {
|
||||||
return fmt.Errorf("chunk overlap with %q", parentPath)
|
return fmt.Errorf("chunk overlap with %q", parentPath)
|
||||||
@@ -681,7 +667,7 @@ func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err
 	circleSec := unixSec % closestPrimeZzzzSeconds
 	first4chars := strconv.FormatInt(circleSec, 36)

-	for range maxTransactionProbes {
+	for tries := 0; tries < maxTransactionProbes; tries++ {
 		f.xactIDMutex.Lock()
 		randomness := f.xactIDRand.Int63n(maxTwoBase36Digits + 1)
 		f.xactIDMutex.Unlock()
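The hunk above only changes the loop form; the surrounding transaction-ID scheme packs a wrapped Unix-time value into base 36 with `strconv.FormatInt`. A small sketch of just that encoding step (the modulus below is made up for illustration; the real value is chunker's `closestPrimeZzzzSeconds` constant):

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	// Illustrative wrap-around only, not the real chunker constant.
	const wrapSeconds = 1679609
	circleSec := time.Now().Unix() % wrapSeconds

	// Base-36 keeps the identifier short and file-name safe.
	id := strconv.FormatInt(circleSec, 36)
	fmt.Println("time component of xact id:", id)
}
```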
@@ -722,6 +708,7 @@ func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err
|
|||||||
// directory together with dead chunks.
|
// directory together with dead chunks.
|
||||||
// In future a flag named like `--chunker-list-hidden` may be added to
|
// In future a flag named like `--chunker-list-hidden` may be added to
|
||||||
// rclone that will tell List to reveal hidden chunks.
|
// rclone that will tell List to reveal hidden chunks.
|
||||||
|
//
|
||||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
entries, err = f.base.List(ctx, dir)
|
entries, err = f.base.List(ctx, dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -831,7 +818,8 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
|
|||||||
}
|
}
|
||||||
case fs.Directory:
|
case fs.Directory:
|
||||||
isSubdir[entry.Remote()] = true
|
isSubdir[entry.Remote()] = true
|
||||||
wrapDir := fs.NewDirWrapper(entry.Remote(), entry)
|
wrapDir := fs.NewDirCopy(ctx, entry)
|
||||||
|
wrapDir.SetRemote(entry.Remote())
|
||||||
tempEntries = append(tempEntries, wrapDir)
|
tempEntries = append(tempEntries, wrapDir)
|
||||||
default:
|
default:
|
||||||
if f.opt.FailHard {
|
if f.opt.FailHard {
|
||||||
@@ -880,6 +868,7 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
|
|||||||
// Note that chunker prefers analyzing file names rather than reading
|
// Note that chunker prefers analyzing file names rather than reading
|
||||||
// the content of meta object assuming that directory scans are fast
|
// the content of meta object assuming that directory scans are fast
|
||||||
// but opening even a small file can be slow on some backends.
|
// but opening even a small file can be slow on some backends.
|
||||||
|
//
|
||||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||||
return f.scanObject(ctx, remote, false)
|
return f.scanObject(ctx, remote, false)
|
||||||
}
|
}
|
||||||
@@ -964,11 +953,6 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
|
|||||||
}
|
}
|
||||||
if caseInsensitive {
|
if caseInsensitive {
|
||||||
sameMain = strings.EqualFold(mainRemote, remote)
|
sameMain = strings.EqualFold(mainRemote, remote)
|
||||||
if sameMain && f.base.Features().IsLocal {
|
|
||||||
// on local, make sure the EqualFold still holds true when accounting for encoding.
|
|
||||||
// sometimes paths with special characters will only normalize the same way in Standard Encoding.
|
|
||||||
sameMain = strings.EqualFold(encoder.OS.FromStandardPath(mainRemote), encoder.OS.FromStandardPath(remote))
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
sameMain = mainRemote == remote
|
sameMain = mainRemote == remote
|
||||||
}
|
}
|
||||||
@@ -982,13 +966,13 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
|
|||||||
}
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
// fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
|
//fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
|
||||||
if err := o.addChunk(entry, chunkNo); err != nil {
|
if err := o.addChunk(entry, chunkNo); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if o.main == nil && len(o.chunks) == 0 {
|
if o.main == nil && (o.chunks == nil || len(o.chunks) == 0) {
|
||||||
// Scanning hasn't found data chunks with conforming names.
|
// Scanning hasn't found data chunks with conforming names.
|
||||||
if f.useMeta || quickScan {
|
if f.useMeta || quickScan {
|
||||||
// Metadata is required but absent and there are no chunks.
|
// Metadata is required but absent and there are no chunks.
|
||||||
@@ -1059,7 +1043,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
metadata, err := io.ReadAll(reader)
|
metadata, err := ioutil.ReadAll(reader)
|
||||||
_ = reader.Close() // ensure file handle is freed on windows
|
_ = reader.Close() // ensure file handle is freed on windows
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -1100,7 +1084,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
|
|||||||
|
|
||||||
// readXactID returns the transaction ID stored in the passed metadata object
|
// readXactID returns the transaction ID stored in the passed metadata object
|
||||||
func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
|
func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
|
||||||
// if xactID has already been read and cached return it now
|
// if xactID has already been read and cahced return it now
|
||||||
if o.xIDCached {
|
if o.xIDCached {
|
||||||
return o.xactID, nil
|
return o.xactID, nil
|
||||||
}
|
}
|
||||||
@@ -1118,7 +1102,7 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
data, err := io.ReadAll(reader)
|
data, err := ioutil.ReadAll(reader)
|
||||||
_ = reader.Close() // ensure file handle is freed on windows
|
_ = reader.Close() // ensure file handle is freed on windows
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
@@ -1144,8 +1128,8 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
|
|||||||
// put implements Put, PutStream, PutUnchecked, Update
|
// put implements Put, PutStream, PutUnchecked, Update
|
||||||
func (f *Fs) put(
|
func (f *Fs) put(
|
||||||
ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options []fs.OpenOption,
|
ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options []fs.OpenOption,
|
||||||
basePut putFn, action string, target fs.Object,
|
basePut putFn, action string, target fs.Object) (obj fs.Object, err error) {
|
||||||
) (obj fs.Object, err error) {
|
|
||||||
// Perform consistency checks
|
// Perform consistency checks
|
||||||
if err := f.forbidChunk(src, remote); err != nil {
|
if err := f.forbidChunk(src, remote); err != nil {
|
||||||
return nil, fmt.Errorf("%s refused: %w", action, err)
|
return nil, fmt.Errorf("%s refused: %w", action, err)
|
||||||
@@ -1190,7 +1174,10 @@ func (f *Fs) put(
 		}

 		tempRemote := f.makeChunkName(baseRemote, c.chunkNo, "", xactID)
-		size := min(c.sizeLeft, c.chunkSize)
+		size := c.sizeLeft
+		if size > c.chunkSize {
+			size = c.chunkSize
+		}
 		savedReadCount := c.readCount

 		// If a single chunk is expected, avoid the extra rename operation
@@ -1475,7 +1462,10 @@ func (c *chunkingReader) dummyRead(in io.Reader, size int64) error {
 	const bufLen = 1048576 // 1 MiB
 	buf := make([]byte, bufLen)
 	for size > 0 {
-		n := min(size, bufLen)
+		n := size
+		if n > bufLen {
+			n = bufLen
+		}
 		if _, err := io.ReadFull(in, buf[0:n]); err != nil {
 			return err
 		}
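Both hunks above replace the generic `min` built-in (available since Go 1.21) with explicit comparisons; the result is the same. A compact sketch of the equivalence:

```go
package main

import "fmt"

func main() {
	var sizeLeft, chunkSize int64 = 1500, 1024

	// Go 1.21+: the generic min built-in.
	a := min(sizeLeft, chunkSize)

	// Pre-1.21 equivalent: manual clamp.
	b := sizeLeft
	if b > chunkSize {
		b = chunkSize
	}

	fmt.Println(a == b, a) // true 1024
}
```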
@@ -1579,14 +1569,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 	return f.base.Mkdir(ctx, dir)
 }

-// MkdirMetadata makes the root directory of the Fs object
-func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
-	if do := f.base.Features().MkdirMetadata; do != nil {
-		return do(ctx, dir, metadata)
-	}
-	return nil, fs.ErrorNotImplemented
-}
-
 // Rmdir removes the directory (container, bucket) if empty
 //
 // Return an error if it doesn't exist or isn't empty
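The removed `MkdirMetadata` wrapper follows the usual rclone pattern for optional features: look the capability up on the wrapped backend and delegate only if it is non-nil. A generic, self-contained sketch of that nil-checked delegation (the types here are invented for illustration, not rclone's):

```go
package main

import (
	"errors"
	"fmt"
)

var errNotImplemented = errors.New("not implemented")

// features models an optional capability as a nullable function field.
type features struct {
	MkdirMetadata func(dir string) error
}

// wrapper delegates to the wrapped backend only when the capability exists.
type wrapper struct {
	base features
}

func (w *wrapper) MkdirMetadata(dir string) error {
	if do := w.base.MkdirMetadata; do != nil {
		return do(dir)
	}
	return errNotImplemented
}

func main() {
	w := &wrapper{} // base backend has no MkdirMetadata
	fmt.Println(w.MkdirMetadata("dir")) // not implemented
}
```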
@@ -1604,6 +1586,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
|||||||
// This command will chain to `purge` from wrapped remote.
|
// This command will chain to `purge` from wrapped remote.
|
||||||
// As a result it removes not only composite chunker files with their
|
// As a result it removes not only composite chunker files with their
|
||||||
// active chunks but also all hidden temporary chunks in the directory.
|
// active chunks but also all hidden temporary chunks in the directory.
|
||||||
|
//
|
||||||
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||||
do := f.base.Features().Purge
|
do := f.base.Features().Purge
|
||||||
if do == nil {
|
if do == nil {
|
||||||
@@ -1645,6 +1628,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
|
|||||||
// Unsupported control chunks will get re-picked by a more recent
|
// Unsupported control chunks will get re-picked by a more recent
|
||||||
// rclone version with unexpected results. This can be helped by
|
// rclone version with unexpected results. This can be helped by
|
||||||
// the `delete hidden` flag above or at least the user has been warned.
|
// the `delete hidden` flag above or at least the user has been warned.
|
||||||
|
//
|
||||||
func (o *Object) Remove(ctx context.Context) (err error) {
|
func (o *Object) Remove(ctx context.Context) (err error) {
|
||||||
if err := o.f.forbidChunk(o, o.Remote()); err != nil {
|
if err := o.f.forbidChunk(o, o.Remote()); err != nil {
|
||||||
// operations.Move can still call Remove if chunker's Move refuses
|
// operations.Move can still call Remove if chunker's Move refuses
|
||||||
@@ -1820,9 +1804,9 @@ func (f *Fs) okForServerSide(ctx context.Context, src fs.Object, opName string)
|
|||||||
|
|
||||||
// Copy src to this remote using server-side copy operations.
|
// Copy src to this remote using server-side copy operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given.
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
// It returns the destination Object and a possible error.
|
// It returns the destination Object and a possible error
|
||||||
//
|
//
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
@@ -1841,9 +1825,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
|
|
||||||
// Move src to this remote using server-side move operations.
|
// Move src to this remote using server-side move operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given.
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
// It returns the destination Object and a possible error.
|
// It returns the destination Object and a possible error
|
||||||
//
|
//
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
@@ -1861,8 +1845,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
|
|
||||||
// baseMove chains to the wrapped Move or simulates it by Copy+Delete
|
// baseMove chains to the wrapped Move or simulates it by Copy+Delete
|
||||||
func (f *Fs) baseMove(ctx context.Context, src fs.Object, remote string, delMode int) (fs.Object, error) {
|
func (f *Fs) baseMove(ctx context.Context, src fs.Object, remote string, delMode int) (fs.Object, error) {
|
||||||
ctx, ci := fs.AddConfig(ctx)
|
|
||||||
ci.NameTransform = nil // ensure operations.Move does not double-transform here
|
|
||||||
var (
|
var (
|
||||||
dest fs.Object
|
dest fs.Object
|
||||||
err error
|
err error
|
||||||
@@ -1906,14 +1888,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
|||||||
return do(ctx, srcFs.base, srcRemote, dstRemote)
|
return do(ctx, srcFs.base, srcRemote, dstRemote)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DirSetModTime sets the directory modtime for dir
|
|
||||||
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
|
|
||||||
if do := f.base.Features().DirSetModTime; do != nil {
|
|
||||||
return do(ctx, dir, modTime)
|
|
||||||
}
|
|
||||||
return fs.ErrorNotImplemented
|
|
||||||
}
|
|
||||||
|
|
||||||
// CleanUp the trash in the Fs
|
// CleanUp the trash in the Fs
|
||||||
//
|
//
|
||||||
// Implement this if you have a way of emptying the trash or
|
// Implement this if you have a way of emptying the trash or
|
||||||
@@ -1962,7 +1936,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
|
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
|
||||||
// fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
|
//fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
|
||||||
if entryType == fs.EntryObject {
|
if entryType == fs.EntryObject {
|
||||||
mainPath, _, _, xactID := f.parseChunkName(path)
|
mainPath, _, _, xactID := f.parseChunkName(path)
|
||||||
metaXactID := ""
|
metaXactID := ""
|
||||||
@@ -2151,6 +2125,7 @@ func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
|
|||||||
// file, then tries to read it from metadata. This in theory
|
// file, then tries to read it from metadata. This in theory
|
||||||
// handles the unusual case when a small file has been tampered
|
// handles the unusual case when a small file has been tampered
|
||||||
// on the level of wrapped remote but chunker is unaware of that.
|
// on the level of wrapped remote but chunker is unaware of that.
|
||||||
|
//
|
||||||
func (o *Object) Hash(ctx context.Context, hashType hash.Type) (string, error) {
|
func (o *Object) Hash(ctx context.Context, hashType hash.Type) (string, error) {
|
||||||
if err := o.readMetadata(ctx); err != nil {
|
if err := o.readMetadata(ctx); err != nil {
|
||||||
return "", err // valid metadata is required to get hash, abort
|
return "", err // valid metadata is required to get hash, abort
|
||||||
@@ -2439,6 +2414,7 @@ type metaSimpleJSON struct {
|
|||||||
// - for files larger than chunk size
|
// - for files larger than chunk size
|
||||||
// - if file contents can be mistaken as meta object
|
// - if file contents can be mistaken as meta object
|
||||||
// - if consistent hashing is On but wrapped remote can't provide given hash
|
// - if consistent hashing is On but wrapped remote can't provide given hash
|
||||||
|
//
|
||||||
func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1, xactID string) ([]byte, error) {
|
func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1, xactID string) ([]byte, error) {
|
||||||
version := metadataVersion
|
version := metadataVersion
|
||||||
if xactID == "" && version == 2 {
|
if xactID == "" && version == 2 {
|
||||||
@@ -2471,13 +2447,14 @@ func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1,
|
|||||||
// New format will have a higher version number and cannot be correctly
|
// New format will have a higher version number and cannot be correctly
|
||||||
// handled by current implementation.
|
// handled by current implementation.
|
||||||
// The version check below will then explicitly ask user to upgrade rclone.
|
// The version check below will then explicitly ask user to upgrade rclone.
|
||||||
|
//
|
||||||
func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) {
|
func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) {
|
||||||
// Be strict about JSON format
|
// Be strict about JSON format
|
||||||
// to reduce possibility that a random small file resembles metadata.
|
// to reduce possibility that a random small file resembles metadata.
|
||||||
if len(data) > maxMetadataSizeWritten {
|
if len(data) > maxMetadataSizeWritten {
|
||||||
return nil, false, ErrMetaTooBig
|
return nil, false, ErrMetaTooBig
|
||||||
}
|
}
|
||||||
if len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
|
if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
|
||||||
return nil, false, errors.New("invalid json")
|
return nil, false, errors.New("invalid json")
|
||||||
}
|
}
|
||||||
var metadata metaSimpleJSON
|
var metadata metaSimpleJSON
|
||||||
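One detail in the hunk above: the older code guards with `data == nil ||` before `len(data) < 2`, but `len` of a nil slice is defined to be 0 in Go, so the extra nil check is redundant (harmless, just noise). For instance:

```go
package main

import "fmt"

func main() {
	var data []byte // nil slice
	fmt.Println(data == nil, len(data)) // true 0
	fmt.Println(len(data) < 2)          // true, no separate nil check needed
}
```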
@@ -2574,8 +2551,6 @@ var (
|
|||||||
_ fs.Copier = (*Fs)(nil)
|
_ fs.Copier = (*Fs)(nil)
|
||||||
_ fs.Mover = (*Fs)(nil)
|
_ fs.Mover = (*Fs)(nil)
|
||||||
_ fs.DirMover = (*Fs)(nil)
|
_ fs.DirMover = (*Fs)(nil)
|
||||||
_ fs.DirSetModTimer = (*Fs)(nil)
|
|
||||||
_ fs.MkdirMetadataer = (*Fs)(nil)
|
|
||||||
_ fs.PutUncheckeder = (*Fs)(nil)
|
_ fs.PutUncheckeder = (*Fs)(nil)
|
||||||
_ fs.PutStreamer = (*Fs)(nil)
|
_ fs.PutStreamer = (*Fs)(nil)
|
||||||
_ fs.CleanUpper = (*Fs)(nil)
|
_ fs.CleanUpper = (*Fs)(nil)
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io/ioutil"
|
||||||
"path"
|
"path"
|
||||||
"regexp"
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
@@ -40,7 +40,7 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
type settings map[string]any
|
type settings map[string]interface{}
|
||||||
|
|
||||||
func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, path string, opts settings) fs.Fs {
|
func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, path string, opts settings) fs.Fs {
|
||||||
fsName := strings.Split(f.Name(), "{")[0] // strip off hash
|
fsName := strings.Split(f.Name(), "{")[0] // strip off hash
|
||||||
@@ -413,7 +413,7 @@ func testSmallFileInternals(t *testing.T, f *Fs) {
|
|||||||
if r == nil {
|
if r == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
data, err := io.ReadAll(r)
|
data, err := ioutil.ReadAll(r)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Equal(t, contents, string(data))
|
assert.Equal(t, contents, string(data))
|
||||||
_ = r.Close()
|
_ = r.Close()
|
||||||
@@ -538,7 +538,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
|
|||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
var chunkContents []byte
|
var chunkContents []byte
|
||||||
assert.NotPanics(t, func() {
|
assert.NotPanics(t, func() {
|
||||||
chunkContents, err = io.ReadAll(r)
|
chunkContents, err = ioutil.ReadAll(r)
|
||||||
_ = r.Close()
|
_ = r.Close()
|
||||||
})
|
})
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
@@ -573,7 +573,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
|
|||||||
r, err = willyChunk.Open(ctx)
|
r, err = willyChunk.Open(ctx)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.NotPanics(t, func() {
|
assert.NotPanics(t, func() {
|
||||||
_, err = io.ReadAll(r)
|
_, err = ioutil.ReadAll(r)
|
||||||
_ = r.Close()
|
_ = r.Close()
|
||||||
})
|
})
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
@@ -672,7 +672,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
|
|||||||
assert.NoError(t, err, "open "+description)
|
assert.NoError(t, err, "open "+description)
|
||||||
assert.NotNil(t, r, "open stream of "+description)
|
assert.NotNil(t, r, "open stream of "+description)
|
||||||
if err == nil && r != nil {
|
if err == nil && r != nil {
|
||||||
data, err := io.ReadAll(r)
|
data, err := ioutil.ReadAll(r)
|
||||||
assert.NoError(t, err, "read all of "+description)
|
assert.NoError(t, err, "read all of "+description)
|
||||||
assert.Equal(t, contents, string(data), description+" contents is ok")
|
assert.Equal(t, contents, string(data), description+" contents is ok")
|
||||||
_ = r.Close()
|
_ = r.Close()
|
||||||
@@ -758,8 +758,8 @@ func testFutureProof(t *testing.T, f *Fs) {
|
|||||||
assert.Error(t, err)
|
assert.Error(t, err)
|
||||||
|
|
||||||
// Rcat must fail
|
// Rcat must fail
|
||||||
in := io.NopCloser(bytes.NewBufferString("abc"))
|
in := ioutil.NopCloser(bytes.NewBufferString("abc"))
|
||||||
robj, err := operations.Rcat(ctx, f, file, in, modTime, nil)
|
robj, err := operations.Rcat(ctx, f, file, in, modTime)
|
||||||
assert.Nil(t, robj)
|
assert.Nil(t, robj)
|
||||||
assert.NotNil(t, err)
|
assert.NotNil(t, err)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -854,7 +854,7 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
|
|||||||
r, err := dstFile.Open(ctx)
|
r, err := dstFile.Open(ctx)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.NotNil(t, r)
|
assert.NotNil(t, r)
|
||||||
data, err := io.ReadAll(r)
|
data, err := ioutil.ReadAll(r)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Equal(t, contents, string(data))
|
assert.Equal(t, contents, string(data))
|
||||||
_ = r.Close()
|
_ = r.Close()
|
||||||
|
|||||||
@@ -36,17 +36,14 @@ func TestIntegration(t *testing.T) {
|
|||||||
"GetTier",
|
"GetTier",
|
||||||
"SetTier",
|
"SetTier",
|
||||||
"Metadata",
|
"Metadata",
|
||||||
"SetMetadata",
|
|
||||||
},
|
},
|
||||||
UnimplementableFsMethods: []string{
|
UnimplementableFsMethods: []string{
|
||||||
"PublicLink",
|
"PublicLink",
|
||||||
"OpenWriterAt",
|
"OpenWriterAt",
|
||||||
"OpenChunkWriter",
|
|
||||||
"MergeDirs",
|
"MergeDirs",
|
||||||
"DirCacheFlush",
|
"DirCacheFlush",
|
||||||
"UserInfo",
|
"UserInfo",
|
||||||
"Disconnect",
|
"Disconnect",
|
||||||
"ListP",
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
if *fstest.RemoteName == "" {
|
if *fstest.RemoteName == "" {
|
||||||
|
|||||||
@@ -1,48 +0,0 @@
|
|||||||
// Package api has type definitions for cloudinary
|
|
||||||
package api
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
)
|
|
||||||
|
|
||||||
// CloudinaryEncoder extends the built-in encoder
|
|
||||||
type CloudinaryEncoder interface {
|
|
||||||
// FromStandardPath takes a / separated path in Standard encoding
|
|
||||||
// and converts it to a / separated path in this encoding.
|
|
||||||
FromStandardPath(string) string
|
|
||||||
// FromStandardName takes name in Standard encoding and converts
|
|
||||||
// it in this encoding.
|
|
||||||
FromStandardName(string) string
|
|
||||||
// ToStandardPath takes a / separated path in this encoding
|
|
||||||
// and converts it to a / separated path in Standard encoding.
|
|
||||||
ToStandardPath(string) string
|
|
||||||
// ToStandardName takes name in this encoding and converts
|
|
||||||
// it in Standard encoding.
|
|
||||||
ToStandardName(string, string) string
|
|
||||||
// Encoded root of the remote (as passed into NewFs)
|
|
||||||
FromStandardFullPath(string) string
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateOptions was created to pass options from Update to Put
|
|
||||||
type UpdateOptions struct {
|
|
||||||
PublicID string
|
|
||||||
ResourceType string
|
|
||||||
DeliveryType string
|
|
||||||
AssetFolder string
|
|
||||||
DisplayName string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Header formats the option as a string
|
|
||||||
func (o *UpdateOptions) Header() (string, string) {
|
|
||||||
return "UpdateOption", fmt.Sprintf("%s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mandatory returns whether the option must be parsed or can be ignored
|
|
||||||
func (o *UpdateOptions) Mandatory() bool {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// String formats the option into human-readable form
|
|
||||||
func (o *UpdateOptions) String() string {
|
|
||||||
return fmt.Sprintf("Fully qualified Public ID: %s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
|
|
||||||
}
|
|
||||||
@@ -1,754 +0,0 @@
|
|||||||
// Package cloudinary provides an interface to the Cloudinary DAM
|
|
||||||
package cloudinary
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/hex"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"path"
|
|
||||||
"slices"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/cloudinary/cloudinary-go/v2"
|
|
||||||
SDKApi "github.com/cloudinary/cloudinary-go/v2/api"
|
|
||||||
"github.com/cloudinary/cloudinary-go/v2/api/admin"
|
|
||||||
"github.com/cloudinary/cloudinary-go/v2/api/admin/search"
|
|
||||||
"github.com/cloudinary/cloudinary-go/v2/api/uploader"
|
|
||||||
"github.com/rclone/rclone/backend/cloudinary/api"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/config"
|
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
|
||||||
"github.com/rclone/rclone/fs/config/configstruct"
|
|
||||||
"github.com/rclone/rclone/fs/fserrors"
|
|
||||||
"github.com/rclone/rclone/fs/fshttp"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
|
||||||
"github.com/rclone/rclone/lib/encoder"
|
|
||||||
"github.com/rclone/rclone/lib/pacer"
|
|
||||||
"github.com/rclone/rclone/lib/rest"
|
|
||||||
"github.com/zeebo/blake3"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Cloudinary shouldn't have a trailing dot if there is no path
|
|
||||||
func cldPathDir(somePath string) string {
|
|
||||||
if somePath == "" || somePath == "." {
|
|
||||||
return somePath
|
|
||||||
}
|
|
||||||
dir := path.Dir(somePath)
|
|
||||||
if dir == "." {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return dir
|
|
||||||
}
|
|
||||||
|
|
||||||
// Register with Fs
|
|
||||||
func init() {
|
|
||||||
fs.Register(&fs.RegInfo{
|
|
||||||
Name: "cloudinary",
|
|
||||||
Description: "Cloudinary",
|
|
||||||
NewFs: NewFs,
|
|
||||||
Options: []fs.Option{
|
|
||||||
{
|
|
||||||
Name: "cloud_name",
|
|
||||||
Help: "Cloudinary Environment Name",
|
|
||||||
Required: true,
|
|
||||||
Sensitive: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "api_key",
|
|
||||||
Help: "Cloudinary API Key",
|
|
||||||
Required: true,
|
|
||||||
Sensitive: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "api_secret",
|
|
||||||
Help: "Cloudinary API Secret",
|
|
||||||
Required: true,
|
|
||||||
Sensitive: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "upload_prefix",
|
|
||||||
Help: "Specify the API endpoint for environments out of the US",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "upload_preset",
|
|
||||||
Help: "Upload Preset to select asset manipulation on upload",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: config.ConfigEncoding,
|
|
||||||
Help: config.ConfigEncodingHelp,
|
|
||||||
Advanced: true,
|
|
||||||
Default: (encoder.Base | // Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
|
|
||||||
encoder.EncodeSlash |
|
|
||||||
encoder.EncodeLtGt |
|
|
||||||
encoder.EncodeDoubleQuote |
|
|
||||||
encoder.EncodeQuestion |
|
|
||||||
encoder.EncodeAsterisk |
|
|
||||||
encoder.EncodePipe |
|
|
||||||
encoder.EncodeHash |
|
|
||||||
encoder.EncodePercent |
|
|
||||||
encoder.EncodeBackSlash |
|
|
||||||
encoder.EncodeDel |
|
|
||||||
encoder.EncodeCtl |
|
|
||||||
encoder.EncodeRightSpace |
|
|
||||||
encoder.EncodeInvalidUtf8 |
|
|
||||||
encoder.EncodeDot),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "eventually_consistent_delay",
|
|
||||||
Default: fs.Duration(0),
|
|
||||||
Advanced: true,
|
|
||||||
Help: "Wait N seconds for eventual consistency of the databases that support the backend operation",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "adjust_media_files_extensions",
|
|
||||||
Default: true,
|
|
||||||
Advanced: true,
|
|
||||||
Help: "Cloudinary handles media formats as a file attribute and strips it from the name, which is unlike most other file systems",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "media_extensions",
|
|
||||||
Default: []string{
|
|
||||||
"3ds", "3g2", "3gp", "ai", "arw", "avi", "avif", "bmp", "bw",
|
|
||||||
"cr2", "cr3", "djvu", "dng", "eps3", "fbx", "flif", "flv", "gif",
|
|
||||||
"glb", "gltf", "hdp", "heic", "heif", "ico", "indd", "jp2", "jpe",
|
|
||||||
"jpeg", "jpg", "jxl", "jxr", "m2ts", "mov", "mp4", "mpeg", "mts",
|
|
||||||
"mxf", "obj", "ogv", "pdf", "ply", "png", "psd", "svg", "tga",
|
|
||||||
"tif", "tiff", "ts", "u3ma", "usdz", "wdp", "webm", "webp", "wmv"},
|
|
||||||
Advanced: true,
|
|
||||||
Help: "Cloudinary supported media extensions",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Options defines the configuration for this backend
|
|
||||||
type Options struct {
|
|
||||||
CloudName string `config:"cloud_name"`
|
|
||||||
APIKey string `config:"api_key"`
|
|
||||||
APISecret string `config:"api_secret"`
|
|
||||||
UploadPrefix string `config:"upload_prefix"`
|
|
||||||
UploadPreset string `config:"upload_preset"`
|
|
||||||
Enc encoder.MultiEncoder `config:"encoding"`
|
|
||||||
EventuallyConsistentDelay fs.Duration `config:"eventually_consistent_delay"`
|
|
||||||
MediaExtensions []string `config:"media_extensions"`
|
|
||||||
AdjustMediaFilesExtensions bool `config:"adjust_media_files_extensions"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fs represents a remote cloudinary server
|
|
||||||
type Fs struct {
|
|
||||||
name string
|
|
||||||
root string
|
|
||||||
opt Options
|
|
||||||
features *fs.Features
|
|
||||||
pacer *fs.Pacer
|
|
||||||
srv *rest.Client // For downloading assets via the Cloudinary CDN
|
|
||||||
cld *cloudinary.Cloudinary // API calls are going through the Cloudinary SDK
|
|
||||||
lastCRUD time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// Object describes a cloudinary object
|
|
||||||
type Object struct {
|
|
||||||
fs *Fs
|
|
||||||
remote string
|
|
||||||
size int64
|
|
||||||
modTime time.Time
|
|
||||||
url string
|
|
||||||
md5sum string
|
|
||||||
publicID string
|
|
||||||
resourceType string
|
|
||||||
deliveryType string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewFs constructs an Fs from the path, bucket:path
|
|
||||||
func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
|
|
||||||
opt := new(Options)
|
|
||||||
err := configstruct.Set(m, opt)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Initialize the Cloudinary client
|
|
||||||
cld, err := cloudinary.NewFromParams(opt.CloudName, opt.APIKey, opt.APISecret)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to create Cloudinary client: %w", err)
|
|
||||||
}
|
|
||||||
cld.Admin.Client = *fshttp.NewClient(ctx)
|
|
||||||
cld.Upload.Client = *fshttp.NewClient(ctx)
|
|
||||||
if opt.UploadPrefix != "" {
|
|
||||||
cld.Config.API.UploadPrefix = opt.UploadPrefix
|
|
||||||
}
|
|
||||||
client := fshttp.NewClient(ctx)
|
|
||||||
f := &Fs{
|
|
||||||
name: name,
|
|
||||||
root: root,
|
|
||||||
opt: *opt,
|
|
||||||
cld: cld,
|
|
||||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(1000), pacer.MaxSleep(10000), pacer.DecayConstant(2))),
|
|
||||||
srv: rest.NewClient(client),
|
|
||||||
}
|
|
||||||
|
|
||||||
f.features = (&fs.Features{
|
|
||||||
CanHaveEmptyDirectories: true,
|
|
||||||
}).Fill(ctx, f)
|
|
||||||
|
|
||||||
if root != "" {
|
|
||||||
// Check to see if the root actually an existing file
|
|
||||||
remote := path.Base(root)
|
|
||||||
f.root = cldPathDir(root)
|
|
||||||
_, err := f.NewObject(ctx, remote)
|
|
||||||
if err != nil {
|
|
||||||
if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) {
|
|
||||||
// File doesn't exist so return the previous root
|
|
||||||
f.root = root
|
|
||||||
return f, nil
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// return an error with an fs which points to the parent
|
|
||||||
return f, fs.ErrorIsFile
|
|
||||||
}
|
|
||||||
return f, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ------------------------------------------------------------
|
|
||||||
|
|
||||||
// FromStandardPath implementation of the api.CloudinaryEncoder
|
|
||||||
func (f *Fs) FromStandardPath(s string) string {
|
|
||||||
return strings.ReplaceAll(f.opt.Enc.FromStandardPath(s), "&", "\uFF06")
|
|
||||||
}
|
|
||||||
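The `FromStandardPath` implementation shown here swaps a literal `&` for the fullwidth form `＆` (U+FF06) on top of rclone's configured encoder, and `ToStandardPath` reverses it. A tiny round-trip sketch of just that substitution (without the MultiEncoder part):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	const fullwidthAmp = "\uFF06" // ＆

	encoded := strings.ReplaceAll("tom & jerry", "&", fullwidthAmp)
	decoded := strings.ReplaceAll(encoded, fullwidthAmp, "&")

	fmt.Println(encoded)                  // tom ＆ jerry
	fmt.Println(decoded == "tom & jerry") // true
}
```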
|
|
||||||
// FromStandardName implementation of the api.CloudinaryEncoder
|
|
||||||
func (f *Fs) FromStandardName(s string) string {
|
|
||||||
if f.opt.AdjustMediaFilesExtensions {
|
|
||||||
parsedURL, err := url.Parse(s)
|
|
||||||
ext := ""
|
|
||||||
if err != nil {
|
|
||||||
fs.Logf(nil, "Error parsing URL: %v", err)
|
|
||||||
} else {
|
|
||||||
ext = path.Ext(parsedURL.Path)
|
|
||||||
if slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
|
|
||||||
s = strings.TrimSuffix(parsedURL.Path, ext)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return strings.ReplaceAll(f.opt.Enc.FromStandardName(s), "&", "\uFF06")
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToStandardPath implementation of the api.CloudinaryEncoder
|
|
||||||
func (f *Fs) ToStandardPath(s string) string {
|
|
||||||
return strings.ReplaceAll(f.opt.Enc.ToStandardPath(s), "\uFF06", "&")
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToStandardName implementation of the api.CloudinaryEncoder
|
|
||||||
func (f *Fs) ToStandardName(s string, assetURL string) string {
|
|
||||||
ext := ""
|
|
||||||
if f.opt.AdjustMediaFilesExtensions {
|
|
||||||
parsedURL, err := url.Parse(assetURL)
|
|
||||||
if err != nil {
|
|
||||||
fs.Logf(nil, "Error parsing URL: %v", err)
|
|
||||||
} else {
|
|
||||||
ext = path.Ext(parsedURL.Path)
|
|
||||||
if !slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
|
|
||||||
ext = ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return strings.ReplaceAll(f.opt.Enc.ToStandardName(s), "\uFF06", "&") + ext
|
|
||||||
}
|
|
||||||
|
|
||||||
// FromStandardFullPath encodes a full path to Cloudinary standard
|
|
||||||
func (f *Fs) FromStandardFullPath(dir string) string {
|
|
||||||
return path.Join(api.CloudinaryEncoder.FromStandardPath(f, f.root), api.CloudinaryEncoder.FromStandardPath(f, dir))
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToAssetFolderAPI encodes folders as expected by the Cloudinary SDK
|
|
||||||
func (f *Fs) ToAssetFolderAPI(dir string) string {
|
|
||||||
return strings.ReplaceAll(dir, "%", "%25")
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToDisplayNameElastic encodes a special case of elasticsearch
|
|
||||||
func (f *Fs) ToDisplayNameElastic(dir string) string {
|
|
||||||
return strings.ReplaceAll(dir, "!", "\\!")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name of the remote (as passed into NewFs)
|
|
||||||
func (f *Fs) Name() string {
|
|
||||||
return f.name
|
|
||||||
}
|
|
||||||
|
|
||||||
// Root of the remote (as passed into NewFs)
|
|
||||||
func (f *Fs) Root() string {
|
|
||||||
return f.root
|
|
||||||
}
|
|
||||||
|
|
||||||
// WaitEventuallyConsistent waits till the FS is eventually consistent
|
|
||||||
func (f *Fs) WaitEventuallyConsistent() {
|
|
||||||
if f.opt.EventuallyConsistentDelay == fs.Duration(0) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
delay := time.Duration(f.opt.EventuallyConsistentDelay)
|
|
||||||
timeSinceLastCRUD := time.Since(f.lastCRUD)
|
|
||||||
if timeSinceLastCRUD < delay {
|
|
||||||
time.Sleep(delay - timeSinceLastCRUD)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
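`WaitEventuallyConsistent` throttles reads that follow a write: it sleeps only for whatever is left of the configured delay since the last create/update/delete. A standalone sketch of that pattern (field names are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

type consistencyGate struct {
	delay    time.Duration
	lastCRUD time.Time
}

// wait sleeps for the remainder of the delay window, if any.
func (g *consistencyGate) wait() {
	if g.delay == 0 {
		return
	}
	if elapsed := time.Since(g.lastCRUD); elapsed < g.delay {
		time.Sleep(g.delay - elapsed)
	}
}

func main() {
	g := &consistencyGate{delay: 100 * time.Millisecond, lastCRUD: time.Now()}
	start := time.Now()
	g.wait()
	fmt.Println("waited about", time.Since(start).Round(10*time.Millisecond))
}
```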
|
|
||||||
// String converts this Fs to a string
|
|
||||||
func (f *Fs) String() string {
|
|
||||||
return fmt.Sprintf("Cloudinary root '%s'", f.root)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Features returns the optional features of this Fs
|
|
||||||
func (f *Fs) Features() *fs.Features {
|
|
||||||
return f.features
|
|
||||||
}
|
|
||||||
|
|
||||||
// List the objects and directories in dir into entries
|
|
||||||
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
|
|
||||||
remotePrefix := f.FromStandardFullPath(dir)
|
|
||||||
if remotePrefix != "" && !strings.HasSuffix(remotePrefix, "/") {
|
|
||||||
remotePrefix += "/"
|
|
||||||
}
|
|
||||||
|
|
||||||
var entries fs.DirEntries
|
|
||||||
dirs := make(map[string]struct{})
|
|
||||||
nextCursor := ""
|
|
||||||
f.WaitEventuallyConsistent()
|
|
||||||
for {
|
|
||||||
// user the folders api to list folders.
|
|
||||||
folderParams := admin.SubFoldersParams{
|
|
||||||
Folder: f.ToAssetFolderAPI(remotePrefix),
|
|
||||||
MaxResults: 500,
|
|
||||||
}
|
|
||||||
if nextCursor != "" {
|
|
||||||
folderParams.NextCursor = nextCursor
|
|
||||||
}
|
|
||||||
|
|
||||||
results, err := f.cld.Admin.SubFolders(ctx, folderParams)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to list sub-folders: %w", err)
|
|
||||||
}
|
|
||||||
if results.Error.Message != "" {
|
|
||||||
if strings.HasPrefix(results.Error.Message, "Can't find folder with path") {
|
|
||||||
return nil, fs.ErrorDirNotFound
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, fmt.Errorf("failed to list sub-folders: %s", results.Error.Message)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, folder := range results.Folders {
|
|
||||||
relativePath := api.CloudinaryEncoder.ToStandardPath(f, strings.TrimPrefix(folder.Path, remotePrefix))
|
|
||||||
parts := strings.Split(relativePath, "/")
|
|
||||||
|
|
||||||
// It's a directory
|
|
||||||
dirName := parts[len(parts)-1]
|
|
||||||
if _, found := dirs[dirName]; !found {
|
|
||||||
d := fs.NewDir(path.Join(dir, dirName), time.Time{})
|
|
||||||
entries = append(entries, d)
|
|
||||||
dirs[dirName] = struct{}{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Break if there are no more results
|
|
||||||
if results.NextCursor == "" {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
nextCursor = results.NextCursor
|
|
||||||
}
|
|
||||||
|
|
||||||
for {
|
|
||||||
// Use the assets.AssetsByAssetFolder API to list assets
|
|
||||||
assetsParams := admin.AssetsByAssetFolderParams{
|
|
||||||
AssetFolder: remotePrefix,
|
|
||||||
MaxResults: 500,
|
|
||||||
}
|
|
||||||
if nextCursor != "" {
|
|
||||||
assetsParams.NextCursor = nextCursor
|
|
||||||
}
|
|
||||||
|
|
||||||
results, err := f.cld.Admin.AssetsByAssetFolder(ctx, assetsParams)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to list assets: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, asset := range results.Assets {
|
|
||||||
remote := path.Join(dir, api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName, asset.SecureURL))
|
|
||||||
o := &Object{
|
|
||||||
fs: f,
|
|
||||||
remote: remote,
|
|
||||||
size: int64(asset.Bytes),
|
|
||||||
modTime: asset.CreatedAt,
|
|
||||||
url: asset.SecureURL,
|
|
||||||
publicID: asset.PublicID,
|
|
||||||
resourceType: asset.AssetType,
|
|
||||||
deliveryType: asset.Type,
|
|
||||||
}
|
|
||||||
entries = append(entries, o)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Break if there are no more results
|
|
||||||
if results.NextCursor == "" {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
nextCursor = results.NextCursor
|
|
||||||
}
|
|
||||||
|
|
||||||
return entries, nil
|
|
||||||
}
|
|
||||||
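Both loops in `List` above page through Cloudinary results with a `NextCursor` token: request a page, append its results, and stop when the cursor comes back empty. A generic sketch of that cursor loop with a stubbed page function (`fetchPage` is a hypothetical stand-in, not the Cloudinary SDK):

```go
package main

import "fmt"

// page is a stand-in for one API response.
type page struct {
	Items      []string
	NextCursor string
}

// fetchPage is a hypothetical stub standing in for an SDK call.
func fetchPage(cursor string) page {
	if cursor == "" {
		return page{Items: []string{"a", "b"}, NextCursor: "c1"}
	}
	return page{Items: []string{"c"}, NextCursor: ""}
}

func main() {
	var all []string
	cursor := ""
	for {
		p := fetchPage(cursor)
		all = append(all, p.Items...)
		if p.NextCursor == "" {
			break // no more pages
		}
		cursor = p.NextCursor
	}
	fmt.Println(all) // [a b c]
}
```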
|
|
||||||
// NewObject finds the Object at remote. If it can't be found it returns the error fs.ErrorObjectNotFound.
|
|
||||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
|
||||||
searchParams := search.Query{
|
|
||||||
Expression: fmt.Sprintf("asset_folder:\"%s\" AND display_name:\"%s\"",
|
|
||||||
f.FromStandardFullPath(cldPathDir(remote)),
|
|
||||||
f.ToDisplayNameElastic(api.CloudinaryEncoder.FromStandardName(f, path.Base(remote)))),
|
|
||||||
SortBy: []search.SortByField{{"uploaded_at": "desc"}},
|
|
||||||
MaxResults: 2,
|
|
||||||
}
|
|
||||||
var results *admin.SearchResult
|
|
||||||
f.WaitEventuallyConsistent()
|
|
||||||
err := f.pacer.Call(func() (bool, error) {
|
|
||||||
var err1 error
|
|
||||||
results, err1 = f.cld.Admin.Search(ctx, searchParams)
|
|
||||||
if err1 == nil && results.TotalCount != len(results.Assets) {
|
|
||||||
err1 = errors.New("partial response so waiting for eventual consistency")
|
|
||||||
}
|
|
||||||
return shouldRetry(ctx, nil, err1)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, fs.ErrorObjectNotFound
|
|
||||||
}
|
|
||||||
if results.TotalCount == 0 || len(results.Assets) == 0 {
|
|
||||||
return nil, fs.ErrorObjectNotFound
|
|
||||||
}
|
|
||||||
asset := results.Assets[0]
|
|
||||||
|
|
||||||
o := &Object{
|
|
||||||
fs: f,
|
|
||||||
remote: remote,
|
|
||||||
size: int64(asset.Bytes),
|
|
||||||
modTime: asset.UploadedAt,
|
|
||||||
url: asset.SecureURL,
|
|
||||||
md5sum: asset.Etag,
|
|
||||||
publicID: asset.PublicID,
|
|
||||||
resourceType: asset.ResourceType,
|
|
||||||
deliveryType: asset.Type,
|
|
||||||
}
|
|
||||||
|
|
||||||
return o, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) getSuggestedPublicID(assetFolder string, displayName string, modTime time.Time) string {
|
|
||||||
payload := []byte(path.Join(assetFolder, displayName))
|
|
||||||
hash := blake3.Sum256(payload)
|
|
||||||
return hex.EncodeToString(hash[:])
|
|
||||||
}
|
|
||||||
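`getSuggestedPublicID` derives a deterministic public ID by hashing the asset folder plus display name, so the same asset always maps to the same Cloudinary public ID. The call shape below mirrors the removed code (`blake3.Sum256` from github.com/zeebo/blake3 plus hex encoding); a minimal standalone version:

```go
package main

import (
	"encoding/hex"
	"fmt"
	"path"

	"github.com/zeebo/blake3"
)

// suggestedPublicID hashes folder+name into a stable hex identifier.
func suggestedPublicID(assetFolder, displayName string) string {
	payload := []byte(path.Join(assetFolder, displayName))
	sum := blake3.Sum256(payload)
	return hex.EncodeToString(sum[:])
}

func main() {
	fmt.Println(suggestedPublicID("photos/2024", "cat.jpg"))
}
```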
|
|
||||||
// Put uploads content to Cloudinary
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	if src.Size() == 0 {
		return nil, fs.ErrorCantUploadEmptyFiles
	}

	params := uploader.UploadParams{
		UploadPreset: f.opt.UploadPreset,
	}

	updateObject := false
	var modTime time.Time
	for _, option := range options {
		if updateOptions, ok := option.(*api.UpdateOptions); ok {
			if updateOptions.PublicID != "" {
				updateObject = true
				params.Overwrite = SDKApi.Bool(true)
				params.Invalidate = SDKApi.Bool(true)
				params.PublicID = updateOptions.PublicID
				params.ResourceType = updateOptions.ResourceType
				params.Type = SDKApi.DeliveryType(updateOptions.DeliveryType)
				params.AssetFolder = updateOptions.AssetFolder
				params.DisplayName = updateOptions.DisplayName
				modTime = src.ModTime(ctx)
			}
		}
	}
	if !updateObject {
		params.AssetFolder = f.FromStandardFullPath(cldPathDir(src.Remote()))
		params.DisplayName = api.CloudinaryEncoder.FromStandardName(f, path.Base(src.Remote()))
		// We want to conform to the unique asset ID of rclone, which is (asset_folder,display_name,last_modified).
		// We also want to enable customers to choose their own public_id, in case duplicate names are not a crucial use case.
		// Upload_presets that apply randomness to the public ID would not work well with rclone duplicate assets support.
		params.FilenameOverride = f.getSuggestedPublicID(params.AssetFolder, params.DisplayName, src.ModTime(ctx))
	}
	uploadResult, err := f.cld.Upload.Upload(ctx, in, params)
	f.lastCRUD = time.Now()
	if err != nil {
		return nil, fmt.Errorf("failed to upload to Cloudinary: %w", err)
	}
	if !updateObject {
		modTime = uploadResult.CreatedAt
	}
	if uploadResult.Error.Message != "" {
		return nil, errors.New(uploadResult.Error.Message)
	}

	o := &Object{
		fs:           f,
		remote:       src.Remote(),
		size:         int64(uploadResult.Bytes),
		modTime:      modTime,
		url:          uploadResult.SecureURL,
		md5sum:       uploadResult.Etag,
		publicID:     uploadResult.PublicID,
		resourceType: uploadResult.ResourceType,
		deliveryType: uploadResult.Type,
	}
	return o, nil
}

// Precision of the remote
func (f *Fs) Precision() time.Duration {
	return fs.ModTimeNotSupported
}

// Hashes returns the supported hash sets
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}

// Mkdir creates empty folders
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	params := admin.CreateFolderParams{Folder: f.ToAssetFolderAPI(f.FromStandardFullPath(dir))}
	res, err := f.cld.Admin.CreateFolder(ctx, params)
	f.lastCRUD = time.Now()
	if err != nil {
		return err
	}
	if res.Error.Message != "" {
		return errors.New(res.Error.Message)
	}

	return nil
}

// Rmdir deletes empty folders
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	// Additional test because Cloudinary will delete folders without
	// assets, regardless of empty sub-folders
	folder := f.ToAssetFolderAPI(f.FromStandardFullPath(dir))
	folderParams := admin.SubFoldersParams{
		Folder:     folder,
		MaxResults: 1,
	}
	results, err := f.cld.Admin.SubFolders(ctx, folderParams)
	if err != nil {
		return err
	}
	if results.TotalCount > 0 {
		return fs.ErrorDirectoryNotEmpty
	}

	params := admin.DeleteFolderParams{Folder: folder}
	res, err := f.cld.Admin.DeleteFolder(ctx, params)
	f.lastCRUD = time.Now()
	if err != nil {
		return err
	}
	if res.Error.Message != "" {
		if strings.HasPrefix(res.Error.Message, "Can't find folder with path") {
			return fs.ErrorDirNotFound
		}

		return errors.New(res.Error.Message)
	}

	return nil
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	420, // Too Many Requests (legacy)
	429, // Too Many Requests
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	if err != nil {
		tryAgain := "Try again on "
		if idx := strings.Index(err.Error(), tryAgain); idx != -1 {
			layout := "2006-01-02 15:04:05 UTC"
			dateStr := err.Error()[idx+len(tryAgain) : idx+len(tryAgain)+len(layout)]
			timestamp, err2 := time.Parse(layout, dateStr)
			if err2 == nil {
				return true, fserrors.NewErrorRetryAfter(time.Until(timestamp))
			}
		}

		fs.Debugf(nil, "Retrying API error %v", err)
		return true, err
	}

	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

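To make the rate-limit branch above concrete: when the API error text contains "Try again on <timestamp> UTC", the code slices out a fixed-width timestamp and converts it into a retry-after delay. The snippet below demonstrates only the slicing and parsing; the surrounding error message is invented for the example.

package main

import (
	"fmt"
	"strings"
	"time"
)

func main() {
	// Invented error text in the shape the retry logic expects.
	msg := "Rate limit reached. Try again on 2030-01-02 15:04:05 UTC."
	tryAgain := "Try again on "
	layout := "2006-01-02 15:04:05 UTC"
	idx := strings.Index(msg, tryAgain)
	dateStr := msg[idx+len(tryAgain) : idx+len(tryAgain)+len(layout)]
	when, err := time.Parse(layout, dateStr)
	fmt.Println(when, err, time.Until(when) > 0) // parsed time, <nil>, true
}
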
// ------------------------------------------------------------

// Hash returns the MD5 of an object
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
	if ty != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	return o.md5sum, nil
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.modTime
}

// Size of object in bytes
func (o *Object) Size() int64 {
	return o.size
}

// Storable returns if this object is storable
func (o *Object) Storable() bool {
	return true
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	return fs.ErrorCantSetModTime
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	var resp *http.Response
	opts := rest.Opts{
		Method:  "GET",
		RootURL: o.url,
		Options: options,
	}
	var offset int64
	var count int64
	var key string
	var value string
	fs.FixRangeOption(options, o.size)
	for _, option := range options {
		switch x := option.(type) {
		case *fs.RangeOption:
			offset, count = x.Decode(o.size)
			if count < 0 {
				count = o.size - offset
			}
			key, value = option.Header()
		case *fs.SeekOption:
			offset = x.Offset
			count = o.size - offset
			key, value = option.Header()
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}
	if key != "" && value != "" {
		opts.ExtraHeaders = make(map[string]string)
		opts.ExtraHeaders[key] = value
	}
	// Make sure that the asset is fully available
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(ctx, &opts)
		if err == nil {
			cl, clErr := strconv.Atoi(resp.Header.Get("content-length"))
			if clErr == nil && count == int64(cl) {
				return false, nil
			}
		}
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, fmt.Errorf("failed download of \"%s\": %w", o.url, err)
	}
	return resp.Body, err
}

// Update the object with the contents of the io.Reader
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	options = append(options, &api.UpdateOptions{
		PublicID:     o.publicID,
		ResourceType: o.resourceType,
		DeliveryType: o.deliveryType,
		DisplayName:  api.CloudinaryEncoder.FromStandardName(o.fs, path.Base(o.Remote())),
		AssetFolder:  o.fs.FromStandardFullPath(cldPathDir(o.Remote())),
	})
	updatedObj, err := o.fs.Put(ctx, in, src, options...)
	if err != nil {
		return err
	}
	if uo, ok := updatedObj.(*Object); ok {
		o.size = uo.size
		o.modTime = time.Now() // Skipping uo.modTime because the API returns the create time
		o.url = uo.url
		o.md5sum = uo.md5sum
		o.publicID = uo.publicID
		o.resourceType = uo.resourceType
		o.deliveryType = uo.deliveryType
	}
	return nil
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	params := uploader.DestroyParams{
		PublicID:     o.publicID,
		ResourceType: o.resourceType,
		Type:         o.deliveryType,
	}
	res, dErr := o.fs.cld.Upload.Destroy(ctx, params)
	o.fs.lastCRUD = time.Now()
	if dErr != nil {
		return dErr
	}

	if res.Error.Message != "" {
		return errors.New(res.Error.Message)
	}

	if res.Result != "ok" {
		return errors.New(res.Result)
	}

	return nil
}

@@ -1,23 +0,0 @@
// Test Cloudinary filesystem interface

package cloudinary_test

import (
	"testing"

	"github.com/rclone/rclone/backend/cloudinary"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	name := "TestCloudinary"
	fstests.Run(t, &fstests.Opt{
		RemoteName:      name + ":",
		NilObject:       (*cloudinary.Object)(nil),
		SkipInvalidUTF8: true,
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "eventually_consistent_delay", Value: "7"},
		},
	})
}

@@ -1,4 +1,4 @@
|
|||||||
// Package combine implements a backend to combine multiple remotes in a directory tree
|
// Package combine implents a backend to combine multipe remotes in a directory tree
|
||||||
package combine
|
package combine
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@@ -20,7 +20,6 @@ import (
|
|||||||
"github.com/rclone/rclone/fs/config/configmap"
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
"github.com/rclone/rclone/fs/config/configstruct"
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
"github.com/rclone/rclone/fs/hash"
|
"github.com/rclone/rclone/fs/hash"
|
||||||
"github.com/rclone/rclone/fs/list"
|
|
||||||
"github.com/rclone/rclone/fs/operations"
|
"github.com/rclone/rclone/fs/operations"
|
||||||
"github.com/rclone/rclone/fs/walk"
|
"github.com/rclone/rclone/fs/walk"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
@@ -146,7 +145,6 @@ func (f *Fs) newUpstream(ctx context.Context, dir, remote string) (*upstream, er
|
|||||||
dir: dir,
|
dir: dir,
|
||||||
pathAdjustment: newAdjustment(f.root, dir),
|
pathAdjustment: newAdjustment(f.root, dir),
|
||||||
}
|
}
|
||||||
cache.PinUntilFinalized(u.f, u)
|
|
||||||
return u, nil
|
return u, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -187,6 +185,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
|
|||||||
g, gCtx := errgroup.WithContext(ctx)
|
g, gCtx := errgroup.WithContext(ctx)
|
||||||
var mu sync.Mutex
|
var mu sync.Mutex
|
||||||
for _, upstream := range opt.Upstreams {
|
for _, upstream := range opt.Upstreams {
|
||||||
|
upstream := upstream
|
||||||
g.Go(func() (err error) {
|
g.Go(func() (err error) {
|
||||||
equal := strings.IndexRune(upstream, '=')
|
equal := strings.IndexRune(upstream, '=')
|
||||||
if equal < 0 {
|
if equal < 0 {
|
||||||
@@ -222,40 +221,30 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
|
|||||||
}
|
}
|
||||||
// check features
|
// check features
|
||||||
var features = (&fs.Features{
|
var features = (&fs.Features{
|
||||||
CaseInsensitive: true,
|
CaseInsensitive: true,
|
||||||
DuplicateFiles: false,
|
DuplicateFiles: false,
|
||||||
ReadMimeType: true,
|
ReadMimeType: true,
|
||||||
WriteMimeType: true,
|
WriteMimeType: true,
|
||||||
CanHaveEmptyDirectories: true,
|
CanHaveEmptyDirectories: true,
|
||||||
BucketBased: true,
|
BucketBased: true,
|
||||||
SetTier: true,
|
SetTier: true,
|
||||||
GetTier: true,
|
GetTier: true,
|
||||||
ReadMetadata: true,
|
ReadMetadata: true,
|
||||||
WriteMetadata: true,
|
WriteMetadata: true,
|
||||||
UserMetadata: true,
|
UserMetadata: true,
|
||||||
ReadDirMetadata: true,
|
|
||||||
WriteDirMetadata: true,
|
|
||||||
WriteDirSetModTime: true,
|
|
||||||
UserDirMetadata: true,
|
|
||||||
DirModTimeUpdatesOnWrite: true,
|
|
||||||
PartialUploads: true,
|
|
||||||
}).Fill(ctx, f)
|
}).Fill(ctx, f)
|
||||||
canMove, slowHash := true, false
|
canMove := true
|
||||||
for _, u := range f.upstreams {
|
for _, u := range f.upstreams {
|
||||||
features = features.Mask(ctx, u.f) // Mask all upstream fs
|
features = features.Mask(ctx, u.f) // Mask all upstream fs
|
||||||
if !operations.CanServerSideMove(u.f) {
|
if !operations.CanServerSideMove(u.f) {
|
||||||
canMove = false
|
canMove = false
|
||||||
}
|
}
|
||||||
slowHash = slowHash || u.f.Features().SlowHash
|
|
||||||
}
|
}
|
||||||
// We can move if all remotes support Move or Copy
|
// We can move if all remotes support Move or Copy
|
||||||
if canMove {
|
if canMove {
|
||||||
features.Move = f.Move
|
features.Move = f.Move
|
||||||
}
|
}
|
||||||
|
|
||||||
// If any of upstreams are SlowHash, propagate it
|
|
||||||
features.SlowHash = slowHash
|
|
||||||
|
|
||||||
// Enable ListR when upstreams either support ListR or is local
|
// Enable ListR when upstreams either support ListR or is local
|
||||||
// But not when all upstreams are local
|
// But not when all upstreams are local
|
||||||
if features.ListR == nil {
|
if features.ListR == nil {
|
||||||
@@ -269,9 +258,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Enable ListP always
|
|
||||||
features.ListP = f.ListP
|
|
||||||
|
|
||||||
// Enable Purge when any upstreams support it
|
// Enable Purge when any upstreams support it
|
||||||
if features.Purge == nil {
|
if features.Purge == nil {
|
||||||
for _, u := range f.upstreams {
|
for _, u := range f.upstreams {
|
||||||
@@ -302,16 +288,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Enable CleanUp when any upstreams support it
|
|
||||||
if features.CleanUp == nil {
|
|
||||||
for _, u := range f.upstreams {
|
|
||||||
if u.f.Features().CleanUp != nil {
|
|
||||||
features.CleanUp = f.CleanUp
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Enable ChangeNotify when any upstreams support it
|
// Enable ChangeNotify when any upstreams support it
|
||||||
if features.ChangeNotify == nil {
|
if features.ChangeNotify == nil {
|
||||||
for _, u := range f.upstreams {
|
for _, u := range f.upstreams {
|
||||||
@@ -322,9 +298,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// show that we wrap other backends
|
|
||||||
features.Overlay = true
|
|
||||||
|
|
||||||
f.features = features
|
f.features = features
|
||||||
|
|
||||||
// Get common intersection of hashes
|
// Get common intersection of hashes
|
||||||
@@ -369,6 +342,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
|
|||||||
func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream) error) error {
|
func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream) error) error {
|
||||||
g, gCtx := errgroup.WithContext(ctx)
|
g, gCtx := errgroup.WithContext(ctx)
|
||||||
for _, u := range f.upstreams {
|
for _, u := range f.upstreams {
|
||||||
|
u := u
|
||||||
g.Go(func() (err error) {
|
g.Go(func() (err error) {
|
||||||
return fn(gCtx, u)
|
return fn(gCtx, u)
|
||||||
})
|
})
|
||||||
@@ -376,7 +350,7 @@ func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream
|
|||||||
return g.Wait()
|
return g.Wait()
|
||||||
}
|
}
|
||||||
|
|
||||||
// join the elements together but unlike path.Join return empty string
|
// join the elements together but unline path.Join return empty string
|
||||||
func join(elem ...string) string {
|
func join(elem ...string) string {
|
||||||
result := path.Join(elem...)
|
result := path.Join(elem...)
|
||||||
if result == "." {
|
if result == "." {
|
||||||
@@ -451,32 +425,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
|||||||
return u.f.Mkdir(ctx, uRemote)
|
return u.f.Mkdir(ctx, uRemote)
|
||||||
}
|
}
|
||||||
|
|
||||||
// MkdirMetadata makes the root directory of the Fs object
|
|
||||||
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
|
|
||||||
u, uRemote, err := f.findUpstream(dir)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
do := u.f.Features().MkdirMetadata
|
|
||||||
if do == nil {
|
|
||||||
return nil, fs.ErrorNotImplemented
|
|
||||||
}
|
|
||||||
newDir, err := do(ctx, uRemote, metadata)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
entries := fs.DirEntries{newDir}
|
|
||||||
entries, err = u.wrapEntries(ctx, entries)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
newDir, ok := entries[0].(fs.Directory)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("internal error: expecting %T to be fs.Directory", entries[0])
|
|
||||||
}
|
|
||||||
return newDir, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// purge the upstream or fallback to a slow way
|
// purge the upstream or fallback to a slow way
|
||||||
func (u *upstream) purge(ctx context.Context, dir string) (err error) {
|
func (u *upstream) purge(ctx context.Context, dir string) (err error) {
|
||||||
if do := u.f.Features().Purge; do != nil {
|
if do := u.f.Features().Purge; do != nil {
|
||||||
@@ -508,9 +456,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
|
|||||||
|
|
||||||
// Copy src to this remote using server-side copy operations.
|
// Copy src to this remote using server-side copy operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given.
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
// It returns the destination Object and a possible error.
|
// It returns the destination Object and a possible error
|
||||||
//
|
//
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
@@ -542,9 +490,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
|
|
||||||
// Move src to this remote using server-side move operations.
|
// Move src to this remote using server-side move operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given.
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
// It returns the destination Object and a possible error.
|
// It returns the destination Object and a possible error
|
||||||
//
|
//
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
@@ -635,6 +583,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
|
|||||||
var uChans []chan time.Duration
|
var uChans []chan time.Duration
|
||||||
|
|
||||||
for _, u := range f.upstreams {
|
for _, u := range f.upstreams {
|
||||||
|
u := u
|
||||||
if do := u.f.Features().ChangeNotify; do != nil {
|
if do := u.f.Features().ChangeNotify; do != nil {
|
||||||
ch := make(chan time.Duration)
|
ch := make(chan time.Duration)
|
||||||
uChans = append(uChans, ch)
|
uChans = append(uChans, ch)
|
||||||
@@ -681,7 +630,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
uSrc := fs.NewOverrideRemote(src, uRemote)
|
uSrc := operations.NewOverrideRemote(src, uRemote)
|
||||||
var o fs.Object
|
var o fs.Object
|
||||||
if stream {
|
if stream {
|
||||||
o, err = u.f.Features().PutStream(ctx, in, uSrc, options...)
|
o, err = u.f.Features().PutStream(ctx, in, uSrc, options...)
|
||||||
@@ -791,11 +740,12 @@ func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.D
|
|||||||
case fs.Object:
|
case fs.Object:
|
||||||
entries[i] = u.newObject(x)
|
entries[i] = u.newObject(x)
|
||||||
case fs.Directory:
|
case fs.Directory:
|
||||||
newPath, err := u.pathAdjustment.do(x.Remote())
|
newDir := fs.NewDirCopy(ctx, x)
|
||||||
|
newPath, err := u.pathAdjustment.do(newDir.Remote())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
newDir := fs.NewDirWrapper(newPath, x)
|
newDir.SetRemote(newPath)
|
||||||
entries[i] = newDir
|
entries[i] = newDir
|
||||||
default:
|
default:
|
||||||
return nil, fmt.Errorf("unknown entry type %T", entry)
|
return nil, fmt.Errorf("unknown entry type %T", entry)
|
||||||
@@ -814,52 +764,24 @@ func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.D
|
|||||||
// This should return ErrDirNotFound if the directory isn't
|
// This should return ErrDirNotFound if the directory isn't
|
||||||
// found.
|
// found.
|
||||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
return list.WithListP(ctx, dir, f)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListP lists the objects and directories of the Fs starting
|
|
||||||
// from dir non recursively into out.
|
|
||||||
//
|
|
||||||
// dir should be "" to start from the root, and should not
|
|
||||||
// have trailing slashes.
|
|
||||||
//
|
|
||||||
// This should return ErrDirNotFound if the directory isn't
|
|
||||||
// found.
|
|
||||||
//
|
|
||||||
// It should call callback for each tranche of entries read.
|
|
||||||
// These need not be returned in any particular order. If
|
|
||||||
// callback returns an error then the listing will stop
|
|
||||||
// immediately.
|
|
||||||
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
|
|
||||||
// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
|
// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
|
||||||
if f.root == "" && dir == "" {
|
if f.root == "" && dir == "" {
|
||||||
entries := make(fs.DirEntries, 0, len(f.upstreams))
|
entries = make(fs.DirEntries, 0, len(f.upstreams))
|
||||||
for combineDir := range f.upstreams {
|
for combineDir := range f.upstreams {
|
||||||
d := fs.NewLimitedDirWrapper(combineDir, fs.NewDir(combineDir, f.when))
|
d := fs.NewDir(combineDir, f.when)
|
||||||
entries = append(entries, d)
|
entries = append(entries, d)
|
||||||
}
|
}
|
||||||
return callback(entries)
|
return entries, nil
|
||||||
}
|
}
|
||||||
u, uRemote, err := f.findUpstream(dir)
|
u, uRemote, err := f.findUpstream(dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return nil, err
|
||||||
}
|
}
|
||||||
wrappedCallback := func(entries fs.DirEntries) error {
|
entries, err = u.f.List(ctx, uRemote)
|
||||||
entries, err := u.wrapEntries(ctx, entries)
|
if err != nil {
|
||||||
if err != nil {
|
return nil, err
|
||||||
return err
|
|
||||||
}
|
|
||||||
return callback(entries)
|
|
||||||
}
|
}
|
||||||
listP := u.f.Features().ListP
|
return u.wrapEntries(ctx, entries)
|
||||||
if listP == nil {
|
|
||||||
entries, err := u.f.List(ctx, uRemote)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return wrappedCallback(entries)
|
|
||||||
}
|
|
||||||
return listP(ctx, uRemote, wrappedCallback)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListR lists the objects and directories of the Fs starting
|
// ListR lists the objects and directories of the Fs starting
|
||||||
@@ -964,116 +886,6 @@ func (f *Fs) Shutdown(ctx context.Context) error {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// PublicLink generates a public link to the remote path (usually readable by anyone)
|
|
||||||
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
|
|
||||||
u, uRemote, err := f.findUpstream(remote)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
do := u.f.Features().PublicLink
|
|
||||||
if do == nil {
|
|
||||||
return "", fs.ErrorNotImplemented
|
|
||||||
}
|
|
||||||
return do(ctx, uRemote, expire, unlink)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PutUnchecked in to the remote path with the modTime given of the given size
|
|
||||||
//
|
|
||||||
// May create the object even if it returns an error - if so
|
|
||||||
// will return the object and the error, otherwise will return
|
|
||||||
// nil and the error
|
|
||||||
//
|
|
||||||
// May create duplicates or return errors if src already
|
|
||||||
// exists.
|
|
||||||
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
|
||||||
srcPath := src.Remote()
|
|
||||||
u, uRemote, err := f.findUpstream(srcPath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
do := u.f.Features().PutUnchecked
|
|
||||||
if do == nil {
|
|
||||||
return nil, fs.ErrorNotImplemented
|
|
||||||
}
|
|
||||||
uSrc := fs.NewOverrideRemote(src, uRemote)
|
|
||||||
return do(ctx, in, uSrc, options...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MergeDirs merges the contents of all the directories passed
|
|
||||||
// in into the first one and rmdirs the other directories.
|
|
||||||
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
|
|
||||||
if len(dirs) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
var (
|
|
||||||
u *upstream
|
|
||||||
uDirs []fs.Directory
|
|
||||||
)
|
|
||||||
for _, dir := range dirs {
|
|
||||||
uNew, uDir, err := f.findUpstream(dir.Remote())
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if u == nil {
|
|
||||||
u = uNew
|
|
||||||
} else if u != uNew {
|
|
||||||
return fmt.Errorf("can't merge directories from different upstreams")
|
|
||||||
}
|
|
||||||
uDirs = append(uDirs, fs.NewOverrideDirectory(dir, uDir))
|
|
||||||
}
|
|
||||||
do := u.f.Features().MergeDirs
|
|
||||||
if do == nil {
|
|
||||||
return fs.ErrorNotImplemented
|
|
||||||
}
|
|
||||||
return do(ctx, uDirs)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DirSetModTime sets the directory modtime for dir
|
|
||||||
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
|
|
||||||
u, uDir, err := f.findUpstream(dir)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if uDir == "" {
|
|
||||||
fs.Debugf(dir, "Can't set modtime on upstream root. skipping.")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if do := u.f.Features().DirSetModTime; do != nil {
|
|
||||||
return do(ctx, uDir, modTime)
|
|
||||||
}
|
|
||||||
return fs.ErrorNotImplemented
|
|
||||||
}
|
|
||||||
|
|
||||||
// CleanUp the trash in the Fs
|
|
||||||
//
|
|
||||||
// Implement this if you have a way of emptying the trash or
|
|
||||||
// otherwise cleaning up old versions of files.
|
|
||||||
func (f *Fs) CleanUp(ctx context.Context) error {
|
|
||||||
return f.multithread(ctx, func(ctx context.Context, u *upstream) error {
|
|
||||||
if do := u.f.Features().CleanUp; do != nil {
|
|
||||||
return do(ctx)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// OpenWriterAt opens with a handle for random access writes
|
|
||||||
//
|
|
||||||
// Pass in the remote desired and the size if known.
|
|
||||||
//
|
|
||||||
// It truncates any existing object
|
|
||||||
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
|
|
||||||
u, uRemote, err := f.findUpstream(remote)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
do := u.f.Features().OpenWriterAt
|
|
||||||
if do == nil {
|
|
||||||
return nil, fs.ErrorNotImplemented
|
|
||||||
}
|
|
||||||
return do(ctx, uRemote, size)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Object describes a wrapped Object
|
// Object describes a wrapped Object
|
||||||
//
|
//
|
||||||
// This is a wrapped Object which knows its path prefix
|
// This is a wrapped Object which knows its path prefix
|
||||||
@@ -1103,7 +915,7 @@ func (o *Object) String() string {
|
|||||||
func (o *Object) Remote() string {
|
func (o *Object) Remote() string {
|
||||||
newPath, err := o.u.pathAdjustment.do(o.Object.String())
|
newPath, err := o.u.pathAdjustment.do(o.Object.String())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Errorf(o.Object, "Bad object: %v", err)
|
fs.Errorf(o, "Bad object: %v", err)
|
||||||
return err.Error()
|
return err.Error()
|
||||||
}
|
}
|
||||||
return newPath
|
return newPath
|
||||||
@@ -1152,17 +964,6 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
|
|||||||
return do.Metadata(ctx)
|
return do.Metadata(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetMetadata sets metadata for an Object
|
|
||||||
//
|
|
||||||
// It should return fs.ErrorNotImplemented if it can't set metadata
|
|
||||||
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
|
|
||||||
do, ok := o.Object.(fs.SetMetadataer)
|
|
||||||
if !ok {
|
|
||||||
return fs.ErrorNotImplemented
|
|
||||||
}
|
|
||||||
return do.SetMetadata(ctx, metadata)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetTier performs changing storage tier of the Object if
|
// SetTier performs changing storage tier of the Object if
|
||||||
// multiple storage classes supported
|
// multiple storage classes supported
|
||||||
func (o *Object) SetTier(tier string) error {
|
func (o *Object) SetTier(tier string) error {
|
||||||
@@ -1186,12 +987,5 @@ var (
|
|||||||
_ fs.Abouter = (*Fs)(nil)
|
_ fs.Abouter = (*Fs)(nil)
|
||||||
_ fs.ListRer = (*Fs)(nil)
|
_ fs.ListRer = (*Fs)(nil)
|
||||||
_ fs.Shutdowner = (*Fs)(nil)
|
_ fs.Shutdowner = (*Fs)(nil)
|
||||||
_ fs.PublicLinker = (*Fs)(nil)
|
|
||||||
_ fs.PutUncheckeder = (*Fs)(nil)
|
|
||||||
_ fs.MergeDirser = (*Fs)(nil)
|
|
||||||
_ fs.DirSetModTimer = (*Fs)(nil)
|
|
||||||
_ fs.MkdirMetadataer = (*Fs)(nil)
|
|
||||||
_ fs.CleanUpper = (*Fs)(nil)
|
|
||||||
_ fs.OpenWriterAter = (*Fs)(nil)
|
|
||||||
_ fs.FullObject = (*Object)(nil)
|
_ fs.FullObject = (*Object)(nil)
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -10,11 +10,6 @@ import (
|
|||||||
"github.com/rclone/rclone/fstest/fstests"
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
|
||||||
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "OpenChunkWriter"}
|
|
||||||
unimplementableObjectMethods = []string{}
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestIntegration runs integration tests against the remote
|
// TestIntegration runs integration tests against the remote
|
||||||
func TestIntegration(t *testing.T) {
|
func TestIntegration(t *testing.T) {
|
||||||
if *fstest.RemoteName == "" {
|
if *fstest.RemoteName == "" {
|
||||||
@@ -22,8 +17,8 @@ func TestIntegration(t *testing.T) {
|
|||||||
}
|
}
|
||||||
fstests.Run(t, &fstests.Opt{
|
fstests.Run(t, &fstests.Opt{
|
||||||
RemoteName: *fstest.RemoteName,
|
RemoteName: *fstest.RemoteName,
|
||||||
UnimplementableFsMethods: unimplementableFsMethods,
|
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
|
||||||
UnimplementableObjectMethods: unimplementableObjectMethods,
|
UnimplementableObjectMethods: []string{"MimeType"},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -40,9 +35,7 @@ func TestLocal(t *testing.T) {
|
|||||||
{Name: name, Key: "type", Value: "combine"},
|
{Name: name, Key: "type", Value: "combine"},
|
||||||
{Name: name, Key: "upstreams", Value: upstreams},
|
{Name: name, Key: "upstreams", Value: upstreams},
|
||||||
},
|
},
|
||||||
QuickTestOK: true,
|
QuickTestOK: true,
|
||||||
UnimplementableFsMethods: unimplementableFsMethods,
|
|
||||||
UnimplementableObjectMethods: unimplementableObjectMethods,
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -58,9 +51,7 @@ func TestMemory(t *testing.T) {
|
|||||||
{Name: name, Key: "type", Value: "combine"},
|
{Name: name, Key: "type", Value: "combine"},
|
||||||
{Name: name, Key: "upstreams", Value: upstreams},
|
{Name: name, Key: "upstreams", Value: upstreams},
|
||||||
},
|
},
|
||||||
QuickTestOK: true,
|
QuickTestOK: true,
|
||||||
UnimplementableFsMethods: unimplementableFsMethods,
|
|
||||||
UnimplementableObjectMethods: unimplementableObjectMethods,
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -77,8 +68,6 @@ func TestMixed(t *testing.T) {
|
|||||||
{Name: name, Key: "type", Value: "combine"},
|
{Name: name, Key: "type", Value: "combine"},
|
||||||
{Name: name, Key: "upstreams", Value: upstreams},
|
{Name: name, Key: "upstreams", Value: upstreams},
|
||||||
},
|
},
|
||||||
UnimplementableFsMethods: unimplementableFsMethods,
|
|
||||||
UnimplementableObjectMethods: unimplementableObjectMethods,
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -2,8 +2,10 @@
|
|||||||
package compress
|
package compress
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bufio"
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
|
"crypto/md5"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
@@ -11,8 +13,8 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
|
||||||
"regexp"
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
@@ -27,8 +29,6 @@ import (
|
|||||||
"github.com/rclone/rclone/fs/config/configstruct"
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
"github.com/rclone/rclone/fs/fspath"
|
"github.com/rclone/rclone/fs/fspath"
|
||||||
"github.com/rclone/rclone/fs/hash"
|
"github.com/rclone/rclone/fs/hash"
|
||||||
"github.com/rclone/rclone/fs/list"
|
|
||||||
"github.com/rclone/rclone/fs/log"
|
|
||||||
"github.com/rclone/rclone/fs/object"
|
"github.com/rclone/rclone/fs/object"
|
||||||
"github.com/rclone/rclone/fs/operations"
|
"github.com/rclone/rclone/fs/operations"
|
||||||
)
|
)
|
||||||
@@ -37,14 +37,12 @@ import (
|
|||||||
const (
|
const (
|
||||||
initialChunkSize = 262144 // Initial and max sizes of chunks when reading parts of the file. Currently
|
initialChunkSize = 262144 // Initial and max sizes of chunks when reading parts of the file. Currently
|
||||||
maxChunkSize = 8388608 // at 256 KiB and 8 MiB.
|
maxChunkSize = 8388608 // at 256 KiB and 8 MiB.
|
||||||
chunkStreams = 0 // Streams to use for reading
|
|
||||||
|
|
||||||
bufferSize = 8388608
|
bufferSize = 8388608
|
||||||
heuristicBytes = 1048576
|
heuristicBytes = 1048576
|
||||||
minCompressionRatio = 1.1
|
minCompressionRatio = 1.1
|
||||||
|
|
||||||
gzFileExt = ".gz"
|
gzFileExt = ".gz"
|
||||||
zstdFileExt = ".zst"
|
|
||||||
metaFileExt = ".json"
|
metaFileExt = ".json"
|
||||||
uncompressedFileExt = ".bin"
|
uncompressedFileExt = ".bin"
|
||||||
)
|
)
|
||||||
@@ -53,7 +51,6 @@ const (
|
|||||||
const (
|
const (
|
||||||
Uncompressed = 0
|
Uncompressed = 0
|
||||||
Gzip = 2
|
Gzip = 2
|
||||||
Zstd = 4
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var nameRegexp = regexp.MustCompile(`^(.+?)\.([A-Za-z0-9-_]{11})$`)
|
var nameRegexp = regexp.MustCompile(`^(.+?)\.([A-Za-z0-9-_]{11})$`)
|
||||||
@@ -66,10 +63,6 @@ func init() {
|
|||||||
Value: "gzip",
|
Value: "gzip",
|
||||||
Help: "Standard gzip compression with fastest parameters.",
|
Help: "Standard gzip compression with fastest parameters.",
|
||||||
},
|
},
|
||||||
{
|
|
||||||
Value: "zstd",
|
|
||||||
Help: "Zstandard compression — fast modern algorithm offering adjustable speed-to-compression tradeoffs.",
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Register our remote
|
// Register our remote
|
||||||
@@ -91,23 +84,17 @@ func init() {
|
|||||||
Examples: compressionModeOptions,
|
Examples: compressionModeOptions,
|
||||||
}, {
|
}, {
|
||||||
Name: "level",
|
Name: "level",
|
||||||
Help: `GZIP (levels -2 to 9):
|
Help: `GZIP compression level (-2 to 9).
|
||||||
- -2 — Huffman encoding only. Only use if you know what you're doing.
|
|
||||||
- -1 (default) — recommended; equivalent to level 5.
|
Generally -1 (default, equivalent to 5) is recommended.
|
||||||
- 0 — turns off compression.
|
Levels 1 to 9 increase compression at the cost of speed. Going past 6
|
||||||
- 1–9 — increase compression at the cost of speed. Going past 6 generally offers very little return.
|
generally offers very little return.
|
||||||
|
|
||||||
ZSTD (levels 0 to 4):
|
Level -2 uses Huffmann encoding only. Only use if you know what you
|
||||||
- 0 — turns off compression entirely.
|
are doing.
|
||||||
- 1 — fastest compression with the lowest ratio.
|
Level 0 turns off compression.`,
|
||||||
- 2 (default) — good balance of speed and compression.
|
Default: sgzip.DefaultCompression,
|
||||||
- 3 — better compression, but uses about 2–3x more CPU than the default.
|
Advanced: true,
|
||||||
- 4 — best possible compression ratio (highest CPU cost).
|
|
||||||
|
|
||||||
Notes:
|
|
||||||
- Choose GZIP for wide compatibility; ZSTD for better speed/ratio tradeoffs.
|
|
||||||
- Negative gzip levels: -2 = Huffman-only, -1 = default (≈ level 5).`,
|
|
||||||
Required: true,
|
|
||||||
}, {
|
}, {
|
||||||
Name: "ram_cache_limit",
|
Name: "ram_cache_limit",
|
||||||
Help: `Some remotes don't allow the upload of files with unknown size.
|
Help: `Some remotes don't allow the upload of files with unknown size.
|
||||||
@@ -122,47 +109,6 @@ this limit will be cached on disk.`,
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
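For context, the level help text above pairs with the compression mode selection registered alongside it. A hypothetical rclone.conf entry might look like the following; the section name, the remote value and in particular the mode key are assumptions for illustration and are not confirmed by this diff.

[compressed]
type = compress
remote = s3:my-bucket/path
mode = zstd
level = 2
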
// compressionModeHandler defines the interface for handling different compression modes
|
|
||||||
type compressionModeHandler interface {
|
|
||||||
// processFileNameGetFileExtension returns the file extension for the given compression mode
|
|
||||||
processFileNameGetFileExtension(compressionMode int) string
|
|
||||||
|
|
||||||
// newObjectGetOriginalSize returns the original file size from the metadata
|
|
||||||
newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error)
|
|
||||||
|
|
||||||
// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
|
|
||||||
// the configured threshold
|
|
||||||
isCompressible(r io.Reader, compressionMode int) (bool, error)
|
|
||||||
|
|
||||||
// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
|
|
||||||
putCompress(
|
|
||||||
ctx context.Context,
|
|
||||||
f *Fs,
|
|
||||||
in io.Reader,
|
|
||||||
src fs.ObjectInfo,
|
|
||||||
options []fs.OpenOption,
|
|
||||||
mimeType string,
|
|
||||||
) (fs.Object, *ObjectMetadata, error)
|
|
||||||
|
|
||||||
// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
|
|
||||||
openGetReadCloser(
|
|
||||||
ctx context.Context,
|
|
||||||
o *Object,
|
|
||||||
offset int64,
|
|
||||||
limit int64,
|
|
||||||
cr chunkedreader.ChunkedReader,
|
|
||||||
closer io.Closer,
|
|
||||||
options ...fs.OpenOption,
|
|
||||||
) (rc io.ReadCloser, err error)
|
|
||||||
|
|
||||||
// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
|
|
||||||
putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error)
|
|
||||||
|
|
||||||
// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
|
|
||||||
// Warning: This function panics if cmeta is not of the expected type.
|
|
||||||
newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata
|
|
||||||
}
|
|
||||||
|
|
||||||
// Options defines the configuration for this backend
|
// Options defines the configuration for this backend
|
||||||
type Options struct {
|
type Options struct {
|
||||||
Remote string `config:"remote"`
|
Remote string `config:"remote"`
|
||||||
@@ -176,16 +122,15 @@ type Options struct {
|
|||||||
// Fs represents a wrapped fs.Fs
|
// Fs represents a wrapped fs.Fs
|
||||||
type Fs struct {
|
type Fs struct {
|
||||||
fs.Fs
|
fs.Fs
|
||||||
wrapper fs.Fs
|
wrapper fs.Fs
|
||||||
name string
|
name string
|
||||||
root string
|
root string
|
||||||
opt Options
|
opt Options
|
||||||
mode int // compression mode id
|
mode int // compression mode id
|
||||||
features *fs.Features // optional features
|
features *fs.Features // optional features
|
||||||
modeHandler compressionModeHandler // compression mode handler
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewFs constructs an Fs from the path, container:path
|
// NewFs contstructs an Fs from the path, container:path
|
||||||
func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) {
|
func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
// Parse config into Options struct
|
// Parse config into Options struct
|
||||||
opt := new(Options)
|
opt := new(Options)
|
||||||
@@ -219,56 +164,28 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
|||||||
return nil, fmt.Errorf("failed to make remote %s:%q to wrap: %w", wName, remotePath, err)
|
return nil, fmt.Errorf("failed to make remote %s:%q to wrap: %w", wName, remotePath, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
compressionMode := compressionModeFromName(opt.CompressionMode)
|
|
||||||
var modeHandler compressionModeHandler
|
|
||||||
|
|
||||||
switch compressionMode {
|
|
||||||
case Gzip:
|
|
||||||
modeHandler = &gzipModeHandler{}
|
|
||||||
case Zstd:
|
|
||||||
modeHandler = &zstdModeHandler{}
|
|
||||||
case Uncompressed:
|
|
||||||
modeHandler = &uncompressedModeHandler{}
|
|
||||||
default:
|
|
||||||
modeHandler = &unknownModeHandler{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create the wrapping fs
|
// Create the wrapping fs
|
||||||
f := &Fs{
|
f := &Fs{
|
||||||
Fs: wrappedFs,
|
Fs: wrappedFs,
|
||||||
name: name,
|
name: name,
|
||||||
root: rpath,
|
root: rpath,
|
||||||
opt: *opt,
|
opt: *opt,
|
||||||
mode: compressionMode,
|
mode: compressionModeFromName(opt.CompressionMode),
|
||||||
modeHandler: modeHandler,
|
|
||||||
}
|
|
||||||
// Correct root if definitely pointing to a file
|
|
||||||
if err == fs.ErrorIsFile {
|
|
||||||
f.root = path.Dir(f.root)
|
|
||||||
if f.root == "." || f.root == "/" {
|
|
||||||
f.root = ""
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
// the features here are ones we could support, and they are
|
// the features here are ones we could support, and they are
|
||||||
// ANDed with the ones from wrappedFs
|
// ANDed with the ones from wrappedFs
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
CaseInsensitive: true,
|
CaseInsensitive: true,
|
||||||
DuplicateFiles: false,
|
DuplicateFiles: false,
|
||||||
ReadMimeType: false,
|
ReadMimeType: false,
|
||||||
WriteMimeType: false,
|
WriteMimeType: false,
|
||||||
GetTier: true,
|
GetTier: true,
|
||||||
SetTier: true,
|
SetTier: true,
|
||||||
BucketBased: true,
|
BucketBased: true,
|
||||||
CanHaveEmptyDirectories: true,
|
CanHaveEmptyDirectories: true,
|
||||||
ReadMetadata: true,
|
ReadMetadata: true,
|
||||||
WriteMetadata: true,
|
WriteMetadata: true,
|
||||||
UserMetadata: true,
|
UserMetadata: true,
|
||||||
ReadDirMetadata: true,
|
|
||||||
WriteDirMetadata: true,
|
|
||||||
WriteDirSetModTime: true,
|
|
||||||
UserDirMetadata: true,
|
|
||||||
DirModTimeUpdatesOnWrite: true,
|
|
||||||
PartialUploads: true,
|
|
||||||
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
||||||
// We support reading MIME types no matter the wrapped fs
|
// We support reading MIME types no matter the wrapped fs
|
||||||
f.features.ReadMimeType = true
|
f.features.ReadMimeType = true
|
||||||
@@ -276,19 +193,14 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
|||||||
if !operations.CanServerSideMove(wrappedFs) {
|
if !operations.CanServerSideMove(wrappedFs) {
|
||||||
f.features.Disable("PutStream")
|
f.features.Disable("PutStream")
|
||||||
}
|
}
|
||||||
// Enable ListP always
|
|
||||||
f.features.ListP = f.ListP
|
|
||||||
|
|
||||||
return f, err
|
return f, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// compressionModeFromName converts a compression mode name to its int representation.
|
|
||||||
func compressionModeFromName(name string) int {
|
func compressionModeFromName(name string) int {
|
||||||
switch name {
|
switch name {
|
||||||
case "gzip":
|
case "gzip":
|
||||||
return Gzip
|
return Gzip
|
||||||
case "zstd":
|
|
||||||
return Zstd
|
|
||||||
default:
|
default:
|
||||||
return Uncompressed
|
return Uncompressed
|
||||||
}
|
}
|
||||||
@@ -312,7 +224,7 @@ func base64ToInt64(str string) (int64, error) {
|
|||||||
|
|
||||||
// Processes a file name for a compressed file. Returns the original file name, the extension, and the size of the original file.
|
// Processes a file name for a compressed file. Returns the original file name, the extension, and the size of the original file.
|
||||||
// Returns -2 for the original size if the file is uncompressed.
|
// Returns -2 for the original size if the file is uncompressed.
|
||||||
func processFileName(compressedFileName string, modeHandler compressionModeHandler) (origFileName string, extension string, origSize int64, err error) {
|
func processFileName(compressedFileName string) (origFileName string, extension string, origSize int64, err error) {
|
||||||
// Separate the filename and size from the extension
|
// Separate the filename and size from the extension
|
||||||
extensionPos := strings.LastIndex(compressedFileName, ".")
|
extensionPos := strings.LastIndex(compressedFileName, ".")
|
||||||
if extensionPos == -1 {
|
if extensionPos == -1 {
|
||||||
@@ -331,8 +243,7 @@ func processFileName(compressedFileName string, modeHandler compressionModeHandl
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return "", "", 0, errors.New("could not decode size")
|
return "", "", 0, errors.New("could not decode size")
|
||||||
}
|
}
|
||||||
ext := modeHandler.processFileNameGetFileExtension(compressionModeFromName(compressedFileName[extensionPos+1:]))
|
return match[1], gzFileExt, size, nil
|
||||||
return match[1], ext, size, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Generates the file name for a metadata file
|
// Generates the file name for a metadata file
|
||||||
@@ -345,27 +256,13 @@ func isMetadataFile(filename string) bool {
|
|||||||
return strings.HasSuffix(filename, metaFileExt)
|
return strings.HasSuffix(filename, metaFileExt)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Checks whether a file is a metadata file and returns the original
|
|
||||||
// file name and a flag indicating whether it was a metadata file or
|
|
||||||
// not.
|
|
||||||
func unwrapMetadataFile(filename string) (string, bool) {
|
|
||||||
if !isMetadataFile(filename) {
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
return filename[:len(filename)-len(metaFileExt)], true
|
|
||||||
}
|
|
||||||
|
|
||||||
// makeDataName generates the file name for a data file with specified compression mode
|
// makeDataName generates the file name for a data file with specified compression mode
|
||||||
func makeDataName(remote string, size int64, mode int) (newRemote string) {
|
func makeDataName(remote string, size int64, mode int) (newRemote string) {
|
||||||
switch mode {
|
if mode != Uncompressed {
|
||||||
case Gzip:
|
|
||||||
newRemote = remote + "." + int64ToBase64(size) + gzFileExt
|
newRemote = remote + "." + int64ToBase64(size) + gzFileExt
|
||||||
case Zstd:
|
} else {
|
||||||
newRemote = remote + "." + int64ToBase64(size) + zstdFileExt
|
|
||||||
default:
|
|
||||||
newRemote = remote + uncompressedFileExt
|
newRemote = remote + uncompressedFileExt
|
||||||
}
|
}
|
||||||
|
|
||||||
return newRemote
|
return newRemote
|
||||||
}
|
}
|
||||||
|
|
||||||
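To make the naming scheme concrete: makeDataName appends a dot, an 11-character unpadded base64 rendering of the original size (8 bytes of an int64, which is what the {11} group in nameRegexp above matches), and the extension for the chosen algorithm, while uncompressed data files simply get .bin. The byte order used by int64ToBase64 is not visible in this diff, so the big-endian choice below is only an assumed illustration.

package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

// int64ToBase64 sketch: 8 big-endian bytes, URL-safe base64 without padding
// (assumption; the real helper may use a different byte order).
func int64ToBase64(n int64) string {
	var buf [8]byte
	binary.BigEndian.PutUint64(buf[:], uint64(n))
	return base64.RawURLEncoding.EncodeToString(buf[:])
}

func main() {
	size := int64(1048576)
	fmt.Println("file.txt." + int64ToBase64(size) + ".gz")  // compressed with gzip
	fmt.Println("file.txt." + int64ToBase64(size) + ".zst") // compressed with zstd
	fmt.Println("file.txt.bin")                             // stored uncompressed
}
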
@@ -379,7 +276,7 @@ func (f *Fs) dataName(remote string, size int64, compressed bool) (name string)
|
|||||||
|
|
||||||
// addData parses an object and adds it to the DirEntries
|
// addData parses an object and adds it to the DirEntries
|
||||||
func (f *Fs) addData(entries *fs.DirEntries, o fs.Object) {
|
func (f *Fs) addData(entries *fs.DirEntries, o fs.Object) {
|
||||||
origFileName, _, size, err := processFileName(o.Remote(), f.modeHandler)
|
origFileName, _, size, err := processFileName(o.Remote())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Errorf(o, "Error on parsing file name: %v", err)
|
fs.Errorf(o, "Error on parsing file name: %v", err)
|
||||||
return
|
return
|
||||||
@@ -430,39 +327,11 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
|
|||||||
// found.
|
// found.
|
||||||
// List entries and process them
|
// List entries and process them
|
||||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
return list.WithListP(ctx, dir, f)
|
entries, err = f.Fs.List(ctx, dir)
|
||||||
}
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
// ListP lists the objects and directories of the Fs starting
|
|
||||||
// from dir non recursively into out.
|
|
||||||
//
|
|
||||||
// dir should be "" to start from the root, and should not
|
|
||||||
// have trailing slashes.
|
|
||||||
//
|
|
||||||
// This should return ErrDirNotFound if the directory isn't
|
|
||||||
// found.
|
|
||||||
//
|
|
||||||
// It should call callback for each tranche of entries read.
|
|
||||||
// These need not be returned in any particular order. If
|
|
||||||
// callback returns an error then the listing will stop
|
|
||||||
// immediately.
|
|
||||||
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
|
|
||||||
wrappedCallback := func(entries fs.DirEntries) error {
|
|
||||||
entries, err := f.processEntries(entries)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return callback(entries)
|
|
||||||
}
|
}
|
||||||
listP := f.Fs.Features().ListP
|
return f.processEntries(entries)
|
||||||
if listP == nil {
|
|
||||||
entries, err := f.Fs.List(ctx, dir)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return wrappedCallback(entries)
|
|
||||||
}
|
|
||||||
return listP(ctx, dir, wrappedCallback)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListR lists the objects and directories of the Fs starting
|
// ListR lists the objects and directories of the Fs starting
|
||||||
@@ -498,25 +367,18 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
meta, err := readMetadata(ctx, mo)
|
meta := readMetadata(ctx, mo)
|
||||||
if err != nil {
|
if meta == nil {
|
||||||
return nil, fmt.Errorf("error decoding metadata: %w", err)
|
return nil, errors.New("error decoding metadata")
|
||||||
}
|
|
||||||
size, err := f.modeHandler.newObjectGetOriginalSize(meta)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("error reading metadata: %w", err)
|
|
||||||
}
|
}
|
||||||
// Create our Object
|
// Create our Object
|
||||||
o, err := f.Fs.NewObject(ctx, makeDataName(remote, size, meta.Mode))
|
o, err := f.Fs.NewObject(ctx, makeDataName(remote, meta.CompressionMetadata.Size, meta.Mode))
|
||||||
if err != nil {
|
return f.newObject(o, mo, meta), err
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return f.newObject(o, mo, meta), nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// checkCompressAndType checks if an object is compressible and determines its mime type
|
// checkCompressAndType checks if an object is compressible and determines its mime type
|
||||||
// returns a multireader with the bytes that were read to determine mime type
|
// returns a multireader with the bytes that were read to determine mime type
|
||||||
func checkCompressAndType(in io.Reader, compressionMode int, modeHandler compressionModeHandler) (newReader io.Reader, compressible bool, mimeType string, err error) {
|
func checkCompressAndType(in io.Reader) (newReader io.Reader, compressible bool, mimeType string, err error) {
|
||||||
in, wrap := accounting.UnWrap(in)
|
in, wrap := accounting.UnWrap(in)
|
||||||
buf := make([]byte, heuristicBytes)
|
buf := make([]byte, heuristicBytes)
|
||||||
n, err := in.Read(buf)
|
n, err := in.Read(buf)
|
||||||
@@ -525,7 +387,7 @@ func checkCompressAndType(in io.Reader, compressionMode int, modeHandler compres
|
|||||||
return nil, false, "", err
|
return nil, false, "", err
|
||||||
}
|
}
|
||||||
mime := mimetype.Detect(buf)
|
mime := mimetype.Detect(buf)
|
||||||
compressible, err = modeHandler.isCompressible(bytes.NewReader(buf), compressionMode)
|
compressible, err = isCompressible(bytes.NewReader(buf))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, false, "", err
|
return nil, false, "", err
|
||||||
}
|
}
|
||||||
@@ -533,6 +395,26 @@ func checkCompressAndType(in io.Reader, compressionMode int, modeHandler compres
|
|||||||
return wrap(in), compressible, mime.String(), nil
|
return wrap(in), compressible, mime.String(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
|
||||||
|
// the configured threshold
|
||||||
|
func isCompressible(r io.Reader) (bool, error) {
|
||||||
|
var b bytes.Buffer
|
||||||
|
w, err := sgzip.NewWriterLevel(&b, sgzip.DefaultCompression)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
n, err := io.Copy(w, r)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
err = w.Close()
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
ratio := float64(n) / float64(b.Len())
|
||||||
|
return ratio > minCompressionRatio, nil
|
||||||
|
}
|
||||||
|
|
||||||
// verifyObjectHash verifies the Objects hash
|
// verifyObjectHash verifies the Objects hash
|
||||||
func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.MultiHasher, ht hash.Type) error {
|
func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.MultiHasher, ht hash.Type) error {
|
||||||
srcHash := hasher.Sums()[ht]
|
srcHash := hasher.Sums()[ht]
|
||||||
@@ -546,16 +428,16 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
|
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
|
||||||
}
|
}
|
||||||
return fmt.Errorf("corrupted on transfer: %v compressed hashes differ src(%s) %q vs dst(%s) %q", ht, f.Fs, srcHash, o.Fs(), dstHash)
|
return fmt.Errorf("corrupted on transfer: %v compressed hashes differ %q vs %q", ht, srcHash, dstHash)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
|
type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
|
||||||
|
|
||||||
type compressionResult[T sgzip.GzipMetadata | SzstdMetadata] struct {
|
type compressionResult struct {
|
||||||
err error
|
err error
|
||||||
meta T
|
meta sgzip.GzipMetadata
|
||||||
}
|
}
|
||||||
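The generic compressionResult above uses a union type set so the same struct can carry either gzip or zstd metadata. A small self-contained mirror of that pattern (gzipMeta and zstdMeta here are stand-ins for sgzip.GzipMetadata and SzstdMetadata, not the backend's types):
package main

import "fmt"

// Stand-in metadata types used only for this illustration.
type gzipMeta struct{ Size int64 }
type zstdMeta struct{ Size int64 }

// result mirrors compressionResult: the meta field is constrained to one of two concrete structs.
type result[T gzipMeta | zstdMeta] struct {
	err  error
	meta T
}

func main() {
	r := result[gzipMeta]{meta: gzipMeta{Size: 42}}
	fmt.Println(r.meta.Size, r.err)
}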
|
|
||||||
// replicating some of operations.Rcat functionality because we want to support remotes without streaming
|
// replicating some of operations.Rcat functionality because we want to support remotes without streaming
|
||||||
@@ -569,7 +451,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
|
|||||||
return f.Fs.Put(ctx, bytes.NewBuffer(buf[:n]), src, options...)
|
return f.Fs.Put(ctx, bytes.NewBuffer(buf[:n]), src, options...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Need to include what we already read
|
// Need to include what we already read
|
||||||
in = &ReadCloserWrapper{
|
in = &ReadCloserWrapper{
|
||||||
Reader: io.MultiReader(bytes.NewReader(buf), in),
|
Reader: io.MultiReader(bytes.NewReader(buf), in),
|
||||||
Closer: in,
|
Closer: in,
|
||||||
@@ -582,7 +464,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
|
|||||||
}
|
}
|
||||||
|
|
||||||
fs.Debugf(f, "Target remote doesn't support streaming uploads, creating temporary local file")
|
fs.Debugf(f, "Target remote doesn't support streaming uploads, creating temporary local file")
|
||||||
tempFile, err := os.CreateTemp("", "rclone-press-")
|
tempFile, err := ioutil.TempFile("", "rclone-press-")
|
||||||
defer func() {
|
defer func() {
|
||||||
// these errors should be relatively uncritical and the upload should've succeeded so it's okay-ish
|
// these errors should be relatively uncritical and the upload should've succeeded so it's okay-ish
|
||||||
// to ignore them
|
// to ignore them
|
||||||
@@ -596,18 +478,106 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
|
|||||||
return nil, fmt.Errorf("failed to write temporary local file: %w", err)
|
return nil, fmt.Errorf("failed to write temporary local file: %w", err)
|
||||||
}
|
}
|
||||||
if _, err = tempFile.Seek(0, 0); err != nil {
|
if _, err = tempFile.Seek(0, 0); err != nil {
|
||||||
return nil, fmt.Errorf("failed to seek temporary local file: %w", err)
|
return nil, err
|
||||||
}
|
}
|
||||||
finfo, err := tempFile.Stat()
|
finfo, err := tempFile.Stat()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to stat temporary local file: %w", err)
|
return nil, err
|
||||||
}
|
}
|
||||||
return f.Fs.Put(ctx, tempFile, object.NewStaticObjectInfo(dstFileName, modTime, finfo.Size(), false, nil, f.Fs))
|
return f.Fs.Put(ctx, tempFile, object.NewStaticObjectInfo(dstFileName, modTime, finfo.Size(), false, nil, f.Fs))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Put a compressed version of a file. Returns a wrappable object and metadata.
|
// Put a compressed version of a file. Returns a wrappable object and metadata.
|
||||||
func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, mimeType string) (fs.Object, *ObjectMetadata, error) {
|
func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, mimeType string) (fs.Object, *ObjectMetadata, error) {
|
||||||
return f.modeHandler.putCompress(ctx, f, in, src, options, mimeType)
|
// Unwrap reader accounting
|
||||||
|
in, wrap := accounting.UnWrap(in)
|
||||||
|
|
||||||
|
// Add the metadata hasher
|
||||||
|
metaHasher := md5.New()
|
||||||
|
in = io.TeeReader(in, metaHasher)
|
||||||
|
|
||||||
|
// Compress the file
|
||||||
|
pipeReader, pipeWriter := io.Pipe()
|
||||||
|
results := make(chan compressionResult)
|
||||||
|
go func() {
|
||||||
|
gz, err := sgzip.NewWriterLevel(pipeWriter, f.opt.CompressionLevel)
|
||||||
|
if err != nil {
|
||||||
|
results <- compressionResult{err: err, meta: sgzip.GzipMetadata{}}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
_, err = io.Copy(gz, in)
|
||||||
|
gzErr := gz.Close()
|
||||||
|
if gzErr != nil {
|
||||||
|
fs.Errorf(nil, "Failed to close compress: %v", gzErr)
|
||||||
|
if err == nil {
|
||||||
|
err = gzErr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
closeErr := pipeWriter.Close()
|
||||||
|
if closeErr != nil {
|
||||||
|
fs.Errorf(nil, "Failed to close pipe: %v", closeErr)
|
||||||
|
if err == nil {
|
||||||
|
err = closeErr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
results <- compressionResult{err: err, meta: gz.MetaData()}
|
||||||
|
}()
|
||||||
|
wrappedIn := wrap(bufio.NewReaderSize(pipeReader, bufferSize)) // Probably no longer needed as sgzip has its own buffering
|
||||||
|
|
||||||
|
// Find a hash the destination supports to compute a hash of
|
||||||
|
// the compressed data.
|
||||||
|
ht := f.Fs.Hashes().GetOne()
|
||||||
|
var hasher *hash.MultiHasher
|
||||||
|
var err error
|
||||||
|
if ht != hash.None {
|
||||||
|
// unwrap the accounting again
|
||||||
|
wrappedIn, wrap = accounting.UnWrap(wrappedIn)
|
||||||
|
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
// add the hasher and re-wrap the accounting
|
||||||
|
wrappedIn = io.TeeReader(wrappedIn, hasher)
|
||||||
|
wrappedIn = wrap(wrappedIn)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Transfer the data
|
||||||
|
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx), options)
|
||||||
|
//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx))
|
||||||
|
if err != nil {
|
||||||
|
if o != nil {
|
||||||
|
removeErr := o.Remove(ctx)
|
||||||
|
if removeErr != nil {
|
||||||
|
fs.Errorf(o, "Failed to remove partially transferred object: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
// Check whether we got an error during compression
|
||||||
|
result := <-results
|
||||||
|
err = result.err
|
||||||
|
if err != nil {
|
||||||
|
if o != nil {
|
||||||
|
removeErr := o.Remove(ctx)
|
||||||
|
if removeErr != nil {
|
||||||
|
fs.Errorf(o, "Failed to remove partially compressed object: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate metadata
|
||||||
|
meta := newMetadata(result.meta.Size, f.mode, result.meta, hex.EncodeToString(metaHasher.Sum(nil)), mimeType)
|
||||||
|
|
||||||
|
// Check the hashes of the compressed data if we were comparing them
|
||||||
|
if ht != hash.None && hasher != nil {
|
||||||
|
err = f.verifyObjectHash(ctx, o, hasher, ht)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return o, meta, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Put an uncompressed version of a file. Returns a wrappable object and metadata.
|
// Put an uncompressed version of a file. Returns a wrappable object and metadata.
|
||||||
@@ -651,8 +621,7 @@ func (f *Fs) putUncompress(ctx context.Context, in io.Reader, src fs.ObjectInfo,
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
return o, newMetadata(o.Size(), Uncompressed, sgzip.GzipMetadata{}, hex.EncodeToString(sum), mimeType), nil
|
||||||
return f.modeHandler.putUncompressGetNewMetadata(o, Uncompressed, hex.EncodeToString(sum), mimeType, sum)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// This function will write a metadata struct to a metadata Object for an src. Returns a wrappable metadata object.
|
// This function will write a metadata struct to a metadata Object for an src. Returns a wrappable metadata object.
|
||||||
@@ -708,7 +677,7 @@ func (f *Fs) putWithCustomFunctions(ctx context.Context, in io.Reader, src fs.Ob
|
|||||||
}
|
}
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return f.newObject(dataObject, mo, meta), nil
|
return f.newObject(dataObject, mo, meta), err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Put in to the remote path with the modTime given of the given size
|
// Put in to the remote path with the modTime given of the given size
|
||||||
@@ -723,7 +692,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
|||||||
o, err := f.NewObject(ctx, src.Remote())
|
o, err := f.NewObject(ctx, src.Remote())
|
||||||
if err == fs.ErrorObjectNotFound {
|
if err == fs.ErrorObjectNotFound {
|
||||||
// Get our file compressibility
|
// Get our file compressibility
|
||||||
in, compressible, mimeType, err := checkCompressAndType(in, f.mode, f.modeHandler)
|
in, compressible, mimeType, err := checkCompressAndType(in)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -743,7 +712,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
|
|||||||
}
|
}
|
||||||
found := err == nil
|
found := err == nil
|
||||||
|
|
||||||
in, compressible, mimeType, err := checkCompressAndType(in, f.mode, f.modeHandler)
|
in, compressible, mimeType, err := checkCompressAndType(in)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -762,7 +731,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
|
|||||||
}
|
}
|
||||||
|
|
||||||
// If our new object is compressed we have to rename it with the correct size.
|
// If our new object is compressed we have to rename it with the correct size.
|
||||||
// Uncompressed objects don't store the size in the name so they'll already have the correct name.
|
// Uncompressed objects don't store the size in the name so they'll already have the correct name.
|
||||||
if compressible {
|
if compressible {
|
||||||
wrapObj, err := operations.Move(ctx, f.Fs, nil, f.dataName(src.Remote(), newObj.size, compressible), newObj.Object)
|
wrapObj, err := operations.Move(ctx, f.Fs, nil, f.dataName(src.Remote(), newObj.size, compressible), newObj.Object)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -773,7 +742,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
|
|||||||
return newObj, nil
|
return newObj, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Temporarily disabled. There might be a way to implement this correctly but with the current metadata handling duplicate objects
|
// Temporarily disabled. There might be a way to implement this correctly but with the current metadata handling duplicate objects
|
||||||
// will break stuff. Right now I can't think of a way to make this work.
|
// will break stuff. Right now I can't think of a way to make this work.
|
||||||
|
|
||||||
// PutUnchecked uploads the object
|
// PutUnchecked uploads the object
|
||||||
@@ -793,14 +762,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
|||||||
return f.Fs.Mkdir(ctx, dir)
|
return f.Fs.Mkdir(ctx, dir)
|
||||||
}
|
}
|
||||||
|
|
||||||
// MkdirMetadata makes the root directory of the Fs object
|
|
||||||
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
|
|
||||||
if do := f.Fs.Features().MkdirMetadata; do != nil {
|
|
||||||
return do(ctx, dir, metadata)
|
|
||||||
}
|
|
||||||
return nil, fs.ErrorNotImplemented
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rmdir removes the directory (container, bucket) if empty
|
// Rmdir removes the directory (container, bucket) if empty
|
||||||
//
|
//
|
||||||
// Return an error if it doesn't exist or isn't empty
|
// Return an error if it doesn't exist or isn't empty
|
||||||
@@ -824,9 +785,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
|
|||||||
|
|
||||||
// Copy src to this remote using server side copy operations.
|
// Copy src to this remote using server side copy operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given.
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
// It returns the destination Object and a possible error.
|
// It returns the destination Object and a possible error
|
||||||
//
|
//
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
@@ -874,9 +835,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
|
|
||||||
// Move src to this remote using server side move operations.
|
// Move src to this remote using server side move operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given.
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
// It returns the destination Object and a possible error.
|
// It returns the destination Object and a possible error
|
||||||
//
|
//
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
@@ -944,14 +905,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
|||||||
return do(ctx, srcFs.Fs, srcRemote, dstRemote)
|
return do(ctx, srcFs.Fs, srcRemote, dstRemote)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DirSetModTime sets the directory modtime for dir
|
|
||||||
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
|
|
||||||
if do := f.Fs.Features().DirSetModTime; do != nil {
|
|
||||||
return do(ctx, dir, modTime)
|
|
||||||
}
|
|
||||||
return fs.ErrorNotImplemented
|
|
||||||
}
|
|
||||||
|
|
||||||
// CleanUp the trash in the Fs
|
// CleanUp the trash in the Fs
|
||||||
//
|
//
|
||||||
// Implement this if you have a way of emptying the trash or
|
// Implement this if you have a way of emptying the trash or
|
||||||
@@ -1022,8 +975,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
|
|||||||
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
|
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
|
||||||
fs.Logf(f, "path %q entryType %d", path, entryType)
|
fs.Logf(f, "path %q entryType %d", path, entryType)
|
||||||
var (
|
var (
|
||||||
wrappedPath string
|
wrappedPath string
|
||||||
isMetadataFile bool
|
|
||||||
)
|
)
|
||||||
switch entryType {
|
switch entryType {
|
||||||
case fs.EntryDirectory:
|
case fs.EntryDirectory:
|
||||||
@@ -1031,10 +983,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
|
|||||||
case fs.EntryObject:
|
case fs.EntryObject:
|
||||||
// Note: All we really need to do to monitor the object is to check whether the metadata changed,
|
// Note: All we really need to do to monitor the object is to check whether the metadata changed,
|
||||||
// as the metadata contains the hash. This will work unless there's a hash collision and the sizes stay the same.
|
// as the metadata contains the hash. This will work unless there's a hash collision and the sizes stay the same.
|
||||||
wrappedPath, isMetadataFile = unwrapMetadataFile(path)
|
wrappedPath = makeMetadataName(path)
|
||||||
if !isMetadataFile {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
default:
|
default:
|
||||||
fs.Errorf(path, "press ChangeNotify: ignoring unknown EntryType %d", entryType)
|
fs.Errorf(path, "press ChangeNotify: ignoring unknown EntryType %d", entryType)
|
||||||
return
|
return
|
||||||
@@ -1062,12 +1011,11 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, duration fs.Duration
|
|||||||
|
|
||||||
// ObjectMetadata describes the metadata for an Object.
|
// ObjectMetadata describes the metadata for an Object.
|
||||||
type ObjectMetadata struct {
|
type ObjectMetadata struct {
|
||||||
Mode int // Compression mode of the file.
|
Mode int // Compression mode of the file.
|
||||||
Size int64 // Size of the object.
|
Size int64 // Size of the object.
|
||||||
MD5 string // MD5 hash of the file.
|
MD5 string // MD5 hash of the file.
|
||||||
MimeType string // Mime type of the file
|
MimeType string // Mime type of the file
|
||||||
CompressionMetadataGzip *sgzip.GzipMetadata // Metadata for Gzip compression
|
CompressionMetadata sgzip.GzipMetadata
|
||||||
CompressionMetadataZstd *SzstdMetadata // Metadata for Zstd compression
|
|
||||||
}
|
}
|
||||||
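readMetadata below decodes this struct with encoding/json, so the stored metadata object is just the JSON form of these fields. A small sketch with a trimmed mirror struct and made-up values, only to show the shape of what ends up on the wrapped remote:
package main

import (
	"encoding/json"
	"fmt"
)

// metadata mirrors a subset of ObjectMetadata purely for illustration.
type metadata struct {
	Mode     int
	Size     int64
	MD5      string
	MimeType string
}

func main() {
	m := metadata{Mode: 1, Size: 1048576, MD5: "d41d8cd98f00b204e9800998ecf8427e", MimeType: "text/plain"}
	out, err := json.Marshal(m)
	if err != nil {
		panic(err)
	}
	// Roughly what a metadata object's content looks like (values are made up).
	fmt.Println(string(out))
}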
|
|
||||||
// Object with external metadata
|
// Object with external metadata
|
||||||
@@ -1080,20 +1028,36 @@ type Object struct {
|
|||||||
meta *ObjectMetadata // Metadata struct for this object (nil if not loaded)
|
meta *ObjectMetadata // Metadata struct for this object (nil if not loaded)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// This function generates a metadata object
|
||||||
|
func newMetadata(size int64, mode int, cmeta sgzip.GzipMetadata, md5 string, mimeType string) *ObjectMetadata {
|
||||||
|
meta := new(ObjectMetadata)
|
||||||
|
meta.Size = size
|
||||||
|
meta.Mode = mode
|
||||||
|
meta.CompressionMetadata = cmeta
|
||||||
|
meta.MD5 = md5
|
||||||
|
meta.MimeType = mimeType
|
||||||
|
return meta
|
||||||
|
}
|
||||||
|
|
||||||
// This function will read the metadata from a metadata object.
|
// This function will read the metadata from a metadata object.
|
||||||
func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata, err error) {
|
func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata) {
|
||||||
// Open our meradata object
|
// Open our meradata object
|
||||||
rc, err := mo.Open(ctx)
|
rc, err := mo.Open(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil
|
||||||
}
|
}
|
||||||
defer fs.CheckClose(rc, &err)
|
defer func() {
|
||||||
|
err := rc.Close()
|
||||||
|
if err != nil {
|
||||||
|
fs.Errorf(mo, "Error closing object: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
jr := json.NewDecoder(rc)
|
jr := json.NewDecoder(rc)
|
||||||
meta = new(ObjectMetadata)
|
meta = new(ObjectMetadata)
|
||||||
if err = jr.Decode(meta); err != nil {
|
if err = jr.Decode(meta); err != nil {
|
||||||
return nil, err
|
return nil
|
||||||
}
|
}
|
||||||
return meta, nil
|
return meta
|
||||||
}
|
}
|
||||||
|
|
||||||
// Remove removes this object
|
// Remove removes this object
|
||||||
@@ -1127,7 +1091,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
return o.mo, o.mo.Update(ctx, in, src, options...)
|
return o.mo, o.mo.Update(ctx, in, src, options...)
|
||||||
}
|
}
|
||||||
|
|
||||||
in, compressible, mimeType, err := checkCompressAndType(in, o.meta.Mode, o.f.modeHandler)
|
in, compressible, mimeType, err := checkCompressAndType(in)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -1138,9 +1102,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
origName := o.Remote()
|
origName := o.Remote()
|
||||||
if o.meta.Mode != Uncompressed || compressible {
|
if o.meta.Mode != Uncompressed || compressible {
|
||||||
newObject, err = o.f.putWithCustomFunctions(ctx, in, o.f.wrapInfo(src, origName, src.Size()), options, o.f.Fs.Put, updateMeta, compressible, mimeType)
|
newObject, err = o.f.putWithCustomFunctions(ctx, in, o.f.wrapInfo(src, origName, src.Size()), options, o.f.Fs.Put, updateMeta, compressible, mimeType)
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if newObject.Object.Remote() != o.Object.Remote() {
|
if newObject.Object.Remote() != o.Object.Remote() {
|
||||||
if removeErr := o.Object.Remove(ctx); removeErr != nil {
|
if removeErr := o.Object.Remove(ctx); removeErr != nil {
|
||||||
return removeErr
|
return removeErr
|
||||||
@@ -1154,9 +1115,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
}
|
}
|
||||||
// If we are, just update the object and metadata
|
// If we are, just update the object and metadata
|
||||||
newObject, err = o.f.putWithCustomFunctions(ctx, in, src, options, update, updateMeta, compressible, mimeType)
|
newObject, err = o.f.putWithCustomFunctions(ctx, in, src, options, update, updateMeta, compressible, mimeType)
|
||||||
if err != nil {
|
}
|
||||||
return err
|
if err != nil {
|
||||||
}
|
return err
|
||||||
}
|
}
|
||||||
// Update object metadata and return
|
// Update object metadata and return
|
||||||
o.Object = newObject.Object
|
o.Object = newObject.Object
|
||||||
@@ -1167,9 +1128,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
|
|
||||||
// This will initialize the variables of a new press Object. The metadata object, mo, and metadata struct, meta, must be specified.
|
// This will initialize the variables of a new press Object. The metadata object, mo, and metadata struct, meta, must be specified.
|
||||||
func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object {
|
func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object {
|
||||||
if o == nil {
|
|
||||||
log.Trace(nil, "newObject(%#v, %#v, %#v) called with nil o", o, mo, meta)
|
|
||||||
}
|
|
||||||
return &Object{
|
return &Object{
|
||||||
Object: o,
|
Object: o,
|
||||||
f: f,
|
f: f,
|
||||||
@@ -1182,9 +1140,6 @@ func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object
|
|||||||
|
|
||||||
// This initializes the variables of a press Object with only the size. The metadata will be loaded later on demand.
|
// This initializes the variables of a press Object with only the size. The metadata will be loaded later on demand.
|
||||||
func (f *Fs) newObjectSizeAndNameOnly(o fs.Object, moName string, size int64) *Object {
|
func (f *Fs) newObjectSizeAndNameOnly(o fs.Object, moName string, size int64) *Object {
|
||||||
if o == nil {
|
|
||||||
log.Trace(nil, "newObjectSizeAndNameOnly(%#v, %#v, %#v) called with nil o", o, moName, size)
|
|
||||||
}
|
|
||||||
return &Object{
|
return &Object{
|
||||||
Object: o,
|
Object: o,
|
||||||
f: f,
|
f: f,
|
||||||
@@ -1212,7 +1167,7 @@ func (o *Object) loadMetadataIfNotLoaded(ctx context.Context) (err error) {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if o.meta == nil {
|
if o.meta == nil {
|
||||||
o.meta, err = readMetadata(ctx, o.mo)
|
o.meta = readMetadata(ctx, o.mo)
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -1240,7 +1195,7 @@ func (o *Object) String() string {
|
|||||||
|
|
||||||
// Remote returns the remote path
|
// Remote returns the remote path
|
||||||
func (o *Object) Remote() string {
|
func (o *Object) Remote() string {
|
||||||
origFileName, _, _, err := processFileName(o.Object.Remote(), o.f.modeHandler)
|
origFileName, _, _, err := processFileName(o.Object.Remote())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Errorf(o.f, "Could not get remote path for: %s", o.Object.Remote())
|
fs.Errorf(o.f, "Could not get remote path for: %s", o.Object.Remote())
|
||||||
return o.Object.Remote()
|
return o.Object.Remote()
|
||||||
@@ -1280,17 +1235,6 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
|
|||||||
return do.Metadata(ctx)
|
return do.Metadata(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetMetadata sets metadata for an Object
|
|
||||||
//
|
|
||||||
// It should return fs.ErrorNotImplemented if it can't set metadata
|
|
||||||
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
|
|
||||||
do, ok := o.Object.(fs.SetMetadataer)
|
|
||||||
if !ok {
|
|
||||||
return fs.ErrorNotImplemented
|
|
||||||
}
|
|
||||||
return do.SetMetadata(ctx, metadata)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hash returns the selected checksum of the file
|
// Hash returns the selected checksum of the file
|
||||||
// If no checksum is available it returns ""
|
// If no checksum is available it returns ""
|
||||||
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
|
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
|
||||||
@@ -1343,6 +1287,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
|
|||||||
return o.Object.Open(ctx, options...)
|
return o.Object.Open(ctx, options...)
|
||||||
}
|
}
|
||||||
// Get offset and limit from OpenOptions, pass the rest to the underlying remote
|
// Get offset and limit from OpenOptions, pass the rest to the underlying remote
|
||||||
|
var openOptions = []fs.OpenOption{&fs.SeekOption{Offset: 0}}
|
||||||
var offset, limit int64 = 0, -1
|
var offset, limit int64 = 0, -1
|
||||||
for _, option := range options {
|
for _, option := range options {
|
||||||
switch x := option.(type) {
|
switch x := option.(type) {
|
||||||
@@ -1350,12 +1295,31 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
|
|||||||
offset = x.Offset
|
offset = x.Offset
|
||||||
case *fs.RangeOption:
|
case *fs.RangeOption:
|
||||||
offset, limit = x.Decode(o.Size())
|
offset, limit = x.Decode(o.Size())
|
||||||
|
default:
|
||||||
|
openOptions = append(openOptions, option)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Get a chunkedreader for the wrapped object
|
// Get a chunkedreader for the wrapped object
|
||||||
chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize, chunkStreams)
|
chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize)
|
||||||
var retCloser io.Closer = chunkedReader
|
// Get file handle
|
||||||
return o.f.modeHandler.openGetReadCloser(ctx, o, offset, limit, chunkedReader, retCloser, options...)
|
var file io.Reader
|
||||||
|
if offset != 0 {
|
||||||
|
file, err = sgzip.NewReaderAt(chunkedReader, &o.meta.CompressionMetadata, offset)
|
||||||
|
} else {
|
||||||
|
file, err = sgzip.NewReader(chunkedReader)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var fileReader io.Reader
|
||||||
|
if limit != -1 {
|
||||||
|
fileReader = io.LimitReader(file, limit)
|
||||||
|
} else {
|
||||||
|
fileReader = file
|
||||||
|
}
|
||||||
|
// Return a ReadCloser
|
||||||
|
return ReadCloserWrapper{Reader: fileReader, Closer: chunkedReader}, nil
|
||||||
}
|
}
|
||||||
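Open turns the SeekOption/RangeOption values into an offset (handled by starting the seekable gzip reader at that position) and a byte limit (handled with io.LimitReader). The limit half can be illustrated with the standard library alone, using made-up numbers:
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	full := strings.NewReader("0123456789abcdefghij")
	offset, limit := int64(5), int64(4) // e.g. a range request for bytes 5-8

	// Skip to the offset; the backend lets the seekable decompressor do this part.
	if _, err := io.CopyN(io.Discard, full, offset); err != nil {
		panic(err)
	}
	// Cap how much is handed back to the caller, as Open does with io.LimitReader.
	out, err := io.ReadAll(io.LimitReader(full, limit))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", out) // "5678"
}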
|
|
||||||
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source
|
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source
|
||||||
@@ -1503,8 +1467,6 @@ var (
|
|||||||
_ fs.Copier = (*Fs)(nil)
|
_ fs.Copier = (*Fs)(nil)
|
||||||
_ fs.Mover = (*Fs)(nil)
|
_ fs.Mover = (*Fs)(nil)
|
||||||
_ fs.DirMover = (*Fs)(nil)
|
_ fs.DirMover = (*Fs)(nil)
|
||||||
_ fs.DirSetModTimer = (*Fs)(nil)
|
|
||||||
_ fs.MkdirMetadataer = (*Fs)(nil)
|
|
||||||
_ fs.PutStreamer = (*Fs)(nil)
|
_ fs.PutStreamer = (*Fs)(nil)
|
||||||
_ fs.CleanUpper = (*Fs)(nil)
|
_ fs.CleanUpper = (*Fs)(nil)
|
||||||
_ fs.UnWrapper = (*Fs)(nil)
|
_ fs.UnWrapper = (*Fs)(nil)
|
||||||
|
|||||||
@@ -14,26 +14,23 @@ import (
|
|||||||
"github.com/rclone/rclone/fstest/fstests"
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
)
|
)
|
||||||
|
|
||||||
var defaultOpt = fstests.Opt{
|
|
||||||
RemoteName: "TestCompress:",
|
|
||||||
NilObject: (*Object)(nil),
|
|
||||||
UnimplementableFsMethods: []string{
|
|
||||||
"OpenWriterAt",
|
|
||||||
"OpenChunkWriter",
|
|
||||||
"MergeDirs",
|
|
||||||
"DirCacheFlush",
|
|
||||||
"PutUnchecked",
|
|
||||||
"PutStream",
|
|
||||||
"UserInfo",
|
|
||||||
"Disconnect",
|
|
||||||
},
|
|
||||||
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
|
|
||||||
UnimplementableObjectMethods: []string{},
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestIntegration runs integration tests against the remote
|
// TestIntegration runs integration tests against the remote
|
||||||
func TestIntegration(t *testing.T) {
|
func TestIntegration(t *testing.T) {
|
||||||
fstests.Run(t, &defaultOpt)
|
opt := fstests.Opt{
|
||||||
|
RemoteName: *fstest.RemoteName,
|
||||||
|
NilObject: (*Object)(nil),
|
||||||
|
UnimplementableFsMethods: []string{
|
||||||
|
"OpenWriterAt",
|
||||||
|
"MergeDirs",
|
||||||
|
"DirCacheFlush",
|
||||||
|
"PutUnchecked",
|
||||||
|
"PutStream",
|
||||||
|
"UserInfo",
|
||||||
|
"Disconnect",
|
||||||
|
},
|
||||||
|
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
|
||||||
|
UnimplementableObjectMethods: []string{}}
|
||||||
|
fstests.Run(t, &opt)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestRemoteGzip tests GZIP compression
|
// TestRemoteGzip tests GZIP compression
|
||||||
@@ -43,33 +40,27 @@ func TestRemoteGzip(t *testing.T) {
|
|||||||
}
|
}
|
||||||
tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip")
|
tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip")
|
||||||
name := "TestCompressGzip"
|
name := "TestCompressGzip"
|
||||||
opt := defaultOpt
|
fstests.Run(t, &fstests.Opt{
|
||||||
opt.RemoteName = name + ":"
|
RemoteName: name + ":",
|
||||||
opt.ExtraConfig = []fstests.ExtraConfigItem{
|
NilObject: (*Object)(nil),
|
||||||
{Name: name, Key: "type", Value: "compress"},
|
UnimplementableFsMethods: []string{
|
||||||
{Name: name, Key: "remote", Value: tempdir},
|
"OpenWriterAt",
|
||||||
{Name: name, Key: "mode", Value: "gzip"},
|
"MergeDirs",
|
||||||
{Name: name, Key: "level", Value: "-1"},
|
"DirCacheFlush",
|
||||||
}
|
"PutUnchecked",
|
||||||
opt.QuickTestOK = true
|
"PutStream",
|
||||||
fstests.Run(t, &opt)
|
"UserInfo",
|
||||||
}
|
"Disconnect",
|
||||||
|
},
|
||||||
// TestRemoteZstd tests ZSTD compression
|
UnimplementableObjectMethods: []string{
|
||||||
func TestRemoteZstd(t *testing.T) {
|
"GetTier",
|
||||||
if *fstest.RemoteName != "" {
|
"SetTier",
|
||||||
t.Skip("Skipping as -remote set")
|
},
|
||||||
}
|
ExtraConfig: []fstests.ExtraConfigItem{
|
||||||
tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-zstd")
|
{Name: name, Key: "type", Value: "compress"},
|
||||||
name := "TestCompressZstd"
|
{Name: name, Key: "remote", Value: tempdir},
|
||||||
opt := defaultOpt
|
{Name: name, Key: "compression_mode", Value: "gzip"},
|
||||||
opt.RemoteName = name + ":"
|
},
|
||||||
opt.ExtraConfig = []fstests.ExtraConfigItem{
|
QuickTestOK: true,
|
||||||
{Name: name, Key: "type", Value: "compress"},
|
})
|
||||||
{Name: name, Key: "remote", Value: tempdir},
|
|
||||||
{Name: name, Key: "mode", Value: "zstd"},
|
|
||||||
{Name: name, Key: "level", Value: "2"},
|
|
||||||
}
|
|
||||||
opt.QuickTestOK = true
|
|
||||||
fstests.Run(t, &opt)
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,207 +0,0 @@
|
|||||||
package compress
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"crypto/md5"
|
|
||||||
"encoding/hex"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
|
|
||||||
"github.com/buengese/sgzip"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/accounting"
|
|
||||||
"github.com/rclone/rclone/fs/chunkedreader"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
|
||||||
)
|
|
||||||
|
|
||||||
// gzipModeHandler implements compressionModeHandler for gzip
|
|
||||||
type gzipModeHandler struct{}
|
|
||||||
|
|
||||||
// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
|
|
||||||
// the configured threshold
|
|
||||||
func (g *gzipModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
|
|
||||||
var b bytes.Buffer
|
|
||||||
var n int64
|
|
||||||
w, err := sgzip.NewWriterLevel(&b, sgzip.DefaultCompression)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
n, err = io.Copy(w, r)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
err = w.Close()
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
ratio := float64(n) / float64(b.Len())
|
|
||||||
return ratio > minCompressionRatio, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// newObjectGetOriginalSize returns the original file size from the metadata
|
|
||||||
func (g *gzipModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
|
|
||||||
if meta.CompressionMetadataGzip == nil {
|
|
||||||
return 0, errors.New("missing gzip metadata")
|
|
||||||
}
|
|
||||||
return meta.CompressionMetadataGzip.Size, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
|
|
||||||
func (g *gzipModeHandler) openGetReadCloser(
|
|
||||||
ctx context.Context,
|
|
||||||
o *Object,
|
|
||||||
offset int64,
|
|
||||||
limit int64,
|
|
||||||
cr chunkedreader.ChunkedReader,
|
|
||||||
closer io.Closer,
|
|
||||||
options ...fs.OpenOption,
|
|
||||||
) (rc io.ReadCloser, err error) {
|
|
||||||
var file io.Reader
|
|
||||||
|
|
||||||
if offset != 0 {
|
|
||||||
file, err = sgzip.NewReaderAt(cr, o.meta.CompressionMetadataGzip, offset)
|
|
||||||
} else {
|
|
||||||
file, err = sgzip.NewReader(cr)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var fileReader io.Reader
|
|
||||||
if limit != -1 {
|
|
||||||
fileReader = io.LimitReader(file, limit)
|
|
||||||
} else {
|
|
||||||
fileReader = file
|
|
||||||
}
|
|
||||||
// Return a ReadCloser
|
|
||||||
return ReadCloserWrapper{Reader: fileReader, Closer: closer}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// processFileNameGetFileExtension returns the file extension for the given compression mode
|
|
||||||
func (g *gzipModeHandler) processFileNameGetFileExtension(compressionMode int) string {
|
|
||||||
if compressionMode == Gzip {
|
|
||||||
return gzFileExt
|
|
||||||
}
|
|
||||||
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
|
|
||||||
func (g *gzipModeHandler) putCompress(
|
|
||||||
ctx context.Context,
|
|
||||||
f *Fs,
|
|
||||||
in io.Reader,
|
|
||||||
src fs.ObjectInfo,
|
|
||||||
options []fs.OpenOption,
|
|
||||||
mimeType string,
|
|
||||||
) (fs.Object, *ObjectMetadata, error) {
|
|
||||||
// Unwrap reader accounting
|
|
||||||
in, wrap := accounting.UnWrap(in)
|
|
||||||
|
|
||||||
// Add the metadata hasher
|
|
||||||
metaHasher := md5.New()
|
|
||||||
in = io.TeeReader(in, metaHasher)
|
|
||||||
|
|
||||||
// Compress the file
|
|
||||||
pipeReader, pipeWriter := io.Pipe()
|
|
||||||
|
|
||||||
resultsGzip := make(chan compressionResult[sgzip.GzipMetadata])
|
|
||||||
go func() {
|
|
||||||
gz, err := sgzip.NewWriterLevel(pipeWriter, f.opt.CompressionLevel)
|
|
||||||
if err != nil {
|
|
||||||
resultsGzip <- compressionResult[sgzip.GzipMetadata]{err: err, meta: sgzip.GzipMetadata{}}
|
|
||||||
close(resultsGzip)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
_, err = io.Copy(gz, in)
|
|
||||||
gzErr := gz.Close()
|
|
||||||
if gzErr != nil && err == nil {
|
|
||||||
err = gzErr
|
|
||||||
}
|
|
||||||
closeErr := pipeWriter.Close()
|
|
||||||
if closeErr != nil && err == nil {
|
|
||||||
err = closeErr
|
|
||||||
}
|
|
||||||
resultsGzip <- compressionResult[sgzip.GzipMetadata]{err: err, meta: gz.MetaData()}
|
|
||||||
close(resultsGzip)
|
|
||||||
}()
|
|
||||||
|
|
||||||
wrappedIn := wrap(bufio.NewReaderSize(pipeReader, bufferSize)) // Probably no longer needed as sgzip has its own buffering
|
|
||||||
|
|
||||||
// Find a hash the destination supports to compute a hash of
|
|
||||||
// the compressed data.
|
|
||||||
ht := f.Fs.Hashes().GetOne()
|
|
||||||
var hasher *hash.MultiHasher
|
|
||||||
var err error
|
|
||||||
if ht != hash.None {
|
|
||||||
// unwrap the accounting again
|
|
||||||
wrappedIn, wrap = accounting.UnWrap(wrappedIn)
|
|
||||||
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
// add the hasher and re-wrap the accounting
|
|
||||||
wrappedIn = io.TeeReader(wrappedIn, hasher)
|
|
||||||
wrappedIn = wrap(wrappedIn)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Transfer the data
|
|
||||||
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
|
|
||||||
if err != nil {
|
|
||||||
if o != nil {
|
|
||||||
if removeErr := o.Remove(ctx); removeErr != nil {
|
|
||||||
fs.Errorf(o, "Failed to remove partially transferred object: %v", removeErr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
// Check whether we got an error during compression
|
|
||||||
result := <-resultsGzip
|
|
||||||
if result.err != nil {
|
|
||||||
if o != nil {
|
|
||||||
if removeErr := o.Remove(ctx); removeErr != nil {
|
|
||||||
fs.Errorf(o, "Failed to remove partially compressed object: %v", removeErr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil, nil, result.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Generate metadata
|
|
||||||
meta := g.newMetadata(result.meta.Size, f.mode, result.meta, hex.EncodeToString(metaHasher.Sum(nil)), mimeType)
|
|
||||||
|
|
||||||
// Check the hashes of the compressed data if we were comparing them
|
|
||||||
if ht != hash.None && hasher != nil {
|
|
||||||
err = f.verifyObjectHash(ctx, o, hasher, ht)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return o, meta, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
|
|
||||||
func (g *gzipModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
|
|
||||||
return o, g.newMetadata(o.Size(), mode, sgzip.GzipMetadata{}, hex.EncodeToString(sum), mimeType), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
|
|
||||||
// Warning: This function panics if cmeta is not of the expected type.
|
|
||||||
func (g *gzipModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
|
|
||||||
meta, ok := cmeta.(sgzip.GzipMetadata)
|
|
||||||
if !ok {
|
|
||||||
panic("invalid cmeta type: expected sgzip.GzipMetadata")
|
|
||||||
}
|
|
||||||
|
|
||||||
objMeta := new(ObjectMetadata)
|
|
||||||
objMeta.Size = size
|
|
||||||
objMeta.Mode = mode
|
|
||||||
objMeta.CompressionMetadataGzip = &meta
|
|
||||||
objMeta.CompressionMetadataZstd = nil
|
|
||||||
objMeta.MD5 = md5
|
|
||||||
objMeta.MimeType = mimeType
|
|
||||||
|
|
||||||
return objMeta
|
|
||||||
}
|
|
||||||
@@ -1,327 +0,0 @@
|
|||||||
package compress
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"runtime"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
szstd "github.com/a1ex3/zstd-seekable-format-go/pkg"
|
|
||||||
"github.com/klauspost/compress/zstd"
|
|
||||||
)
|
|
||||||
|
|
||||||
const szstdChunkSize int = 1 << 20 // 1 MiB chunk size
|
|
||||||
|
|
||||||
// SzstdMetadata holds metadata for szstd compressed files.
|
|
||||||
type SzstdMetadata struct {
|
|
||||||
BlockSize int // BlockSize is the size of the blocks in the zstd file
|
|
||||||
Size int64 // Size is the uncompressed size of the file
|
|
||||||
BlockData []uint32 // BlockData is the block data for the zstd file, used for seeking
|
|
||||||
}
|
|
||||||
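BlockData holds cumulative compressed offsets, so block i occupies bytes BlockData[i] up to BlockData[i+1] of the compressed stream; SzstdReaderAt.ReadAt below slices blocks exactly this way. A tiny sketch of that lookup with made-up offsets:
package main

import "fmt"

// blockRange returns the compressed byte range of block i from cumulative offsets,
// mirroring how ReadAt derives it from BlockData.
func blockRange(blockData []uint32, i int) (start, end int64) {
	return int64(blockData[i]), int64(blockData[i+1])
}

func main() {
	// Made-up offsets: three compressed blocks of 100, 80 and 120 bytes.
	blockData := []uint32{0, 100, 180, 300}
	for i := 0; i < len(blockData)-1; i++ {
		start, end := blockRange(blockData, i)
		fmt.Printf("block %d: compressed bytes [%d, %d)\n", i, start, end)
	}
}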
|
|
||||||
// SzstdWriter is a writer that compresses data in szstd format.
|
|
||||||
type SzstdWriter struct {
|
|
||||||
enc *zstd.Encoder
|
|
||||||
w szstd.ConcurrentWriter
|
|
||||||
metadata SzstdMetadata
|
|
||||||
mu sync.Mutex
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWriterSzstd creates a new szstd writer with the specified options.
|
|
||||||
// It initializes the szstd writer with a zstd encoder and returns a pointer to the SzstdWriter.
|
|
||||||
// The writer can be used to write data in chunks, and it will automatically handle block sizes and metadata.
|
|
||||||
func NewWriterSzstd(w io.Writer, opts ...zstd.EOption) (*SzstdWriter, error) {
|
|
||||||
encoder, err := zstd.NewWriter(nil, opts...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
sw, err := szstd.NewWriter(w, encoder)
|
|
||||||
if err != nil {
|
|
||||||
if err := encoder.Close(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &SzstdWriter{
|
|
||||||
enc: encoder,
|
|
||||||
w: sw,
|
|
||||||
metadata: SzstdMetadata{
|
|
||||||
BlockSize: szstdChunkSize,
|
|
||||||
Size: 0,
|
|
||||||
},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write writes data to the szstd writer in chunks of szstdChunkSize.
|
|
||||||
// It handles the block size and metadata updates automatically.
|
|
||||||
func (w *SzstdWriter) Write(p []byte) (int, error) {
|
|
||||||
if len(p) == 0 {
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if w.metadata.BlockData == nil {
|
|
||||||
numBlocks := (len(p) + w.metadata.BlockSize - 1) / w.metadata.BlockSize
|
|
||||||
w.metadata.BlockData = make([]uint32, 1, numBlocks+1)
|
|
||||||
w.metadata.BlockData[0] = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
start := 0
|
|
||||||
total := len(p)
|
|
||||||
|
|
||||||
var writerFunc szstd.FrameSource = func() ([]byte, error) {
|
|
||||||
if start >= total {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
end := min(start+w.metadata.BlockSize, total)
|
|
||||||
chunk := p[start:end]
|
|
||||||
size := end - start
|
|
||||||
|
|
||||||
w.mu.Lock()
|
|
||||||
w.metadata.Size += int64(size)
|
|
||||||
w.mu.Unlock()
|
|
||||||
|
|
||||||
start = end
|
|
||||||
return chunk, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// write sizes of compressed blocks in the callback
|
|
||||||
err := w.w.WriteMany(context.Background(), writerFunc,
|
|
||||||
szstd.WithWriteCallback(func(size uint32) {
|
|
||||||
w.mu.Lock()
|
|
||||||
lastOffset := w.metadata.BlockData[len(w.metadata.BlockData)-1]
|
|
||||||
w.metadata.BlockData = append(w.metadata.BlockData, lastOffset+size)
|
|
||||||
w.mu.Unlock()
|
|
||||||
}),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return total, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the SzstdWriter and its underlying encoder.
|
|
||||||
func (w *SzstdWriter) Close() error {
|
|
||||||
if err := w.w.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := w.enc.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetMetadata returns the metadata of the szstd writer.
|
|
||||||
func (w *SzstdWriter) GetMetadata() SzstdMetadata {
|
|
||||||
return w.metadata
|
|
||||||
}
|
|
||||||
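A rough in-package usage sketch of the call sequence these methods imply: create the writer, stream data through Write, Close to flush, then collect the seek metadata. compressToBuffer is a hypothetical helper, not part of the backend, and assumes the "bytes", "io" and klauspost zstd imports already present in this file:
// compressToBuffer compresses src into a buffer and returns the seek metadata.
func compressToBuffer(src io.Reader) (*bytes.Buffer, SzstdMetadata, error) {
	var buf bytes.Buffer
	w, err := NewWriterSzstd(&buf, zstd.WithEncoderLevel(zstd.SpeedDefault))
	if err != nil {
		return nil, SzstdMetadata{}, err
	}
	if _, err := io.Copy(w, src); err != nil {
		return nil, SzstdMetadata{}, err
	}
	if err := w.Close(); err != nil {
		return nil, SzstdMetadata{}, err
	}
	return &buf, w.GetMetadata(), nil
}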
|
|
||||||
// SzstdReaderAt is a reader that allows random access in szstd compressed data.
|
|
||||||
type SzstdReaderAt struct {
|
|
||||||
r szstd.Reader
|
|
||||||
decoder *zstd.Decoder
|
|
||||||
metadata *SzstdMetadata
|
|
||||||
pos int64
|
|
||||||
mu sync.Mutex
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewReaderAtSzstd creates a new SzstdReaderAt for the specified io.ReadSeeker, positioned at the given offset.
|
|
||||||
func NewReaderAtSzstd(rs io.ReadSeeker, meta *SzstdMetadata, offset int64, opts ...zstd.DOption) (*SzstdReaderAt, error) {
|
|
||||||
decoder, err := zstd.NewReader(nil, opts...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
r, err := szstd.NewReader(rs, decoder)
|
|
||||||
if err != nil {
|
|
||||||
decoder.Close()
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
sr := &SzstdReaderAt{
|
|
||||||
r: r,
|
|
||||||
decoder: decoder,
|
|
||||||
metadata: meta,
|
|
||||||
pos: 0,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set initial position to the provided offset
|
|
||||||
if _, err := sr.Seek(offset, io.SeekStart); err != nil {
|
|
||||||
if err := sr.Close(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return sr, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Seek sets the offset for the next Read.
|
|
||||||
func (s *SzstdReaderAt) Seek(offset int64, whence int) (int64, error) {
|
|
||||||
s.mu.Lock()
|
|
||||||
defer s.mu.Unlock()
|
|
||||||
|
|
||||||
pos, err := s.r.Seek(offset, whence)
|
|
||||||
if err == nil {
|
|
||||||
s.pos = pos
|
|
||||||
}
|
|
||||||
return pos, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *SzstdReaderAt) Read(p []byte) (int, error) {
|
|
||||||
s.mu.Lock()
|
|
||||||
defer s.mu.Unlock()
|
|
||||||
|
|
||||||
n, err := s.r.Read(p)
|
|
||||||
if err == nil {
|
|
||||||
s.pos += int64(n)
|
|
||||||
}
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReadAt reads data at the specified offset.
|
|
||||||
func (s *SzstdReaderAt) ReadAt(p []byte, off int64) (int, error) {
|
|
||||||
if off < 0 {
|
|
||||||
return 0, errors.New("invalid offset")
|
|
||||||
}
|
|
||||||
if off >= s.metadata.Size {
|
|
||||||
return 0, io.EOF
|
|
||||||
}
|
|
||||||
|
|
||||||
endOff := min(off+int64(len(p)), s.metadata.Size)
|
|
||||||
|
|
||||||
// Find all blocks covered by the range
|
|
||||||
type blockInfo struct {
|
|
||||||
index int // Block index
|
|
||||||
offsetInBlock int64 // Offset within the block for starting reading
|
|
||||||
bytesToRead int64 // How many bytes to read from this block
|
|
||||||
}
|
|
||||||
|
|
||||||
var blocks []blockInfo
|
|
||||||
uncompressedOffset := int64(0)
|
|
||||||
currentOff := off
|
|
||||||
|
|
||||||
for i := 0; i < len(s.metadata.BlockData)-1; i++ {
|
|
||||||
blockUncompressedEnd := min(uncompressedOffset+int64(s.metadata.BlockSize), s.metadata.Size)
|
|
||||||
|
|
||||||
if currentOff < blockUncompressedEnd && endOff > uncompressedOffset {
|
|
||||||
offsetInBlock := max(0, currentOff-uncompressedOffset)
|
|
||||||
bytesToRead := min(blockUncompressedEnd-uncompressedOffset-offsetInBlock, endOff-currentOff)
|
|
||||||
|
|
||||||
blocks = append(blocks, blockInfo{
|
|
||||||
index: i,
|
|
||||||
offsetInBlock: offsetInBlock,
|
|
||||||
bytesToRead: bytesToRead,
|
|
||||||
})
|
|
||||||
|
|
||||||
currentOff += bytesToRead
|
|
||||||
if currentOff >= endOff {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
uncompressedOffset = blockUncompressedEnd
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(blocks) == 0 {
|
|
||||||
return 0, io.EOF
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parallel block decoding
|
|
||||||
type decodeResult struct {
|
|
||||||
index int
|
|
||||||
data []byte
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
resultCh := make(chan decodeResult, len(blocks))
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
sem := make(chan struct{}, runtime.NumCPU())
|
|
||||||
|
|
||||||
for _, block := range blocks {
|
|
||||||
wg.Add(1)
|
|
||||||
go func(block blockInfo) {
|
|
||||||
defer wg.Done()
|
|
||||||
sem <- struct{}{}
|
|
||||||
defer func() { <-sem }()
|
|
||||||
|
|
||||||
startOffset := int64(s.metadata.BlockData[block.index])
|
|
||||||
endOffset := int64(s.metadata.BlockData[block.index+1])
|
|
||||||
compressedSize := endOffset - startOffset
|
|
||||||
|
|
||||||
compressed := make([]byte, compressedSize)
|
|
||||||
_, err := s.r.ReadAt(compressed, startOffset)
|
|
||||||
if err != nil && err != io.EOF {
|
|
||||||
resultCh <- decodeResult{index: block.index, err: err}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
decoded, err := s.decoder.DecodeAll(compressed, nil)
|
|
||||||
if err != nil {
|
|
||||||
resultCh <- decodeResult{index: block.index, err: err}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
resultCh <- decodeResult{index: block.index, data: decoded, err: nil}
|
|
||||||
}(block)
|
|
||||||
}
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
wg.Wait()
|
|
||||||
close(resultCh)
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Collect results in block index order
|
|
||||||
totalRead := 0
|
|
||||||
results := make(map[int]decodeResult)
|
|
||||||
expected := len(blocks)
|
|
||||||
minIndex := blocks[0].index
|
|
||||||
|
|
||||||
for res := range resultCh {
|
|
||||||
results[res.index] = res
|
|
||||||
for {
|
|
||||||
if result, ok := results[minIndex]; ok {
|
|
||||||
if result.err != nil {
|
|
||||||
return 0, result.err
|
|
||||||
}
|
|
||||||
// find the corresponding blockInfo
|
|
||||||
var blk blockInfo
|
|
||||||
for _, b := range blocks {
|
|
||||||
if b.index == result.index {
|
|
||||||
blk = b
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
start := blk.offsetInBlock
|
|
||||||
end := start + blk.bytesToRead
|
|
||||||
copy(p[totalRead:totalRead+int(blk.bytesToRead)], result.data[start:end])
|
|
||||||
totalRead += int(blk.bytesToRead)
|
|
||||||
minIndex++
|
|
||||||
if minIndex-blocks[0].index >= len(blocks) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(results) == expected && minIndex-blocks[0].index >= len(blocks) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return totalRead, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the SzstdReaderAt and underlying decoder.
|
|
||||||
func (s *SzstdReaderAt) Close() error {
|
|
||||||
if err := s.r.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
s.decoder.Close()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -1,65 +0,0 @@
|
|||||||
package compress
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/chunkedreader"
|
|
||||||
)
|
|
||||||
|
|
||||||
// uncompressedModeHandler implements compressionModeHandler for uncompressed files
|
|
||||||
type uncompressedModeHandler struct{}
|
|
||||||
|
|
||||||
// isCompressible always returns false because
|
|
||||||
// uncompressed mode never compresses data
|
|
||||||
func (u *uncompressedModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// newObjectGetOriginalSize returns the original file size from the metadata
|
|
||||||
func (u *uncompressedModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
|
|
||||||
func (u *uncompressedModeHandler) openGetReadCloser(
|
|
||||||
ctx context.Context,
|
|
||||||
o *Object,
|
|
||||||
offset int64,
|
|
||||||
limit int64,
|
|
||||||
cr chunkedreader.ChunkedReader,
|
|
||||||
closer io.Closer,
|
|
||||||
options ...fs.OpenOption,
|
|
||||||
) (rc io.ReadCloser, err error) {
|
|
||||||
return o.Object.Open(ctx, options...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// processFileNameGetFileExtension returns the file extension for the given compression mode
|
|
||||||
func (u *uncompressedModeHandler) processFileNameGetFileExtension(compressionMode int) string {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
|
|
||||||
func (u *uncompressedModeHandler) putCompress(
|
|
||||||
ctx context.Context,
|
|
||||||
f *Fs,
|
|
||||||
in io.Reader,
|
|
||||||
src fs.ObjectInfo,
|
|
||||||
options []fs.OpenOption,
|
|
||||||
mimeType string,
|
|
||||||
) (fs.Object, *ObjectMetadata, error) {
|
|
||||||
return nil, nil, fmt.Errorf("unsupported compression mode %d", f.mode)
|
|
||||||
}
|
|
||||||
|
|
||||||
// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
|
|
||||||
func (u *uncompressedModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
|
|
||||||
return nil, nil, fmt.Errorf("unsupported compression mode %d", Uncompressed)
|
|
||||||
}
|
|
||||||
|
|
||||||
// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
|
|
||||||
// Warning: This function panics if cmeta is not of the expected type.
|
|
||||||
func (u *uncompressedModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -1,65 +0,0 @@
|
|||||||
package compress
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/chunkedreader"
|
|
||||||
)
|
|
||||||
|
|
||||||
// unknownModeHandler implements compressionModeHandler for unknown compression types
|
|
||||||
type unknownModeHandler struct{}
|
|
||||||
|
|
||||||
// isCompressible always returns an error because
|
|
||||||
// the compression mode is unknown
|
|
||||||
func (unk *unknownModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
|
|
||||||
return false, fmt.Errorf("unknown compression mode %d", compressionMode)
|
|
||||||
}
|
|
||||||
|
|
||||||
// newObjectGetOriginalSize returns the original file size from the metadata
|
|
||||||
func (unk *unknownModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
|
|
||||||
func (unk *unknownModeHandler) openGetReadCloser(
|
|
||||||
ctx context.Context,
|
|
||||||
o *Object,
|
|
||||||
offset int64,
|
|
||||||
limit int64,
|
|
||||||
cr chunkedreader.ChunkedReader,
|
|
||||||
closer io.Closer,
|
|
||||||
options ...fs.OpenOption,
|
|
||||||
) (rc io.ReadCloser, err error) {
|
|
||||||
return nil, fmt.Errorf("unknown compression mode %d", o.meta.Mode)
|
|
||||||
}
|
|
||||||
|
|
||||||
// processFileNameGetFileExtension returns the file extension for the given compression mode
|
|
||||||
func (unk *unknownModeHandler) processFileNameGetFileExtension(compressionMode int) string {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
|
|
||||||
func (unk *unknownModeHandler) putCompress(
|
|
||||||
ctx context.Context,
|
|
||||||
f *Fs,
|
|
||||||
in io.Reader,
|
|
||||||
src fs.ObjectInfo,
|
|
||||||
options []fs.OpenOption,
|
|
||||||
mimeType string,
|
|
||||||
) (fs.Object, *ObjectMetadata, error) {
|
|
||||||
return nil, nil, fmt.Errorf("unknown compression mode %d", f.mode)
|
|
||||||
}
|
|
||||||
|
|
||||||
// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
|
|
||||||
func (unk *unknownModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
|
|
||||||
return nil, nil, fmt.Errorf("unknown compression mode")
|
|
||||||
}
|
|
||||||
|
|
||||||
// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
|
|
||||||
// Warning: This function panics if cmeta is not of the expected type.
|
|
||||||
func (unk *unknownModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -1,192 +0,0 @@
|
|||||||
package compress
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"crypto/md5"
|
|
||||||
"encoding/hex"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
|
|
||||||
"github.com/klauspost/compress/zstd"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/accounting"
|
|
||||||
"github.com/rclone/rclone/fs/chunkedreader"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
|
||||||
)
|
|
||||||
|
|
||||||
// zstdModeHandler implements compressionModeHandler for zstd
|
|
||||||
type zstdModeHandler struct{}
|
|
||||||
|
|
||||||
// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
|
|
||||||
// the configured threshold
|
|
||||||
func (z *zstdModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
|
|
||||||
var b bytes.Buffer
|
|
||||||
var n int64
|
|
||||||
w, err := NewWriterSzstd(&b, zstd.WithEncoderLevel(zstd.SpeedDefault))
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
n, err = io.Copy(w, r)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
err = w.Close()
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
ratio := float64(n) / float64(b.Len())
|
|
||||||
return ratio > minCompressionRatio, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// newObjectGetOriginalSize returns the original file size from the metadata
|
|
||||||
func (z *zstdModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
|
|
||||||
if meta.CompressionMetadataZstd == nil {
|
|
||||||
return 0, errors.New("missing zstd metadata")
|
|
||||||
}
|
|
||||||
return meta.CompressionMetadataZstd.Size, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
|
|
||||||
func (z *zstdModeHandler) openGetReadCloser(
|
|
||||||
ctx context.Context,
|
|
||||||
o *Object,
|
|
||||||
offset int64,
|
|
||||||
limit int64,
|
|
||||||
cr chunkedreader.ChunkedReader,
|
|
||||||
closer io.Closer,
|
|
||||||
options ...fs.OpenOption,
|
|
||||||
) (rc io.ReadCloser, err error) {
|
|
||||||
var file io.Reader
|
|
||||||
|
|
||||||
if offset != 0 {
|
|
||||||
file, err = NewReaderAtSzstd(cr, o.meta.CompressionMetadataZstd, offset)
|
|
||||||
} else {
|
|
||||||
file, err = zstd.NewReader(cr)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var fileReader io.Reader
|
|
||||||
if limit != -1 {
|
|
||||||
fileReader = io.LimitReader(file, limit)
|
|
||||||
} else {
|
|
||||||
fileReader = file
|
|
||||||
}
|
|
||||||
// Return a ReadCloser
|
|
||||||
return ReadCloserWrapper{Reader: fileReader, Closer: closer}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// processFileNameGetFileExtension returns the file extension for the given compression mode
|
|
||||||
func (z *zstdModeHandler) processFileNameGetFileExtension(compressionMode int) string {
|
|
||||||
if compressionMode == Zstd {
|
|
||||||
return zstdFileExt
|
|
||||||
}
|
|
||||||
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
|
|
||||||
func (z *zstdModeHandler) putCompress(
|
|
||||||
ctx context.Context,
|
|
||||||
f *Fs,
|
|
||||||
in io.Reader,
|
|
||||||
src fs.ObjectInfo,
|
|
||||||
options []fs.OpenOption,
|
|
||||||
mimeType string,
|
|
||||||
) (fs.Object, *ObjectMetadata, error) {
|
|
||||||
// Unwrap reader accounting
|
|
||||||
in, wrap := accounting.UnWrap(in)
|
|
||||||
|
|
||||||
// Add the metadata hasher
|
|
||||||
metaHasher := md5.New()
|
|
||||||
in = io.TeeReader(in, metaHasher)
|
|
||||||
|
|
||||||
// Compress the file
|
|
||||||
pipeReader, pipeWriter := io.Pipe()
|
|
||||||
|
|
||||||
resultsZstd := make(chan compressionResult[SzstdMetadata])
|
|
||||||
go func() {
|
|
||||||
writer, err := NewWriterSzstd(pipeWriter, zstd.WithEncoderLevel(zstd.EncoderLevel(f.opt.CompressionLevel)))
|
|
||||||
if err != nil {
|
|
||||||
resultsZstd <- compressionResult[SzstdMetadata]{err: err}
|
|
||||||
close(resultsZstd)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
_, err = io.Copy(writer, in)
|
|
||||||
if wErr := writer.Close(); wErr != nil && err == nil {
|
|
||||||
err = wErr
|
|
||||||
}
|
|
||||||
if cErr := pipeWriter.Close(); cErr != nil && err == nil {
|
|
||||||
err = cErr
|
|
||||||
}
|
|
||||||
|
|
||||||
resultsZstd <- compressionResult[SzstdMetadata]{err: err, meta: writer.GetMetadata()}
|
|
||||||
close(resultsZstd)
|
|
||||||
}()
|
|
||||||
|
|
||||||
wrappedIn := wrap(bufio.NewReaderSize(pipeReader, bufferSize))
|
|
||||||
|
|
||||||
ht := f.Fs.Hashes().GetOne()
|
|
||||||
var hasher *hash.MultiHasher
|
|
||||||
var err error
|
|
||||||
if ht != hash.None {
|
|
||||||
wrappedIn, wrap = accounting.UnWrap(wrappedIn)
|
|
||||||
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
wrappedIn = io.TeeReader(wrappedIn, hasher)
|
|
||||||
wrappedIn = wrap(wrappedIn)
|
|
||||||
}
|
|
||||||
|
|
||||||
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
result := <-resultsZstd
|
|
||||||
if result.err != nil {
|
|
||||||
if o != nil {
|
|
||||||
_ = o.Remove(ctx)
|
|
||||||
}
|
|
||||||
return nil, nil, result.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build metadata using uncompressed size for filename
|
|
||||||
meta := z.newMetadata(result.meta.Size, f.mode, result.meta, hex.EncodeToString(metaHasher.Sum(nil)), mimeType)
|
|
||||||
if ht != hash.None && hasher != nil {
|
|
||||||
err = f.verifyObjectHash(ctx, o, hasher, ht)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return o, meta, nil
|
|
||||||
}
|
|
||||||
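putCompress above streams the upload through an io.Pipe so the remote write can start while a goroutine is still compressing, and tees the plaintext through an MD5 hasher for the metadata. A simplified sketch of that shape, using the standard library gzip writer in place of the backend's NewWriterSzstd and a local buffer in place of the real upload:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

func main() {
	src := io.Reader(strings.NewReader(strings.Repeat("some file data ", 200)))

	// Hash the plaintext as it is read.
	hasher := md5.New()
	src = io.TeeReader(src, hasher)

	// Compress in a goroutine; the "upload" reads the other end of the pipe.
	pr, pw := io.Pipe()
	errc := make(chan error, 1)
	go func() {
		gz := gzip.NewWriter(pw)
		_, err := io.Copy(gz, src)
		if cerr := gz.Close(); err == nil {
			err = cerr
		}
		pw.CloseWithError(err) // unblocks the reader if compression failed
		errc <- err
	}()

	// Stand-in for uploading the compressed stream.
	var uploaded bytes.Buffer
	if _, err := io.Copy(&uploaded, pr); err != nil {
		panic(err)
	}
	if err := <-errc; err != nil {
		panic(err)
	}
	fmt.Printf("uploaded %d compressed bytes, md5 %s\n",
		uploaded.Len(), hex.EncodeToString(hasher.Sum(nil)))
}
```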
|
|
||||||
// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
|
|
||||||
func (z *zstdModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
|
|
||||||
return o, z.newMetadata(o.Size(), mode, SzstdMetadata{}, hex.EncodeToString(sum), mimeType), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
|
|
||||||
// Warning: This function panics if cmeta is not of the expected type.
|
|
||||||
func (z *zstdModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
|
|
||||||
meta, ok := cmeta.(SzstdMetadata)
|
|
||||||
if !ok {
|
|
||||||
panic("invalid cmeta type: expected SzstdMetadata")
|
|
||||||
}
|
|
||||||
|
|
||||||
objMeta := new(ObjectMetadata)
|
|
||||||
objMeta.Size = size
|
|
||||||
objMeta.Mode = mode
|
|
||||||
objMeta.CompressionMetadataGzip = nil
|
|
||||||
objMeta.CompressionMetadataZstd = &meta
|
|
||||||
objMeta.MD5 = md5
|
|
||||||
objMeta.MimeType = mimeType
|
|
||||||
|
|
||||||
return objMeta
|
|
||||||
}
|
|
||||||
@@ -21,7 +21,6 @@ import (
|
|||||||
"github.com/rclone/rclone/backend/crypt/pkcs7"
|
"github.com/rclone/rclone/backend/crypt/pkcs7"
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/accounting"
|
"github.com/rclone/rclone/fs/accounting"
|
||||||
"github.com/rclone/rclone/lib/readers"
|
|
||||||
"github.com/rclone/rclone/lib/version"
|
"github.com/rclone/rclone/lib/version"
|
||||||
"github.com/rfjakob/eme"
|
"github.com/rfjakob/eme"
|
||||||
"golang.org/x/crypto/nacl/secretbox"
|
"golang.org/x/crypto/nacl/secretbox"
|
||||||
@@ -38,6 +37,7 @@ const (
|
|||||||
blockHeaderSize = secretbox.Overhead
|
blockHeaderSize = secretbox.Overhead
|
||||||
blockDataSize = 64 * 1024
|
blockDataSize = 64 * 1024
|
||||||
blockSize = blockHeaderSize + blockDataSize
|
blockSize = blockHeaderSize + blockDataSize
|
||||||
|
encryptedSuffix = ".bin" // when file name encryption is off we add this suffix to make sure the cloud provider doesn't process the file
|
||||||
)
|
)
|
||||||
|
|
||||||
// Errors returned by cipher
|
// Errors returned by cipher
|
||||||
@@ -53,9 +53,8 @@ var (
|
|||||||
ErrorEncryptedBadBlock = errors.New("failed to authenticate decrypted block - bad password?")
|
ErrorEncryptedBadBlock = errors.New("failed to authenticate decrypted block - bad password?")
|
||||||
ErrorBadBase32Encoding = errors.New("bad base32 filename encoding")
|
ErrorBadBase32Encoding = errors.New("bad base32 filename encoding")
|
||||||
ErrorFileClosed = errors.New("file already closed")
|
ErrorFileClosed = errors.New("file already closed")
|
||||||
ErrorNotAnEncryptedFile = errors.New("not an encrypted file - does not match suffix")
|
ErrorNotAnEncryptedFile = errors.New("not an encrypted file - no \"" + encryptedSuffix + "\" suffix")
|
||||||
ErrorBadSeek = errors.New("Seek beyond end of file")
|
ErrorBadSeek = errors.New("Seek beyond end of file")
|
||||||
ErrorSuffixMissingDot = errors.New("suffix config setting should include a '.'")
|
|
||||||
defaultSalt = []byte{0xA8, 0x0D, 0xF4, 0x3A, 0x8F, 0xBD, 0x03, 0x08, 0xA7, 0xCA, 0xB8, 0x3E, 0x58, 0x1F, 0x86, 0xB1}
|
defaultSalt = []byte{0xA8, 0x0D, 0xF4, 0x3A, 0x8F, 0xBD, 0x03, 0x08, 0xA7, 0xCA, 0xB8, 0x3E, 0x58, 0x1F, 0x86, 0xB1}
|
||||||
obfuscQuoteRune = '!'
|
obfuscQuoteRune = '!'
|
||||||
)
|
)
|
||||||
@@ -128,11 +127,11 @@ type fileNameEncoding interface {
|
|||||||
// RFC4648
|
// RFC4648
|
||||||
//
|
//
|
||||||
// The standard encoding is modified in two ways
|
// The standard encoding is modified in two ways
|
||||||
// - it becomes lower case (no-one likes upper case filenames!)
|
// * it becomes lower case (no-one likes upper case filenames!)
|
||||||
// - we strip the padding character `=`
|
// * we strip the padding character `=`
|
||||||
type caseInsensitiveBase32Encoding struct{}
|
type caseInsensitiveBase32Encoding struct{}
|
||||||
|
|
||||||
// EncodeToString encodes a string using the modified version of
|
// EncodeToString encodes a strign using the modified version of
|
||||||
// base32 encoding.
|
// base32 encoding.
|
||||||
func (caseInsensitiveBase32Encoding) EncodeToString(src []byte) string {
|
func (caseInsensitiveBase32Encoding) EncodeToString(src []byte) string {
|
||||||
encoded := base32.HexEncoding.EncodeToString(src)
|
encoded := base32.HexEncoding.EncodeToString(src)
|
||||||
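caseInsensitiveBase32Encoding above is plain base32hex with the `=` padding stripped and the result lower-cased, so encrypted names survive case-insensitive remotes. An equivalent round trip with only the standard library (a sketch, not the backend's exact helper):

```go
package main

import (
	"encoding/base32"
	"fmt"
	"strings"
)

var enc = base32.HexEncoding.WithPadding(base32.NoPadding)

// encodeName lower-cases an unpadded base32hex encoding of src.
func encodeName(src []byte) string {
	return strings.ToLower(enc.EncodeToString(src))
}

// decodeName reverses encodeName.
func decodeName(s string) ([]byte, error) {
	return enc.DecodeString(strings.ToUpper(s))
}

func main() {
	s := encodeName([]byte("hello"))
	b, err := decodeName(s)
	fmt.Printf("%q -> %q (err=%v)\n", s, b, err) // "d1imor3f" -> "hello"
}
```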
@@ -170,30 +169,27 @@ func NewNameEncoding(s string) (enc fileNameEncoding, err error) {
|
|||||||
|
|
||||||
// Cipher defines an encoding and decoding cipher for the crypt backend
|
// Cipher defines an encoding and decoding cipher for the crypt backend
|
||||||
type Cipher struct {
|
type Cipher struct {
|
||||||
dataKey [32]byte // Key for secretbox
|
dataKey [32]byte // Key for secretbox
|
||||||
nameKey [32]byte // 16,24 or 32 bytes
|
nameKey [32]byte // 16,24 or 32 bytes
|
||||||
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
|
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
|
||||||
block gocipher.Block
|
block gocipher.Block
|
||||||
mode NameEncryptionMode
|
mode NameEncryptionMode
|
||||||
fileNameEnc fileNameEncoding
|
fileNameEnc fileNameEncoding
|
||||||
buffers sync.Pool // encrypt/decrypt buffers
|
buffers sync.Pool // encrypt/decrypt buffers
|
||||||
cryptoRand io.Reader // read crypto random numbers from here
|
cryptoRand io.Reader // read crypto random numbers from here
|
||||||
dirNameEncrypt bool
|
dirNameEncrypt bool
|
||||||
passBadBlocks bool // if set passed bad blocks as zeroed blocks
|
|
||||||
encryptedSuffix string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// newCipher initialises the cipher. If salt is "" then it uses a built in salt val
|
// newCipher initialises the cipher. If salt is "" then it uses a built in salt val
|
||||||
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool, enc fileNameEncoding) (*Cipher, error) {
|
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool, enc fileNameEncoding) (*Cipher, error) {
|
||||||
c := &Cipher{
|
c := &Cipher{
|
||||||
mode: mode,
|
mode: mode,
|
||||||
fileNameEnc: enc,
|
fileNameEnc: enc,
|
||||||
cryptoRand: rand.Reader,
|
cryptoRand: rand.Reader,
|
||||||
dirNameEncrypt: dirNameEncrypt,
|
dirNameEncrypt: dirNameEncrypt,
|
||||||
encryptedSuffix: ".bin",
|
|
||||||
}
|
}
|
||||||
c.buffers.New = func() any {
|
c.buffers.New = func() interface{} {
|
||||||
return new([blockSize]byte)
|
return make([]byte, blockSize)
|
||||||
}
|
}
|
||||||
err := c.Key(password, salt)
|
err := c.Key(password, salt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -202,29 +198,11 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
|
|||||||
return c, nil
|
return c, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// setEncryptedSuffix set suffix, or an empty string
|
|
||||||
func (c *Cipher) setEncryptedSuffix(suffix string) {
|
|
||||||
if strings.EqualFold(suffix, "none") {
|
|
||||||
c.encryptedSuffix = ""
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if !strings.HasPrefix(suffix, ".") {
|
|
||||||
fs.Errorf(nil, "crypt: bad suffix: %v", ErrorSuffixMissingDot)
|
|
||||||
suffix = "." + suffix
|
|
||||||
}
|
|
||||||
c.encryptedSuffix = suffix
|
|
||||||
}
|
|
||||||
|
|
||||||
// Call to set bad block pass through
|
|
||||||
func (c *Cipher) setPassBadBlocks(passBadBlocks bool) {
|
|
||||||
c.passBadBlocks = passBadBlocks
|
|
||||||
}
|
|
||||||
|
|
||||||
// Key creates all the internal keys from the password passed in using
|
// Key creates all the internal keys from the password passed in using
|
||||||
// scrypt.
|
// scrypt.
|
||||||
//
|
//
|
||||||
// If salt is "" we use a fixed salt just to make attackers lives
|
// If salt is "" we use a fixed salt just to make attackers lives
|
||||||
// slightly harder than using no salt.
|
// slighty harder than using no salt.
|
||||||
//
|
//
|
||||||
// Note that empty password makes all 0x00 keys which is used in the
|
// Note that empty password makes all 0x00 keys which is used in the
|
||||||
// tests.
|
// tests.
|
||||||
@@ -252,18 +230,21 @@ func (c *Cipher) Key(password, salt string) (err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// getBlock gets a block from the pool of size blockSize
|
// getBlock gets a block from the pool of size blockSize
|
||||||
func (c *Cipher) getBlock() *[blockSize]byte {
|
func (c *Cipher) getBlock() []byte {
|
||||||
return c.buffers.Get().(*[blockSize]byte)
|
return c.buffers.Get().([]byte)
|
||||||
}
|
}
|
||||||
|
|
||||||
// putBlock returns a block to the pool of size blockSize
|
// putBlock returns a block to the pool of size blockSize
|
||||||
func (c *Cipher) putBlock(buf *[blockSize]byte) {
|
func (c *Cipher) putBlock(buf []byte) {
|
||||||
|
if len(buf) != blockSize {
|
||||||
|
panic("bad blocksize returned to pool")
|
||||||
|
}
|
||||||
c.buffers.Put(buf)
|
c.buffers.Put(buf)
|
||||||
}
|
}
|
||||||
|
|
||||||
// encryptSegment encrypts a path segment
|
// encryptSegment encrypts a path segment
|
||||||
//
|
//
|
||||||
// This uses EME with AES.
|
// This uses EME with AES
|
||||||
//
|
//
|
||||||
// EME (ECB-Mix-ECB) is a wide-block encryption mode presented in the
|
// EME (ECB-Mix-ECB) is a wide-block encryption mode presented in the
|
||||||
// 2003 paper "A Parallelizable Enciphering Mode" by Halevi and
|
// 2003 paper "A Parallelizable Enciphering Mode" by Halevi and
|
||||||
@@ -273,8 +254,8 @@ func (c *Cipher) putBlock(buf *[blockSize]byte) {
|
|||||||
// same filename must encrypt to the same thing.
|
// same filename must encrypt to the same thing.
|
||||||
//
|
//
|
||||||
// This means that
|
// This means that
|
||||||
// - filenames with the same name will encrypt the same
|
// * filenames with the same name will encrypt the same
|
||||||
// - filenames which start the same won't have a common prefix
|
// * filenames which start the same won't have a common prefix
|
||||||
func (c *Cipher) encryptSegment(plaintext string) string {
|
func (c *Cipher) encryptSegment(plaintext string) string {
|
||||||
if plaintext == "" {
|
if plaintext == "" {
|
||||||
return ""
|
return ""
|
||||||
@@ -329,14 +310,14 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
|
|||||||
for _, runeValue := range plaintext {
|
for _, runeValue := range plaintext {
|
||||||
dir += int(runeValue)
|
dir += int(runeValue)
|
||||||
}
|
}
|
||||||
dir %= 256
|
dir = dir % 256
|
||||||
|
|
||||||
// We'll use this number to store in the result filename...
|
// We'll use this number to store in the result filename...
|
||||||
var result bytes.Buffer
|
var result bytes.Buffer
|
||||||
_, _ = result.WriteString(strconv.Itoa(dir) + ".")
|
_, _ = result.WriteString(strconv.Itoa(dir) + ".")
|
||||||
|
|
||||||
// but we'll augment it with the nameKey for real calculation
|
// but we'll augment it with the nameKey for real calculation
|
||||||
for i := range len(c.nameKey) {
|
for i := 0; i < len(c.nameKey); i++ {
|
||||||
dir += int(c.nameKey[i])
|
dir += int(c.nameKey[i])
|
||||||
}
|
}
|
||||||
|
|
||||||
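The obfuscation hunk above seeds each segment's rotation by summing the rune values of the plaintext modulo 256 before mixing in the name-key bytes. A tiny worked sketch of just that first step; the key mixing and the per-character rotation are omitted:

```go
package main

import "fmt"

// rotationSeed mirrors the first step of obfuscateSegment: sum the runes
// of the segment and reduce modulo 256.
func rotationSeed(segment string) int {
	dir := 0
	for _, r := range segment {
		dir += int(r)
	}
	return dir % 256
}

func main() {
	// 'h'+'e'+'l'+'l'+'o' = 104+101+108+108+111 = 532, and 532 % 256 = 20.
	fmt.Println(rotationSeed("hello")) // 20
}
```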
@@ -418,7 +399,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// add the nameKey to get the real rotate distance
|
// add the nameKey to get the real rotate distance
|
||||||
for i := range len(c.nameKey) {
|
for i := 0; i < len(c.nameKey); i++ {
|
||||||
dir += int(c.nameKey[i])
|
dir += int(c.nameKey[i])
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -450,7 +431,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
|
|||||||
if pos >= 26 {
|
if pos >= 26 {
|
||||||
pos -= 6
|
pos -= 6
|
||||||
}
|
}
|
||||||
pos -= thisdir
|
pos = pos - thisdir
|
||||||
if pos < 0 {
|
if pos < 0 {
|
||||||
pos += 52
|
pos += 52
|
||||||
}
|
}
|
||||||
@@ -527,7 +508,7 @@ func (c *Cipher) encryptFileName(in string) string {
|
|||||||
// EncryptFileName encrypts a file path
|
// EncryptFileName encrypts a file path
|
||||||
func (c *Cipher) EncryptFileName(in string) string {
|
func (c *Cipher) EncryptFileName(in string) string {
|
||||||
if c.mode == NameEncryptionOff {
|
if c.mode == NameEncryptionOff {
|
||||||
return in + c.encryptedSuffix
|
return in + encryptedSuffix
|
||||||
}
|
}
|
||||||
return c.encryptFileName(in)
|
return c.encryptFileName(in)
|
||||||
}
|
}
|
||||||
@@ -587,8 +568,8 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
|
|||||||
// DecryptFileName decrypts a file path
|
// DecryptFileName decrypts a file path
|
||||||
func (c *Cipher) DecryptFileName(in string) (string, error) {
|
func (c *Cipher) DecryptFileName(in string) (string, error) {
|
||||||
if c.mode == NameEncryptionOff {
|
if c.mode == NameEncryptionOff {
|
||||||
remainingLength := len(in) - len(c.encryptedSuffix)
|
remainingLength := len(in) - len(encryptedSuffix)
|
||||||
if remainingLength == 0 || !strings.HasSuffix(in, c.encryptedSuffix) {
|
if remainingLength == 0 || !strings.HasSuffix(in, encryptedSuffix) {
|
||||||
return "", ErrorNotAnEncryptedFile
|
return "", ErrorNotAnEncryptedFile
|
||||||
}
|
}
|
||||||
decrypted := in[:remainingLength]
|
decrypted := in[:remainingLength]
|
||||||
@@ -628,7 +609,7 @@ func (n *nonce) pointer() *[fileNonceSize]byte {
|
|||||||
// fromReader fills the nonce from an io.Reader - normally the OSes
|
// fromReader fills the nonce from an io.Reader - normally the OSes
|
||||||
// crypto random number generator
|
// crypto random number generator
|
||||||
func (n *nonce) fromReader(in io.Reader) error {
|
func (n *nonce) fromReader(in io.Reader) error {
|
||||||
read, err := readers.ReadFill(in, (*n)[:])
|
read, err := io.ReadFull(in, (*n)[:])
|
||||||
if read != fileNonceSize {
|
if read != fileNonceSize {
|
||||||
return fmt.Errorf("short read of nonce: %w", err)
|
return fmt.Errorf("short read of nonce: %w", err)
|
||||||
}
|
}
|
||||||
@@ -664,7 +645,7 @@ func (n *nonce) increment() {
|
|||||||
// add a uint64 to the nonce
|
// add a uint64 to the nonce
|
||||||
func (n *nonce) add(x uint64) {
|
func (n *nonce) add(x uint64) {
|
||||||
carry := uint16(0)
|
carry := uint16(0)
|
||||||
for i := range 8 {
|
for i := 0; i < 8; i++ {
|
||||||
digit := (*n)[i]
|
digit := (*n)[i]
|
||||||
xDigit := byte(x)
|
xDigit := byte(x)
|
||||||
x >>= 8
|
x >>= 8
|
||||||
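nonce.add above treats the 24-byte nonce as a little-endian integer and adds a 64-bit block count with a manual carry, which is how the decrypter jumps straight to the nonce for a seeked-to block. A simplified standalone variant of that carry loop (the code above adds over the low 8 bytes; this sketch just carries across the whole slice):

```go
package main

import "fmt"

// addToNonce adds x to a little-endian nonce in place, propagating the
// carry across all bytes.
func addToNonce(n []byte, x uint64) {
	carry := uint16(0)
	for i := 0; i < len(n); i++ {
		digit := uint16(n[i])
		xDigit := uint16(byte(x))
		x >>= 8
		carry += digit + xDigit
		n[i] = byte(carry)
		carry >>= 8
	}
}

func main() {
	n := make([]byte, 24)
	n[0] = 0xFF // force a carry into the second byte
	addToNonce(n, 1)
	fmt.Printf("% x\n", n[:4]) // 00 01 00 00
}
```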
@@ -683,8 +664,8 @@ type encrypter struct {
|
|||||||
in io.Reader
|
in io.Reader
|
||||||
c *Cipher
|
c *Cipher
|
||||||
nonce nonce
|
nonce nonce
|
||||||
buf *[blockSize]byte
|
buf []byte
|
||||||
readBuf *[blockSize]byte
|
readBuf []byte
|
||||||
bufIndex int
|
bufIndex int
|
||||||
bufSize int
|
bufSize int
|
||||||
err error
|
err error
|
||||||
@@ -709,9 +690,9 @@ func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Copy magic into buffer
|
// Copy magic into buffer
|
||||||
copy((*fh.buf)[:], fileMagicBytes)
|
copy(fh.buf, fileMagicBytes)
|
||||||
// Copy nonce into buffer
|
// Copy nonce into buffer
|
||||||
copy((*fh.buf)[fileMagicSize:], fh.nonce[:])
|
copy(fh.buf[fileMagicSize:], fh.nonce[:])
|
||||||
return fh, nil
|
return fh, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -726,20 +707,22 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
|
|||||||
if fh.bufIndex >= fh.bufSize {
|
if fh.bufIndex >= fh.bufSize {
|
||||||
// Read data
|
// Read data
|
||||||
// FIXME should overlap the reads with a go-routine and 2 buffers?
|
// FIXME should overlap the reads with a go-routine and 2 buffers?
|
||||||
readBuf := (*fh.readBuf)[:blockDataSize]
|
readBuf := fh.readBuf[:blockDataSize]
|
||||||
n, err = readers.ReadFill(fh.in, readBuf)
|
n, err = io.ReadFull(fh.in, readBuf)
|
||||||
if n == 0 {
|
if n == 0 {
|
||||||
|
// err can't be nil since:
|
||||||
|
// n == len(buf) if and only if err == nil.
|
||||||
return fh.finish(err)
|
return fh.finish(err)
|
||||||
}
|
}
|
||||||
// possibly err != nil here, but we will process the
|
// possibly err != nil here, but we will process the
|
||||||
// data and the next call to ReadFill will return 0, err
|
// data and the next call to ReadFull will return 0, err
|
||||||
// Encrypt the block using the nonce
|
// Encrypt the block using the nonce
|
||||||
secretbox.Seal((*fh.buf)[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
|
secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
|
||||||
fh.bufIndex = 0
|
fh.bufIndex = 0
|
||||||
fh.bufSize = blockHeaderSize + n
|
fh.bufSize = blockHeaderSize + n
|
||||||
fh.nonce.increment()
|
fh.nonce.increment()
|
||||||
}
|
}
|
||||||
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufSize])
|
n = copy(p, fh.buf[fh.bufIndex:fh.bufSize])
|
||||||
fh.bufIndex += n
|
fh.bufIndex += n
|
||||||
return n, nil
|
return n, nil
|
||||||
}
|
}
|
||||||
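The encrypter Read loop above seals one block of plaintext at a time with NaCl secretbox, using the current nonce and bumping it after every block. A minimal sketch of that per-block sealing, with a tiny block size and a simplified bump of only the low nonce byte so the output is easy to inspect:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/nacl/secretbox"
)

// sealBlocks encrypts msg in fixed-size blocks, bumping the low (first)
// nonce byte per block as a stand-in for the full little-endian increment.
func sealBlocks(msg []byte, nonce [24]byte, key *[32]byte, blockData int) [][]byte {
	var out [][]byte
	for len(msg) > 0 {
		n := blockData
		if n > len(msg) {
			n = len(msg)
		}
		out = append(out, secretbox.Seal(nil, msg[:n], &nonce, key))
		msg = msg[n:]
		nonce[0]++ // simplified increment
	}
	return out
}

func main() {
	var key [32]byte
	var nonce [24]byte
	blocks := sealBlocks([]byte("hello rclone crypt format"), nonce, &key, 16)
	// Each block grows by secretbox.Overhead (16) bytes of authenticator.
	fmt.Println(len(blocks), len(blocks[0]), secretbox.Overhead) // 2 32 16
}
```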
@@ -780,8 +763,8 @@ type decrypter struct {
|
|||||||
nonce nonce
|
nonce nonce
|
||||||
initialNonce nonce
|
initialNonce nonce
|
||||||
c *Cipher
|
c *Cipher
|
||||||
buf *[blockSize]byte
|
buf []byte
|
||||||
readBuf *[blockSize]byte
|
readBuf []byte
|
||||||
bufIndex int
|
bufIndex int
|
||||||
bufSize int
|
bufSize int
|
||||||
err error
|
err error
|
||||||
@@ -799,12 +782,12 @@ func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
|
|||||||
limit: -1,
|
limit: -1,
|
||||||
}
|
}
|
||||||
// Read file header (magic + nonce)
|
// Read file header (magic + nonce)
|
||||||
readBuf := (*fh.readBuf)[:fileHeaderSize]
|
readBuf := fh.readBuf[:fileHeaderSize]
|
||||||
n, err := readers.ReadFill(fh.rc, readBuf)
|
_, err := io.ReadFull(fh.rc, readBuf)
|
||||||
if n < fileHeaderSize && err == io.EOF {
|
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||||
// This read from 0..fileHeaderSize-1 bytes
|
// This read from 0..fileHeaderSize-1 bytes
|
||||||
return nil, fh.finishAndClose(ErrorEncryptedFileTooShort)
|
return nil, fh.finishAndClose(ErrorEncryptedFileTooShort)
|
||||||
} else if err != io.EOF && err != nil {
|
} else if err != nil {
|
||||||
return nil, fh.finishAndClose(err)
|
return nil, fh.finishAndClose(err)
|
||||||
}
|
}
|
||||||
// check the magic
|
// check the magic
|
||||||
@@ -862,8 +845,10 @@ func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offse
|
|||||||
func (fh *decrypter) fillBuffer() (err error) {
|
func (fh *decrypter) fillBuffer() (err error) {
|
||||||
// FIXME should overlap the reads with a go-routine and 2 buffers?
|
// FIXME should overlap the reads with a go-routine and 2 buffers?
|
||||||
readBuf := fh.readBuf
|
readBuf := fh.readBuf
|
||||||
n, err := readers.ReadFill(fh.rc, (*readBuf)[:])
|
n, err := io.ReadFull(fh.rc, readBuf)
|
||||||
if n == 0 {
|
if n == 0 {
|
||||||
|
// err can't be nil since:
|
||||||
|
// n == len(buf) if and only if err == nil.
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// possibly err != nil here, but we will process the data and
|
// possibly err != nil here, but we will process the data and
|
||||||
@@ -871,25 +856,18 @@ func (fh *decrypter) fillBuffer() (err error) {
|
|||||||
|
|
||||||
// Check header + 1 byte exists
|
// Check header + 1 byte exists
|
||||||
if n <= blockHeaderSize {
|
if n <= blockHeaderSize {
|
||||||
if err != nil && err != io.EOF {
|
if err != nil {
|
||||||
return err // return pending error as it is likely more accurate
|
return err // return pending error as it is likely more accurate
|
||||||
}
|
}
|
||||||
return ErrorEncryptedFileBadHeader
|
return ErrorEncryptedFileBadHeader
|
||||||
}
|
}
|
||||||
// Decrypt the block using the nonce
|
// Decrypt the block using the nonce
|
||||||
_, ok := secretbox.Open((*fh.buf)[:0], (*readBuf)[:n], fh.nonce.pointer(), &fh.c.dataKey)
|
_, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
|
||||||
if !ok {
|
if !ok {
|
||||||
if err != nil && err != io.EOF {
|
if err != nil {
|
||||||
return err // return pending error as it is likely more accurate
|
return err // return pending error as it is likely more accurate
|
||||||
}
|
}
|
||||||
if !fh.c.passBadBlocks {
|
return ErrorEncryptedBadBlock
|
||||||
return ErrorEncryptedBadBlock
|
|
||||||
}
|
|
||||||
fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
|
|
||||||
// Zero out the bad block and continue
|
|
||||||
for i := range (*fh.buf)[:n] {
|
|
||||||
fh.buf[i] = 0
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
fh.bufIndex = 0
|
fh.bufIndex = 0
|
||||||
fh.bufSize = n - blockHeaderSize
|
fh.bufSize = n - blockHeaderSize
|
||||||
@@ -915,7 +893,7 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
|
|||||||
if fh.limit >= 0 && fh.limit < int64(toCopy) {
|
if fh.limit >= 0 && fh.limit < int64(toCopy) {
|
||||||
toCopy = int(fh.limit)
|
toCopy = int(fh.limit)
|
||||||
}
|
}
|
||||||
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufIndex+toCopy])
|
n = copy(p, fh.buf[fh.bufIndex:fh.bufIndex+toCopy])
|
||||||
fh.bufIndex += n
|
fh.bufIndex += n
|
||||||
if fh.limit >= 0 {
|
if fh.limit >= 0 {
|
||||||
fh.limit -= int64(n)
|
fh.limit -= int64(n)
|
||||||
@@ -926,8 +904,9 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
|
|||||||
return n, nil
|
return n, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// calculateUnderlying converts an (offset, limit) in an encrypted file
|
// calculateUnderlying converts an (offset, limit) in a crypted file
|
||||||
// into an (underlyingOffset, underlyingLimit) for the underlying file.
|
// into an (underlyingOffset, underlyingLimit) for the underlying
|
||||||
|
// file.
|
||||||
//
|
//
|
||||||
// It also returns number of bytes to discard after reading the first
|
// It also returns number of bytes to discard after reading the first
|
||||||
// block and number of blocks this is from the start so the nonce can
|
// block and number of blocks this is from the start so the nonce can
|
||||||
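calculateUnderlying, whose doc comment is shown above, maps a plaintext (offset, limit) onto the encrypted stream: a 32-byte file header (8-byte magic plus 24-byte nonce) followed by blocks of 16 bytes of secretbox overhead plus 64 KiB of data, as the constants and test vectors elsewhere in this diff suggest. A sketch of the offset arithmetic under those assumptions:

```go
package main

import "fmt"

const (
	fileHeaderSize  = 8 + 24    // magic + nonce
	blockHeaderSize = 16        // secretbox.Overhead
	blockDataSize   = 64 * 1024 // plaintext bytes per block
	blockSize       = blockHeaderSize + blockDataSize
)

// underlyingOffset returns where to start reading the encrypted file for a
// given plaintext offset, how many decrypted bytes to discard from the first
// block, and which block number that is (needed to advance the nonce).
func underlyingOffset(plainOffset int64) (off, discard, blocks int64) {
	blocks = plainOffset / blockDataSize
	discard = plainOffset % blockDataSize
	off = fileHeaderSize + blocks*blockSize
	return off, discard, blocks
}

func main() {
	off, discard, blocks := underlyingOffset(70000)
	// 70000 falls 4464 bytes into block 1: 32 + 1*65552 = 65584.
	fmt.Println(off, discard, blocks) // 65584 4464 1
}
```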
@@ -1106,7 +1085,7 @@ func (c *Cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
|
|||||||
|
|
||||||
// DecryptDataSeek decrypts the data stream from offset
|
// DecryptDataSeek decrypts the data stream from offset
|
||||||
//
|
//
|
||||||
// The open function must return a ReadCloser opened to the offset supplied.
|
// The open function must return a ReadCloser opened to the offset supplied
|
||||||
//
|
//
|
||||||
// You must use this form of DecryptData if you might want to Seek the file handle
|
// You must use this form of DecryptData if you might want to Seek the file handle
|
||||||
func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
|
func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
@@ -27,14 +28,14 @@ func TestNewNameEncryptionMode(t *testing.T) {
|
|||||||
{"off", NameEncryptionOff, ""},
|
{"off", NameEncryptionOff, ""},
|
||||||
{"standard", NameEncryptionStandard, ""},
|
{"standard", NameEncryptionStandard, ""},
|
||||||
{"obfuscate", NameEncryptionObfuscated, ""},
|
{"obfuscate", NameEncryptionObfuscated, ""},
|
||||||
{"potato", NameEncryptionOff, "unknown file name encryption mode \"potato\""},
|
{"potato", NameEncryptionOff, "Unknown file name encryption mode \"potato\""},
|
||||||
} {
|
} {
|
||||||
actual, actualErr := NewNameEncryptionMode(test.in)
|
actual, actualErr := NewNameEncryptionMode(test.in)
|
||||||
assert.Equal(t, actual, test.expected)
|
assert.Equal(t, actual, test.expected)
|
||||||
if test.expectedErr == "" {
|
if test.expectedErr == "" {
|
||||||
assert.NoError(t, actualErr)
|
assert.NoError(t, actualErr)
|
||||||
} else {
|
} else {
|
||||||
assert.EqualError(t, actualErr, test.expectedErr)
|
assert.Error(t, actualErr, test.expectedErr)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -405,13 +406,6 @@ func TestNonStandardEncryptFileName(t *testing.T) {
|
|||||||
// Off mode
|
// Off mode
|
||||||
c, _ := newCipher(NameEncryptionOff, "", "", true, nil)
|
c, _ := newCipher(NameEncryptionOff, "", "", true, nil)
|
||||||
assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
|
assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
|
||||||
// Off mode with custom suffix
|
|
||||||
c, _ = newCipher(NameEncryptionOff, "", "", true, nil)
|
|
||||||
c.setEncryptedSuffix(".jpg")
|
|
||||||
assert.Equal(t, "1/12/123.jpg", c.EncryptFileName("1/12/123"))
|
|
||||||
// Off mode with empty suffix
|
|
||||||
c.setEncryptedSuffix("none")
|
|
||||||
assert.Equal(t, "1/12/123", c.EncryptFileName("1/12/123"))
|
|
||||||
// Obfuscation mode
|
// Obfuscation mode
|
||||||
c, _ = newCipher(NameEncryptionObfuscated, "", "", true, nil)
|
c, _ = newCipher(NameEncryptionObfuscated, "", "", true, nil)
|
||||||
assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
|
assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
|
||||||
@@ -490,27 +484,21 @@ func TestNonStandardDecryptFileName(t *testing.T) {
|
|||||||
in string
|
in string
|
||||||
expected string
|
expected string
|
||||||
expectedErr error
|
expectedErr error
|
||||||
customSuffix string
|
|
||||||
}{
|
}{
|
||||||
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil, ""},
|
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
|
||||||
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile, ""},
|
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
|
||||||
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile, ""},
|
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
|
||||||
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil, ""},
|
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
|
||||||
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil, ""},
|
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
|
||||||
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil, ""},
|
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
|
||||||
{NameEncryptionOff, true, "1/12/123.jpg", "1/12/123", nil, ".jpg"},
|
{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
|
||||||
{NameEncryptionOff, true, "1/12/123", "1/12/123", nil, "none"},
|
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
|
||||||
{NameEncryptionObfuscated, true, "!.hello", "hello", nil, ""},
|
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
|
||||||
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile, ""},
|
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
|
||||||
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil, ""},
|
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
|
||||||
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil, ""},
|
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
|
||||||
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil, ""},
|
|
||||||
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil, ""},
|
|
||||||
} {
|
} {
|
||||||
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
|
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
|
||||||
if test.customSuffix != "" {
|
|
||||||
c.setEncryptedSuffix(test.customSuffix)
|
|
||||||
}
|
|
||||||
actual, actualErr := c.DecryptFileName(test.in)
|
actual, actualErr := c.DecryptFileName(test.in)
|
||||||
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
|
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
|
||||||
assert.Equal(t, test.expected, actual, what)
|
assert.Equal(t, test.expected, actual, what)
|
||||||
@@ -739,7 +727,7 @@ func TestNonceFromReader(t *testing.T) {
|
|||||||
assert.Equal(t, nonce{'1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o'}, x)
|
assert.Equal(t, nonce{'1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o'}, x)
|
||||||
buf = bytes.NewBufferString("123456789abcdefghijklmn")
|
buf = bytes.NewBufferString("123456789abcdefghijklmn")
|
||||||
err = x.fromReader(buf)
|
err = x.fromReader(buf)
|
||||||
assert.EqualError(t, err, "short read of nonce: EOF")
|
assert.Error(t, err, "short read of nonce")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNonceFromBuf(t *testing.T) {
|
func TestNonceFromBuf(t *testing.T) {
|
||||||
@@ -1063,7 +1051,7 @@ func TestRandomSource(t *testing.T) {
|
|||||||
_, _ = source.Read(buf)
|
_, _ = source.Read(buf)
|
||||||
sink = newRandomSource(1e8)
|
sink = newRandomSource(1e8)
|
||||||
_, err = io.Copy(sink, source)
|
_, err = io.Copy(sink, source)
|
||||||
assert.EqualError(t, err, "Error in stream at 1")
|
assert.Error(t, err, "Error in stream")
|
||||||
}
|
}
|
||||||
|
|
||||||
type zeroes struct{}
|
type zeroes struct{}
|
||||||
@@ -1085,7 +1073,7 @@ func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
|
|||||||
source := newRandomSource(copySize)
|
source := newRandomSource(copySize)
|
||||||
encrypted, err := c.newEncrypter(source, nil)
|
encrypted, err := c.newEncrypter(source, nil)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
decrypted, err := c.newDecrypter(io.NopCloser(encrypted))
|
decrypted, err := c.newDecrypter(ioutil.NopCloser(encrypted))
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
sink := newRandomSource(copySize)
|
sink := newRandomSource(copySize)
|
||||||
n, err := io.CopyBuffer(sink, decrypted, buf)
|
n, err := io.CopyBuffer(sink, decrypted, buf)
|
||||||
@@ -1156,15 +1144,15 @@ func TestEncryptData(t *testing.T) {
|
|||||||
buf := bytes.NewBuffer(test.in)
|
buf := bytes.NewBuffer(test.in)
|
||||||
encrypted, err := c.EncryptData(buf)
|
encrypted, err := c.EncryptData(buf)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
out, err := io.ReadAll(encrypted)
|
out, err := ioutil.ReadAll(encrypted)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Equal(t, test.expected, out)
|
assert.Equal(t, test.expected, out)
|
||||||
|
|
||||||
// Check we can decode the data properly too...
|
// Check we can decode the data properly too...
|
||||||
buf = bytes.NewBuffer(out)
|
buf = bytes.NewBuffer(out)
|
||||||
decrypted, err := c.DecryptData(io.NopCloser(buf))
|
decrypted, err := c.DecryptData(ioutil.NopCloser(buf))
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
out, err = io.ReadAll(decrypted)
|
out, err = ioutil.ReadAll(decrypted)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Equal(t, test.in, out)
|
assert.Equal(t, test.in, out)
|
||||||
}
|
}
|
||||||
@@ -1180,13 +1168,13 @@ func TestNewEncrypter(t *testing.T) {
|
|||||||
fh, err := c.newEncrypter(z, nil)
|
fh, err := c.newEncrypter(z, nil)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Equal(t, nonce{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.nonce)
|
assert.Equal(t, nonce{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.nonce)
|
||||||
assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, (*fh.buf)[:32])
|
assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.buf[:32])
|
||||||
|
|
||||||
// Test error path
|
// Test error path
|
||||||
c.cryptoRand = bytes.NewBufferString("123456789abcdefghijklmn")
|
c.cryptoRand = bytes.NewBufferString("123456789abcdefghijklmn")
|
||||||
fh, err = c.newEncrypter(z, nil)
|
fh, err = c.newEncrypter(z, nil)
|
||||||
assert.Nil(t, fh)
|
assert.Nil(t, fh)
|
||||||
assert.EqualError(t, err, "short read of nonce: EOF")
|
assert.Error(t, err, "short read of nonce")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test the stream returning 0, io.ErrUnexpectedEOF - this used to
|
// Test the stream returning 0, io.ErrUnexpectedEOF - this used to
|
||||||
@@ -1199,7 +1187,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
|
|||||||
fh, err := c.newEncrypter(in, nil)
|
fh, err := c.newEncrypter(in, nil)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
n, err := io.CopyN(io.Discard, fh, 1e6)
|
n, err := io.CopyN(ioutil.Discard, fh, 1e6)
|
||||||
assert.Equal(t, io.ErrUnexpectedEOF, err)
|
assert.Equal(t, io.ErrUnexpectedEOF, err)
|
||||||
assert.Equal(t, int64(32), n)
|
assert.Equal(t, int64(32), n)
|
||||||
}
|
}
|
||||||
@@ -1237,7 +1225,7 @@ func TestNewDecrypter(t *testing.T) {
|
|||||||
cd := newCloseDetector(bytes.NewBuffer(file0[:i]))
|
cd := newCloseDetector(bytes.NewBuffer(file0[:i]))
|
||||||
fh, err = c.newDecrypter(cd)
|
fh, err = c.newDecrypter(cd)
|
||||||
assert.Nil(t, fh)
|
assert.Nil(t, fh)
|
||||||
assert.EqualError(t, err, ErrorEncryptedFileTooShort.Error())
|
assert.Error(t, err, ErrorEncryptedFileTooShort.Error())
|
||||||
assert.Equal(t, 1, cd.closed)
|
assert.Equal(t, 1, cd.closed)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1245,7 +1233,7 @@ func TestNewDecrypter(t *testing.T) {
|
|||||||
cd = newCloseDetector(er)
|
cd = newCloseDetector(er)
|
||||||
fh, err = c.newDecrypter(cd)
|
fh, err = c.newDecrypter(cd)
|
||||||
assert.Nil(t, fh)
|
assert.Nil(t, fh)
|
||||||
assert.EqualError(t, err, "potato")
|
assert.Error(t, err, "potato")
|
||||||
assert.Equal(t, 1, cd.closed)
|
assert.Equal(t, 1, cd.closed)
|
||||||
|
|
||||||
// bad magic
|
// bad magic
|
||||||
@@ -1256,7 +1244,7 @@ func TestNewDecrypter(t *testing.T) {
|
|||||||
cd := newCloseDetector(bytes.NewBuffer(file0copy))
|
cd := newCloseDetector(bytes.NewBuffer(file0copy))
|
||||||
fh, err := c.newDecrypter(cd)
|
fh, err := c.newDecrypter(cd)
|
||||||
assert.Nil(t, fh)
|
assert.Nil(t, fh)
|
||||||
assert.EqualError(t, err, ErrorEncryptedBadMagic.Error())
|
assert.Error(t, err, ErrorEncryptedBadMagic.Error())
|
||||||
file0copy[i] ^= 0x1
|
file0copy[i] ^= 0x1
|
||||||
assert.Equal(t, 1, cd.closed)
|
assert.Equal(t, 1, cd.closed)
|
||||||
}
|
}
|
||||||
@@ -1269,12 +1257,12 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
|
|||||||
|
|
||||||
in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
|
in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
|
||||||
in1 := bytes.NewBuffer(file16)
|
in1 := bytes.NewBuffer(file16)
|
||||||
in := io.NopCloser(io.MultiReader(in1, in2))
|
in := ioutil.NopCloser(io.MultiReader(in1, in2))
|
||||||
|
|
||||||
fh, err := c.newDecrypter(in)
|
fh, err := c.newDecrypter(in)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
n, err := io.CopyN(io.Discard, fh, 1e6)
|
n, err := io.CopyN(ioutil.Discard, fh, 1e6)
|
||||||
assert.Equal(t, io.ErrUnexpectedEOF, err)
|
assert.Equal(t, io.ErrUnexpectedEOF, err)
|
||||||
assert.Equal(t, int64(16), n)
|
assert.Equal(t, int64(16), n)
|
||||||
}
|
}
|
||||||
@@ -1286,14 +1274,14 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
|
|||||||
|
|
||||||
// Make random data
|
// Make random data
|
||||||
const dataSize = 150000
|
const dataSize = 150000
|
||||||
plaintext, err := io.ReadAll(newRandomSource(dataSize))
|
plaintext, err := ioutil.ReadAll(newRandomSource(dataSize))
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
// Encrypt the data
|
// Encrypt the data
|
||||||
buf := bytes.NewBuffer(plaintext)
|
buf := bytes.NewBuffer(plaintext)
|
||||||
encrypted, err := c.EncryptData(buf)
|
encrypted, err := c.EncryptData(buf)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
ciphertext, err := io.ReadAll(encrypted)
|
ciphertext, err := ioutil.ReadAll(encrypted)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
trials := []int{0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65,
|
trials := []int{0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65,
|
||||||
@@ -1307,9 +1295,12 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
|
|||||||
open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
|
open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
|
||||||
end := len(ciphertext)
|
end := len(ciphertext)
|
||||||
if underlyingLimit >= 0 {
|
if underlyingLimit >= 0 {
|
||||||
end = min(int(underlyingOffset+underlyingLimit), len(ciphertext))
|
end = int(underlyingOffset + underlyingLimit)
|
||||||
|
if end > len(ciphertext) {
|
||||||
|
end = len(ciphertext)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
reader = io.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
|
reader = ioutil.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
|
||||||
return reader, nil
|
return reader, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1487,7 +1478,7 @@ func TestDecrypterRead(t *testing.T) {
|
|||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
// Test truncating the file at each possible point
|
// Test truncating the file at each possible point
|
||||||
for i := range len(file16) - 1 {
|
for i := 0; i < len(file16)-1; i++ {
|
||||||
what := fmt.Sprintf("truncating to %d/%d", i, len(file16))
|
what := fmt.Sprintf("truncating to %d/%d", i, len(file16))
|
||||||
cd := newCloseDetector(bytes.NewBuffer(file16[:i]))
|
cd := newCloseDetector(bytes.NewBuffer(file16[:i]))
|
||||||
fh, err := c.newDecrypter(cd)
|
fh, err := c.newDecrypter(cd)
|
||||||
@@ -1499,16 +1490,14 @@ func TestDecrypterRead(t *testing.T) {
|
|||||||
assert.NoError(t, err, what)
|
assert.NoError(t, err, what)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
_, err = io.ReadAll(fh)
|
_, err = ioutil.ReadAll(fh)
|
||||||
var expectedErr error
|
var expectedErr error
|
||||||
switch {
|
switch {
|
||||||
case i == fileHeaderSize:
|
case i == fileHeaderSize:
|
||||||
// This would normally produce an error *except* on the first block
|
// This would normally produce an error *except* on the first block
|
||||||
expectedErr = nil
|
expectedErr = nil
|
||||||
case i <= fileHeaderSize+blockHeaderSize:
|
|
||||||
expectedErr = ErrorEncryptedFileBadHeader
|
|
||||||
default:
|
default:
|
||||||
expectedErr = ErrorEncryptedBadBlock
|
expectedErr = io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
if expectedErr != nil {
|
if expectedErr != nil {
|
||||||
assert.EqualError(t, err, expectedErr.Error(), what)
|
assert.EqualError(t, err, expectedErr.Error(), what)
|
||||||
@@ -1525,8 +1514,8 @@ func TestDecrypterRead(t *testing.T) {
|
|||||||
cd := newCloseDetector(in)
|
cd := newCloseDetector(in)
|
||||||
fh, err := c.newDecrypter(cd)
|
fh, err := c.newDecrypter(cd)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
_, err = io.ReadAll(fh)
|
_, err = ioutil.ReadAll(fh)
|
||||||
assert.EqualError(t, err, "potato")
|
assert.Error(t, err, "potato")
|
||||||
assert.Equal(t, 0, cd.closed)
|
assert.Equal(t, 0, cd.closed)
|
||||||
|
|
||||||
// Test corrupting the input
|
// Test corrupting the input
|
||||||
@@ -1535,28 +1524,17 @@ func TestDecrypterRead(t *testing.T) {
|
|||||||
copy(file16copy, file16)
|
copy(file16copy, file16)
|
||||||
for i := range file16copy {
|
for i := range file16copy {
|
||||||
file16copy[i] ^= 0xFF
|
file16copy[i] ^= 0xFF
|
||||||
fh, err := c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
|
fh, err := c.newDecrypter(ioutil.NopCloser(bytes.NewBuffer(file16copy)))
|
||||||
if i < fileMagicSize {
|
if i < fileMagicSize {
|
||||||
assert.EqualError(t, err, ErrorEncryptedBadMagic.Error())
|
assert.Error(t, err, ErrorEncryptedBadMagic.Error())
|
||||||
assert.Nil(t, fh)
|
assert.Nil(t, fh)
|
||||||
} else {
|
} else {
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
_, err = io.ReadAll(fh)
|
_, err = ioutil.ReadAll(fh)
|
||||||
assert.EqualError(t, err, ErrorEncryptedBadBlock.Error())
|
assert.Error(t, err, ErrorEncryptedFileBadHeader.Error())
|
||||||
}
|
}
|
||||||
file16copy[i] ^= 0xFF
|
file16copy[i] ^= 0xFF
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test that we can corrupt a byte and read zeroes if
|
|
||||||
// passBadBlocks is set
|
|
||||||
copy(file16copy, file16)
|
|
||||||
file16copy[len(file16copy)-1] ^= 0xFF
|
|
||||||
c.passBadBlocks = true
|
|
||||||
fh, err = c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
|
|
||||||
assert.NoError(t, err)
|
|
||||||
buf, err := io.ReadAll(fh)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, make([]byte, 16), buf)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDecrypterClose(t *testing.T) {
|
func TestDecrypterClose(t *testing.T) {
|
||||||
@@ -1577,7 +1555,7 @@ func TestDecrypterClose(t *testing.T) {
|
|||||||
|
|
||||||
// double close
|
// double close
|
||||||
err = fh.Close()
|
err = fh.Close()
|
||||||
assert.EqualError(t, err, ErrorFileClosed.Error())
|
assert.Error(t, err, ErrorFileClosed.Error())
|
||||||
assert.Equal(t, 1, cd.closed)
|
assert.Equal(t, 1, cd.closed)
|
||||||
|
|
||||||
// try again reading the file this time
|
// try again reading the file this time
|
||||||
@@ -1587,7 +1565,7 @@ func TestDecrypterClose(t *testing.T) {
|
|||||||
assert.Equal(t, 0, cd.closed)
|
assert.Equal(t, 0, cd.closed)
|
||||||
|
|
||||||
// close after reading
|
// close after reading
|
||||||
out, err := io.ReadAll(fh)
|
out, err := ioutil.ReadAll(fh)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Equal(t, []byte{1}, out)
|
assert.Equal(t, []byte{1}, out)
|
||||||
assert.Equal(t, io.EOF, fh.err)
|
assert.Equal(t, io.EOF, fh.err)
|
||||||
@@ -1604,6 +1582,8 @@ func TestPutGetBlock(t *testing.T) {
|
|||||||
block := c.getBlock()
|
block := c.getBlock()
|
||||||
c.putBlock(block)
|
c.putBlock(block)
|
||||||
c.putBlock(block)
|
c.putBlock(block)
|
||||||
|
|
||||||
|
assert.Panics(t, func() { c.putBlock(block[:len(block)-1]) })
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestKey(t *testing.T) {
|
func TestKey(t *testing.T) {
|
||||||
|
|||||||
@@ -18,7 +18,6 @@ import (
|
|||||||
"github.com/rclone/rclone/fs/config/obscure"
|
"github.com/rclone/rclone/fs/config/obscure"
|
||||||
"github.com/rclone/rclone/fs/fspath"
|
"github.com/rclone/rclone/fs/fspath"
|
||||||
"github.com/rclone/rclone/fs/hash"
|
"github.com/rclone/rclone/fs/hash"
|
||||||
"github.com/rclone/rclone/fs/list"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Globals
|
// Globals
|
||||||
@@ -49,7 +48,7 @@ func init() {
|
|||||||
Help: "Very simple filename obfuscation.",
|
Help: "Very simple filename obfuscation.",
|
||||||
}, {
|
}, {
|
||||||
Value: "off",
|
Value: "off",
|
||||||
Help: "Don't encrypt the file names.\nAdds a \".bin\", or \"suffix\" extension only.",
|
Help: "Don't encrypt the file names.\nAdds a \".bin\" extension only.",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
@@ -80,9 +79,7 @@ NB If filename_encryption is "off" then this option will do nothing.`,
|
|||||||
}, {
|
}, {
|
||||||
Name: "server_side_across_configs",
|
Name: "server_side_across_configs",
|
||||||
Default: false,
|
Default: false,
|
||||||
Help: `Deprecated: use --server-side-across-configs instead.
|
Help: `Allow server-side operations (e.g. copy) to work across different crypt configs.
|
||||||
|
|
||||||
Allow server-side operations (e.g. copy) to work across different crypt configs.
|
|
||||||
|
|
||||||
Normally this option is not what you want, but if you have two crypts
|
Normally this option is not what you want, but if you have two crypts
|
||||||
pointing to the same backend you can use it.
|
pointing to the same backend you can use it.
|
||||||
@@ -122,32 +119,13 @@ names, or for debugging purposes.`,
|
|||||||
Help: "Encrypt file data.",
|
Help: "Encrypt file data.",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
|
||||||
Name: "pass_bad_blocks",
|
|
||||||
Help: `If set this will pass bad blocks through as all 0.
|
|
||||||
|
|
||||||
This should not be set in normal operation, it should only be set if
|
|
||||||
trying to recover an encrypted file with errors and it is desired to
|
|
||||||
recover as much of the file as possible.`,
|
|
||||||
Default: false,
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
|
||||||
Name: "strict_names",
|
|
||||||
Help: `If set, this will raise an error when crypt comes across a filename that can't be decrypted.
|
|
||||||
|
|
||||||
(By default, rclone will just log a NOTICE and continue as normal.)
|
|
||||||
This can happen if encrypted and unencrypted files are stored in the same
|
|
||||||
directory (which is not recommended.) It may also indicate a more serious
|
|
||||||
problem that should be investigated.`,
|
|
||||||
Default: false,
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
}, {
|
||||||
Name: "filename_encoding",
|
Name: "filename_encoding",
|
||||||
Help: `How to encode the encrypted filename to text string.
|
Help: `How to encode the encrypted filename to text string.
|
||||||
|
|
||||||
This option could help with shortening the encrypted filename. The
|
This option could help with shortening the encrypted filename. The
|
||||||
suitable option would depend on the way your remote count the filename
|
suitable option would depend on the way your remote count the filename
|
||||||
length and if it's case sensitive.`,
|
length and if it's case sensitve.`,
|
||||||
Default: "base32",
|
Default: "base32",
|
||||||
Examples: []fs.OptionExample{
|
Examples: []fs.OptionExample{
|
||||||
{
|
{
|
||||||
@@ -160,18 +138,10 @@ length and if it's case sensitive.`,
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
Value: "base32768",
|
Value: "base32768",
|
||||||
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive, Dropbox)",
|
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive)",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
|
||||||
Name: "suffix",
|
|
||||||
Help: `If this is set it will override the default suffix of ".bin".
|
|
||||||
|
|
||||||
Setting suffix to "none" will result in an empty suffix. This may be useful
|
|
||||||
when the path length is critical.`,
|
|
||||||
Default: ".bin",
|
|
||||||
Advanced: true,
|
|
||||||
}},
|
}},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -204,8 +174,6 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to make cipher: %w", err)
|
return nil, fmt.Errorf("failed to make cipher: %w", err)
|
||||||
}
|
}
|
||||||
cipher.setEncryptedSuffix(opt.Suffix)
|
|
||||||
cipher.setPassBadBlocks(opt.PassBadBlocks)
|
|
||||||
return cipher, nil
|
return cipher, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -264,39 +232,23 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
|||||||
cipher: cipher,
|
cipher: cipher,
|
||||||
}
|
}
|
||||||
cache.PinUntilFinalized(f.Fs, f)
|
cache.PinUntilFinalized(f.Fs, f)
|
||||||
// Correct root if definitely pointing to a file
|
|
||||||
if err == fs.ErrorIsFile {
|
|
||||||
f.root = path.Dir(f.root)
|
|
||||||
if f.root == "." || f.root == "/" {
|
|
||||||
f.root = ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// the features here are ones we could support, and they are
|
// the features here are ones we could support, and they are
|
||||||
// ANDed with the ones from wrappedFs
|
// ANDed with the ones from wrappedFs
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
CaseInsensitive: !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff,
|
CaseInsensitive: cipher.NameEncryptionMode() == NameEncryptionOff,
|
||||||
DuplicateFiles: true,
|
DuplicateFiles: true,
|
||||||
ReadMimeType: false, // MimeTypes not supported with crypt
|
ReadMimeType: false, // MimeTypes not supported with crypt
|
||||||
WriteMimeType: false,
|
WriteMimeType: false,
|
||||||
BucketBased: true,
|
BucketBased: true,
|
||||||
CanHaveEmptyDirectories: true,
|
CanHaveEmptyDirectories: true,
|
||||||
SetTier: true,
|
SetTier: true,
|
||||||
GetTier: true,
|
GetTier: true,
|
||||||
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
|
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
|
||||||
ReadMetadata: true,
|
ReadMetadata: true,
|
||||||
WriteMetadata: true,
|
WriteMetadata: true,
|
||||||
UserMetadata: true,
|
UserMetadata: true,
|
||||||
ReadDirMetadata: true,
|
|
||||||
WriteDirMetadata: true,
|
|
||||||
WriteDirSetModTime: true,
|
|
||||||
UserDirMetadata: true,
|
|
||||||
DirModTimeUpdatesOnWrite: true,
|
|
||||||
PartialUploads: true,
|
|
||||||
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
||||||
|
|
||||||
// Enable ListP always
|
|
||||||
f.features.ListP = f.ListP
|
|
||||||
|
|
||||||
return f, err
|
return f, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -310,10 +262,7 @@ type Options struct {
|
|||||||
Password2 string `config:"password2"`
|
Password2 string `config:"password2"`
|
||||||
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
|
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
|
||||||
ShowMapping bool `config:"show_mapping"`
|
ShowMapping bool `config:"show_mapping"`
|
||||||
PassBadBlocks bool `config:"pass_bad_blocks"`
|
|
||||||
FilenameEncoding string `config:"filename_encoding"`
|
FilenameEncoding string `config:"filename_encoding"`
|
||||||
Suffix string `config:"suffix"`
|
|
||||||
StrictNames bool `config:"strict_names"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fs represents a wrapped fs.Fs
|
// Fs represents a wrapped fs.Fs
|
||||||
@@ -348,64 +297,45 @@ func (f *Fs) String() string {
 }
 
 // Encrypt an object file name to entries.
-func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) error {
+func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
 	remote := obj.Remote()
 	decryptedRemote, err := f.cipher.DecryptFileName(remote)
 	if err != nil {
-		if f.opt.StrictNames {
-			return fmt.Errorf("%s: undecryptable file name detected: %v", remote, err)
-		}
-		fs.Logf(remote, "Skipping undecryptable file name: %v", err)
-		return nil
+		fs.Debugf(remote, "Skipping undecryptable file name: %v", err)
+		return
 	}
 	if f.opt.ShowMapping {
 		fs.Logf(decryptedRemote, "Encrypts to %q", remote)
 	}
 	*entries = append(*entries, f.newObject(obj))
-	return nil
 }
 
 // Encrypt a directory file name to entries.
-func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) error {
+func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) {
 	remote := dir.Remote()
 	decryptedRemote, err := f.cipher.DecryptDirName(remote)
 	if err != nil {
-		if f.opt.StrictNames {
-			return fmt.Errorf("%s: undecryptable dir name detected: %v", remote, err)
-		}
-		fs.Logf(remote, "Skipping undecryptable dir name: %v", err)
-		return nil
+		fs.Debugf(remote, "Skipping undecryptable dir name: %v", err)
+		return
 	}
 	if f.opt.ShowMapping {
 		fs.Logf(decryptedRemote, "Encrypts to %q", remote)
 	}
 	*entries = append(*entries, f.newDir(ctx, dir))
-	return nil
 }
 
 // Encrypt some directory entries. This alters entries returning it as newEntries.
 func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
 	newEntries = entries[:0] // in place filter
-	errors := 0
-	var firsterr error
 	for _, entry := range entries {
 		switch x := entry.(type) {
 		case fs.Object:
-			err = f.add(&newEntries, x)
+			f.add(&newEntries, x)
 		case fs.Directory:
-			err = f.addDir(ctx, &newEntries, x)
+			f.addDir(ctx, &newEntries, x)
 		default:
 			return nil, fmt.Errorf("unknown object type %T", entry)
 		}
-		if err != nil {
-			errors++
-			if firsterr == nil {
-				firsterr = err
-			}
-		}
-	}
-	if firsterr != nil {
-		return nil, fmt.Errorf("there were %v undecryptable name errors. first error: %v", errors, firsterr)
 	}
 	return newEntries, nil
 }
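
Both sides of `encryptEntries` above filter the listing in place by reslicing the input to zero length and appending only the entries that survive, so no new backing array is allocated. A minimal, self-contained sketch of the same idiom, using a plain string slice and an illustrative `keep` predicate rather than rclone's types:

```go
package main

import "fmt"

// filterInPlace keeps only elements for which keep returns true, reusing the
// backing array of the input slice (the same trick as newEntries = entries[:0]).
func filterInPlace(entries []string, keep func(string) bool) []string {
	out := entries[:0]
	for _, e := range entries {
		if keep(e) {
			out = append(out, e)
		}
	}
	return out
}

func main() {
	names := []string{"good.bin", "???", "also-good.bin"}
	names = filterInPlace(names, func(s string) bool { return s != "???" })
	fmt.Println(names) // [good.bin also-good.bin]
}
```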
@@ -420,40 +350,11 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	return list.WithListP(ctx, dir, f)
-}
-
-// ListP lists the objects and directories of the Fs starting
-// from dir non recursively into out.
-//
-// dir should be "" to start from the root, and should not
-// have trailing slashes.
-//
-// This should return ErrDirNotFound if the directory isn't
-// found.
-//
-// It should call callback for each tranche of entries read.
-// These need not be returned in any particular order. If
-// callback returns an error then the listing will stop
-// immediately.
-func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
-	wrappedCallback := func(entries fs.DirEntries) error {
-		entries, err := f.encryptEntries(ctx, entries)
-		if err != nil {
-			return err
-		}
-		return callback(entries)
+	entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir))
+	if err != nil {
+		return nil, err
 	}
-	listP := f.Fs.Features().ListP
-	encryptedDir := f.cipher.EncryptDirName(dir)
-	if listP == nil {
-		entries, err := f.Fs.List(ctx, encryptedDir)
-		if err != nil {
-			return err
-		}
-		return wrappedCallback(entries)
-	}
-	return listP(ctx, encryptedDir, wrappedCallback)
+	return f.encryptEntries(ctx, entries)
 }
 
 // ListR lists the objects and directories of the Fs starting
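
The removed (`-`) side of this hunk delegates `List` to a paged `ListP`, which wraps the caller's callback so every tranche of entries goes through `encryptEntries` before being forwarded, and falls back to a single full listing when the wrapped backend has no paged lister. A minimal sketch of that wrapped-callback pattern under simplified assumptions, with plain string slices standing in for rclone's `fs.DirEntries` and `fs.ListRCallback`:

```go
package main

import "fmt"

// pageCallback receives one tranche of entries; returning an error stops the listing.
type pageCallback func(entries []string) error

// lister is the minimal surface being wrapped: an optional paged listing plus
// a full listing used as the fallback.
type lister struct {
	listP func(dir string, cb pageCallback) error // nil when paging is unsupported
	list  func(dir string) ([]string, error)
}

// listTransformed wraps the caller's callback so each tranche is transformed
// before it is forwarded, mirroring the ListP delegation in the hunk above.
func listTransformed(l lister, dir string, transform func([]string) []string, cb pageCallback) error {
	wrapped := func(entries []string) error {
		return cb(transform(entries))
	}
	if l.listP == nil {
		entries, err := l.list(dir)
		if err != nil {
			return err
		}
		return wrapped(entries)
	}
	return l.listP(dir, wrapped)
}

func main() {
	l := lister{list: func(dir string) ([]string, error) { return []string{"a", "b"}, nil }}
	_ = listTransformed(l, "",
		func(in []string) []string { return append(in, "decrypted") },
		func(entries []string) error { fmt.Println(entries); return nil })
}
```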
@@ -495,8 +396,6 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
 
 // put implements Put or PutStream
 func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
-	ci := fs.GetConfig(ctx)
-
 	if f.opt.NoDataEncryption {
 		o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
 		if err == nil && o != nil {
@@ -514,9 +413,6 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
 	// Find a hash the destination supports to compute a hash of
 	// the encrypted data
 	ht := f.Fs.Hashes().GetOne()
-	if ci.IgnoreChecksum {
-		ht = hash.None
-	}
 	var hasher *hash.MultiHasher
 	if ht != hash.None {
 		hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
@@ -553,7 +449,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
 		if err != nil {
 			fs.Errorf(o, "Failed to remove corrupted object: %v", err)
 		}
-		return nil, fmt.Errorf("corrupted on transfer: %v encrypted hashes differ src(%s) %q vs dst(%s) %q", ht, f.Fs, srcHash, o.Fs(), dstHash)
+		return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
 	}
 	fs.Debugf(src, "%v = %s OK", ht, srcHash)
 }
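
The hunks above verify an upload by hashing the encrypted data and comparing the result with the hash reported by the destination, removing the object when they disagree. A rough sketch of that shape, using hypothetical `upload` and `remoteHash` stand-ins rather than rclone's actual plumbing:

```go
package main

import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
)

// uploadVerified tees the encrypted stream through a hasher while it is
// uploaded, then compares the local hash with the one reported by the
// destination, in the spirit of the check in the hunk above.
func uploadVerified(encrypted io.Reader, upload func(io.Reader) error, remoteHash func() string) error {
	hasher := md5.New()
	if err := upload(io.TeeReader(encrypted, hasher)); err != nil {
		return err
	}
	srcHash := hex.EncodeToString(hasher.Sum(nil))
	if dstHash := remoteHash(); dstHash != "" && dstHash != srcHash {
		return fmt.Errorf("corrupted on transfer: md5 hashes differ src %q vs dst %q", srcHash, dstHash)
	}
	return nil
}

func main() {
	stored := ""
	err := uploadVerified(bytes.NewReader([]byte("ciphertext")),
		func(r io.Reader) error {
			b, _ := io.ReadAll(r)
			sum := md5.Sum(b)
			stored = hex.EncodeToString(sum[:])
			return nil
		},
		func() string { return stored })
	fmt.Println(err) // <nil>
}
```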
@@ -588,37 +484,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 	return f.Fs.Mkdir(ctx, f.cipher.EncryptDirName(dir))
 }
 
-// MkdirMetadata makes the root directory of the Fs object
-func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
-	do := f.Fs.Features().MkdirMetadata
-	if do == nil {
-		return nil, fs.ErrorNotImplemented
-	}
-	newDir, err := do(ctx, f.cipher.EncryptDirName(dir), metadata)
-	if err != nil {
-		return nil, err
-	}
-	var entries = make(fs.DirEntries, 0, 1)
-	err = f.addDir(ctx, &entries, newDir)
-	if err != nil {
-		return nil, err
-	}
-	newDir, ok := entries[0].(fs.Directory)
-	if !ok {
-		return nil, fmt.Errorf("internal error: expecting %T to be fs.Directory", entries[0])
-	}
-	return newDir, nil
-}
-
-// DirSetModTime sets the directory modtime for dir
-func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
-	do := f.Fs.Features().DirSetModTime
-	if do == nil {
-		return fs.ErrorNotImplemented
-	}
-	return do(ctx, f.cipher.EncryptDirName(dir), modTime)
-}
-
 // Rmdir removes the directory (container, bucket) if empty
 //
 // Return an error if it doesn't exist or isn't empty
@@ -642,9 +507,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 
 // Copy src to this remote using server-side copy operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -667,9 +532,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 
 // Move src to this remote using server-side move operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -860,7 +725,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
 	}
 	out := make([]fs.Directory, len(dirs))
 	for i, dir := range dirs {
-		out[i] = fs.NewDirWrapper(f.cipher.EncryptDirName(dir.Remote()), dir)
+		out[i] = fs.NewDirCopy(ctx, dir).SetRemote(f.cipher.EncryptDirName(dir.Remote()))
 	}
 	return do(ctx, out)
 }
@@ -923,30 +788,28 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
 var commandHelp = []fs.CommandHelp{
 	{
 		Name:  "encode",
-		Short: "Encode the given filename(s).",
+		Short: "Encode the given filename(s)",
 		Long: `This encodes the filenames given as arguments returning a list of
 strings of the encoded results.
 
-Usage examples:
+Usage Example:
 
-` + "```console" + `
 rclone backend encode crypt: file1 [file2...]
 rclone rc backend/command command=encode fs=crypt: file1 [file2...]
-` + "```",
+`,
 	},
 	{
 		Name:  "decode",
-		Short: "Decode the given filename(s).",
+		Short: "Decode the given filename(s)",
 		Long: `This decodes the filenames given as arguments returning a list of
 strings of the decoded results. It will return an error if any of the
 inputs are invalid.
 
-Usage examples:
+Usage Example:
 
-` + "```console" + `
 rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
 rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
-` + "```",
+`,
 	},
 }
 
@@ -959,7 +822,7 @@ rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
 	switch name {
 	case "decode":
 		out := make([]string, 0, len(arg))
@@ -1098,14 +961,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 
 // newDir returns a dir with the Name decrypted
 func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
+	newDir := fs.NewDirCopy(ctx, dir)
 	remote := dir.Remote()
 	decryptedRemote, err := f.cipher.DecryptDirName(remote)
 	if err != nil {
 		fs.Debugf(remote, "Undecryptable dir name: %v", err)
 	} else {
-		remote = decryptedRemote
+		newDir.SetRemote(decryptedRemote)
 	}
-	newDir := fs.NewDirWrapper(remote, dir)
 	return newDir
 }
 
@@ -1184,11 +1047,10 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
 	// Get the underlying object if there is one
 	if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
 		// Prefer direct interface assertion
-	} else if do, ok := o.ObjectInfo.(*fs.OverrideRemote); ok {
-		// Unwrap if it is an operations.OverrideRemote
+	} else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok {
+		// Otherwise likely is an operations.OverrideRemote
 		srcObj = do.UnWrap()
 	} else {
-		// Otherwise don't unwrap any further
 		return "", nil
 	}
 	// if this is wrapping a local object then we work out the hash
@@ -1283,17 +1145,6 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
 	return do.Metadata(ctx)
 }
 
-// SetMetadata sets metadata for an Object
-//
-// It should return fs.ErrorNotImplemented if it can't set metadata
-func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
-	do, ok := o.Object.(fs.SetMetadataer)
-	if !ok {
-		return fs.ErrorNotImplemented
-	}
-	return do.SetMetadata(ctx, metadata)
-}
-
 // MimeType returns the content type of the Object if
 // known, or "" if not
 //
@@ -1319,8 +1170,6 @@ var (
 	_ fs.Abouter         = (*Fs)(nil)
 	_ fs.Wrapper         = (*Fs)(nil)
 	_ fs.MergeDirser     = (*Fs)(nil)
-	_ fs.DirSetModTimer  = (*Fs)(nil)
-	_ fs.MkdirMetadataer = (*Fs)(nil)
 	_ fs.DirCacheFlusher = (*Fs)(nil)
 	_ fs.ChangeNotifier  = (*Fs)(nil)
 	_ fs.PublicLinker    = (*Fs)(nil)
@@ -17,28 +17,41 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
+type testWrapper struct {
+	fs.ObjectInfo
+}
+
+// UnWrap returns the Object that this Object is wrapping or nil if it
+// isn't wrapping anything
+func (o testWrapper) UnWrap() fs.Object {
+	if o, ok := o.ObjectInfo.(fs.Object); ok {
+		return o
+	}
+	return nil
+}
+
 // Create a temporary local fs to upload things from
 
-func makeTempLocalFs(t *testing.T) (localFs fs.Fs) {
+func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
 	localFs, err := fs.TemporaryLocalFs(context.Background())
 	require.NoError(t, err)
-	t.Cleanup(func() {
+	cleanup = func() {
 		require.NoError(t, localFs.Rmdir(context.Background(), ""))
-	})
-	return localFs
+	}
+	return localFs, cleanup
 }
 
 // Upload a file to a remote
-func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object) {
+func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object, cleanup func()) {
 	inBuf := bytes.NewBufferString(contents)
 	t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC)
 	upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil)
 	obj, err := f.Put(context.Background(), inBuf, upSrc)
 	require.NoError(t, err)
-	t.Cleanup(func() {
+	cleanup = func() {
 		require.NoError(t, obj.Remove(context.Background()))
-	})
-	return obj
+	}
+	return obj, cleanup
 }
 
 // Test the ObjectInfo
@@ -52,9 +65,11 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
 		path = "_wrap"
 	}
 
-	localFs := makeTempLocalFs(t)
+	localFs, cleanupLocalFs := makeTempLocalFs(t)
+	defer cleanupLocalFs()
 
-	obj := uploadFile(t, localFs, path, contents)
+	obj, cleanupObj := uploadFile(t, localFs, path, contents)
+	defer cleanupObj()
 
 	// encrypt the data
 	inBuf := bytes.NewBufferString(contents)
@@ -68,7 +83,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
 	var oi fs.ObjectInfo = obj
 	if wrap {
 		// wrap the object in an fs.ObjectUnwrapper if required
-		oi = fs.NewOverrideRemote(oi, "new_remote")
+		oi = testWrapper{oi}
 	}
 
 	// wrap the object in a crypt for upload using the nonce we
@@ -101,13 +116,16 @@ func testComputeHash(t *testing.T, f *Fs) {
 		t.Skipf("%v: does not support hashes", f.Fs)
 	}
 
-	localFs := makeTempLocalFs(t)
+	localFs, cleanupLocalFs := makeTempLocalFs(t)
+	defer cleanupLocalFs()
 
 	// Upload a file to localFs as a test object
-	localObj := uploadFile(t, localFs, path, contents)
+	localObj, cleanupLocalObj := uploadFile(t, localFs, path, contents)
+	defer cleanupLocalObj()
 
 	// Upload the same data to the remote Fs also
-	remoteObj := uploadFile(t, f, path, contents)
+	remoteObj, cleanupRemoteObj := uploadFile(t, f, path, contents)
+	defer cleanupRemoteObj()
 
 	// Calculate the expected Hash of the remote object
 	computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType)
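
In the test-helper hunk above, one side registers teardown with `t.Cleanup` while the other returns cleanup functions for the caller to `defer`. A small sketch of the `t.Cleanup` pattern on its own, using a temporary directory as a stand-in resource (the helper name is illustrative only):

```go
package example

import (
	"os"
	"testing"
)

// makeTempDir registers removal with t.Cleanup so callers do not need to
// defer a returned cleanup function themselves.
func makeTempDir(t *testing.T) string {
	t.Helper()
	dir, err := os.MkdirTemp("", "crypt-test-*")
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() { _ = os.RemoveAll(dir) })
	return dir
}
```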
@@ -24,7 +24,7 @@ func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName:                   *fstest.RemoteName,
 		NilObject:                    (*crypt.Object)(nil),
-		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
 	})
 }
@@ -45,7 +45,7 @@ func TestStandardBase32(t *testing.T) {
 			{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
 			{Name: name, Key: "filename_encryption", Value: "standard"},
 		},
-		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
 		QuickTestOK:                  true,
 	})
@@ -67,7 +67,7 @@ func TestStandardBase64(t *testing.T) {
 			{Name: name, Key: "filename_encryption", Value: "standard"},
 			{Name: name, Key: "filename_encoding", Value: "base64"},
 		},
-		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
 		QuickTestOK:                  true,
 	})
@@ -89,7 +89,7 @@ func TestStandardBase32768(t *testing.T) {
 			{Name: name, Key: "filename_encryption", Value: "standard"},
 			{Name: name, Key: "filename_encoding", Value: "base32768"},
 		},
-		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
 		QuickTestOK:                  true,
 	})
@@ -111,7 +111,7 @@ func TestOff(t *testing.T) {
 			{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
 			{Name: name, Key: "filename_encryption", Value: "off"},
 		},
-		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
 		QuickTestOK:                  true,
 	})
@@ -137,7 +137,7 @@ func TestObfuscate(t *testing.T) {
 			{Name: name, Key: "filename_encryption", Value: "obfuscate"},
 		},
 		SkipBadWindowsCharacters:     true,
-		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
 		QuickTestOK:                  true,
 	})
@@ -164,7 +164,7 @@ func TestNoDataObfuscate(t *testing.T) {
 			{Name: name, Key: "no_data_encryption", Value: "true"},
 		},
 		SkipBadWindowsCharacters:     true,
-		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
 		QuickTestOK:                  true,
 	})
@@ -25,7 +25,7 @@ func Pad(n int, buf []byte) []byte {
 	}
 	length := len(buf)
 	padding := n - (length % n)
-	for range padding {
+	for i := 0; i < padding; i++ {
 		buf = append(buf, byte(padding))
 	}
 	if (len(buf) % n) != 0 {
@@ -54,7 +54,7 @@ func Unpad(n int, buf []byte) ([]byte, error) {
 	if padding == 0 {
 		return nil, ErrorPaddingTooShort
 	}
-	for i := range padding {
+	for i := 0; i < padding; i++ {
 		if buf[length-1-i] != byte(padding) {
 			return nil, ErrorPaddingNotAllTheSame
 		}
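
The only change in the pkcs7 hunks above is the loop form: one side ranges directly over an integer, which is valid since Go 1.22, while the other keeps the classic three-clause loop. A self-contained version of the padding step using the integer-range form:

```go
package main

import "fmt"

// pad appends PKCS#7 style padding bytes; "for range padding" iterates
// padding times and requires Go 1.22 or later.
func pad(n int, buf []byte) []byte {
	padding := n - (len(buf) % n)
	for range padding {
		buf = append(buf, byte(padding))
	}
	return buf
}

func main() {
	fmt.Printf("% x\n", pad(8, []byte{0xde, 0xad, 0xbe, 0xef}))
}
```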
@@ -1,38 +0,0 @@
-// Type definitions specific to Dataverse
-
-package api
-
-// DataverseDatasetResponse is returned by the Dataverse dataset API
-type DataverseDatasetResponse struct {
-	Status string           `json:"status"`
-	Data   DataverseDataset `json:"data"`
-}
-
-// DataverseDataset is the representation of a dataset
-type DataverseDataset struct {
-	LatestVersion DataverseDatasetVersion `json:"latestVersion"`
-}
-
-// DataverseDatasetVersion is the representation of a dataset version
-type DataverseDatasetVersion struct {
-	LastUpdateTime string          `json:"lastUpdateTime"`
-	Files          []DataverseFile `json:"files"`
-}
-
-// DataverseFile is the representation of a file found in a dataset
-type DataverseFile struct {
-	DirectoryLabel string            `json:"directoryLabel"`
-	DataFile       DataverseDataFile `json:"dataFile"`
-}
-
-// DataverseDataFile represents file metadata details
-type DataverseDataFile struct {
-	ID                 int64  `json:"id"`
-	Filename           string `json:"filename"`
-	ContentType        string `json:"contentType"`
-	FileSize           int64  `json:"filesize"`
-	OriginalFileFormat string `json:"originalFileFormat"`
-	OriginalFileSize   int64  `json:"originalFileSize"`
-	OriginalFileName   string `json:"originalFileName"`
-	MD5                string `json:"md5"`
-}
@@ -1,33 +0,0 @@
-// Type definitions specific to InvenioRDM
-
-package api
-
-// InvenioRecordResponse is the representation of a record stored in InvenioRDM
-type InvenioRecordResponse struct {
-	Links InvenioRecordResponseLinks `json:"links"`
-}
-
-// InvenioRecordResponseLinks represents a record's links
-type InvenioRecordResponseLinks struct {
-	Self string `json:"self"`
-}
-
-// InvenioFilesResponse is the representation of a record's files
-type InvenioFilesResponse struct {
-	Entries []InvenioFilesResponseEntry `json:"entries"`
-}
-
-// InvenioFilesResponseEntry is the representation of a file entry
-type InvenioFilesResponseEntry struct {
-	Key      string                         `json:"key"`
-	Checksum string                         `json:"checksum"`
-	Size     int64                          `json:"size"`
-	Updated  string                         `json:"updated"`
-	MimeType string                         `json:"mimetype"`
-	Links    InvenioFilesResponseEntryLinks `json:"links"`
-}
-
-// InvenioFilesResponseEntryLinks represents file links details
-type InvenioFilesResponseEntryLinks struct {
-	Content string `json:"content"`
-}
@@ -1,26 +0,0 @@
-// Package api has general type definitions for doi
-package api
-
-// DoiResolverResponse is returned by the DOI resolver API
-//
-// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
-type DoiResolverResponse struct {
-	ResponseCode int                        `json:"responseCode"`
-	Handle       string                     `json:"handle"`
-	Values       []DoiResolverResponseValue `json:"values"`
-}
-
-// DoiResolverResponseValue is a single handle record value
-type DoiResolverResponseValue struct {
-	Index     int                          `json:"index"`
-	Type      string                       `json:"type"`
-	Data      DoiResolverResponseValueData `json:"data"`
-	TTL       int                          `json:"ttl"`
-	Timestamp string                       `json:"timestamp"`
-}
-
-// DoiResolverResponseValueData is the data held in a handle value
-type DoiResolverResponseValueData struct {
-	Format string `json:"format"`
-	Value  any    `json:"value"`
-}
@@ -1,112 +0,0 @@
|
|||||||
// Implementation for Dataverse
|
|
||||||
|
|
||||||
package doi
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"path"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/backend/doi/api"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/lib/rest"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Returns true if resolvedURL is likely a DOI hosted on a Dataverse intallation
|
|
||||||
func activateDataverse(resolvedURL *url.URL) (isActive bool) {
|
|
||||||
queryValues := resolvedURL.Query()
|
|
||||||
persistentID := queryValues.Get("persistentId")
|
|
||||||
return persistentID != ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// Resolve the main API endpoint for a DOI hosted on a Dataverse installation
|
|
||||||
func resolveDataverseEndpoint(resolvedURL *url.URL) (provider Provider, endpoint *url.URL, err error) {
|
|
||||||
queryValues := resolvedURL.Query()
|
|
||||||
persistentID := queryValues.Get("persistentId")
|
|
||||||
|
|
||||||
query := url.Values{}
|
|
||||||
query.Add("persistentId", persistentID)
|
|
||||||
endpointURL := resolvedURL.ResolveReference(&url.URL{Path: "/api/datasets/:persistentId/", RawQuery: query.Encode()})
|
|
||||||
|
|
||||||
return Dataverse, endpointURL, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// dataverseProvider implements the doiProvider interface for Dataverse installations
|
|
||||||
type dataverseProvider struct {
|
|
||||||
f *Fs
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListEntries returns the full list of entries found at the remote, regardless of root
|
|
||||||
func (dp *dataverseProvider) ListEntries(ctx context.Context) (entries []*Object, err error) {
|
|
||||||
// Use the cache if populated
|
|
||||||
cachedEntries, found := dp.f.cache.GetMaybe("files")
|
|
||||||
if found {
|
|
||||||
parsedEntries, ok := cachedEntries.([]Object)
|
|
||||||
if ok {
|
|
||||||
for _, entry := range parsedEntries {
|
|
||||||
newEntry := entry
|
|
||||||
entries = append(entries, &newEntry)
|
|
||||||
}
|
|
||||||
return entries, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
filesURL := dp.f.endpoint
|
|
||||||
var res *http.Response
|
|
||||||
var result api.DataverseDatasetResponse
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "GET",
|
|
||||||
Path: strings.TrimLeft(filesURL.EscapedPath(), "/"),
|
|
||||||
Parameters: filesURL.Query(),
|
|
||||||
}
|
|
||||||
err = dp.f.pacer.Call(func() (bool, error) {
|
|
||||||
res, err = dp.f.srv.CallJSON(ctx, &opts, nil, &result)
|
|
||||||
return shouldRetry(ctx, res, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("readDir failed: %w", err)
|
|
||||||
}
|
|
||||||
modTime, modTimeErr := time.Parse(time.RFC3339, result.Data.LatestVersion.LastUpdateTime)
|
|
||||||
if modTimeErr != nil {
|
|
||||||
fs.Logf(dp.f, "error: could not parse last update time %v", modTimeErr)
|
|
||||||
modTime = timeUnset
|
|
||||||
}
|
|
||||||
for _, file := range result.Data.LatestVersion.Files {
|
|
||||||
contentURLPath := fmt.Sprintf("/api/access/datafile/%d", file.DataFile.ID)
|
|
||||||
query := url.Values{}
|
|
||||||
query.Add("format", "original")
|
|
||||||
contentURL := dp.f.endpoint.ResolveReference(&url.URL{Path: contentURLPath, RawQuery: query.Encode()})
|
|
||||||
entry := &Object{
|
|
||||||
fs: dp.f,
|
|
||||||
remote: path.Join(file.DirectoryLabel, file.DataFile.Filename),
|
|
||||||
contentURL: contentURL.String(),
|
|
||||||
size: file.DataFile.FileSize,
|
|
||||||
modTime: modTime,
|
|
||||||
md5: file.DataFile.MD5,
|
|
||||||
contentType: file.DataFile.ContentType,
|
|
||||||
}
|
|
||||||
if file.DataFile.OriginalFileName != "" {
|
|
||||||
entry.remote = path.Join(file.DirectoryLabel, file.DataFile.OriginalFileName)
|
|
||||||
entry.size = file.DataFile.OriginalFileSize
|
|
||||||
entry.contentType = file.DataFile.OriginalFileFormat
|
|
||||||
}
|
|
||||||
entries = append(entries, entry)
|
|
||||||
}
|
|
||||||
// Populate the cache
|
|
||||||
cacheEntries := []Object{}
|
|
||||||
for _, entry := range entries {
|
|
||||||
cacheEntries = append(cacheEntries, *entry)
|
|
||||||
}
|
|
||||||
dp.f.cache.Put("files", cacheEntries)
|
|
||||||
return entries, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newDataverseProvider(f *Fs) doiProvider {
|
|
||||||
return &dataverseProvider{
|
|
||||||
f: f,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,653 +0,0 @@
|
|||||||
// Package doi provides a filesystem interface for digital objects identified by DOIs.
|
|
||||||
//
|
|
||||||
// See: https://www.doi.org/the-identifier/what-is-a-doi/
|
|
||||||
package doi
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"path"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/backend/doi/api"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
|
||||||
"github.com/rclone/rclone/fs/config/configstruct"
|
|
||||||
"github.com/rclone/rclone/fs/fserrors"
|
|
||||||
"github.com/rclone/rclone/fs/fshttp"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
|
||||||
"github.com/rclone/rclone/lib/cache"
|
|
||||||
"github.com/rclone/rclone/lib/pacer"
|
|
||||||
"github.com/rclone/rclone/lib/rest"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// the URL of the DOI resolver
|
|
||||||
//
|
|
||||||
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
|
|
||||||
doiResolverAPIURL = "https://doi.org/api"
|
|
||||||
minSleep = 10 * time.Millisecond
|
|
||||||
maxSleep = 2 * time.Second
|
|
||||||
decayConstant = 2 // bigger for slower decay, exponential
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
errorReadOnly = errors.New("doi remotes are read only")
|
|
||||||
timeUnset = time.Unix(0, 0)
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
fsi := &fs.RegInfo{
|
|
||||||
Name: "doi",
|
|
||||||
Description: "DOI datasets",
|
|
||||||
NewFs: NewFs,
|
|
||||||
CommandHelp: commandHelp,
|
|
||||||
Options: []fs.Option{{
|
|
||||||
Name: "doi",
|
|
||||||
Help: "The DOI or the doi.org URL.",
|
|
||||||
Required: true,
|
|
||||||
}, {
|
|
||||||
Name: fs.ConfigProvider,
|
|
||||||
Help: `DOI provider.
|
|
||||||
|
|
||||||
The DOI provider can be set when rclone does not automatically recognize a supported DOI provider.`,
|
|
||||||
Examples: []fs.OptionExample{
|
|
||||||
{
|
|
||||||
Value: "auto",
|
|
||||||
Help: "Auto-detect provider",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Value: string(Zenodo),
|
|
||||||
Help: "Zenodo",
|
|
||||||
}, {
|
|
||||||
Value: string(Dataverse),
|
|
||||||
Help: "Dataverse",
|
|
||||||
}, {
|
|
||||||
Value: string(Invenio),
|
|
||||||
Help: "Invenio",
|
|
||||||
}},
|
|
||||||
Required: false,
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
|
||||||
Name: "doi_resolver_api_url",
|
|
||||||
Help: `The URL of the DOI resolver API to use.
|
|
||||||
|
|
||||||
The DOI resolver can be set for testing or for cases when the the canonical DOI resolver API cannot be used.
|
|
||||||
|
|
||||||
Defaults to "https://doi.org/api".`,
|
|
||||||
Required: false,
|
|
||||||
Advanced: true,
|
|
||||||
}},
|
|
||||||
}
|
|
||||||
fs.Register(fsi)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Provider defines the type of provider hosting the DOI
|
|
||||||
type Provider string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// Zenodo provider, see https://zenodo.org
|
|
||||||
Zenodo Provider = "zenodo"
|
|
||||||
// Dataverse provider, see https://dataverse.harvard.edu
|
|
||||||
Dataverse Provider = "dataverse"
|
|
||||||
// Invenio provider, see https://inveniordm.docs.cern.ch
|
|
||||||
Invenio Provider = "invenio"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Options defines the configuration for this backend
|
|
||||||
type Options struct {
|
|
||||||
Doi string `config:"doi"` // The DOI, a digital identifier of an object, usually a dataset
|
|
||||||
Provider string `config:"provider"` // The DOI provider
|
|
||||||
DoiResolverAPIURL string `config:"doi_resolver_api_url"` // The URL of the DOI resolver API to use.
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fs stores the interface to the remote HTTP files
|
|
||||||
type Fs struct {
|
|
||||||
name string // name of this remote
|
|
||||||
root string // the path we are working on
|
|
||||||
provider Provider // the DOI provider
|
|
||||||
doiProvider doiProvider // the interface used to interact with the DOI provider
|
|
||||||
features *fs.Features // optional features
|
|
||||||
opt Options // options for this backend
|
|
||||||
ci *fs.ConfigInfo // global config
|
|
||||||
endpoint *url.URL // the main API endpoint for this remote
|
|
||||||
endpointURL string // endpoint as a string
|
|
||||||
srv *rest.Client // the connection to the server
|
|
||||||
pacer *fs.Pacer // pacer for API calls
|
|
||||||
cache *cache.Cache // a cache for the remote metadata
|
|
||||||
}
|
|
||||||
|
|
||||||
// Object is a remote object that has been stat'd (so it exists, but is not necessarily open for reading)
|
|
||||||
type Object struct {
|
|
||||||
fs *Fs // what this object is part of
|
|
||||||
remote string // the remote path
|
|
||||||
contentURL string // the URL where the contents of the file can be downloaded
|
|
||||||
size int64 // size of the object
|
|
||||||
modTime time.Time // modification time of the object
|
|
||||||
contentType string // content type of the object
|
|
||||||
md5 string // MD5 hash of the object content
|
|
||||||
}
|
|
||||||
|
|
||||||
// doiProvider is the interface used to list objects in a DOI
|
|
||||||
type doiProvider interface {
|
|
||||||
// ListEntries returns the full list of entries found at the remote, regardless of root
|
|
||||||
ListEntries(ctx context.Context) (entries []*Object, err error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse the input string as a DOI
|
|
||||||
// Examples:
|
|
||||||
// 10.1000/182 -> 10.1000/182
|
|
||||||
// https://doi.org/10.1000/182 -> 10.1000/182
|
|
||||||
// doi:10.1000/182 -> 10.1000/182
|
|
||||||
func parseDoi(doi string) string {
|
|
||||||
doiURL, err := url.Parse(doi)
|
|
||||||
if err != nil {
|
|
||||||
return doi
|
|
||||||
}
|
|
||||||
if doiURL.Scheme == "doi" {
|
|
||||||
return strings.TrimLeft(strings.TrimPrefix(doi, "doi:"), "/")
|
|
||||||
}
|
|
||||||
if strings.HasSuffix(doiURL.Hostname(), "doi.org") {
|
|
||||||
return strings.TrimLeft(doiURL.Path, "/")
|
|
||||||
}
|
|
||||||
return doi
|
|
||||||
}
|
|
||||||
|
|
||||||
// Resolve a DOI to a URL
|
|
||||||
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
|
|
||||||
func resolveDoiURL(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (doiURL *url.URL, err error) {
|
|
||||||
resolverURL := opt.DoiResolverAPIURL
|
|
||||||
if resolverURL == "" {
|
|
||||||
resolverURL = doiResolverAPIURL
|
|
||||||
}
|
|
||||||
|
|
||||||
var result api.DoiResolverResponse
|
|
||||||
params := url.Values{}
|
|
||||||
params.Add("index", "1")
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "GET",
|
|
||||||
RootURL: resolverURL,
|
|
||||||
Path: "/handles/" + opt.Doi,
|
|
||||||
Parameters: params,
|
|
||||||
}
|
|
||||||
err = pacer.Call(func() (bool, error) {
|
|
||||||
res, err := srv.CallJSON(ctx, &opts, nil, &result)
|
|
||||||
return shouldRetry(ctx, res, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if result.ResponseCode != 1 {
|
|
||||||
return nil, fmt.Errorf("could not resolve DOI (error code %d)", result.ResponseCode)
|
|
||||||
}
|
|
||||||
resolvedURLStr := ""
|
|
||||||
for _, value := range result.Values {
|
|
||||||
if value.Type == "URL" && value.Data.Format == "string" {
|
|
||||||
valueStr, ok := value.Data.Value.(string)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("could not resolve DOI (incorrect response format)")
|
|
||||||
}
|
|
||||||
resolvedURLStr = valueStr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
resolvedURL, err := url.Parse(resolvedURLStr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return resolvedURL, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Resolve the passed configuration into a provider and enpoint
|
|
||||||
func resolveEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (provider Provider, endpoint *url.URL, err error) {
|
|
||||||
resolvedURL, err := resolveDoiURL(ctx, srv, pacer, opt)
|
|
||||||
if err != nil {
|
|
||||||
return "", nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
switch opt.Provider {
|
|
||||||
case string(Dataverse):
|
|
||||||
return resolveDataverseEndpoint(resolvedURL)
|
|
||||||
case string(Invenio):
|
|
||||||
return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
|
|
||||||
case string(Zenodo):
|
|
||||||
return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
|
|
||||||
}
|
|
||||||
|
|
||||||
hostname := strings.ToLower(resolvedURL.Hostname())
|
|
||||||
if hostname == "dataverse.harvard.edu" || activateDataverse(resolvedURL) {
|
|
||||||
return resolveDataverseEndpoint(resolvedURL)
|
|
||||||
}
|
|
||||||
if hostname == "zenodo.org" || strings.HasSuffix(hostname, ".zenodo.org") {
|
|
||||||
return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
|
|
||||||
}
|
|
||||||
if activateInvenio(ctx, srv, pacer, resolvedURL) {
|
|
||||||
return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", nil, fmt.Errorf("provider '%s' is not supported", resolvedURL.Hostname())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make the http connection from the passed options
|
|
||||||
func (f *Fs) httpConnection(ctx context.Context, opt *Options) (isFile bool, err error) {
|
|
||||||
provider, endpoint, err := resolveEndpoint(ctx, f.srv, f.pacer, opt)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update f with the new parameters
|
|
||||||
f.srv.SetRoot(endpoint.ResolveReference(&url.URL{Path: "/"}).String())
|
|
||||||
f.endpoint = endpoint
|
|
||||||
f.endpointURL = endpoint.String()
|
|
||||||
f.provider = provider
|
|
||||||
f.opt.Provider = string(provider)
|
|
||||||
|
|
||||||
switch f.provider {
|
|
||||||
case Dataverse:
|
|
||||||
f.doiProvider = newDataverseProvider(f)
|
|
||||||
case Invenio, Zenodo:
|
|
||||||
f.doiProvider = newInvenioProvider(f)
|
|
||||||
default:
|
|
||||||
return false, fmt.Errorf("provider type '%s' not supported", f.provider)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Determine if the root is a file
|
|
||||||
entries, err := f.doiProvider.ListEntries(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
for _, entry := range entries {
|
|
||||||
if entry.remote == f.root {
|
|
||||||
isFile = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return isFile, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// retryErrorCodes is a slice of error codes that we will retry
|
|
||||||
var retryErrorCodes = []int{
|
|
||||||
429, // Too Many Requests.
|
|
||||||
500, // Internal Server Error
|
|
||||||
502, // Bad Gateway
|
|
||||||
503, // Service Unavailable
|
|
||||||
504, // Gateway Timeout
|
|
||||||
509, // Bandwidth Limit Exceeded
|
|
||||||
}
|
|
||||||
|
|
||||||
// shouldRetry returns a boolean as to whether this res and err
|
|
||||||
// deserve to be retried. It returns the err as a convenience.
|
|
||||||
func shouldRetry(ctx context.Context, res *http.Response, err error) (bool, error) {
|
|
||||||
if fserrors.ContextError(ctx, &err) {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(res, retryErrorCodes), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewFs creates a new Fs object from the name and root. It connects to
|
|
||||||
// the host specified in the config file.
|
|
||||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|
||||||
root = strings.Trim(root, "/")
|
|
||||||
|
|
||||||
// Parse config into Options struct
|
|
||||||
opt := new(Options)
|
|
||||||
err := configstruct.Set(m, opt)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
opt.Doi = parseDoi(opt.Doi)
|
|
||||||
|
|
||||||
client := fshttp.NewClient(ctx)
|
|
||||||
ci := fs.GetConfig(ctx)
|
|
||||||
f := &Fs{
|
|
||||||
name: name,
|
|
||||||
root: root,
|
|
||||||
opt: *opt,
|
|
||||||
ci: ci,
|
|
||||||
srv: rest.NewClient(client),
|
|
||||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
|
||||||
cache: cache.New(),
|
|
||||||
}
|
|
||||||
f.features = (&fs.Features{
|
|
||||||
CanHaveEmptyDirectories: true,
|
|
||||||
}).Fill(ctx, f)
|
|
||||||
|
|
||||||
isFile, err := f.httpConnection(ctx, opt)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if isFile {
|
|
||||||
// return an error with an fs which points to the parent
|
|
||||||
newRoot := path.Dir(f.root)
|
|
||||||
if newRoot == "." {
|
|
||||||
newRoot = ""
|
|
||||||
}
|
|
||||||
f.root = newRoot
|
|
||||||
return f, fs.ErrorIsFile
|
|
||||||
}
|
|
||||||
|
|
||||||
return f, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name returns the configured name of the file system
|
|
||||||
func (f *Fs) Name() string {
|
|
||||||
return f.name
|
|
||||||
}
|
|
||||||
|
|
||||||
// Root returns the root for the filesystem
|
|
||||||
func (f *Fs) Root() string {
|
|
||||||
return f.root
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the URL for the filesystem
|
|
||||||
func (f *Fs) String() string {
|
|
||||||
return fmt.Sprintf("DOI %s", f.opt.Doi)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Features returns the optional features of this Fs
|
|
||||||
func (f *Fs) Features() *fs.Features {
|
|
||||||
return f.features
|
|
||||||
}
|
|
||||||
|
|
||||||
// Precision is the remote http file system's modtime precision, which we have no way of knowing. We estimate at 1s
|
|
||||||
func (f *Fs) Precision() time.Duration {
|
|
||||||
return time.Second
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hashes returns hash.HashNone to indicate remote hashing is unavailable
|
|
||||||
func (f *Fs) Hashes() hash.Set {
|
|
||||||
return hash.Set(hash.MD5)
|
|
||||||
// return hash.Set(hash.None)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mkdir makes the root directory of the Fs object
|
|
||||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
|
||||||
return errorReadOnly
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove a remote http file object
|
|
||||||
func (o *Object) Remove(ctx context.Context) error {
|
|
||||||
return errorReadOnly
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rmdir removes the root directory of the Fs object
|
|
||||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
|
||||||
return errorReadOnly
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewObject creates a new remote http file object
|
|
||||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
|
||||||
entries, err := f.doiProvider.ListEntries(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
remoteFullPath := remote
|
|
||||||
if f.root != "" {
|
|
||||||
remoteFullPath = path.Join(f.root, remote)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, entry := range entries {
|
|
||||||
if entry.Remote() == remoteFullPath {
|
|
||||||
return entry, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, fs.ErrorObjectNotFound
|
|
||||||
}
|
|
||||||
|
|
||||||
// List the objects and directories in dir into entries. The
|
|
||||||
// entries can be returned in any order but should be for a
|
|
||||||
// complete directory.
|
|
||||||
//
|
|
||||||
// dir should be "" to list the root, and should not have
|
|
||||||
// trailing slashes.
|
|
||||||
//
|
|
||||||
// This should return ErrDirNotFound if the directory isn't
|
|
||||||
// found.
|
|
||||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
|
||||||
fileEntries, err := f.doiProvider.ListEntries(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("error listing %q: %w", dir, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
fullDir := path.Join(f.root, dir)
|
|
||||||
if fullDir != "" {
|
|
||||||
fullDir += "/"
|
|
||||||
}
|
|
||||||
|
|
||||||
dirPaths := map[string]bool{}
|
|
||||||
for _, entry := range fileEntries {
|
|
||||||
// First, filter out files not in `fullDir`
|
|
||||||
if !strings.HasPrefix(entry.remote, fullDir) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Then, find entries in subfolers
|
|
||||||
remotePath := entry.remote
|
|
||||||
if fullDir != "" {
|
|
||||||
remotePath = strings.TrimLeft(strings.TrimPrefix(remotePath, fullDir), "/")
|
|
||||||
}
|
|
||||||
parts := strings.SplitN(remotePath, "/", 2)
|
|
||||||
if len(parts) == 1 {
|
|
||||||
newEntry := *entry
|
|
||||||
newEntry.remote = path.Join(dir, remotePath)
|
|
||||||
entries = append(entries, &newEntry)
|
|
||||||
} else {
|
|
||||||
dirPaths[path.Join(dir, parts[0])] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for dirPath := range dirPaths {
|
|
||||||
entry := fs.NewDir(dirPath, time.Time{})
|
|
||||||
entries = append(entries, entry)
|
|
||||||
}
|
|
||||||
|
|
||||||
return entries, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Put in to the remote path with the modTime given of the given size
|
|
||||||
//
|
|
||||||
// May create the object even if it returns an error - if so
|
|
||||||
// will return the object and the error, otherwise will return
|
|
||||||
// nil and the error
|
|
||||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
|
||||||
return nil, errorReadOnly
|
|
||||||
}
|
|
||||||
|
|
||||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
|
||||||
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
|
||||||
return nil, errorReadOnly
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fs is the filesystem this remote http file object is located within
|
|
||||||
func (o *Object) Fs() fs.Info {
|
|
||||||
return o.fs
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the URL to the remote HTTP file
|
|
||||||
func (o *Object) String() string {
|
|
||||||
if o == nil {
|
|
||||||
return "<nil>"
|
|
||||||
}
|
|
||||||
return o.remote
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remote the name of the remote HTTP file, relative to the fs root
|
|
||||||
func (o *Object) Remote() string {
|
|
||||||
return o.remote
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hash returns "" since HTTP (in Go or OpenSSH) doesn't support remote calculation of hashes
|
|
||||||
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
|
||||||
if t != hash.MD5 {
|
|
||||||
return "", hash.ErrUnsupported
|
|
||||||
}
|
|
||||||
return o.md5, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Size returns the size in bytes of the remote http file
|
|
||||||
func (o *Object) Size() int64 {
|
|
||||||
return o.size
|
|
||||||
}
|
|
||||||
|
|
||||||
// ModTime returns the modification time of the remote http file
|
|
||||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
|
||||||
return o.modTime
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetModTime sets the modification and access time to the specified time
|
|
||||||
//
|
|
||||||
// it also updates the info field
|
|
||||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
|
||||||
return errorReadOnly
|
|
||||||
}
|
|
||||||
|
|
||||||
// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
|
|
||||||
func (o *Object) Storable() bool {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open a remote http file object for reading. Seek is supported
|
|
||||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
|
||||||
fs.FixRangeOption(options, o.size)
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "GET",
|
|
||||||
RootURL: o.contentURL,
|
|
||||||
Options: options,
|
|
||||||
}
|
|
||||||
var res *http.Response
|
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
|
||||||
res, err = o.fs.srv.Call(ctx, &opts)
|
|
||||||
return shouldRetry(ctx, res, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Open failed: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle non-compliant redirects
	if res.Header.Get("Location") != "" {
		newURL, err := res.Location()
		if err == nil {
			opts.RootURL = newURL.String()
			err = o.fs.pacer.Call(func() (bool, error) {
				res, err = o.fs.srv.Call(ctx, &opts)
				return shouldRetry(ctx, res, err)
			})
			if err != nil {
				return nil, fmt.Errorf("Open failed: %w", err)
			}
		}
	}

	return res.Body, nil
}

// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	return errorReadOnly
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
	return o.contentType
}

var commandHelp = []fs.CommandHelp{{
	Name:  "metadata",
	Short: "Show metadata about the DOI.",
	Long: `This command returns a JSON object with some information about the DOI.

Usage example:

` + "```console" + `
rclone backend metadata doi:
` + "```" + `

It returns a JSON object representing metadata about the DOI.`,
}, {
	Name:  "set",
	Short: "Set command for updating the config parameters.",
	Long: `This set command can be used to update the config parameters
for a running doi backend.

Usage examples:

` + "```console" + `
rclone backend set doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=doi: -o doi=NEW_DOI
` + "```" + `

The option keys are named as they are in the config file.

This rebuilds the connection to the doi backend when it is called with
the new parameters. Only new parameters need be passed as the values
will default to those currently in use.

It doesn't return anything.`,
}}

// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
	switch name {
	case "metadata":
		return f.ShowMetadata(ctx)
	case "set":
		newOpt := f.opt
		err := configstruct.Set(configmap.Simple(opt), &newOpt)
		if err != nil {
			return nil, fmt.Errorf("reading config: %w", err)
		}
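		// Rebuild the HTTP connection with the updated options before committing them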
		_, err = f.httpConnection(ctx, &newOpt)
		if err != nil {
			return nil, fmt.Errorf("updating session: %w", err)
		}
		f.opt = newOpt
		keys := []string{}
		for k := range opt {
			keys = append(keys, k)
		}
		fs.Logf(f, "Updated config values: %s", strings.Join(keys, ", "))
		return nil, nil
	default:
		return nil, fs.ErrorCommandNotFound
	}
}

// ShowMetadata returns some metadata about the corresponding DOI
func (f *Fs) ShowMetadata(ctx context.Context) (metadata any, err error) {
	doiURL, err := url.Parse("https://doi.org/" + f.opt.Doi)
	if err != nil {
		return nil, err
	}

	info := map[string]any{}
	info["DOI"] = f.opt.Doi
	info["URL"] = doiURL.String()
	info["metadataURL"] = f.endpointURL
	info["provider"] = f.provider
	return info, nil
}

// Check the interfaces are satisfied
var (
	_ fs.Fs          = (*Fs)(nil)
	_ fs.PutStreamer = (*Fs)(nil)
	_ fs.Commander   = (*Fs)(nil)
	_ fs.Object      = (*Object)(nil)
	_ fs.MimeTyper   = (*Object)(nil)
)
@@ -1,260 +0,0 @@
package doi

import (
	"context"
	"crypto/md5"
	"encoding/hex"
	"encoding/json"
	"io"
	"net/http"
	"net/http/httptest"
	"net/url"
	"sort"
	"strings"
	"testing"
	"time"

	"github.com/rclone/rclone/backend/doi/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/hash"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

var remoteName = "TestDoi"

func TestParseDoi(t *testing.T) {
	// 10.1000/182 -> 10.1000/182
	doi := "10.1000/182"
	parsed := parseDoi(doi)
	assert.Equal(t, "10.1000/182", parsed)

	// https://doi.org/10.1000/182 -> 10.1000/182
	doi = "https://doi.org/10.1000/182"
	parsed = parseDoi(doi)
	assert.Equal(t, "10.1000/182", parsed)

	// https://dx.doi.org/10.1000/182 -> 10.1000/182
doi = "https://dxdoi.org/10.1000/182"
|
|
||||||
	parsed = parseDoi(doi)
	assert.Equal(t, "10.1000/182", parsed)

	// doi:10.1000/182 -> 10.1000/182
	doi = "doi:10.1000/182"
	parsed = parseDoi(doi)
	assert.Equal(t, "10.1000/182", parsed)

	// doi://10.1000/182 -> 10.1000/182
	doi = "doi://10.1000/182"
	parsed = parseDoi(doi)
	assert.Equal(t, "10.1000/182", parsed)
}

// prepareMockDoiResolverServer prepares a test server to resolve DOIs
func prepareMockDoiResolverServer(t *testing.T, resolvedURL string) (doiResolverAPIURL string) {
	mux := http.NewServeMux()

	// Handle requests for resolving DOIs
	mux.HandleFunc("GET /api/handles/{handle...}", func(w http.ResponseWriter, r *http.Request) {
		// Check that we are resolving a DOI
		handle := strings.TrimPrefix(r.URL.Path, "/api/handles/")
		assert.NotEmpty(t, handle)
		index := r.URL.Query().Get("index")
		assert.Equal(t, "1", index)

		// Return the most basic response
		result := api.DoiResolverResponse{
			ResponseCode: 1,
			Handle:       handle,
			Values: []api.DoiResolverResponseValue{
				{
					Index: 1,
					Type:  "URL",
					Data: api.DoiResolverResponseValueData{
						Format: "string",
						Value:  resolvedURL,
					},
				},
			},
		}
		resultBytes, err := json.Marshal(result)
		require.NoError(t, err)
		w.Header().Add("Content-Type", "application/json")
		_, err = w.Write(resultBytes)
		require.NoError(t, err)
	})

	// Make the test server
	ts := httptest.NewServer(mux)

	// Close the server at the end of the test
	t.Cleanup(ts.Close)

	return ts.URL + "/api"
}
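
// md5Sum returns the hex-encoded MD5 digest of text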
func md5Sum(text string) string {
	hash := md5.Sum([]byte(text))
	return hex.EncodeToString(hash[:])
}

// prepareMockZenodoServer prepares a test server that mocks Zenodo.org
func prepareMockZenodoServer(t *testing.T, files map[string]string) *httptest.Server {
	mux := http.NewServeMux()

	// Handle requests for a single record
	mux.HandleFunc("GET /api/records/{recordID...}", func(w http.ResponseWriter, r *http.Request) {
		// Check that we are returning data about a single record
		recordID := strings.TrimPrefix(r.URL.Path, "/api/records/")
		assert.NotEmpty(t, recordID)

		// Return the most basic response
		selfURL, err := url.Parse("http://" + r.Host)
		require.NoError(t, err)
		selfURL = selfURL.JoinPath(r.URL.String())
		result := api.InvenioRecordResponse{
			Links: api.InvenioRecordResponseLinks{
				Self: selfURL.String(),
			},
		}
		resultBytes, err := json.Marshal(result)
		require.NoError(t, err)
		w.Header().Add("Content-Type", "application/json")
		_, err = w.Write(resultBytes)
		require.NoError(t, err)
	})
	// Handle requests for listing files in a record
	mux.HandleFunc("GET /api/records/{record}/files", func(w http.ResponseWriter, r *http.Request) {
		// Return the most basic response
		filesBaseURL, err := url.Parse("http://" + r.Host)
		require.NoError(t, err)
		filesBaseURL = filesBaseURL.JoinPath("/api/files/")

		entries := []api.InvenioFilesResponseEntry{}
		for filename, contents := range files {
			entries = append(entries,
				api.InvenioFilesResponseEntry{
					Key:      filename,
					Checksum: md5Sum(contents),
					Size:     int64(len(contents)),
					Updated:  time.Now().UTC().Format(time.RFC3339),
					MimeType: "text/plain; charset=utf-8",
					Links: api.InvenioFilesResponseEntryLinks{
						Content: filesBaseURL.JoinPath(filename).String(),
					},
				},
			)
		}

		result := api.InvenioFilesResponse{
			Entries: entries,
		}
		resultBytes, err := json.Marshal(result)
		require.NoError(t, err)
		w.Header().Add("Content-Type", "application/json")
		_, err = w.Write(resultBytes)
		require.NoError(t, err)
	})
	// Handle requests for file contents
	mux.HandleFunc("/api/files/{file}", func(w http.ResponseWriter, r *http.Request) {
		// Check that we are returning the contents of a file
		filename := strings.TrimPrefix(r.URL.Path, "/api/files/")
		assert.NotEmpty(t, filename)
		contents, found := files[filename]
		if !found {
			w.WriteHeader(404)
			return
		}

		// Return the most basic response
		_, err := w.Write([]byte(contents))
		require.NoError(t, err)
	})

	// Make the test server
	ts := httptest.NewServer(mux)

	// Close the server at the end of the test
	t.Cleanup(ts.Close)

	return ts
}

func TestZenodoRemote(t *testing.T) {
	recordID := "2600782"
	doi := "10.5281/zenodo.2600782"

	// The files in the dataset
	files := map[string]string{
		"README.md": "This is a dataset.",
		"data.txt":  "Some data",
	}

	ts := prepareMockZenodoServer(t, files)
	resolvedURL := ts.URL + "/record/" + recordID

	doiResolverAPIURL := prepareMockDoiResolverServer(t, resolvedURL)

	testConfig := configmap.Simple{
		"type":                 "doi",
		"doi":                  doi,
		"provider":             "zenodo",
		"doi_resolver_api_url": doiResolverAPIURL,
	}
	f, err := NewFs(context.Background(), remoteName, "", testConfig)
	require.NoError(t, err)

	// Test listing the DOI files
	entries, err := f.List(context.Background(), "")
	require.NoError(t, err)

	sort.Sort(entries)

	require.Equal(t, len(files), len(entries))

	e := entries[0]
	assert.Equal(t, "README.md", e.Remote())
	assert.Equal(t, int64(18), e.Size())
	_, ok := e.(*Object)
	assert.True(t, ok)

	e = entries[1]
	assert.Equal(t, "data.txt", e.Remote())
	assert.Equal(t, int64(9), e.Size())
	_, ok = e.(*Object)
	assert.True(t, ok)

	// Test reading the DOI files
	o, err := f.NewObject(context.Background(), "README.md")
	require.NoError(t, err)
	assert.Equal(t, int64(18), o.Size())
	md5Hash, err := o.Hash(context.Background(), hash.MD5)
	require.NoError(t, err)
	assert.Equal(t, "464352b1cab5240e44528a56fda33d9d", md5Hash)
	fd, err := o.Open(context.Background())
	require.NoError(t, err)
	data, err := io.ReadAll(fd)
	require.NoError(t, err)
	require.NoError(t, fd.Close())
	assert.Equal(t, []byte(files["README.md"]), data)
	do, ok := o.(fs.MimeTyper)
	require.True(t, ok)
	assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))

	o, err = f.NewObject(context.Background(), "data.txt")
	require.NoError(t, err)
	assert.Equal(t, int64(9), o.Size())
	md5Hash, err = o.Hash(context.Background(), hash.MD5)
	require.NoError(t, err)
	assert.Equal(t, "5b82f8bf4df2bfb0e66ccaa7306fd024", md5Hash)
	fd, err = o.Open(context.Background())
	require.NoError(t, err)
	data, err = io.ReadAll(fd)
	require.NoError(t, err)
	require.NoError(t, fd.Close())
	assert.Equal(t, []byte(files["data.txt"]), data)
	do, ok = o.(fs.MimeTyper)
	require.True(t, ok)
	assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
}
@@ -1,16 +0,0 @@
// Test DOI filesystem interface
package doi

import (
	"testing"

	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestDoi:",
		NilObject:  (*Object)(nil),
	})
}
@@ -1,164 +0,0 @@
// Implementation for InvenioRDM

package doi

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"regexp"
	"strings"
	"time"

	"github.com/rclone/rclone/backend/doi/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/rest"
)

var invenioRecordRegex = regexp.MustCompile(`\/records?\/(.+)`)

// Returns true if resolvedURL is likely a DOI hosted on an InvenioRDM installation
func activateInvenio(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (isActive bool) {
	_, _, err := resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
	return err == nil
}

// Resolve the main API endpoint for a DOI hosted on an InvenioRDM installation
func resolveInvenioEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (provider Provider, endpoint *url.URL, err error) {
	var res *http.Response
	opts := rest.Opts{
		Method:  "GET",
		RootURL: resolvedURL.String(),
	}
	err = pacer.Call(func() (bool, error) {
		res, err = srv.Call(ctx, &opts)
		return shouldRetry(ctx, res, err)
	})
	if err != nil {
		return "", nil, err
	}

	// First, attempt to grab the API URL from the headers
	var linksetURL *url.URL
	links := parseLinkHeader(res.Header.Get("Link"))
	for _, link := range links {
		if link.Rel == "linkset" && link.Type == "application/linkset+json" {
			parsed, err := url.Parse(link.Href)
			if err == nil {
				linksetURL = parsed
				break
			}
		}
	}

	if linksetURL != nil {
		endpoint, err = checkInvenioAPIURL(ctx, srv, pacer, linksetURL)
		if err == nil {
			return Invenio, endpoint, nil
		}
		fs.Logf(nil, "using linkset URL failed: %s", err.Error())
	}

	// If there is no linkset header, try to grab the record ID from the URL
	recordID := ""
	resURL := res.Request.URL
	match := invenioRecordRegex.FindStringSubmatch(resURL.EscapedPath())
	if match != nil {
		recordID = match[1]
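		// Guess the standard InvenioRDM API record URL for this record and check that it responds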
		guessedURL := res.Request.URL.ResolveReference(&url.URL{
			Path: "/api/records/" + recordID,
		})
		endpoint, err = checkInvenioAPIURL(ctx, srv, pacer, guessedURL)
		if err == nil {
			return Invenio, endpoint, nil
		}
		fs.Logf(nil, "guessing the URL failed: %s", err.Error())
	}

	return "", nil, fmt.Errorf("could not resolve the Invenio API endpoint for '%s'", resolvedURL.String())
}

func checkInvenioAPIURL(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (endpoint *url.URL, err error) {
	var result api.InvenioRecordResponse
	opts := rest.Opts{
		Method:  "GET",
		RootURL: resolvedURL.String(),
	}
	err = pacer.Call(func() (bool, error) {
		res, err := srv.CallJSON(ctx, &opts, nil, &result)
		return shouldRetry(ctx, res, err)
	})
	if err != nil {
		return nil, err
	}
	if result.Links.Self == "" {
		return nil, fmt.Errorf("could not parse API response from '%s'", resolvedURL.String())
	}
	return url.Parse(result.Links.Self)
}

// invenioProvider implements the doiProvider interface for InvenioRDM installations
type invenioProvider struct {
	f *Fs
}

// ListEntries returns the full list of entries found at the remote, regardless of root
func (ip *invenioProvider) ListEntries(ctx context.Context) (entries []*Object, err error) {
	// Use the cache if populated
	cachedEntries, found := ip.f.cache.GetMaybe("files")
	if found {
		parsedEntries, ok := cachedEntries.([]Object)
		if ok {
			for _, entry := range parsedEntries {
				newEntry := entry
				entries = append(entries, &newEntry)
			}
			return entries, nil
		}
	}

	filesURL := ip.f.endpoint.JoinPath("files")
	var result api.InvenioFilesResponse
	opts := rest.Opts{
		Method: "GET",
		Path:   strings.TrimLeft(filesURL.EscapedPath(), "/"),
	}
	err = ip.f.pacer.Call(func() (bool, error) {
		res, err := ip.f.srv.CallJSON(ctx, &opts, nil, &result)
		return shouldRetry(ctx, res, err)
	})
	if err != nil {
		return nil, fmt.Errorf("readDir failed: %w", err)
	}
	for _, file := range result.Entries {
		modTime, modTimeErr := time.Parse(time.RFC3339, file.Updated)
		if modTimeErr != nil {
			fs.Logf(ip.f, "error: could not parse last update time %v", modTimeErr)
			modTime = timeUnset
		}
		entry := &Object{
			fs:          ip.f,
			remote:      file.Key,
			contentURL:  file.Links.Content,
			size:        file.Size,
			modTime:     modTime,
			contentType: file.MimeType,
			md5:         strings.TrimPrefix(file.Checksum, "md5:"),
		}
		entries = append(entries, entry)
	}
	// Populate the cache
	cacheEntries := []Object{}
	for _, entry := range entries {
		cacheEntries = append(cacheEntries, *entry)
	}
	ip.f.cache.Put("files", cacheEntries)
	return entries, nil
}

func newInvenioProvider(f *Fs) doiProvider {
	return &invenioProvider{
		f: f,
	}
}
@@ -1,75 +0,0 @@
package doi

import (
	"regexp"
	"strings"
)

var linkRegex = regexp.MustCompile(`^<(.+)>$`)
var valueRegex = regexp.MustCompile(`^"(.+)"$`)

// headerLink represents a link as presented in HTTP headers
// MDN Reference: https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Link
type headerLink struct {
	Href   string
	Rel    string
	Type   string
	Extras map[string]string
}
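
// parseLinkHeader splits a Link header value on commas and parses each entry in turn,
// dropping any entry that does not parse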
func parseLinkHeader(header string) (links []headerLink) {
	for link := range strings.SplitSeq(header, ",") {
		link = strings.TrimSpace(link)
		parsed := parseLink(link)
		if parsed != nil {
			links = append(links, *parsed)
		}
	}
	return links
}
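
// parseLink parses a single Link header entry of the form
//	<https://example.com/next>; rel="next"; type="text/html"
// (URL shown for illustration only); it returns nil when the angle-bracketed URL part is missing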
func parseLink(link string) (parsedLink *headerLink) {
	var parts []string
	for part := range strings.SplitSeq(link, ";") {
		parts = append(parts, strings.TrimSpace(part))
	}

	match := linkRegex.FindStringSubmatch(parts[0])
	if match == nil {
		return nil
	}

	result := &headerLink{
		Href:   match[1],
		Extras: map[string]string{},
	}

	for _, keyValue := range parts[1:] {
		parsed := parseKeyValue(keyValue)
		if parsed != nil {
			key, value := parsed[0], parsed[1]
			switch strings.ToLower(key) {
			case "rel":
				result.Rel = value
			case "type":
				result.Type = value
			default:
				result.Extras[key] = value
			}
		}
	}
	return result
}

func parseKeyValue(keyValue string) []string {
	parts := strings.SplitN(keyValue, "=", 2)
	if parts[0] == "" || len(parts) < 2 {
		return nil
	}
	match := valueRegex.FindStringSubmatch(parts[1])
	if match != nil {
		parts[1] = match[1]
		return parts
	}
	return parts
}
@@ -1,44 +0,0 @@
package doi

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestParseLinkHeader(t *testing.T) {
	header := "<https://zenodo.org/api/records/15063252> ; rel=\"linkset\" ; type=\"application/linkset+json\""
	links := parseLinkHeader(header)
	expected := headerLink{
		Href:   "https://zenodo.org/api/records/15063252",
		Rel:    "linkset",
		Type:   "application/linkset+json",
		Extras: map[string]string{},
	}
	assert.Contains(t, links, expected)

	header = "<https://api.example.com/issues?page=2>; rel=\"prev\", <https://api.example.com/issues?page=4>; rel=\"next\", <https://api.example.com/issues?page=10>; rel=\"last\", <https://api.example.com/issues?page=1>; rel=\"first\""
	links = parseLinkHeader(header)
	expectedList := []headerLink{{
		Href:   "https://api.example.com/issues?page=2",
		Rel:    "prev",
		Type:   "",
		Extras: map[string]string{},
	}, {
		Href:   "https://api.example.com/issues?page=4",
		Rel:    "next",
		Type:   "",
		Extras: map[string]string{},
	}, {
		Href:   "https://api.example.com/issues?page=10",
		Rel:    "last",
		Type:   "",
		Extras: map[string]string{},
	}, {
		Href:   "https://api.example.com/issues?page=1",
		Rel:    "first",
		Type:   "",
		Extras: map[string]string{},
	}}
	assert.Equal(t, links, expectedList)
}
@@ -1,47 +0,0 @@
// Implementation for Zenodo

package doi

import (
	"context"
	"fmt"
	"net/url"
	"regexp"

	"github.com/rclone/rclone/backend/doi/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/rest"
)

var zenodoRecordRegex = regexp.MustCompile(`zenodo[.](.+)`)

// Resolve the main API endpoint for a DOI hosted on Zenodo
func resolveZenodoEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL, doi string) (provider Provider, endpoint *url.URL, err error) {
	match := zenodoRecordRegex.FindStringSubmatch(doi)
	if match == nil {
		return "", nil, fmt.Errorf("could not derive API endpoint URL from '%s'", resolvedURL.String())
	}

	recordID := match[1]
	endpointURL := resolvedURL.ResolveReference(&url.URL{Path: "/api/records/" + recordID})

	var result api.InvenioRecordResponse
	opts := rest.Opts{
		Method:  "GET",
		RootURL: endpointURL.String(),
	}
	err = pacer.Call(func() (bool, error) {
		res, err := srv.CallJSON(ctx, &opts, nil, &result)
		return shouldRetry(ctx, res, err)
	})
	if err != nil {
		return "", nil, err
	}

	endpointURL, err = url.Parse(result.Links.Self)
	if err != nil {
		return "", nil, err
	}

	return Zenodo, endpointURL, nil
}