mirror of https://github.com/rclone/rclone.git synced 2025-12-06 00:03:32 +00:00

Compare commits

..

1 commit

2215 changed files with 121682 additions and 515712 deletions

.gitattributes (vendored): 4 changes

@@ -1,7 +1,3 @@
# Go writes go.mod and go.sum with lf even on windows
go.mod text eol=lf
go.sum text eol=lf
# Ignore generated files in GitHub language statistics and diffs
/MANUAL.* linguist-generated=true
/rclone.1 linguist-generated=true

.github/FUNDING.yml (vendored, normal file): 4 changes

@@ -0,0 +1,4 @@
github: [ncw]
patreon: njcw
liberapay: ncw
custom: ["https://rclone.org/donate/"]


@@ -9,7 +9,7 @@ We understand you are having a problem with rclone; we want to help you with tha
**STOP and READ**
**YOUR POST WILL BE REMOVED IF IT IS LOW QUALITY**:
Please show the effort you've put into solving the problem and please be specific.
Please show the effort you've put in to solving the problem and please be specific.
People are volunteering their time to help! Low effort posts are not likely to get good answers!
If you think you might have found a bug, try to replicate it with the latest beta (or stable).
@@ -37,6 +37,7 @@ The Rclone Developers
-->
#### The associated forum post URL from `https://forum.rclone.org`
@@ -64,11 +65,3 @@ The Rclone Developers
#### A log from the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
<!--- Please keep the note below for others who read your bug report. -->
#### How to use GitHub
* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
* Subscribe to receive notifications on status change and new comments.


@@ -26,6 +26,7 @@ The Rclone Developers
-->
#### The associated forum post URL from `https://forum.rclone.org`
@@ -41,11 +42,3 @@ The Rclone Developers
#### How do you think rclone should be changed to solve that?
<!--- Please keep the note below for others who read your feature request. -->
#### How to use GitHub
* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
* Subscribe to receive notifications on status change and new comments.


@@ -22,7 +22,7 @@ Link issues and relevant forum posts here.
#### Checklist
- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-new-feature-or-bug-fix).
- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
- [ ] I have added tests for all changes in this PR if appropriate.
- [ ] I have added documentation for the changes if appropriate.
- [ ] All commit messages are in [house style](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#commit-messages).


@@ -1,6 +0,0 @@
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"


@@ -8,51 +8,33 @@ name: build
on:
push:
branches:
- '**'
- '*'
tags:
- '**'
- '*'
pull_request:
workflow_dispatch:
inputs:
manual:
description: Manual run (bypass default conditions)
type: boolean
default: true
jobs:
build:
if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
timeout-minutes: 60
defaults:
run:
shell: bash
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.24']
job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.13', 'go1.14', 'go1.15']
include:
- job_name: linux
os: ubuntu-latest
go: '>=1.25.0-rc.1'
go: '1.16.x'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
quicktest: true
racequicktest: true
librclonetest: true
deploy: true
- job_name: linux_386
os: ubuntu-latest
go: '>=1.25.0-rc.1'
goarch: 386
gotags: cmount
quicktest: true
- job_name: mac_amd64
os: macos-latest
go: '>=1.25.0-rc.1'
os: macOS-latest
go: '1.16.x'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
@@ -60,32 +42,54 @@ jobs:
deploy: true
- job_name: mac_arm64
os: macos-latest
go: '>=1.25.0-rc.1'
os: macOS-latest
go: '1.16.x'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -macos-sdk macosx11.1 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows
- job_name: windows_amd64
os: windows-latest
go: '>=1.25.0-rc.1'
go: '1.16.x'
gotags: cmount
cgo: '0'
build_flags: '-include "^windows/"'
build_flags: '-include "^windows/amd64" -cgo'
build_args: '-buildmode exe'
quicktest: true
racequicktest: true
deploy: true
- job_name: windows_386
os: windows-latest
go: '1.16.x'
gotags: cmount
goarch: '386'
cgo: '1'
build_flags: '-include "^windows/386" -cgo'
build_args: '-buildmode exe'
quicktest: true
deploy: true
- job_name: other_os
os: ubuntu-latest
go: '>=1.25.0-rc.1'
go: '1.16.x'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true
- job_name: go1.24
- job_name: go1.13
os: ubuntu-latest
go: '1.24'
go: '1.13.x'
quicktest: true
- job_name: go1.14
os: ubuntu-latest
go: '1.14.x'
quicktest: true
racequicktest: true
- job_name: go1.15
os: ubuntu-latest
go: '1.15.x'
quicktest: true
racequicktest: true
@@ -95,17 +99,18 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Install Go
uses: actions/setup-go@v6
uses: actions/setup-go@v2
with:
stable: 'false'
go-version: ${{ matrix.go }}
check-latest: true
- name: Set environment variables
shell: bash
run: |
echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
@@ -114,25 +119,20 @@ jobs:
if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi
- name: Install Libraries on Linux
shell: bash
run: |
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
sudo apt-get update
sudo apt-get install -y fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
sudo apt-get install fuse libfuse-dev rpm pkg-config
if: matrix.os == 'ubuntu-latest'
- name: Install Libraries on macOS
shell: bash
run: |
# https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788
# https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008
unset HOMEBREW_NO_INSTALL_FROM_API
brew untap --force homebrew/core
brew untap --force homebrew/cask
brew update
brew install --cask macfuse
brew install git-annex git-annex-remote-rclone
if: matrix.os == 'macos-latest'
if: matrix.os == 'macOS-latest'
- name: Install Libraries on Windows
shell: powershell
@@ -151,6 +151,7 @@ jobs:
if: matrix.os == 'windows-latest'
- name: Print Go version and environment
shell: bash
run: |
printf "Using go at: $(which go)\n"
printf "Go version: $(go version)\n"
@@ -161,233 +162,149 @@ jobs:
printf "\n\nSystem environment:\n\n"
env
- name: Go module cache
uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Build rclone
shell: bash
run: |
make
- name: Rclone version
run: |
rclone version
- name: Run tests
shell: bash
run: |
make quicktest
if: matrix.quicktest
- name: Race test
shell: bash
run: |
make racequicktest
if: matrix.racequicktest
- name: Run librclone tests
- name: Code quality test
shell: bash
run: |
make -C librclone/ctest test
make -C librclone/ctest clean
librclone/python/test_rclone.py
if: matrix.librclonetest
make build_dep
make check
if: matrix.check
- name: Compile all architectures test
shell: bash
run: |
make
make compile_all
if: matrix.compile_all
- name: Deploy built binaries
shell: bash
run: |
if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi
if [[ "${{ matrix.os }}" == "windows-latest" ]]; then make release_dep_windows ; fi
make ci_beta
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# working-directory: '$(modulePath)'
# Deploy binaries if enabled in config && not a PR && not a fork
if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
lint:
if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
timeout-minutes: 30
name: "lint"
runs-on: ubuntu-latest
steps:
- name: Get runner parameters
id: get-runner-parameters
run: |
echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT
echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT
- name: Checkout
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Install Go
id: setup-go
uses: actions/setup-go@v6
with:
go-version: '>=1.24.0-rc.1'
check-latest: true
cache: false
- name: Cache
uses: actions/cache@v4
with:
path: |
~/go/pkg/mod
~/.cache/go-build
~/.cache/golangci-lint
key: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-${{ hashFiles('go.sum') }}
restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-
- name: Code quality test (Linux)
uses: golangci/golangci-lint-action@v9
with:
version: latest
skip-cache: true
- name: Code quality test (Windows)
uses: golangci/golangci-lint-action@v9
env:
GOOS: "windows"
with:
version: latest
skip-cache: true
- name: Code quality test (macOS)
uses: golangci/golangci-lint-action@v9
env:
GOOS: "darwin"
with:
version: latest
skip-cache: true
- name: Code quality test (FreeBSD)
uses: golangci/golangci-lint-action@v9
env:
GOOS: "freebsd"
with:
version: latest
skip-cache: true
- name: Code quality test (OpenBSD)
uses: golangci/golangci-lint-action@v9
env:
GOOS: "openbsd"
with:
version: latest
skip-cache: true
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest
- name: Scan for vulnerabilities
run: govulncheck ./...
- name: Check Markdown format
uses: DavidAnson/markdownlint-cli2-action@v20
with:
globs: |
CONTRIBUTING.md
MAINTAINERS.md
README.md
RELEASE.md
CODE_OF_CONDUCT.md
librclone\README.md
backend\s3\README.md
docs/content/{_index,authors,bugs,changelog,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md
- name: Scan edits of autogenerated files
run: bin/check_autogenerated_edits.py 'origin/${{ github.base_ref }}'
if: github.event_name == 'pull_request'
if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
android:
if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
timeout-minutes: 30
name: "android-all"
runs-on: ubuntu-latest
timeout-minutes: 30
name: "android-all"
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
steps:
- name: Checkout
uses: actions/checkout@v5
with:
fetch-depth: 0
# Upgrade together with NDK version
- name: Set up Go 1.14
uses: actions/setup-go@v1
with:
go-version: 1.14
# Upgrade together with NDK version
- name: Set up Go
uses: actions/setup-go@v6
with:
go-version: '>=1.25.0-rc.1'
# Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
- name: Force NDK version
run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;21.4.7075529" | grep -v = || true
- name: Set global environment variables
run: |
echo "VERSION=$(make version)" >> $GITHUB_ENV
- name: Go module cache
uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: build native rclone
run: |
make
- name: Set global environment variables
shell: bash
run: |
echo "VERSION=$(make version)" >> $GITHUB_ENV
- name: install gomobile
run: |
go install golang.org/x/mobile/cmd/gobind@latest
go install golang.org/x/mobile/cmd/gomobile@latest
env PATH=$PATH:~/go/bin gomobile init
echo "RCLONE_NDK_VERSION=21" >> $GITHUB_ENV
- name: build native rclone
run: |
make
- name: arm-v7a gomobile build
run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
- name: arm-v7a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm' >> $GITHUB_ENV
echo 'GOARM=7' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm-v7a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-armv7a .
- name: arm-v7a Set environment variables
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm' >> $GITHUB_ENV
echo 'GOARM=7' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm64-v8a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm-v7a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a .
- name: arm64-v8a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-armv8a .
- name: arm64-v8a Set environment variables
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x86 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=386' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm64-v8a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .
- name: x86 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-x86 .
- name: x86 Set environment variables
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=386' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x64 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=amd64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x86 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 .
- name: x64 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-x64 .
- name: x64 Set environment variables
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=amd64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x64 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x64 .
- name: Upload artifacts
run: |
make ci_upload
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# Upload artifacts if not a PR && not a fork
if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone'
- name: Upload artifacts
run: |
make ci_upload
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# Upload artifacts if not a PR && not a fork
if: github.head_ref == '' && github.repository == 'rclone/rclone'


@@ -1,294 +1,25 @@
---
# Github Actions release for rclone
# -*- compile-command: "yamllint -f parsable build_publish_docker_image.yml" -*-
name: Docker beta build
name: Build & Push Docker Images
# Trigger the workflow on push or pull request
on:
push:
branches:
- '**'
tags:
- '**'
workflow_dispatch:
inputs:
manual:
description: Manual run (bypass default conditions)
type: boolean
default: true
push:
branches:
- master
jobs:
build-image:
if: inputs.manual || (github.repository == 'rclone/rclone' && github.event_name != 'pull_request')
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
include:
- platform: linux/amd64
runs-on: ubuntu-24.04
- platform: linux/386
runs-on: ubuntu-24.04
- platform: linux/arm64
runs-on: ubuntu-24.04-arm
- platform: linux/arm/v7
runs-on: ubuntu-24.04-arm
- platform: linux/arm/v6
runs-on: ubuntu-24.04-arm
name: Build Docker Image for ${{ matrix.platform }}
runs-on: ${{ matrix.runs-on }}
steps:
- name: Free Space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout Repository
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Set REPO_NAME Variable
run: |
echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}
- name: Set PLATFORM Variable
run: |
platform=${{ matrix.platform }}
echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV
- name: Set CACHE_NAME Variable
shell: python
run: |
import os, re
def slugify(input_string, max_length=63):
slug = input_string.lower()
slug = re.sub(r'[^a-z0-9 -]', ' ', slug)
slug = slug.strip()
slug = re.sub(r'\s+', '-', slug)
slug = re.sub(r'-+', '-', slug)
slug = slug[:max_length]
slug = re.sub(r'[-]+$', '', slug)
return slug
ref_name_slug = "cache"
if os.environ.get("GITHUB_REF_NAME") and os.environ['GITHUB_EVENT_NAME'] == "pull_request":
ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME'])
with open(os.environ['GITHUB_ENV'], 'a') as env:
env.write(f"CACHE_NAME={ref_name_slug}\n")
- name: Get ImageOS
# There's no way around this, because "ImageOS" is only available to
# processes, but the setup-go action uses it in its key.
id: imageos
uses: actions/github-script@v8
with:
result-encoding: string
script: |
return process.env.ImageOS
- name: Extract Metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
env:
DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,manifest-descriptor # Important for digest annotation (used by Github packages)
with:
images: |
ghcr.io/${{ env.REPO_NAME }}
labels: |
org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
org.opencontainers.image.vendor=${{ github.repository_owner }}
org.opencontainers.image.authors=rclone <https://github.com/rclone>
org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
org.opencontainers.image.revision=${{ github.sha }}
tags: |
type=sha
type=ref,event=pr
type=ref,event=branch
type=semver,pattern={{version}}
type=semver,pattern={{major}}
type=semver,pattern={{major}}.{{minor}}
type=raw,value=beta,enable={{is_default_branch}}
- name: Setup QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Load Go Build Cache for Docker
id: go-cache
uses: actions/cache@v4
with:
key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
# Cache only the go builds, the module download is cached via the docker layer caching
path: |
go-build-cache
- name: Inject Go Build Cache into Docker
uses: reproducible-containers/buildkit-cache-dance@v3
with:
cache-map: |
{
"go-build-cache": "/root/.cache/go-build"
}
skip-extraction: ${{ steps.go-cache.outputs.cache-hit }}
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
# This is the user that triggered the Workflow. In this case, it will
# either be the user who created the Release or who manually triggered
# the workflow_dispatch.
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and Publish Image Digest
id: build
uses: docker/build-push-action@v6
with:
file: Dockerfile
context: .
provenance: false
# don't specify 'tags' here (error "get can't push tagged ref by digest")
# tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
annotations: ${{ steps.meta.outputs.annotations }}
platforms: ${{ matrix.platform }}
outputs: |
type=image,name=ghcr.io/${{ env.REPO_NAME }},push-by-digest=true,name-canonical=true,push=true
cache-from: |
type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
cache-to: |
type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }},image-manifest=true,mode=max,compression=zstd
- name: Export Image Digest
run: |
mkdir -p /tmp/digests
digest="${{ steps.build.outputs.digest }}"
touch "/tmp/digests/${digest#sha256:}"
- name: Upload Image Digest
uses: actions/upload-artifact@v5
with:
name: digests-${{ env.PLATFORM }}
path: /tmp/digests/*
retention-days: 1
if-no-files-found: error
merge-image:
name: Merge & Push Final Docker Image
runs-on: ubuntu-24.04
needs:
- build-image
steps:
- name: Download Image Digests
uses: actions/download-artifact@v6
with:
path: /tmp/digests
pattern: digests-*
merge-multiple: true
- name: Set REPO_NAME Variable
run: |
echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}
- name: Extract Metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
env:
DOCKER_METADATA_ANNOTATIONS_LEVELS: index
with:
images: |
${{ env.REPO_NAME }}
ghcr.io/${{ env.REPO_NAME }}
labels: |
org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
org.opencontainers.image.vendor=${{ github.repository_owner }}
org.opencontainers.image.authors=rclone <https://github.com/rclone>
org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
org.opencontainers.image.revision=${{ github.sha }}
tags: |
type=sha
type=ref,event=pr
type=ref,event=branch
type=semver,pattern={{version}}
type=semver,pattern={{major}}
type=semver,pattern={{major}}.{{minor}}
type=raw,value=beta,enable={{is_default_branch}}
- name: Extract Tags
shell: python
run: |
import json, os
metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
metadata = json.loads(metadata_json)
tags = [f"--tag '{tag}'" for tag in metadata["tags"]]
tags_string = " ".join(tags)
with open(os.environ['GITHUB_ENV'], 'a') as env:
env.write(f"TAGS={tags_string}\n")
- name: Extract Annotations
shell: python
run: |
import json, os
metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
metadata = json.loads(metadata_json)
annotations = [f"--annotation '{annotation}'" for annotation in metadata["annotations"]]
annotations_string = " ".join(annotations)
with open(os.environ['GITHUB_ENV'], 'a') as env:
env.write(f"ANNOTATIONS={annotations_string}\n")
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
# This is the user that triggered the Workflow. In this case, it will
# either be the user who created the Release or who manually triggered
# the workflow_dispatch.
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Create & Push Manifest List
working-directory: /tmp/digests
run: |
docker buildx imagetools create \
${{ env.TAGS }} \
${{ env.ANNOTATIONS }} \
$(printf 'ghcr.io/${{ env.REPO_NAME }}@sha256:%s ' *)
- name: Inspect and Run Multi-Platform Image
run: |
docker buildx imagetools inspect --raw ${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
docker buildx imagetools inspect --raw ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
docker run --rm ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }} version
build:
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Build and publish image
uses: ilteoood/docker_buildx@1.1.0
with:
tag: beta
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}


@@ -1,49 +0,0 @@
---
# Github Actions release for rclone
# -*- compile-command: "yamllint -f parsable build_publish_docker_plugin.yml" -*-
name: Release Build for Docker Plugin
on:
release:
types: [published]
workflow_dispatch:
inputs:
manual:
description: Manual run (bypass default conditions)
type: boolean
default: true
jobs:
build_docker_volume_plugin:
if: inputs.manual || github.repository == 'rclone/rclone'
name: Build docker plugin job
runs-on: ubuntu-latest
steps:
- name: Free some space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Build and publish docker plugin
shell: bash
run: |
VER=${GITHUB_REF#refs/tags/}
PLUGIN_USER=rclone
docker login --username ${{ secrets.DOCKER_HUB_USER }} \
--password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
export PLUGIN_USER PLUGIN_ARCH
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
done
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}


@@ -0,0 +1,33 @@
name: Docker release build
on:
release:
types: [published]
jobs:
build:
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Get actual patch version
id: actual_patch_version
run: echo ::set-output name=ACTUAL_PATCH_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g')
- name: Get actual minor version
id: actual_minor_version
run: echo ::set-output name=ACTUAL_MINOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1,2)
- name: Get actual major version
id: actual_major_version
run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
- name: Build and publish image
uses: ilteoood/docker_buildx@1.1.0
with:
tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}


@@ -1,15 +0,0 @@
name: Notify users based on issue labels
on:
issues:
types: [labeled]
jobs:
notify:
runs-on: ubuntu-latest
steps:
- uses: jenschelkopf/issue-label-notification-action@1.3
with:
token: ${{ secrets.NOTIFY_ACTION_TOKEN }}
recipients: |
Support Contract=@rclone/support


@@ -1,14 +0,0 @@
name: Publish to Winget
on:
release:
types: [released]
jobs:
publish:
runs-on: ubuntu-latest
steps:
- uses: vedantmgoyal2009/winget-releaser@v2
with:
identifier: Rclone.Rclone
installers-regex: '-windows-\w+\.zip$'
token: ${{ secrets.WINGET_TOKEN }}

.gitignore (vendored): 13 changes

@@ -3,20 +3,11 @@ _junk/
rclone
rclone.exe
build
/docs/public/
/docs/.hugo_build.lock
/docs/static/img/logos/
docs/public
rclone.iml
.idea
.history
.vscode
*.test
*.log
*.iml
fuzz-build.zip
*.orig
*.rej
Thumbs.db
__pycache__
.DS_Store
resource_windows_*.syso
.devcontainer


@@ -1,151 +1,26 @@
version: "2"
# golangci-lint configuration options
linters:
# Configure the linter set. To avoid unexpected results the implicit default
# set is ignored and all the ones to use are explicitly enabled.
default: none
enable:
# Default
- deadcode
- errcheck
- govet
- ineffassign
- staticcheck
- unused
# Additional
- gocritic
- misspell
#- prealloc # TODO
- revive
- unconvert
# Configure checks. Mostly using defaults but with some commented exceptions.
settings:
govet:
enable-all: true
disable:
- fieldalignment
- shadow
staticcheck:
# With staticcheck there is only one setting, so to extend the implicit
# default value it must be explicitly included.
checks:
# Default
- all
- -ST1000
- -ST1003
- -ST1016
- -ST1020
- -ST1021
- -ST1022
# Disable quickfix checks
- -QF*
gocritic:
# With gocritic there are different settings, but since enabled-checks
# and disabled-checks cannot both be set, for full customization the
# alternative is to disable all defaults and explicitly enable the ones
# to use.
disable-all: true
enabled-checks:
#- appendAssign # Skip default
- argOrder
- assignOp
- badCall
- badCond
#- captLocal # Skip default
- caseOrder
- codegenComment
#- commentFormatting # Skip default
- defaultCaseOrder
- deprecatedComment
- dupArg
- dupBranchBody
- dupCase
- dupSubExpr
- elseif
#- exitAfterDefer # Skip default
- flagDeref
- flagName
#- ifElseChain # Skip default
- mapKey
- newDeref
- offBy1
- regexpMust
- ruleguard # Enable additional checks that are not enabled by default
#- singleCaseSwitch # Skip default
- sloppyLen
- sloppyTypeAssert
- switchTrue
- typeSwitchVar
- underef
- unlambda
- unslice
- valSwap
- wrapperFunc
settings:
ruleguard:
rules: ${base-path}/bin/rules.go
revive:
# With revive there is in reality only one setting, and when at least one
# rule is specified then only these rules will be considered; defaults
# and all others are then implicitly disabled, so we must explicitly enable
# all rules to be used.
rules:
- name: blank-imports
disabled: false
- name: context-as-argument
disabled: false
- name: context-keys-type
disabled: false
- name: dot-imports
disabled: false
#- name: empty-block # Skip default
# disabled: true
- name: error-naming
disabled: false
- name: error-return
disabled: false
- name: error-strings
disabled: false
- name: errorf
disabled: false
- name: exported
disabled: false
#- name: increment-decrement # Skip default
# disabled: true
- name: indent-error-flow
disabled: false
- name: package-comments
disabled: false
- name: range
disabled: false
- name: receiver-naming
disabled: false
#- name: redefines-builtin-id # Skip default
# disabled: true
#- name: superfluous-else # Skip default
# disabled: true
- name: time-naming
disabled: false
- name: unexported-return
disabled: false
#- name: unreachable-code # Skip default
# disabled: true
#- name: unused-parameter # Skip default
# disabled: true
- name: var-declaration
disabled: false
- name: var-naming
disabled: false
formatters:
enable:
- goimports
- golint
- ineffassign
- structcheck
- varcheck
- govet
- unconvert
#- prealloc
#- maligned
disable-all: true
issues:
# Enable some lints excluded by default
exclude-use-default: false
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
max-issues-per-linter: 0
max-per-linter: 0
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0
run:
# Timeout for total work, e.g. 30s, 5m, 5m30s. Default is 0 (disabled).
timeout: 10m


@@ -1,72 +0,0 @@
default: true
# Use specific styles, to be consistent across all documents.
# Default is to accept any as long as it is consistent within the same document.
heading-style: # MD003
style: atx
ul-style: # MD004
style: dash
hr-style: # MD035
style: ---
code-block-style: # MD046
style: fenced
code-fence-style: # MD048
style: backtick
emphasis-style: # MD049
style: asterisk
strong-style: # MD050
style: asterisk
# Allow multiple headers with same text as long as they are not siblings.
no-duplicate-heading: # MD024
siblings_only: true
# Allow long lines in code blocks and tables.
line-length: # MD013
code_blocks: false
tables: false
# The Markdown files used to generate docs with Hugo contain a top level
# header, even though the YAML front matter has a title property (which is
# used for the HTML document title only). Suppress Markdownlint warning:
# Multiple top-level headings in the same document.
single-title: # MD025
level: 1
front_matter_title:
# The HTML docs generated by Hugo from Markdown files may have slightly
# different header anchors than GitHub rendered Markdown, e.g. Hugo trims
# leading dashes so "--config string" becomes "#config-string" while it is
# "#--config-string" in GitHub preview. When writing links to headers in the
# Markdown files we must use whatever works in the final HTML generated docs.
# Suppress Markdownlint warning: Link fragments should be valid.
link-fragments: false # MD051
# Restrict the languages and language identifiers to use for code blocks.
# We only want those supported by both Hugo and GitHub. These are documented
# here:
# https://gohugo.io/content-management/syntax-highlighting/#languages
# https://docs.github.com//get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks#syntax-highlighting
# In addition, we only want to allow identifiers (aliases) that correspond to
# the same language in Hugo and GitHub, and preferably also VSCode and other
# commonly used tools, to avoid confusion. An example of this is that "shell"
# is by some considered an identifier for shell scripts, i.e. an alias for
# "sh", while others consider it an identifier for shell sessions, i.e. an
# alias for "console". Although Hugo and GitHub in this case are consistent and
# have chosen the former, using "sh" instead, and not allowing use of "shell",
# avoids the confusion entirely.
fenced-code-language: # MD040
allowed_languages:
- text
- console
- sh
- bat
- ini
- json
- yaml
- go
- python
- c++
- c#
- java
- powershell


@@ -1,80 +0,0 @@
# Rclone Code of Conduct
Like the technical community as a whole, the Rclone team and community
is made up of a mixture of professionals and volunteers from all over
the world, working on every aspect of the mission - including
mentorship, teaching, and connecting people.
Diversity is one of our huge strengths, but it can also lead to
communication issues and unhappiness. To that end, we have a few
ground rules that we ask people to adhere to. This code applies
equally to founders, mentors and those seeking help and guidance.
This isn't an exhaustive list of things that you can't do. Rather,
take it in the spirit in which it's intended - a guide to make it
easier to enrich all of us and the technical communities in which we
participate.
This code of conduct applies to all spaces managed by the Rclone
project or Rclone Services Ltd. This includes the issue tracker, the
forum, the GitHub site, the wiki, any other online services or
in-person events. In addition, violations of this code outside these
spaces may affect a person's ability to participate within them.
- **Be friendly and patient.**
- **Be welcoming.** We strive to be a community that welcomes and
supports people of all backgrounds and identities. This includes,
but is not limited to members of any race, ethnicity, culture,
national origin, colour, immigration status, social and economic
class, educational level, sex, sexual orientation, gender identity
and expression, age, size, family status, political belief,
religion, and mental and physical ability.
- **Be considerate.** Your work will be used by other people, and you
in turn will depend on the work of others. Any decision you take
will affect users and colleagues, and you should take those
consequences into account when making decisions. Remember that we're
a world-wide community, so you might not be communicating in someone
else's primary language.
- **Be respectful.** Not all of us will agree all the time, but
disagreement is no excuse for poor behavior and poor manners. We
might all experience some frustration now and then, but we cannot
allow that frustration to turn into a personal attack. It's
important to remember that a community where people feel
uncomfortable or threatened is not a productive one. Members of the
Rclone community should be respectful when dealing with other
members as well as with people outside the Rclone community.
- **Be careful in the words that you choose.** We are a community of
professionals, and we conduct ourselves professionally. Be kind to
others. Do not insult or put down other participants. Harassment and
other exclusionary behavior aren't acceptable. This includes, but is
not limited to:
- Violent threats or language directed against another person.
- Discriminatory jokes and language.
- Posting sexually explicit or violent material.
- Posting (or threatening to post) other people's personally
identifying information ("doxing").
- Personal insults, especially those using racist or sexist terms.
- Unwelcome sexual attention.
- Advocating for, or encouraging, any of the above behavior.
- Repeated harassment of others. In general, if someone asks you to
stop, then stop.
- **When we disagree, try to understand why.** Disagreements, both
social and technical, happen all the time and Rclone is no
exception. It is important that we resolve disagreements and
differing views constructively. Remember that we're different. The
strength of Rclone comes from its varied community, people from a
wide range of backgrounds. Different people have different
perspectives on issues. Being unable to understand why someone holds
a viewpoint doesn't mean that they're wrong. Don't forget that it is
human to err and blaming each other doesn't get us anywhere.
Instead, focus on helping to resolve issues and learning from
mistakes.
If you believe someone is violating the code of conduct, we ask that
you report it by emailing [info@rclone.com](mailto:info@rclone.com).
Original text courtesy of the [Speak Up! project](http://web.archive.org/web/20141109123859/http://speakup.io/coc.html).
## Questions?
If you have questions, please feel free to [contact us](mailto:info@rclone.com).


@@ -1,8 +1,8 @@
# Contributing to rclone
# Contributing to rclone #
This is a short guide on how to contribute things to rclone.
## Reporting a bug
## Reporting a bug ##
If you've just got a question or aren't sure if you've found a bug
then please use the [rclone forum](https://forum.rclone.org/) instead
@@ -12,227 +12,94 @@ When filing an issue, please include the following information if
possible as well as a description of the problem. Make sure you test
with the [latest beta of rclone](https://beta.rclone.org/):
- Rclone version (e.g. output from `rclone version`)
- Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
- The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
- A log of the command with the `-vv` flag (e.g. output from
`rclone -vv copy /tmp remote:tmp`)
- if the log contains secrets then edit the file with a text editor first to
obscure them
* Rclone version (e.g. output from `rclone -V`)
* Which OS you are using and how many bits (e.g. Windows 7, 64 bit)
* The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
* A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
* if the log contains secrets then edit the file with a text editor first to obscure them
## Submitting a new feature or bug fix
## Submitting a pull request ##
If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via GitHub.
If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues)
first so it can be discussed.
If it is a big feature then make an issue first so it can be discussed.
To prepare your pull request first press the fork button on [rclone's GitHub
You'll need a Go environment set up with GOPATH set. See [the Go
getting started docs](https://golang.org/doc/install) for more info.
First in your web browser press the fork button on [rclone's GitHub
page](https://github.com/rclone/rclone).
Then [install Git](https://git-scm.com/downloads) and set your public contribution
[name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git)
and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).
Now in your terminal
Next open your terminal, change directory to your preferred folder and initialise
your local rclone project:
git clone https://github.com/rclone/rclone.git
cd rclone
git remote rename origin upstream
git remote add origin git@github.com:YOURUSER/rclone.git
go build
```console
git clone https://github.com/rclone/rclone.git
cd rclone
git remote rename origin upstream
# if you have SSH keys setup in your GitHub account:
git remote add origin git@github.com:YOURUSER/rclone.git
# otherwise:
git remote add origin https://github.com/YOURUSER/rclone.git
```
Make a branch to add your new feature
Note that most of the terminal commands in the rest of this guide must be
executed from the rclone folder created above.
Now [install Go](https://golang.org/doc/install) and verify your installation:
```console
go version
```
Great, you can now compile and execute your own version of rclone:
```console
go build
./rclone version
```
(Note that you can also replace `go build` with `make`, which will include a
more accurate version number in the executable as well as enable you to specify
more build options.) Finally make a branch to add your new feature
```console
git checkout -b my-new-feature
```
git checkout -b my-new-feature
And get hacking.
You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins)
and a quick view on the rclone [code organisation](#code-organisation).
When ready - run the unit tests for the code you changed
When ready - test the affected functionality and run the unit tests for the
code you changed
```console
cd folder/with/changed/files
go test -v
```
go test -v
Note that you may need to make a test remote, e.g. `TestSwift` for some
of the unit tests.
This is typically enough if you made a simple bug fix, otherwise please read
the rclone [testing](#testing) section too.
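If you need such a test remote, one way to create it without going through the
interactive `rclone config` dialogue is `rclone config create`. The sketch below
is only an illustration: it uses the `swift` backend with `env_auth` enabled,
but the exact backend parameters depend on your own account or test server.

```console
# Create a remote named TestSwift using the swift backend, reading
# credentials from the standard OpenStack environment variables
rclone config create TestSwift swift env_auth true

# Sanity check that the remote works before running the tests against it
rclone lsd TestSwift:
```

The tests look the remote up by name, so it must be called exactly what the
tests expect (`TestSwift`, `TestDrive`, ...).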
Note the top level Makefile targets
* make check
* make test
Both of these will be run by Travis when you make a pull request but
you can do this yourself locally too. These require some extra go
packages which you can install with
* make build_dep
Make sure you
- Add [unit tests](#testing) for a new feature.
- Add [documentation](#writing-documentation) for a new feature.
- [Commit your changes](#committing-your-changes) using the [commit message guidelines](#commit-messages).
* Add [documentation](#writing-documentation) for a new feature.
* Follow the [commit message guidelines](#commit-messages).
* Add [unit tests](#testing) for a new feature
* squash commits down to one per feature
* rebase to master with `git rebase master`
When you are done with that push your changes to GitHub:
When you are done with that
```console
git push -u origin my-new-feature
```
git push -u origin my-new-feature
and open the GitHub website to [create your pull
Go to the GitHub website and click [Create pull
request](https://help.github.com/articles/creating-a-pull-request/).
Your changes will then get reviewed and you might get asked to fix some stuff.
If so, then make the changes in the same branch, commit and push your updates to
GitHub.
Your patch will get reviewed and you might get asked to fix some stuff.
You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master)
or [squash your commits](#squashing-your-commits).
## Using Git and GitHub
### Committing your changes
Follow the guideline for [commit messages](#commit-messages) and then:
```console
git checkout my-new-feature # To switch to your branch
git status # To see the new and changed files
git add FILENAME # To select FILENAME for the commit
git status # To verify the changes to be committed
git commit # To do the commit
git log # To verify the commit. Use q to quit the log
If so, then make the changes in the same branch, squash the commits (make multiple commits one commit) by running:
```
git log # See how many commits you want to squash
git reset --soft HEAD~2 # This squashes the 2 latest commits together.
git status # Check what will happen, if you made a mistake resetting, you can run git reset 'HEAD@{1}' to undo.
git commit # Add a new commit message.
git push --force # Push the squashed commit to your GitHub repo.
# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also recommends wizardzines.com
```
You can modify the message or changes in the latest commit using:
## CI for your fork ##
```console
git commit --amend
```
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
If you amend to commits that have been pushed to GitHub, then you will have to
[replace your previously pushed commits](#replacing-your-previously-pushed-commits).
### Replacing your previously pushed commits
Note that you are about to rewrite the GitHub history of your branch. It is good
practice to involve your collaborators before modifying commits that have been
pushed to GitHub.
Your previously pushed commits are replaced by:
```console
git push --force origin my-new-feature
```
### Basing your changes on the latest master
To base your changes on the latest version of the
[rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
```console
git checkout master
git fetch upstream
git merge --ff-only
git push origin --follow-tags # optional update of your fork in GitHub
git checkout my-new-feature
git rebase master
```
If you rebase commits that have been pushed to GitHub, then you will have to
[replace your previously pushed commits](#replacing-your-previously-pushed-commits).
### Squashing your commits
To combine your commits into one commit:
```console
git log # To count the commits to squash, e.g. the last 2
git reset --soft HEAD~2 # To undo the 2 latest commits
git status # To check everything is as expected
```
If everything is fine, then make the new combined commit:
```console
git commit # To commit the undone commits as one
```
otherwise, you may roll back using:
```console
git reflog # To check that HEAD{1} is your previous state
git reset --soft 'HEAD@{1}' # To roll back to your previous state
```
If you squash commits that have been pushed to GitHub, then you will have to
[replace your previously pushed commits](#replacing-your-previously-pushed-commits).
Tip: You may like to use `git rebase -i master` if you are experienced or have a
more complex situation.
### GitHub Continuous Integration
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions)
to build and test the project, which should be automatically available for your
fork too from the `Actions` tab in your repository.
## Testing
### Code quality tests
If you install [golangci-lint](https://github.com/golangci/golangci-lint) then
you can run the same tests that are run in the CI, which can be very helpful.
You can run them with `make check` or with `golangci-lint run ./...`.
Using these tests ensures that the whole rclone codebase uses the same coding
standards. These tests also check for easy mistakes to make (like forgetting
to check an error return).
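For example, assuming `golangci-lint` is already installed and on your `PATH`,
you can lint the whole tree exactly as the CI does, or restrict it to the
package you are working on (the `./fs/operations/...` path is just an example):

```console
# Lint everything, as the CI lint job does
golangci-lint run ./...

# Lint only the package you have been changing
golangci-lint run ./fs/operations/...
```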
### Quick testing
## Testing ##
rclone's tests are run from the go testing framework, so at the top
level you can run this to run all the tests.
```console
go test -v ./...
```
You can also use `make`, if supported by your platform
```console
make quicktest
```
The quicktest is [automatically run by GitHub](#github-continuous-integration)
when you push your branch to GitHub.
### Backend testing
go test -v ./...
rclone contains a mixture of unit tests and integration tests.
Because it is difficult (and in some respects pointless) to test cloud
@@ -246,216 +113,113 @@ need to make a remote called `TestDrive`.
You can then run the unit tests in the drive directory. These tests
are skipped if `TestDrive:` isn't defined.
```console
cd backend/drive
go test -v
```
cd backend/drive
go test -v
You can then run the integration tests which test all of rclone's
operations. Normally these get run against the local file system,
but they can be run against any of the remotes.
```console
cd fs/sync
go test -v -remote TestDrive:
go test -v -remote TestDrive: -fast-list
cd fs/sync
go test -v -remote TestDrive:
go test -v -remote TestDrive: -fast-list
cd fs/operations
go test -v -remote TestDrive:
```
cd fs/operations
go test -v -remote TestDrive:
If you want to use the integration test framework to run these tests
altogether with an HTML report and test retries then from the
project root:
```console
go run ./fstest/test_all -backends drive
```
### Full integration testing
go install github.com/rclone/rclone/fstest/test_all
test_all -backend drive
If you want to run all the integration tests against all the remotes,
then change into the project root and run
```console
make check
make test
```
make test
The commands may require some extra go packages which you can install with
This command is run daily on the integration test server. You can
find the results at https://pub.rclone.org/integration-tests/
```console
make build_dep
```
The full integration tests are run daily on the integration test server. You can
find the results at <https://integration.rclone.org>
## Code Organisation
## Code Organisation ##
Rclone code is organised into a small number of top level directories
with modules beneath.
- backend - the rclone backends for interfacing to cloud providers -
- all - import this to load all the cloud providers
- ...providers
- bin - scripts for use while building or maintaining rclone
- cmd - the rclone commands
- all - import this to load all the commands
- ...commands
- cmdtest - end-to-end tests of commands, flags, environment variables,...
- docs - the documentation and website
- content - adjust these docs only, except files or portions marked
autogenerated, for which the corresponding .go file must be edited instead;
everything else is autogenerated
- commands - these are auto-generated, edit the corresponding .go file
- fs - main rclone definitions - minimal amount of code
- accounting - bandwidth limiting and statistics
- asyncreader - an io.Reader which reads ahead
- config - manage the config file and flags
- driveletter - detect if a name is a drive letter
- filter - implements include/exclude filtering
- fserrors - rclone specific error handling
- fshttp - http handling for rclone
- fspath - path handling for rclone
- hash - defines rclone's hash types and functions
- list - list a remote
- log - logging facilities
- march - iterates directories in lock step
- object - in memory Fs objects
- operations - primitives for sync, e.g. Copy, Move
- sync - sync directories
- walk - walk a directory
- fstest - provides integration test framework
- fstests - integration tests for the backends
- mockdir - mocks an fs.Directory
- mockobject - mocks an fs.Object
- test_all - Runs integration tests for everything
- graphics - the images used in the website, etc.
- lib - libraries used by the backend
- atexit - register functions to run when rclone exits
- dircache - directory ID to name caching
- oauthutil - helpers for using oauth
- pacer - retries with backoff and paces operations
- readers - a selection of useful io.Readers
- rest - a thin abstraction over net/http for REST
- librclone - in memory interface to rclone's API for embedding rclone
- vfs - Virtual FileSystem layer for implementing rclone mount and similar
* backend - the rclone backends for interfacing to cloud providers -
* all - import this to load all the cloud providers
* ...providers
* bin - scripts for use while building or maintaining rclone
* cmd - the rclone commands
* all - import this to load all the commands
* ...commands
* docs - the documentation and website
* content - adjust these docs only - everything else is autogenerated
* command - these are auto generated - edit the corresponding .go file
* fs - main rclone definitions - minimal amount of code
* accounting - bandwidth limiting and statistics
* asyncreader - an io.Reader which reads ahead
* config - manage the config file and flags
* driveletter - detect if a name is a drive letter
* filter - implements include/exclude filtering
* fserrors - rclone specific error handling
* fshttp - http handling for rclone
* fspath - path handling for rclone
* hash - defines rclone's hash types and functions
* list - list a remote
* log - logging facilities
* march - iterates directories in lock step
* object - in memory Fs objects
* operations - primitives for sync, e.g. Copy, Move
* sync - sync directories
* walk - walk a directory
* fstest - provides integration test framework
* fstests - integration tests for the backends
* mockdir - mocks an fs.Directory
* mockobject - mocks an fs.Object
* test_all - Runs integration tests for everything
* graphics - the images used in the website, etc.
* lib - libraries used by the backend
* atexit - register functions to run when rclone exits
* dircache - directory ID to name caching
* oauthutil - helpers for using oauth
* pacer - retries with backoff and paces operations
* readers - a selection of useful io.Readers
* rest - a thin abstraction over net/http for REST
* vfs - Virtual FileSystem layer for implementing rclone mount and similar
## Writing Documentation
## Writing Documentation ##
If you are adding a new feature then please update the documentation.
The documentation sources are generally in Markdown format, in conformance
with the CommonMark specification and compatible with GitHub Flavored
Markdown (GFM). The Markdown format and style are checked as part of the lint
operation that runs automatically on pull requests, to enforce standards and
consistency. This is based on the [markdownlint](https://github.com/DavidAnson/markdownlint)
tool by David Anson, which can also be integrated into editors so you can
perform the same checks while writing. It generally follows Ciro Santilli's
[Markdown Style Guide](https://cirosantilli.com/markdown-style-guide), which
is a good source if you want to know more.
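If you want to run the same Markdown checks locally before pushing, one option
(there are others) is the `markdownlint-cli2` command line front end for
markdownlint, which should pick up the repository's `.markdownlint.yml`
configuration automatically:

```console
# One-off install of the linter (needs Node.js/npm)
npm install --global markdownlint-cli2

# Check the files you have edited against the repository rules
markdownlint-cli2 CONTRIBUTING.md docs/content/docs.md
```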
HTML pages, served as the website <rclone.org>, are generated from the Markdown
using [Hugo](https://gohugo.io). Note that when generating the HTML pages, Hugo
currently uses a different algorithm for generating header anchors than GitHub
uses for its Markdown rendering. For example, in the HTML docs generated by Hugo
any leading `-` characters are ignored, which means that when linking to a header
with the text `--config string` we need to use the link `#config-string` in our
Markdown source, which will not work in GitHub's preview where `#--config-string`
would be the correct link.
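For example, a sketch of how the same heading would be linked in the two renderers (the heading text is illustrative):

```markdown
## --config string

<!-- Link that works in the Hugo-generated docs on rclone.org: -->
See the [--config](#config-string) flag.

<!-- Link that works in GitHub's Markdown preview: -->
See the [--config](#--config-string) flag.
```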
Most of the documentation is written directly in text files with extension
`.md`, mainly within the folder `docs/content`. Note that several of these files
are autogenerated (e.g. the command documentation under `docs/content/commands`,
and `docs/content/flags.md`), or contain autogenerated portions (e.g. the
backend documentation). These are marked with an `autogenerated` comment.
The sources of the autogenerated text are usually Markdown formatted text
embedded as string values in the Go source code, so you need to locate these
and edit the `.go` file instead. The `MANUAL.*`, `rclone.1` and other text
files in the root of the repository are also autogenerated. The autogeneration
of files, and the website, will be done during the release process. See the
`make doc` and `make website` targets in the Makefile if you are interested in
how. You don't need to run these when adding a feature.
If you add a new general flag (not for a backend), then document it in
`docs/content/docs.md` - the flags there are supposed to be in
alphabetical order.
If you add a new backend option/flag, then it should be documented in
the source file in the `Help:` field, following the guidelines below
(there is a sketch after the list showing how this looks in the code):
- Start with the most important information about the option,
as a single sentence on a single line.
- This text will be used for the command-line flag help.
- It will be combined with other information, such as any default value,
and the result will look odd if not written as a single sentence.
- It should end with a period/full stop character, which will be shown
in docs but automatically removed when producing the flag help.
- Try to keep it below 80 characters, to reduce text wrapping in the terminal.
- More details can be added in a new paragraph, after an empty line (`"\n\n"`).
- Like with docs generated from Markdown, a single line break is ignored
and two line breaks create a new paragraph.
- This text will be shown to the user in `rclone config`
and in the docs (where it will be added by `make backenddocs`,
normally run some time before next release).
- To create options of enumeration type use the `Examples:` field.
- Each example value has its own `Help:` field, but they are treated
a bit differently from the main option help text. They will be shown
as an unordered list, therefore a single line break is enough to
create a new list item. Also, for enumeration texts like names of
countries, it looks better without an ending period/full stop character.
- You can run `make backenddocs` to verify the resulting Markdown.
- This will update the autogenerated sections of the backend docs Markdown
files under `docs/content`.
- It requires you to have [Python](https://www.python.org) installed.
- The `backenddocs` make target runs the Python script `bin/make_backend_docs.py`,
and you can also run this directly, optionally with the name of a backend
as argument to only update the docs for a specific backend.
- **Do not** commit the updated Markdown files. This operation is run as part of
the release process. Since any manual changes in the autogenerated sections
of the Markdown files will then be lost, we have a pull request check that
reports an error for any changes within the autogenerated sections. Should you
have done manual changes outside of the autogenerated sections they must be
committed, of course.
- You can run `make serve` to verify the resulting website.
- This will build the website and serve it locally, so you can open it in
your web browser and verify that the end result looks OK. Check specifically
any added links, also in light of the note above regarding different algorithms
for generated header anchors.
- It requires you to have the [Hugo](https://gohugo.io) tool available.
- The `serve` make target depends on the `website` target, which runs the
`hugo` command from the `docs` directory to build the website, and then
it serves the website locally with an embedded web server using a command
`hugo server --logLevel info -w --disableFastRender --ignoreCache`, so you
can run similar Hugo commands directly as well.
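As a rough sketch, a backend option with enumerated example values looks like this in the Go source. The option name, help texts and example values below are purely illustrative:

```go
package remote

import "github.com/rclone/rclone/fs"

// exampleOptions sketches how a backend option with enumerated example
// values is declared (normally inside fs.Register in init()).
var exampleOptions = []fs.Option{{
	Name: "region",
	// The first line is used for the command-line flag help: a single short
	// sentence ending with a full stop.
	//
	// The paragraphs after the blank line are shown in `rclone config` and
	// added to the docs by `make backenddocs`.
	Help: "Region to connect to.\n\nLeave blank to use the provider's default region.",
	// Enumeration-type options list their values in Examples; each value has
	// its own Help, shown as an unordered list without trailing full stops.
	Examples: []fs.OptionExample{{
		Value: "eu-central-1",
		Help:  "Europe (Frankfurt)",
	}, {
		Value: "us-east-1",
		Help:  "US East (N. Virginia)",
	}},
}}
```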
When writing documentation for an entirely new backend,
see [backend documentation](#backend-documentation).
If you are updating documentation for a command, you must do that in the
command source code, e.g. `cmd/ls/ls.go`. Write flag help strings as a single
sentence on a single line, without a period/full stop character at the end,
as it will be combined unmodified with other information (such as any default
value).
Note that you can use
[GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
for small changes in the docs, which makes it very easy. Just remember the
caveat about linking to header anchors, noted above, which means that GitHub's
Markdown preview may not be an entirely reliable verification of the results.
After your changes have been merged, you can verify them on
[tip.rclone.org](https://tip.rclone.org). This site is updated daily with the
current state of the master branch at 07:00 UTC. The changes will be on the main
[rclone.org](https://rclone.org) site once they have been included in a release.
## Making a release
There are separate instructions for making a release in the RELEASE.md
file.
## Commit messages
Please make the first line of your commit message a summary of the
change that a user (not a developer) of rclone would like to read, and
@@ -479,16 +243,16 @@ change will get linked into the issue.
Here is an example of a short commit message:
```text
drive: add team drive support - fixes #885
```
And here is an example of a longer one:
```text
mount: fix hang on errored upload
In certain circumstances, if an upload failed then the mount could hang
indefinitely. This was fixed by closing the read pipe after the Put
completed. This will cause the write side to return a pipe closed
error fixing the hang.
@@ -496,7 +260,7 @@ error fixing the hang.
Fixes #1498
```
## Adding a dependency
rclone uses the [go
modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
@@ -508,9 +272,7 @@ To add a dependency `github.com/ncw/new_dependency` see the
instructions below. These will fetch the dependency and add it to
`go.mod` and `go.sum`.
```console
go get github.com/ncw/new_dependency
```
You can add constraints on that package when doing `go get` (see the
go docs linked above), but don't unless you really need to.
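For example, to pin to a particular version (the module path and version here are only illustrative):

```console
go get github.com/ncw/new_dependency@v1.2.3
```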
@@ -518,17 +280,15 @@ go docs linked above), but don't unless you really need to.
Please check in the changes generated by `go mod` including `go.mod`
and `go.sum` in the same commit as your other changes.
## Updating a dependency
If you need to update a dependency then run
```console
go get golang.org/x/crypto
```
Check in a single commit as above.
## Updating all the dependencies
In order to update all the dependencies then run `make update`. This
just uses the go modules to update all the modules to their latest
@@ -537,7 +297,7 @@ stable release. Check in the changes in a single commit as above.
This should be done early in the release cycle to pick up new versions
of packages in time for them to get some testing.
## Updating a backend
If you update a backend then please run the unit tests and the
integration tests for that backend.
@@ -552,153 +312,105 @@ integration tests.
The next section goes into more detail about the tests.
## Writing a new backend
Choose a name. The docs here will use `remote` as an example.
Note that in rclone terminology a file system backend is called a
remote or an fs.
### Research
- Look at the interfaces defined in `fs/types.go`
- Study one or more of the existing remotes
### Getting going
- Create `backend/remote/remote.go` (copy this from a similar remote)
- box is a good one to start from if you have a directory-based remote (and
shows how to use the directory cache)
- b2 is a good one to start from if you have a bucket-based remote
- Add your remote to the imports in `backend/all/all.go`
- HTTP based remotes are easiest to maintain if they use rclone's
[lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but
if there is a really good Go SDK from the provider then use that instead.
- Try to implement as many optional methods as possible as it makes the remote
more usable.
- Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to
make sure we can encode any path name and `rclone info` to help determine the
encodings needed
- `rclone purge -v TestRemote:rclone-info`
- `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
- `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
- open `remote.csv` in a spreadsheet and examine
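As a very rough sketch, the skeleton of a new backend starts out something like this. The backend name, option and implementation details below are illustrative - copy a real backend such as box or b2 rather than typing this in:

```go
// backend/remote/remote.go - a minimal sketch of how a backend registers
// itself with rclone.
package remote

import (
	"context"
	"errors"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

func init() {
	fs.Register(&fs.RegInfo{
		Name:        "remote",
		Description: "Example Remote",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name: "endpoint",
			Help: "Endpoint for the service.\n\nLeave blank to use the default.",
		}},
	})
}

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse the options with configstruct, set up the API client
	// (lib/rest + fs/fshttp) and return the Fs here.
	return nil, errors.New("not implemented yet")
}
```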
### Guidelines for a speedy merge
- **Do** use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest)
if you are implementing a REST-like backend and parsing XML/JSON in the backend.
- **Do** use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp)
if your backend is HTTP based - this adds features like `--dump bodies`,
`--tpslimit`, `--user-agent` without you having to code anything!
- **Do** follow your example backend exactly - use the same code order, function
names, layout, structure. **Don't** move stuff around and **Don't** delete the
comments.
- **Do not** split your backend up into `fs.go` and `object.go` (there are a few
backends like that - don't follow them!)
- **Do** put your API type definitions in a separate file - by preference `api/types.go`
- **Remember** we have >50 backends to maintain so keeping them as similar as
possible to each other is a high priority!
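For example, a minimal sketch of wiring a REST client through rclone's HTTP stack (the API endpoint is a placeholder):

```go
package remote

import (
	"context"

	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/lib/rest"
)

// newSrv builds a rest.Client on top of rclone's HTTP client, so features
// like --dump bodies, --tpslimit and --user-agent work without extra code.
func newSrv(ctx context.Context) *rest.Client {
	client := fshttp.NewClient(ctx) // *http.Client honouring rclone's HTTP flags
	return rest.NewClient(client).SetRoot("https://api.example.com") // placeholder endpoint
}
```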
### Unit tests
- Create a config entry called `TestRemote` for the unit tests to use
- Create a `backend/remote/remote_test.go` - copy and adjust your example remote
- Make sure all tests pass with `go test -v`
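The test file follows the same pattern in every backend; a sketch (assuming your backend defines an `Object` type):

```go
// backend/remote/remote_test.go - runs the standard test suite against the
// remote defined in the TestRemote: config entry.
package remote_test

import (
	"testing"

	"github.com/rclone/rclone/backend/remote"
	"github.com/rclone/rclone/fstest/fstests"
)

func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestRemote:",
		NilObject:  (*remote.Object)(nil),
	})
}
```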
### Integration tests
- Add your backend to `fstest/test_all/config.yaml`
- Once you've done that then you can use the integration test framework from
the project root:
- `go run ./fstest/test_all -backends remote`
Or if you want to run the integration tests manually:
- Make sure integration tests pass with
- `cd fs/operations`
- `go test -v -remote TestRemote:`
- `cd fs/sync`
- `go test -v -remote TestRemote:`
- If your remote defines `ListR` check with this also
- `go test -v -remote TestRemote: -fast-list`
See the [testing](#testing) section for more information on integration tests.
### Backend documentation
Add your backend to the docs - you'll need to pick an icon for it from
[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
alphabetical order of full name of remote (e.g. `drive` is ordered as
`Google Drive`) but with the local file system last.
- `README.md` - main GitHub page
- `docs/content/remote.md` - main docs page (note the backend options are
automatically added to this file with `make backenddocs`)
- make sure this has the `autogenerated options` comments in (see your
reference backend docs)
- update them in your backend with `bin/make_backend_docs.py remote`
- `docs/content/overview.md` - overview docs - add an entry into the Features
table and the Optional Features table.
- `docs/content/docs.md` - list of remotes in config section
- `docs/content/_index.md` - front page of rclone.org
- `docs/layouts/chrome/navbar.html` - add it to the website navigation
- `bin/make_manual.py` - add the page to the `docs` constant
Once you've written the docs, run `make serve` and check they look OK
in the web browser and the links (internal and external) all work.
## Adding a new s3 provider
[Please see the guide in the S3 backend directory](backend/s3/README.md).
## Writing a plugin
New features (backends, commands) can also be added "out-of-tree", through Go
plugins. Changes will be kept in a dynamically loaded file instead of being
compiled into the main binary. This is useful if you can't merge your changes
upstream or don't want to maintain a fork of rclone.
### Usage
- Naming
- Plugin names must have the pattern `librcloneplugin_KIND_NAME.so`.
- `KIND` should be one of `backend`, `command` or `bundle`.
- Example: A plugin with backend support for PiFS would be called
`librcloneplugin_backend_pifs.so`.
- Loading
- Supported on macOS & Linux as of now. ([Go issue for Windows support](https://github.com/golang/go/issues/19282))
- Supported on rclone v1.50 or greater.
- All plugins in the folder specified by variable `$RCLONE_PLUGIN_PATH` are loaded.
- If this variable doesn't exist, plugin support is disabled.
- Plugins must be compiled against the exact version of rclone to work.
(The rclone used during building the plugin must be the same as the source
of rclone)
### Building
To turn your existing additions into a Go plugin, move them to an external repository
and change the top-level package name to `main`.
Check `rclone --version` and make sure that the plugin's rclone dependency and
host Go version match.
Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.
[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)
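Putting that together, a sketch of building and loading a backend plugin (the paths and plugin name are illustrative):

```console
cd ~/src/rclone-plugin-pifs          # your plugin repository, package main
go build -buildmode=plugin -o librcloneplugin_backend_pifs.so .
export RCLONE_PLUGIN_PATH=$PWD       # rclone loads all plugins found here
rclone help backends                 # the new backend should now be listed
```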
## Keeping a backend or command out of tree
Rclone was designed to be modular so it is very easy to keep a backend
or a command out of the main rclone source tree.
So, for example, if you had a backend which accessed your proprietary
systems, or a command which was specialised for your needs, you could
add them out of tree.
This may be easier than using a plugin and is supported on all platforms,
not just macOS and Linux.
This is explained further in <https://github.com/rclone/rclone_out_of_tree_example>
which has an example of an out of tree backend `ram` (which is a
renamed version of the `memory` backend).
[Minimal example](https://gist.github.com/terorie/21b517ee347828e899e1913efc1d684f)
Dockerfile
@@ -1,47 +1,18 @@
FROM golang:alpine AS builder
ARG CGO_ENABLED=0
FROM golang AS builder
COPY . /go/src/github.com/rclone/rclone/
WORKDIR /go/src/github.com/rclone/rclone/
RUN echo "**** Set Go Environment Variables ****" && \
go env -w GOCACHE=/root/.cache/go-build
RUN echo "**** Install Dependencies ****" && \
apk add --no-cache \
make \
bash \
gawk \
git
COPY go.mod .
COPY go.sum .
RUN echo "**** Download Go Dependencies ****" && \
go mod download -x
RUN echo "**** Verify Go Dependencies ****" && \
go mod verify
COPY . .
RUN --mount=type=cache,target=/root/.cache/go-build,sharing=locked \
echo "**** Build Binary ****" && \
make
RUN echo "**** Print Version Binary ****" && \
./rclone version
RUN \
CGO_ENABLED=0 \
make
RUN ./rclone version
# Begin final image
FROM alpine:latest
RUN echo "**** Install Dependencies ****" && \
apk add --no-cache \
ca-certificates \
fuse3 \
tzdata && \
echo "Enable user_allow_other in fuse" && \
echo "user_allow_other" >> /etc/fuse.conf
RUN apk --no-cache add ca-certificates fuse tzdata && \
echo "user_allow_other" >> /etc/fuse.conf
COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
MAINTAINERS.md
@@ -1,4 +1,4 @@
# Maintainers guide for rclone
Current active maintainers of rclone are:
@@ -15,117 +15,82 @@ Current active maintainers of rclone are:
| Ivan Andreev | @ivandeex | chunker & mailru backends |
| Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend |
| Caleb Case | @calebcase | storj backend |
| wiserain | @wiserain | pikpak backend |
| albertony | @albertony | |
| Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
| Hideo Aoyama | @boukendesho | snap packaging |
| nielash | @nielash | bisync |
| Dan McArdle | @dmcardle | gitannex |
| Sam Harrison | @childish-sambino | filescom |
## This is a work in progress draft
This is a guide for how to be an rclone maintainer. This is mostly a write-up
of what I (@ncw) attempt to do.
## Triaging Tickets
When a ticket comes in it should be triaged. This means it should be classified
by adding labels and placed into a milestone. Quite a lot of tickets need a bit
of back and forth to determine whether it is a valid ticket, so tickets may
remain without labels or milestone for a while.
Rclone uses the labels like this:
- `bug` - a definitely verified bug
- `can't reproduce` - a problem which we can't reproduce
- `doc fix` - a bug in the documentation - if users need help understanding the
docs add this label
- `duplicate` - normally close these and ask the user to subscribe to the original
- `enhancement: new remote` - a new rclone backend
- `enhancement` - a new feature
- `FUSE` - to do with `rclone mount` command
- `good first issue` - mark these if you find a small self-contained issue -
these get shown to new visitors to the project
- `help` wanted - mark these if you find a self-contained issue - these get
shown to new visitors to the project
- `IMPORTANT` - note to maintainers not to forget to fix this for the release
- `maintenance` - internal enhancement, code re-organisation, etc.
- `Needs Go 1.XX` - waiting for that version of Go to be released
- `question` - not a `bug` or `enhancement` - direct to the forum for next time
- `Remote: XXX` - which rclone backend this affects
- `thinking` - not decided on the course of action yet
If it turns out to be a bug or an enhancement it should be tagged as such, with
the appropriate other tags. Don't forget the "good first issue" tag to give new
contributors something easy to do to get going.
When a ticket is tagged it should be added to a milestone, either the next
release, the one after, Soon or Help Wanted. Bugs can be added to the
"Known Bugs" milestone if they aren't planned to be fixed or need to wait for
something (e.g. the next go release).
The milestones have these meanings:
- v1.XX - stuff we would like to fit into this release
- v1.XX+1 - stuff we are leaving until the next release
- Soon - stuff we think is a good idea - waiting to be scheduled for a release
- Help wanted - blue sky stuff that might get moved up, or someone could help with
- Known bugs - bugs waiting on external factors or we aren't going to fix for
the moment
Tickets [with no milestone](https://github.com/rclone/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile)
are good candidates for ones that have slipped between the gaps and need
following up.
## Closing Tickets
Close tickets as soon as you can - make sure they are tagged with a release.
Post a link to a beta in the ticket with the fix in, asking for feedback.
## Pull requests
Try to process pull requests promptly!
Merging pull requests on GitHub itself works quite well nowadays, so you can
squash and rebase or rebase pull requests. rclone doesn't use merge commits.
Use the squash and rebase option if you need to edit the commit message.
After merging the commit, in your local master branch, do `git pull` then run
`bin/update-authors.py` to update the authors file, then `git push`.
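A sketch of those steps:

```console
git checkout master
git pull
./bin/update-authors.py   # then commit the authors file if it changed
git push
```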
Sometimes pull requests need to be left open for a while - this is especially
true of contributions of new backends, which take a long time to get right.
## Merges
If you are merging a branch locally then do `git merge --ff-only branch-name` to
avoid a merge commit. You'll need to rebase the branch if it doesn't merge cleanly.
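A sketch of that workflow (the branch name is illustrative):

```console
git checkout contributor-branch
git rebase master                      # only needed if it doesn't merge cleanly
git checkout master
git merge --ff-only contributor-branch
git push
```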
## Release cycle
Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer
if there is something big to merge that didn't stabilize properly or for personal
reasons.
High impact regressions should be fixed before the next release.
Near the start of the release cycle, the dependencies should be updated with
`make update` to give time for bugs to surface.
Towards the end of the release cycle try not to merge anything too big, so that
things can settle down.
Follow the instructions in RELEASE.md for making the release. Note that the
testing part is the most time-consuming, often needing several rounds of test
and fix depending on exactly how many new features rclone has gained.
## Mailing list
There is now an invite-only mailing list for rclone developers, `rclone-dev`, on
Google Groups.
## TODO
I should probably make a <dev@rclone.org> to register with cloud providers.
MANUAL.html (generated) - diff suppressed because it is too large
MANUAL.md (generated) - diff suppressed because it is too large
MANUAL.txt (generated) - diff suppressed because it is too large
Makefile
@@ -30,37 +30,29 @@ ifdef RELEASE_TAG
TAG := $(RELEASE_TAG)
endif
GO_VERSION := $(shell go version)
GO_OS := $(shell go env GOOS)
ifdef BETA_SUBDIR
BETA_SUBDIR := /$(BETA_SUBDIR)
endif
BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR)
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
BETA_UPLOAD_ROOT := beta.rclone.org:
BETA_UPLOAD_ROOT := memstore:beta-rclone-org
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
# Pass in GOTAGS=xyz on the make command line to set build tags
ifdef GOTAGS
BUILDTAGS=-tags "$(GOTAGS)"
LINTTAGS=--build-tags "$(GOTAGS)"
endif
LDFLAGS=--ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)"
.PHONY: rclone test_all vars version
rclone:
ifeq ($(GO_OS),windows)
go run bin/resource_windows.go -version $(TAG) -syso resource_windows_`go env GOARCH`.syso
endif
go build -v $(LDFLAGS) $(BUILDTAGS) $(BUILD_ARGS)
ifeq ($(GO_OS),windows)
rm resource_windows_`go env GOARCH`.syso
endif
go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS)
mkdir -p `go env GOPATH`/bin/
cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
test_all:
go install $(LDFLAGS) $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all
go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all
vars:
@echo SHELL="'$(SHELL)'"
@@ -74,10 +66,6 @@ btest:
@echo "[$(TAG)]($(BETA_URL)) on branch [$(BRANCH)](https://github.com/rclone/rclone/tree/$(BRANCH)) (uploaded in 15-30 mins)" | xclip -r -sel clip
@echo "Copied markdown of beta release to clip board"
btesth:
@echo "<a href="$(BETA_URL)">$(TAG)</a> on branch <a href="https://github.com/rclone/rclone/tree/$(BRANCH)">$(BRANCH)</a> (uploaded in 15-30 mins)" | xclip -r -sel clip -t text/html
@echo "Copied beta release in HTML to clip board"
version:
@echo '$(TAG)'
@@ -88,47 +76,43 @@ test: rclone test_all
# Quick test
quicktest:
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) ./...
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) ./...
racequicktest:
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -cpu=2 -race ./...
compiletest:
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -run XXX ./...
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...
# Do source code quality checks
check: rclone
@echo "-- START CODE QUALITY REPORT -------------------------------"
@golangci-lint run $(LINTTAGS) ./...
@bin/markdown-lint
@echo "-- END CODE QUALITY REPORT ---------------------------------"
# Get the build dependencies
build_dep:
go run bin/get-github-release.go -use-api -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
# Get the release dependencies we only install on linux
release_dep_linux:
go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64\.tar\.gz'
# Get the release dependencies we only install on Windows
release_dep_windows:
GO111MODULE=off GOOS="" GOARCH="" go get github.com/josephspurrier/goversioninfo/cmd/goversioninfo
# Update dependencies
showupdates:
@echo "*** Direct dependencies that could be updated ***"
@go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null
# Update direct dependencies only
updatedirect:
go get $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
go mod tidy
@GO111MODULE=on go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null
# Update direct and indirect dependencies and test dependencies
update:
go get -u -t ./...
go mod tidy
GO111MODULE=on go get -u -t ./...
-#GO111MODULE=on go get -d $(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
GO111MODULE=on go mod tidy
# Tidy the module dependencies
tidy:
go mod tidy
GO111MODULE=on go mod tidy
doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
@@ -145,23 +129,17 @@ MANUAL.txt: MANUAL.md
pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt
commanddocs: rclone
go generate ./lib/transform
-@rmdir -p '$$HOME/.config/rclone'
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
go run bin/make_bisync_docs.go ./docs/content/
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/
backenddocs: rclone bin/make_backend_docs.py
-@rmdir -p '$$HOME/.config/rclone'
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
rcdocs: rclone
bin/make_rc_docs.sh
install: rclone
install -d ${DESTDIR}/usr/bin
install ${GOPATH}/bin/rclone ${DESTDIR}/usr/bin
install -t ${DESTDIR}/usr/bin ${GOPATH}/bin/rclone
clean:
go clean ./...
@@ -175,7 +153,7 @@ website:
@if grep -R "raw HTML omitted" docs/public ; then echo "ERROR: found unescaped HTML - fix the markdown source" ; fi
upload_website: website
rclone -v sync docs/public www.rclone.org:
rclone -v sync docs/public memstore:www-rclone-org
upload_test_website: website
rclone -P sync docs/public test-rclone-org:
@@ -202,8 +180,8 @@ check_sign:
cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c
upload:
rclone -P copy build/ downloads.rclone.org:/$(TAG)
rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "downloads.rclone.org:/$(TAG)/$$i" "downloads.rclone.org:/$$j"'
rclone -P copy build/ memstore:downloads-rclone-org/$(TAG)
rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "memstore:downloads-rclone-org/$(TAG)/$$i" "memstore:downloads-rclone-org/$$j"'
upload_github:
./bin/upload-github $(TAG)
@@ -213,7 +191,7 @@ cross: doc
beta:
go run bin/cross-compile.go $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
rclone -v copy build/ pub.rclone.org:/$(TAG)
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
@echo Beta release ready at https://pub.rclone.org/$(TAG)/
log_since_last_release:
@@ -226,18 +204,18 @@ ci_upload:
sudo chown -R $$USER build
find build -type l -delete
gzip -r9v build
./rclone --no-check-dest --config bin/ci.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
./rclone --no-check-dest --config bin/ci.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
endif
@echo Beta release ready at $(BETA_URL)/testbuilds
ci_beta:
git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
rclone --no-check-dest --config bin/ci.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
rclone --no-check-dest --config bin/ci.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
endif
@echo Beta release ready at $(BETA_URL)
@@ -246,7 +224,7 @@ fetch_binaries:
rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/
serve: website
cd docs && hugo server --logLevel info -w --disableFastRender --ignoreCache
cd docs && hugo server -v -w --disableFastRender
tag: retag doc
bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new
@@ -263,48 +241,18 @@ retag:
startdev:
@echo "Version is $(VERSION)"
@echo "Next version is $(NEXT_VERSION)"
echo -e "package fs\n\n// VersionTag of rclone\nvar VersionTag = \"$(NEXT_VERSION)\"\n" | gofmt > fs/versiontag.go
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)-DEV\"\n" | gofmt > fs/version.go
echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_VERSION)" > VERSION
git commit -m "Start $(NEXT_VERSION)-DEV development" fs/versiontag.go VERSION docs/layouts/partials/version.html
git commit -m "Start $(NEXT_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
startstable:
@echo "Version is $(VERSION)"
@echo "Next stable version is $(NEXT_PATCH_VERSION)"
echo -e "package fs\n\n// VersionTag of rclone\nvar VersionTag = \"$(NEXT_PATCH_VERSION)\"\n" | gofmt > fs/versiontag.go
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_PATCH_VERSION)-DEV\"\n" | gofmt > fs/version.go
echo -n "$(NEXT_PATCH_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_PATCH_VERSION)" > VERSION
git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/versiontag.go VERSION docs/layouts/partials/version.html
git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
winzip:
zip -9 rclone-$(TAG).zip rclone.exe
# docker volume plugin
PLUGIN_USER ?= rclone
PLUGIN_TAG ?= latest
PLUGIN_BASE_TAG ?= latest
PLUGIN_ARCH ?= amd64
PLUGIN_IMAGE := $(PLUGIN_USER)/docker-volume-rclone:$(PLUGIN_TAG)
PLUGIN_BASE := $(PLUGIN_USER)/rclone:$(PLUGIN_BASE_TAG)
PLUGIN_BUILD_DIR := ./build/docker-plugin
PLUGIN_CONTRIB_DIR := ./contrib/docker-plugin/managed
docker-plugin-create:
docker buildx inspect |grep -q /${PLUGIN_ARCH} || \
docker run --rm --privileged tonistiigi/binfmt --install all
rm -rf ${PLUGIN_BUILD_DIR}
docker buildx build \
--no-cache --pull \
--build-arg BASE_IMAGE=${PLUGIN_BASE} \
--platform linux/${PLUGIN_ARCH} \
--output ${PLUGIN_BUILD_DIR}/rootfs \
${PLUGIN_CONTRIB_DIR}
cp ${PLUGIN_CONTRIB_DIR}/config.json ${PLUGIN_BUILD_DIR}
docker plugin rm --force ${PLUGIN_IMAGE} 2>/dev/null || true
docker plugin create ${PLUGIN_IMAGE} ${PLUGIN_BUILD_DIR}
docker-plugin-push:
docker plugin push ${PLUGIN_IMAGE}
docker plugin rm ${PLUGIN_IMAGE}
docker-plugin: docker-plugin-create docker-plugin-push
README.md
@@ -1,11 +1,8 @@
<!-- markdownlint-disable-next-line first-line-heading no-inline-html -->
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
<!-- markdownlint-disable-next-line no-inline-html -->
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/)
[Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) |
[Download](https://rclone.org/downloads/) |
[Download](https://rclone.org/downloads/) |
[Contributing](CONTRIBUTING.md) |
[Changelog](https://rclone.org/changelog/) |
[Installation](https://rclone.org/install/) |
@@ -13,172 +10,106 @@
[![Build Status](https://github.com/rclone/rclone/workflows/build/badge.svg)](https://github.com/rclone/rclone/actions?query=workflow%3Abuild)
[![Go Report Card](https://goreportcard.com/badge/github.com/rclone/rclone)](https://goreportcard.com/report/github.com/rclone/rclone)
[![GoDoc](https://godoc.org/github.com/rclone/rclone?status.svg)](https://godoc.org/github.com/rclone/rclone)
[![GoDoc](https://godoc.org/github.com/rclone/rclone?status.svg)](https://godoc.org/github.com/rclone/rclone)
[![Docker Pulls](https://img.shields.io/docker/pulls/rclone/rclone)](https://hub.docker.com/r/rclone/rclone)
# Rclone
Rclone *("rsync for cloud storage")* is a command-line program to sync files and
directories to and from different cloud storage providers.
Rclone *("rsync for cloud storage")* is a command line program to sync files and directories to and from different cloud storage providers.
## Storage providers
- 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
- Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
- Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
- Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
- ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
- Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
- Box [:page_facing_up:](https://rclone.org/box/)
- Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
- China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
- Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
- Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
- Cubbit DS3 [:page_facing_up:](https://rclone.org/s3/#Cubbit)
- DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
- Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
- Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
- Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
- Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
- Exaba [:page_facing_up:](https://rclone.org/s3/#exaba)
- Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
- FileLu [:page_facing_up:](https://rclone.org/filelu/)
- Files.com [:page_facing_up:](https://rclone.org/filescom/)
- FlashBlade [:page_facing_up:](https://rclone.org/s3/#pure-storage-flashblade)
- FTP [:page_facing_up:](https://rclone.org/ftp/)
- GoFile [:page_facing_up:](https://rclone.org/gofile/)
- Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
- Google Drive [:page_facing_up:](https://rclone.org/drive/)
- Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
- HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
- Hetzner Object Storage [:page_facing_up:](https://rclone.org/s3/#hetzner)
- Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
- HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
- HTTP [:page_facing_up:](https://rclone.org/http/)
- Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
- iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/)
- ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
- Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
- Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
- IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
- Intercolo Object Storage [:page_facing_up:](https://rclone.org/s3/#intercolo)
- IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
- Koofr [:page_facing_up:](https://rclone.org/koofr/)
- Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
- Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
- Linkbox [:page_facing_up:](https://rclone.org/linkbox)
- Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode)
- Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu)
- Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
- Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
- MEGA [:page_facing_up:](https://rclone.org/mega/)
- MEGA S4 Object Storage [:page_facing_up:](https://rclone.org/s3/#mega)
- Memory [:page_facing_up:](https://rclone.org/memory/)
- Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
- Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/)
- Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
- Minio [:page_facing_up:](https://rclone.org/s3/#minio)
- Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
- Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
- OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
- OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
- Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
- Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
- Outscale [:page_facing_up:](https://rclone.org/s3/#outscale)
- OVHcloud Object Storage (Swift) [:page_facing_up:](https://rclone.org/swift/)
- OVHcloud Object Storage (S3-compatible) [:page_facing_up:](https://rclone.org/s3/#ovhcloud)
- ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
- pCloud [:page_facing_up:](https://rclone.org/pcloud/)
- Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
- PikPak [:page_facing_up:](https://rclone.org/pikpak/)
- Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/)
- premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
- put.io [:page_facing_up:](https://rclone.org/putio/)
- Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
- QingStor [:page_facing_up:](https://rclone.org/qingstor/)
- Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
- Rabata Cloud Storage [:page_facing_up:](https://rclone.org/s3/#Rabata)
- Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
- Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
- RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
- rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
- Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
- Seafile [:page_facing_up:](https://rclone.org/seafile/)
- Seagate Lyve Cloud [:page_facing_up:](https://rclone.org/s3/#lyve)
- SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
- Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
- Servercore Object Storage [:page_facing_up:](https://rclone.org/s3/#servercore)
- SFTP [:page_facing_up:](https://rclone.org/sftp/)
- SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
- Spectra Logic [:page_facing_up:](https://rclone.org/s3/#spectralogic)
- StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
- Storj [:page_facing_up:](https://rclone.org/storj/)
- SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
- Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
- Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
- Uloz.to [:page_facing_up:](https://rclone.org/ulozto/)
- Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
- WebDAV [:page_facing_up:](https://rclone.org/webdav/)
- Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
- Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
- Zata.ai [:page_facing_up:](https://rclone.org/s3/#Zata)
- The local filesystem [:page_facing_up:](https://rclone.org/local/)
* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
* Box [:page_facing_up:](https://rclone.org/box/)
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* GetSky [:page_facing_up:](https://rclone.org/jottacloud/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
* HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
* Memory [:page_facing_up:](https://rclone.org/memory/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
* OVH [:page_facing_up:](https://rclone.org/swift/)
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
* Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
* The local filesystem [:page_facing_up:](https://rclone.org/local/)
Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
### Virtual storage providers
These backends adapt or modify other storage providers
- Alias: rename existing remotes [:page_facing_up:](https://rclone.org/alias/)
- Archive: read archive files [:page_facing_up:](https://rclone.org/archive/)
- Cache: cache remotes (DEPRECATED) [:page_facing_up:](https://rclone.org/cache/)
- Chunker: split large files [:page_facing_up:](https://rclone.org/chunker/)
- Combine: combine multiple remotes into a directory tree [:page_facing_up:](https://rclone.org/combine/)
- Compress: compress files [:page_facing_up:](https://rclone.org/compress/)
- Crypt: encrypt files [:page_facing_up:](https://rclone.org/crypt/)
- Hasher: hash files [:page_facing_up:](https://rclone.org/hasher/)
- Union: join multiple remotes to work together [:page_facing_up:](https://rclone.org/union/)
## Features
- MD5/SHA-1 hashes checked at all times for file integrity
- Timestamps preserved on files
- Partial syncs supported on a whole file basis
- [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed
files
- [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory
identical
- [Bisync](https://rclone.org/bisync/) (two way) to keep two directories in sync
bidirectionally
- [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash
equality
- Can sync to and from network, e.g. two different cloud accounts
- Optional large file chunking ([Chunker](https://rclone.org/chunker/))
- Optional transparent compression ([Compress](https://rclone.org/compress/))
- Optional encryption ([Crypt](https://rclone.org/crypt/))
- Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
- Multi-threaded downloads to local disk
- Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files
over HTTP/WebDAV/FTP/SFTP/DLNA
* MD5/SHA-1 hashes checked at all times for file integrity
* Timestamps preserved on files
* Partial syncs supported on a whole file basis
* [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
* Can sync to and from network, e.g. two different cloud accounts
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
* Optional transparent compression ([Compress](https://rclone.org/compress/))
* Optional encryption ([Crypt](https://rclone.org/crypt/))
* Optional cache ([Cache](https://rclone.org/cache/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
* Multi-threaded downloads to local disk
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna
## Installation & documentation
Please see the [rclone website](https://rclone.org/) for:
- [Installation](https://rclone.org/install/)
- [Documentation & configuration](https://rclone.org/docs/)
- [Changelog](https://rclone.org/changelog/)
- [FAQ](https://rclone.org/faq/)
- [Storage providers](https://rclone.org/overview/)
- [Forum](https://forum.rclone.org/)
- ...and more
* [Installation](https://rclone.org/install/)
* [Documentation & configuration](https://rclone.org/docs/)
* [Changelog](https://rclone.org/changelog/)
* [FAQ](https://rclone.org/faq/)
* [Storage providers](https://rclone.org/overview/)
* [Forum](https://forum.rclone.org/)
* ...and more
## Downloads
- <https://rclone.org/downloads/>
* https://rclone.org/downloads/
## License
License
-------
This is free software under the terms of the MIT license (check the
This is free software under the terms of MIT the license (check the
[COPYING file](/COPYING) included in this package).
RELEASE.md
@@ -4,119 +4,43 @@ This file describes how to make the various kinds of releases
## Extra required software for making a release
- [gh the github cli](https://github.com/cli/cli) for uploading packages
- pandoc for making the html and man pages
## Making a release
- git checkout master # see below for stable branch
- git pull # IMPORTANT
- git status - make sure everything is checked in
- Check GitHub actions build for master is Green
- make test # see integration test server or run locally
- make tag
- edit docs/content/changelog.md # make sure to remove duplicate logs from point
releases
- make tidy
- make doc
- git status - to check for new man pages - git add them
- git commit -a -v -m "Version v1.XX.0"
- make retag
- git push origin # without --follow-tags so it doesn't push the tag if it fails
- git push --follow-tags origin
- \# Wait for the GitHub builds to complete then...
- make fetch_binaries
- make tarball
- make vendorball
- make sign_upload
- make check_sign
- make upload
- make upload_website
- make upload_github
- make startdev # make startstable for stable branch
- \# announce with forum post, twitter post, patreon post
* git checkout master # see below for stable branch
* git pull
* git status - make sure everything is checked in
* Check GitHub actions build for master is Green
* make test # see integration test server or run locally
* make tag
* edit docs/content/changelog.md # make sure to remove duplicate logs from point releases
* make tidy
* make doc
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX.0"
* make retag
* git push --follow-tags origin
* # Wait for the GitHub builds to complete then...
* make fetch_binaries
* make tarball
* make vendorball
* make sign_upload
* make check_sign
* make upload
* make upload_website
* make upload_github
* make startdev # make startstable for stable branch
* # announce with forum post, twitter post, patreon post
## Update dependencies
Early in the next release cycle update the dependencies
Early in the next release cycle update the dependencies.
- Review any pinned packages in go.mod and remove if possible
- `make updatedirect`
- `make GOTAGS=cmount`
- `make compiletest`
- Fix anything which doesn't compile at this point and commit changes here
- `git commit -a -v -m "build: update all dependencies"`
If the `make updatedirect` upgrades the version of go in the `go.mod`
```text
go 1.22.0
```
then go to manual mode. `go1.22` here is the lowest supported version
in the `go.mod`.
If `make updatedirect` added a `toolchain` directive then remove it.
We don't want to force a toolchain on our users. Linux packagers are
often using a version of Go that is a few versions out of date.
```console
go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
go get -d $(cat /tmp/potential-upgrades)
go mod tidy -go=1.22 -compat=1.22
```
If the `go mod tidy` fails, use the output from it to remove the
package which can't be upgraded from `/tmp/potential-upgrades`. When
done, run
```console
git co go.mod go.sum
```
And try again.
Optionally upgrade the direct and indirect dependencies. This is very
likely to fail if the manual method was used above - in that case
ignore it as it is too time consuming to fix.
- `make update`
- `make GOTAGS=cmount`
- `make compiletest`
- roll back any updates which didn't compile
- `git commit -a -v --amend`
- **NB** watch out for this changing the default go version in `go.mod`
Note that `make update` updates all direct and indirect dependencies
and there can occasionally be forwards compatibility problems with
doing that so it may be necessary to roll back dependencies to the
version specified by `make updatedirect` in order to get rclone to
build.
Once it compiles locally, push it on a test branch and commit fixes
until the tests pass.
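As a sketch of that last step (the branch name here is hypothetical - use whatever suits):

```console
git checkout -b update-deps          # hypothetical branch name
git push -u origin update-deps
# watch the GitHub actions run, then commit or cherry-pick fixes onto
# this branch and push again until the tests pass
```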
### Major versions
The above procedure will not upgrade major versions, e.g. from v2 to v3.
However, this tool can show which major versions might need to be
upgraded:
```console
go run github.com/icholy/gomajor@latest list -major
```
Expect API breakage when updating major versions.
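As a rough sketch of what such an upgrade involves (the module path below is hypothetical - substitute whatever `gomajor` reports):

```console
# fetch the new major version of a hypothetical dependency
go get github.com/example/somelib/v3@latest
# update the import paths in the source from .../v2 to .../v3, then
go mod tidy
make GOTAGS=cmount
make compiletest
```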
## Tidy beta
At some point after the release run
```console
bin/tidy-beta v1.55
```
where the version number is that of a couple of releases ago, to remove old beta binaries.
* Review any pinned packages in go.mod and remove if possible
* make update
* git status
* git add new files
* git commit -a -v
## Making a point release
@@ -124,86 +48,57 @@ If rclone needs a point release due to some horrendous bug:
Set vars
- BASE_TAG=v1.XX # e.g. v1.52
- NEW_TAG=${BASE_TAG}.Y # e.g. v1.52.1
- echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1
* BASE_TAG=v1.XX # e.g. v1.52
* NEW_TAG=${BASE_TAG}.Y # e.g. v1.52.1
* echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1
First make the release branch. If this is a second point release then
this will be done already.
- git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
- make startstable
* git branch ${BASE_TAG} ${BASE_TAG}-stable
* git co ${BASE_TAG}-stable
* make startstable
Now
- git co ${BASE_TAG}-stable
- git cherry-pick any fixes
- make startstable
- Do the steps as above
- git co master
- `#` cherry pick the changes to the changelog - check the diff to make sure it
is correct
- git checkout ${BASE_TAG}-stable docs/content/changelog.md
- git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
- git push
## Sponsor logos
If updating the website, note that the sponsor logos have been moved out of the
main repository.
You will need to check out `/docs/static/img/logos` from <https://github.com/rclone/third-party-logos>,
which is a private repo containing artwork from sponsors.
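A minimal sketch of one way to do that, assuming you have access to the private repo and that the logos sit at its top level (the exact layout is an assumption):

```console
# clone the private artwork repo somewhere temporary
git clone https://github.com/rclone/third-party-logos.git /tmp/third-party-logos
# copy the logos into the website tree before building
cp -a /tmp/third-party-logos/* docs/static/img/logos/
```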
## Update the website between releases
Create an update website branch based off the last release
```console
git co -b update-website
```
If the branch already exists, double check there are no commits that need saving.
Now reset the branch to the last release
```console
git reset --hard v1.64.0
```
Create the changes, check them in, test with `make serve` then
```console
make upload_test_website
```
Check out <https://test.rclone.org> and when happy
```console
make upload_website
```
Cherry pick any changes back to master and the stable branch if it is active.
* git co ${BASE_TAG}-stable
* git cherry-pick any fixes
* Do the steps as above
* make startstable
* git co master
* `#` cherry pick the changes to the changelog - check the diff to make sure it is correct
* git checkout ${BASE_TAG}-stable docs/content/changelog.md
* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
* git push
## Making a manual build of docker
To do a basic build of rclone's docker image to debug builds locally:
The rclone docker image should autobuild via GitHub actions. If it doesn't
or needs to be updated then rebuild like this.
```console
docker buildx build --load -t rclone/rclone:testing --progress=plain .
docker run --rm rclone/rclone:testing version
See: https://github.com/ilteoood/docker_buildx/issues/19
See: https://github.com/ilteoood/docker_buildx/blob/master/scripts/install_buildx.sh
```
git co v1.54.1
docker pull golang
export DOCKER_CLI_EXPERIMENTAL=enabled
docker buildx create --name actions_builder --use
docker run --rm --privileged docker/binfmt:820fdd95a9972a5308930a2bdfb8573dd4447ad3
docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
SUPPORTED_PLATFORMS=$(docker buildx inspect --bootstrap | grep 'Platforms:*.*' | cut -d : -f2,3)
echo "Supported platforms: $SUPPORTED_PLATFORMS"
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
docker buildx stop actions_builder
```
To test the multiplatform build
### Old build for linux/amd64 only
```console
docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 .
```
To make a full build, set the tags correctly and add `--push`.
Note that you can't only build one architecture - you need to build them all.
```console
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
docker pull golang
docker build --rm --ulimit memlock=67108864 -t rclone/rclone:1.52.0 -t rclone/rclone:1.52 -t rclone/rclone:1 -t rclone/rclone:latest .
docker push rclone/rclone:1.52.0
docker push rclone/rclone:1.52
docker push rclone/rclone:1
docker push rclone/rclone:latest
```


@@ -1 +1 @@
v1.72.0
v1.56.0


@@ -1,4 +1,3 @@
// Package alias implements a virtual provider to rename existing remotes.
package alias
import (
@@ -21,7 +20,7 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "remote",
Help: "Remote or path to alias.\n\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
Help: "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
Required: true,
}},
}


@@ -20,11 +20,11 @@ var (
)
func prepare(t *testing.T, root string) {
configfile.Install()
configfile.LoadConfig(context.Background())
// Configure the remote
config.FileSetValue(remoteName, "type", "alias")
config.FileSetValue(remoteName, "remote", root)
config.FileSet(remoteName, "type", "alias")
config.FileSet(remoteName, "remote", root)
}
func TestNewFS(t *testing.T) {
@@ -81,12 +81,10 @@ func TestNewFS(t *testing.T) {
for i, gotEntry := range gotEntries {
what := fmt.Sprintf("%s, entry=%d", what, i)
wantEntry := test.entries[i]
_, isDir := gotEntry.(fs.Directory)
require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
if !isDir {
require.Equal(t, wantEntry.size, gotEntry.Size(), what)
}
require.Equal(t, wantEntry.size, gotEntry.Size(), what)
_, isDir := gotEntry.(fs.Directory)
require.Equal(t, wantEntry.isDir, isDir, what)
}
}


@@ -1,69 +1,46 @@
// Package all imports all the backends
package all
import (
// Active file systems
_ "github.com/rclone/rclone/backend/alias"
_ "github.com/rclone/rclone/backend/archive"
_ "github.com/rclone/rclone/backend/amazonclouddrive"
_ "github.com/rclone/rclone/backend/azureblob"
_ "github.com/rclone/rclone/backend/azurefiles"
_ "github.com/rclone/rclone/backend/b2"
_ "github.com/rclone/rclone/backend/box"
_ "github.com/rclone/rclone/backend/cache"
_ "github.com/rclone/rclone/backend/chunker"
_ "github.com/rclone/rclone/backend/cloudinary"
_ "github.com/rclone/rclone/backend/combine"
_ "github.com/rclone/rclone/backend/compress"
_ "github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/doi"
_ "github.com/rclone/rclone/backend/drive"
_ "github.com/rclone/rclone/backend/dropbox"
_ "github.com/rclone/rclone/backend/fichier"
_ "github.com/rclone/rclone/backend/filefabric"
_ "github.com/rclone/rclone/backend/filelu"
_ "github.com/rclone/rclone/backend/filescom"
_ "github.com/rclone/rclone/backend/ftp"
_ "github.com/rclone/rclone/backend/gofile"
_ "github.com/rclone/rclone/backend/googlecloudstorage"
_ "github.com/rclone/rclone/backend/googlephotos"
_ "github.com/rclone/rclone/backend/hasher"
_ "github.com/rclone/rclone/backend/hdfs"
_ "github.com/rclone/rclone/backend/hidrive"
_ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/iclouddrive"
_ "github.com/rclone/rclone/backend/imagekit"
_ "github.com/rclone/rclone/backend/internetarchive"
_ "github.com/rclone/rclone/backend/hubic"
_ "github.com/rclone/rclone/backend/jottacloud"
_ "github.com/rclone/rclone/backend/koofr"
_ "github.com/rclone/rclone/backend/linkbox"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/mailru"
_ "github.com/rclone/rclone/backend/mega"
_ "github.com/rclone/rclone/backend/memory"
_ "github.com/rclone/rclone/backend/netstorage"
_ "github.com/rclone/rclone/backend/onedrive"
_ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/oracleobjectstorage"
_ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/pikpak"
_ "github.com/rclone/rclone/backend/pixeldrain"
_ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/protondrive"
_ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor"
_ "github.com/rclone/rclone/backend/quatrix"
_ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/seafile"
_ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/sia"
_ "github.com/rclone/rclone/backend/smb"
_ "github.com/rclone/rclone/backend/storj"
_ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/ulozto"
_ "github.com/rclone/rclone/backend/tardigrade"
_ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/uptobox"
_ "github.com/rclone/rclone/backend/webdav"
_ "github.com/rclone/rclone/backend/yandex"
_ "github.com/rclone/rclone/backend/zoho"

File diff suppressed because it is too large.


@@ -0,0 +1,20 @@
// Test AmazonCloudDrive filesystem interface
// +build acd
package amazonclouddrive_test
import (
"testing"
"github.com/rclone/rclone/backend/amazonclouddrive"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
fstests.RemoteName = "TestAmazonCloudDrive:"
fstests.Run(t)
}


@@ -1,679 +0,0 @@
//go:build !plan9
// Package archive implements a backend to access archive files in a remote
package archive
// FIXME factor common code between backends out - eg VFS initialization
// FIXME can we generalize the VFS handle caching and use it in zip backend
// Factor more stuff out if possible
// Odd stats which are probably coming from the VFS
// * tensorflow.sqfs: 0% /3.074Gi, 204.426Ki/s, 4h22m46s
// FIXME this will perform poorly for unpacking as the VFS Reader is bad
// at multiple streams - need cache mode setting?
import (
"context"
"errors"
"fmt"
"io"
"path"
"strings"
"sync"
"time"
// Import all the required archivers here
_ "github.com/rclone/rclone/backend/archive/squashfs"
_ "github.com/rclone/rclone/backend/archive/zip"
"github.com/rclone/rclone/backend/archive/archiver"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
)
// Register with Fs
func init() {
fsi := &fs.RegInfo{
Name: "archive",
Description: "Read archives",
NewFs: NewFs,
MetadataInfo: &fs.MetadataInfo{
Help: `Any metadata supported by the underlying remote is read and written.`,
},
Options: []fs.Option{{
Name: "remote",
Help: `Remote to wrap to read archives from.
Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
"myremote:bucket" or "myremote:".
If this is left empty, then the archive backend will use the root as
the remote.
This means that you can use :archive:remote:path and it will be
equivalent to setting remote="remote:path".
`,
Required: false,
}},
}
fs.Register(fsi)
}
// Options defines the configuration for this backend
type Options struct {
Remote string `config:"remote"`
}
// Fs represents an archive of upstreams
type Fs struct {
name string // name of this remote
features *fs.Features // optional features
opt Options // options for this Fs
root string // the path we are working on
f fs.Fs // remote we are wrapping
wrapper fs.Fs // fs that wraps us
mu sync.Mutex // protects the below
archives map[string]*archive // the archives we have, by path
}
// A single open archive
type archive struct {
archiver archiver.Archiver // archiver responsible
remote string // path to the archive
prefix string // prefix to add on to listings
root string // root of the archive to remove from listings
mu sync.Mutex // protects the following variables
f fs.Fs // the archive Fs, may be nil
}
// If remote is an archive then return it otherwise return nil
func findArchive(remote string) *archive {
// FIXME use something faster than linear search?
for _, archiver := range archiver.Archivers {
if strings.HasSuffix(remote, archiver.Extension) {
return &archive{
archiver: archiver,
remote: remote,
prefix: remote,
root: "",
}
}
}
return nil
}
// Find an archive buried in remote
func subArchive(remote string) *archive {
archive := findArchive(remote)
if archive != nil {
return archive
}
parent := path.Dir(remote)
if parent == "/" || parent == "." {
return nil
}
return subArchive(parent)
}
// If remote is an archive then return it otherwise return nil
func (f *Fs) findArchive(remote string) (archive *archive) {
archive = findArchive(remote)
if archive != nil {
f.mu.Lock()
f.archives[remote] = archive
f.mu.Unlock()
}
return archive
}
// Instantiate archive if it hasn't been instantiated yet
//
// This is done lazily so that we can list a directory full of
// archives without opening them all.
func (a *archive) init(ctx context.Context, f fs.Fs) (fs.Fs, error) {
a.mu.Lock()
defer a.mu.Unlock()
if a.f != nil {
return a.f, nil
}
newFs, err := a.archiver.New(ctx, f, a.remote, a.prefix, a.root)
if err != nil && err != fs.ErrorIsFile {
return nil, fmt.Errorf("failed to create archive %q: %w", a.remote, err)
}
a.f = newFs
return a.f, nil
}
// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs.Fs, err error) {
// defer log.Trace(nil, "name=%q, root=%q, m=%v", name, root, m)("f=%+v, err=%v", &outFs, &err)
// Parse config into Options struct
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
remote := opt.Remote
origRoot := root
// If remote is empty, use the root instead
if remote == "" {
remote = root
root = ""
}
isDirectory := strings.HasSuffix(remote, "/")
remote = strings.TrimRight(remote, "/")
if remote == "" {
remote = "/"
}
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point archive remote at itself - check the value of the remote setting")
}
_ = isDirectory
foundArchive := subArchive(remote)
if foundArchive != nil {
fs.Debugf(nil, "Found archiver for %q remote %q", foundArchive.archiver.Extension, foundArchive.remote)
// Archive path
foundArchive.root = strings.Trim(remote[len(foundArchive.remote):], "/")
// Path to the archive
archiveRemote := remote[:len(foundArchive.remote)]
// Remote is archive leaf name
foundArchive.remote = path.Base(archiveRemote)
foundArchive.prefix = ""
// Point remote to archive file
remote = archiveRemote
}
// Make sure to remove trailing . referring to the current dir
if path.Base(root) == "." {
root = strings.TrimSuffix(root, ".")
}
remotePath := fspath.JoinRootPath(remote, root)
wrappedFs, err := cache.Get(ctx, remotePath)
if err != fs.ErrorIsFile && err != nil {
return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remote, err)
}
f := &Fs{
name: name,
//root: path.Join(remotePath, root),
root: origRoot,
opt: *opt,
f: wrappedFs,
archives: make(map[string]*archive),
}
cache.PinUntilFinalized(f.f, f)
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
f.features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: false,
ReadMimeType: true,
WriteMimeType: true,
CanHaveEmptyDirectories: true,
BucketBased: true,
SetTier: true,
GetTier: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
if foundArchive != nil {
fs.Debugf(f, "Root is an archive")
if err != fs.ErrorIsFile {
return nil, fmt.Errorf("expecting to find a file at %q", remote)
}
return foundArchive.init(ctx, f.f)
}
// Correct root if definitely pointing to a file
if err == fs.ErrorIsFile {
f.root = path.Dir(f.root)
if f.root == "." || f.root == "/" {
f.root = ""
}
}
return f, err
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("archive root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.f.Rmdir(ctx, dir)
}
// Hashes returns the hash types supported by the wrapped remote
func (f *Fs) Hashes() hash.Set {
return f.f.Hashes()
}
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return f.f.Mkdir(ctx, dir)
}
// Purge all files in the directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
do := f.f.Features().Purge
if do == nil {
return fs.ErrorCantPurge
}
return do(ctx, dir)
}
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
do := f.f.Features().Copy
if do == nil {
return nil, fs.ErrorCantCopy
}
// FIXME
// o, ok := src.(*Object)
// if !ok {
// return nil, fs.ErrorCantCopy
// }
return do(ctx, src, remote)
}
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
do := f.f.Features().Move
if do == nil {
return nil, fs.ErrorCantMove
}
// FIXME
// o, ok := src.(*Object)
// if !ok {
// return nil, fs.ErrorCantMove
// }
return do(ctx, src, remote)
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
do := f.f.Features().DirMove
if do == nil {
return fs.ErrorCantDirMove
}
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
return do(ctx, srcFs.f, srcRemote, dstRemote)
}
// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
// At least one value will be written to the channel,
// specifying the initial value and updated values might
// follow. A 0 Duration should pause the polling.
// The ChangeNotify implementation must empty the channel
// regularly. When the channel gets closed, the implementation
// should stop polling and release resources.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), ch <-chan time.Duration) {
do := f.f.Features().ChangeNotify
if do == nil {
return
}
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
// fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
notifyFunc(path, entryType)
}
do(ctx, wrappedNotifyFunc, ch)
}
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
do := f.f.Features().DirCacheFlush
if do != nil {
do()
}
}
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) {
var o fs.Object
var err error
if stream {
o, err = f.f.Features().PutStream(ctx, in, src, options...)
} else {
o, err = f.f.Put(ctx, in, src, options...)
}
if err != nil {
return nil, err
}
return o, nil
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return o, o.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
return f.put(ctx, in, src, false, options...)
default:
return nil, err
}
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return o, o.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
return f.put(ctx, in, src, true, options...)
default:
return nil, err
}
}
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
do := f.f.Features().About
if do == nil {
return nil, errors.New("not supported by underlying remote")
}
return do(ctx)
}
// Find the Fs for the directory
func (f *Fs) findFs(ctx context.Context, dir string) (subFs fs.Fs, err error) {
f.mu.Lock()
defer f.mu.Unlock()
subFs = f.f
// FIXME should do this with a better datastructure like a prefix tree
// FIXME want to find the longest first otherwise nesting won't work
dirSlash := dir + "/"
for archiverRemote, archive := range f.archives {
subRemote := archiverRemote + "/"
if strings.HasPrefix(dirSlash, subRemote) {
subFs, err = archive.init(ctx, f.f)
if err != nil {
return nil, err
}
break
}
}
return subFs, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
subFs, err := f.findFs(ctx, dir)
if err != nil {
return nil, err
}
entries, err = subFs.List(ctx, dir)
if err != nil {
return nil, err
}
for i, entry := range entries {
// Can only unarchive files
if o, ok := entry.(fs.Object); ok {
remote := o.Remote()
archive := f.findArchive(remote)
if archive != nil {
// Overwrite entry with directory
entries[i] = fs.NewDir(remote, o.ModTime(ctx))
}
}
}
return entries, nil
}
// NewObject creates a new remote archive file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
dir := path.Dir(remote)
if dir == "/" || dir == "." {
dir = ""
}
subFs, err := f.findFs(ctx, dir)
if err != nil {
return nil, err
}
o, err := subFs.NewObject(ctx, remote)
if err != nil {
return nil, err
}
return o, nil
}
// Precision is the greatest precision of all the archivers
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
if do := f.f.Features().Shutdown; do != nil {
return do(ctx)
}
return nil
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
do := f.f.Features().PublicLink
if do == nil {
return "", errors.New("PublicLink not supported")
}
return do(ctx, remote, expire, unlink)
}
// PutUnchecked in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
//
// May create duplicates or return errors if src already
// exists.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
do := f.f.Features().PutUnchecked
if do == nil {
return nil, errors.New("can't PutUnchecked")
}
o, err := do(ctx, in, src, options...)
if err != nil {
return nil, err
}
return o, nil
}
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
if len(dirs) == 0 {
return nil
}
do := f.f.Features().MergeDirs
if do == nil {
return errors.New("MergeDirs not supported")
}
return do(ctx, dirs)
}
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
func (f *Fs) CleanUp(ctx context.Context) error {
do := f.f.Features().CleanUp
if do == nil {
return errors.New("not supported by underlying remote")
}
return do(ctx)
}
// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
do := f.f.Features().OpenWriterAt
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx, remote, size)
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.f
}
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
return f.wrapper
}
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
f.wrapper = wrapper
}
// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
do := f.f.Features().OpenChunkWriter
if do == nil {
return info, nil, fs.ErrorNotImplemented
}
return do(ctx, remote, src, options...)
}
// UserInfo returns info about the connected user
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
do := f.f.Features().UserInfo
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx)
}
// Disconnect the current user
func (f *Fs) Disconnect(ctx context.Context) error {
do := f.f.Features().Disconnect
if do == nil {
return fs.ErrorNotImplemented
}
return do(ctx)
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.OpenWriterAter = (*Fs)(nil)
_ fs.OpenChunkWriter = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
// FIXME _ fs.FullObject = (*Object)(nil)
)


@@ -1,221 +0,0 @@
//go:build !plan9
package archive
import (
"bytes"
"context"
"fmt"
"os"
"os/exec"
"path"
"path/filepath"
"strconv"
"strings"
"testing"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// FIXME need to test Open with seek
// run - run a shell command
func run(t *testing.T, args ...string) {
cmd := exec.Command(args[0], args[1:]...)
fs.Debugf(nil, "run args = %v", args)
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf(`
----------------------------
Failed to run %v: %v
Command output was:
%s
----------------------------
`, args, err, out)
}
}
// check the dst and src are identical
func checkTree(ctx context.Context, name string, t *testing.T, dstArchive, src string, expectedCount int) {
t.Run(name, func(t *testing.T) {
fs.Debugf(nil, "check %q vs %q", dstArchive, src)
Farchive, err := cache.Get(ctx, dstArchive)
if err != fs.ErrorIsFile {
require.NoError(t, err)
}
Fsrc, err := cache.Get(ctx, src)
if err != fs.ErrorIsFile {
require.NoError(t, err)
}
var matches bytes.Buffer
opt := operations.CheckOpt{
Fdst: Farchive,
Fsrc: Fsrc,
Match: &matches,
}
for _, action := range []string{"Check", "Download"} {
t.Run(action, func(t *testing.T) {
matches.Reset()
if action == "Download" {
assert.NoError(t, operations.CheckDownload(ctx, &opt))
} else {
assert.NoError(t, operations.Check(ctx, &opt))
}
if expectedCount > 0 {
assert.Equal(t, expectedCount, strings.Count(matches.String(), "\n"))
}
})
}
t.Run("NewObject", func(t *testing.T) {
// Check we can run NewObject on all files and read them
assert.NoError(t, operations.ListFn(ctx, Fsrc, func(srcObj fs.Object) {
if t.Failed() {
return
}
remote := srcObj.Remote()
archiveObj, err := Farchive.NewObject(ctx, remote)
require.NoError(t, err, remote)
assert.Equal(t, remote, archiveObj.Remote(), remote)
// Test that the contents are the same
archiveBuf := fstests.ReadObject(ctx, t, archiveObj, -1)
srcBuf := fstests.ReadObject(ctx, t, srcObj, -1)
assert.Equal(t, srcBuf, archiveBuf)
if len(srcBuf) < 81 {
return
}
// Tests that Open works with SeekOption
assert.Equal(t, srcBuf[50:], fstests.ReadObject(ctx, t, archiveObj, -1, &fs.SeekOption{Offset: 50}), "contents differ after seek")
// Tests that Open works with RangeOption
for _, test := range []struct {
ro fs.RangeOption
wantStart, wantEnd int
}{
{fs.RangeOption{Start: 5, End: 15}, 5, 16},
{fs.RangeOption{Start: 80, End: -1}, 80, len(srcBuf)},
{fs.RangeOption{Start: 81, End: 100000}, 81, len(srcBuf)},
{fs.RangeOption{Start: -1, End: 20}, len(srcBuf) - 20, len(srcBuf)}, // if start is omitted this means get the final bytes
// {fs.RangeOption{Start: -1, End: -1}, 0, len(srcBuf)}, - this seems to work but the RFC doesn't define it
} {
got := fstests.ReadObject(ctx, t, archiveObj, -1, &test.ro)
foundAt := strings.Index(srcBuf, got)
help := fmt.Sprintf("%#v failed want [%d:%d] got [%d:%d]", test.ro, test.wantStart, test.wantEnd, foundAt, foundAt+len(got))
assert.Equal(t, srcBuf[test.wantStart:test.wantEnd], got, help)
}
// Test that the modtimes are correct
fstest.AssertTimeEqualWithPrecision(t, remote, srcObj.ModTime(ctx), archiveObj.ModTime(ctx), Farchive.Precision())
// Test that the sizes are correct
assert.Equal(t, srcObj.Size(), archiveObj.Size())
// Test that Strings are OK
assert.Equal(t, srcObj.String(), archiveObj.String())
}))
})
// t.Logf("Fdst ------------- %v", Fdst)
// operations.List(ctx, Fdst, os.Stdout)
// t.Logf("Fsrc ------------- %v", Fsrc)
// operations.List(ctx, Fsrc, os.Stdout)
})
}
// test creating and reading back some archives
//
// Note that this uses rclone and zip as external binaries.
func testArchive(t *testing.T, archiveName string, archiveFn func(t *testing.T, output, input string)) {
ctx := context.Background()
checkFiles := 1000
// create random test input files
inputRoot := t.TempDir()
input := filepath.Join(inputRoot, archiveName)
require.NoError(t, os.Mkdir(input, 0777))
run(t, "rclone", "test", "makefiles", "--files", strconv.Itoa(checkFiles), "--ascii", input)
// Create the archive
output := t.TempDir()
zipFile := path.Join(output, archiveName)
archiveFn(t, zipFile, input)
// Check the archive itself
checkTree(ctx, "Archive", t, ":archive:"+zipFile, input, checkFiles)
// Now check a subdirectory
fis, err := os.ReadDir(input)
require.NoError(t, err)
subDir := "NOT FOUND"
aFile := "NOT FOUND"
for _, fi := range fis {
if fi.IsDir() {
subDir = fi.Name()
} else {
aFile = fi.Name()
}
}
checkTree(ctx, "SubDir", t, ":archive:"+zipFile+"/"+subDir, filepath.Join(input, subDir), 0)
// Now check a single file
fiCtx, fi := filter.AddConfig(ctx)
require.NoError(t, fi.AddRule("+ "+aFile))
require.NoError(t, fi.AddRule("- *"))
checkTree(fiCtx, "SingleFile", t, ":archive:"+zipFile+"/"+aFile, filepath.Join(input, aFile), 0)
// Now check the level above
checkTree(ctx, "Root", t, ":archive:"+output, inputRoot, checkFiles)
// run(t, "cp", "-a", inputRoot, output, "/tmp/test-"+archiveName)
}
// Make sure we have the executable named
func skipIfNoExe(t *testing.T, exeName string) {
_, err := exec.LookPath(exeName)
if err != nil {
t.Skipf("%s executable not installed", exeName)
}
}
// Test creating and reading back some archives
//
// Note that this uses rclone and zip as external binaries.
func TestArchiveZip(t *testing.T) {
fstest.Initialise()
skipIfNoExe(t, "zip")
skipIfNoExe(t, "rclone")
testArchive(t, "test.zip", func(t *testing.T, output, input string) {
oldcwd, err := os.Getwd()
require.NoError(t, err)
require.NoError(t, os.Chdir(input))
defer func() {
require.NoError(t, os.Chdir(oldcwd))
}()
run(t, "zip", "-9r", output, ".")
})
}
// Test creating and reading back some archives
//
// Note that this uses rclone and squashfs as external binaries.
func TestArchiveSquashfs(t *testing.T) {
fstest.Initialise()
skipIfNoExe(t, "mksquashfs")
skipIfNoExe(t, "rclone")
testArchive(t, "test.sqfs", func(t *testing.T, output, input string) {
run(t, "mksquashfs", input, output)
})
}


@@ -1,67 +0,0 @@
//go:build !plan9
// Test Archive filesystem interface
package archive_test
import (
"testing"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/memory"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
var (
unimplementableFsMethods = []string{"ListR", "ListP", "MkdirMetadata", "DirSetModTime"}
// In these tests we receive objects from the underlying remote which don't implement these methods
unimplementableObjectMethods = []string{"GetTier", "ID", "Metadata", "MimeType", "SetTier", "UnWrap", "SetMetadata"}
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
t.Skip("Skipping as -remote not set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}
func TestLocal(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
remote := t.TempDir()
name := "TestArchiveLocal"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "archive"},
{Name: name, Key: "remote", Value: remote},
},
QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}
func TestMemory(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
remote := ":memory:"
name := "TestArchiveMemory"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "archive"},
{Name: name, Key: "remote", Value: remote},
},
QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}


@@ -1,7 +0,0 @@
// Build for archive for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9
// Package archive implements a backend to access archive files in a remote
package archive


@@ -1,24 +0,0 @@
// Package archiver registers all the archivers
package archiver
import (
"context"
"github.com/rclone/rclone/fs"
)
// Archiver describes an archive package
type Archiver struct {
// New constructs an Fs from the (wrappedFs, remote) with the objects
// prefix with prefix and rooted at root
New func(ctx context.Context, f fs.Fs, remote, prefix, root string) (fs.Fs, error)
Extension string
}
// Archivers is a slice of all registered archivers
var Archivers []Archiver
// Register adds the archivers provided to the list of known archivers
func Register(as ...Archiver) {
Archivers = append(Archivers, as...)
}


@@ -1,233 +0,0 @@
// Package base is a base archive Fs
package base
import (
"context"
"errors"
"fmt"
"io"
"path"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/vfs"
)
// Fs represents a wrapped fs.Fs
type Fs struct {
f fs.Fs
wrapper fs.Fs
name string
features *fs.Features // optional features
vfs *vfs.VFS
node vfs.Node // archive object
remote string // remote of the archive object
prefix string // position for objects
prefixSlash string // position for objects with a slash on
root string // position to read from within the archive
}
var errNotImplemented = errors.New("internal error: method not implemented in archiver")
// New constructs an Fs from the (wrappedFs, remote) with the objects
// prefix with prefix and rooted at root
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (*Fs, error) {
// FIXME vfs cache?
// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
fs.Debugf(nil, "New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
VFS := vfs.New(wrappedFs, nil)
node, err := VFS.Stat(remote)
if err != nil {
return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
}
f := &Fs{
f: wrappedFs,
name: path.Join(fs.ConfigString(wrappedFs), remote),
vfs: VFS,
node: node,
remote: remote,
root: root,
prefix: prefix,
prefixSlash: prefix + "/",
}
// FIXME
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
//
// FIXME some of these need to be forced on - CanHaveEmptyDirectories
f.features = (&fs.Features{
CaseInsensitive: false,
DuplicateFiles: false,
ReadMimeType: false, // MimeTypes not supported with gzip
WriteMimeType: false,
BucketBased: false,
CanHaveEmptyDirectories: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
return f, nil
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// String returns a description of the FS
func (f *Fs) String() string {
return f.name
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return nil, errNotImplemented
}
// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
return nil, errNotImplemented
}
// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
return nil, vfs.EROFS
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None)
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.f
}
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
return f.wrapper
}
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
f.wrapper = wrapper
}
// Object describes an object to be read from the raw zip file
type Object struct {
f *Fs
remote string
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.f
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Remote()
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Size returns the size of the file
func (o *Object) Size() int64 {
return -1
}
// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
return time.Now()
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return vfs.EROFS
}
// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
return true
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
return nil, errNotImplemented
}
// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return vfs.EROFS
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return vfs.EROFS
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
)


@@ -1,165 +0,0 @@
package squashfs
// Could just be using bare object Open with RangeRequest which
// would transfer the minimum amount of data but may be slower.
import (
"errors"
"fmt"
"io/fs"
"os"
"sync"
"github.com/diskfs/go-diskfs/backend"
"github.com/rclone/rclone/vfs"
)
// Cache file handles for accessing the file
type cache struct {
node vfs.Node
fhsMu sync.Mutex
fhs []cacheHandle
}
// A cached file handle
type cacheHandle struct {
offset int64
fh vfs.Handle
}
// Make a new cache
func newCache(node vfs.Node) *cache {
return &cache{
node: node,
}
}
// Get a vfs.Handle from the pool or open one
//
// This tries to find an open file handle which doesn't require seeking.
func (c *cache) open(off int64) (fh vfs.Handle, err error) {
c.fhsMu.Lock()
defer c.fhsMu.Unlock()
if len(c.fhs) > 0 {
// Look for exact match first
for i, cfh := range c.fhs {
if cfh.offset == off {
// fs.Debugf(nil, "CACHE MATCH")
c.fhs = append(c.fhs[:i], c.fhs[i+1:]...)
return cfh.fh, nil
}
}
// fs.Debugf(nil, "CACHE MISS")
// Just take the first one if not found
cfh := c.fhs[0]
c.fhs = c.fhs[1:]
return cfh.fh, nil
}
fh, err = c.node.Open(os.O_RDONLY)
if err != nil {
return nil, fmt.Errorf("failed to open squashfs archive: %w", err)
}
return fh, nil
}
// Close a vfs.Handle or return it to the pool
//
// off should be the offset the file handle would read from without seeking
func (c *cache) close(fh vfs.Handle, off int64) {
c.fhsMu.Lock()
defer c.fhsMu.Unlock()
c.fhs = append(c.fhs, cacheHandle{
offset: off,
fh: fh,
})
}
// ReadAt reads len(p) bytes into p starting at offset off in the underlying
// input source. It returns the number of bytes read (0 <= n <= len(p)) and any
// error encountered.
//
// When ReadAt returns n < len(p), it returns a non-nil error explaining why
// more bytes were not returned. In this respect, ReadAt is stricter than Read.
//
// Even if ReadAt returns n < len(p), it may use all of p as scratch
// space during the call. If some data is available but not len(p) bytes,
// ReadAt blocks until either all the data is available or an error occurs.
// In this respect ReadAt is different from Read.
//
// If the n = len(p) bytes returned by ReadAt are at the end of the input
// source, ReadAt may return either err == EOF or err == nil.
//
// If ReadAt is reading from an input source with a seek offset, ReadAt should
// not affect nor be affected by the underlying seek offset.
//
// Clients of ReadAt can execute parallel ReadAt calls on the same input
// source.
//
// Implementations must not retain p.
func (c *cache) ReadAt(p []byte, off int64) (n int, err error) {
fh, err := c.open(off)
if err != nil {
return n, err
}
defer func() {
c.close(fh, off+int64(len(p)))
}()
// fs.Debugf(nil, "ReadAt(p[%d], off=%d, fh=%p)", len(p), off, fh)
return fh.ReadAt(p, off)
}
var errCacheNotImplemented = errors.New("internal error: squashfs cache doesn't implement method")
// WriteAt method dummy stub to satisfy interface
func (c *cache) WriteAt(p []byte, off int64) (n int, err error) {
return 0, errCacheNotImplemented
}
// Seek method dummy stub to satisfy interface
func (c *cache) Seek(offset int64, whence int) (int64, error) {
return 0, errCacheNotImplemented
}
// Read method dummy stub to satisfy interface
func (c *cache) Read(p []byte) (n int, err error) {
return 0, errCacheNotImplemented
}
func (c *cache) Stat() (fs.FileInfo, error) {
return nil, errCacheNotImplemented
}
// Close the file
func (c *cache) Close() (err error) {
c.fhsMu.Lock()
defer c.fhsMu.Unlock()
// Close any open file handles
for i := range c.fhs {
fh := &c.fhs[i]
newErr := fh.fh.Close()
if err == nil {
err = newErr
}
}
c.fhs = nil
return err
}
// Sys returns OS-specific file for ioctl calls via fd
func (c *cache) Sys() (*os.File, error) {
return nil, errCacheNotImplemented
}
// Writable returns file for read-write operations
func (c *cache) Writable() (backend.WritableFile, error) {
return nil, errCacheNotImplemented
}
// check interfaces
var _ backend.Storage = (*cache)(nil)


@@ -1,446 +0,0 @@
// Package squashfs implements a squashfs archiver for the archive backend
package squashfs
import (
"context"
"fmt"
"io"
"path"
"strings"
"time"
"github.com/diskfs/go-diskfs/filesystem/squashfs"
"github.com/rclone/rclone/backend/archive/archiver"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
)
func init() {
archiver.Register(archiver.Archiver{
New: New,
Extension: ".sqfs",
})
}
// Fs represents a wrapped fs.Fs
type Fs struct {
f fs.Fs
wrapper fs.Fs
name string
features *fs.Features // optional features
vfs *vfs.VFS
sqfs *squashfs.FileSystem // interface to the squashfs
c *cache
node vfs.Node // squashfs file object - set if reading
remote string // remote of the squashfs file object
prefix string // position for objects
prefixSlash string // position for objects with a slash on
root string // position to read from within the archive
}
// New constructs an Fs from the (wrappedFs, remote) with the objects
// prefix with prefix and rooted at root
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) {
// FIXME vfs cache?
// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
fs.Debugf(nil, "Squashfs: New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
vfsOpt := vfscommon.Opt
vfsOpt.ReadWait = 0
VFS := vfs.New(wrappedFs, &vfsOpt)
node, err := VFS.Stat(remote)
if err != nil {
return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
}
c := newCache(node)
// FIXME blocksize
sqfs, err := squashfs.Read(c, node.Size(), 0, 1024*1024)
if err != nil {
return nil, fmt.Errorf("failed to read squashfs: %w", err)
}
f := &Fs{
f: wrappedFs,
name: path.Join(fs.ConfigString(wrappedFs), remote),
vfs: VFS,
node: node,
sqfs: sqfs,
c: c,
remote: remote,
root: strings.Trim(root, "/"),
prefix: prefix,
prefixSlash: prefix + "/",
}
if prefix == "" {
f.prefixSlash = ""
}
singleObject := false
// Find the directory the root points to
if f.root != "" && !strings.HasSuffix(root, "/") {
native, err := f.toNative("")
if err == nil {
native = strings.TrimRight(native, "/")
_, err := f.newObjectNative(native)
if err == nil {
// If it pointed to a file, find the directory above
f.root = path.Dir(f.root)
if f.root == "." || f.root == "/" {
f.root = ""
}
}
}
}
// FIXME
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
//
// FIXME some of these need to be forced on - CanHaveEmptyDirectories
f.features = (&fs.Features{
CaseInsensitive: false,
DuplicateFiles: false,
ReadMimeType: false, // MimeTypes not supported with squashfs
WriteMimeType: false,
BucketBased: false,
CanHaveEmptyDirectories: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
if singleObject {
return f, fs.ErrorIsFile
}
return f, nil
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("Squashfs %q", f.name)
}
// This turns a remote into a native path in the squashfs starting with a /
func (f *Fs) toNative(remote string) (string, error) {
native := strings.Trim(remote, "/")
if f.prefix == "" {
native = "/" + native
} else if native == f.prefix {
native = "/"
} else if !strings.HasPrefix(native, f.prefixSlash) {
return "", fmt.Errorf("internal error: %q doesn't start with prefix %q", native, f.prefixSlash)
} else {
native = native[len(f.prefix):]
}
if f.root != "" {
native = "/" + f.root + native
}
return native, nil
}
// Turn a (nativeDir, leaf) into a remote
func (f *Fs) fromNative(nativeDir string, leaf string) string {
// fs.Debugf(nil, "nativeDir = %q, leaf = %q, root=%q", nativeDir, leaf, f.root)
dir := nativeDir
if f.root != "" {
dir = strings.TrimPrefix(dir, "/"+f.root)
}
remote := f.prefixSlash + strings.Trim(path.Join(dir, leaf), "/")
// fs.Debugf(nil, "dir = %q, remote=%q", dir, remote)
return remote
}
// Convert a FileInfo into an Object from native dir
func (f *Fs) objectFromFileInfo(nativeDir string, item squashfs.FileStat) *Object {
return &Object{
fs: f,
remote: f.fromNative(nativeDir, item.Name()),
size: item.Size(),
modTime: item.ModTime(),
item: item,
}
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
defer log.Trace(f, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
nativeDir, err := f.toNative(dir)
if err != nil {
return nil, err
}
items, err := f.sqfs.ReadDir(nativeDir)
if err != nil {
return nil, fmt.Errorf("read squashfs: couldn't read directory: %w", err)
}
entries = make(fs.DirEntries, 0, len(items))
for _, fi := range items {
item, ok := fi.(squashfs.FileStat)
if !ok {
return nil, fmt.Errorf("internal error: unexpected type for %q: %T", fi.Name(), fi)
}
// fs.Debugf(item.Name(), "entry = %#v", item)
var entry fs.DirEntry
if err != nil {
return nil, fmt.Errorf("error reading item %q: %q", item.Name(), err)
}
if item.IsDir() {
var remote = f.fromNative(nativeDir, item.Name())
entry = fs.NewDir(remote, item.ModTime())
} else {
if item.Mode().IsRegular() {
entry = f.objectFromFileInfo(nativeDir, item)
} else {
fs.Debugf(item.Name(), "FIXME Not regular file - skipping")
continue
}
}
entries = append(entries, entry)
}
// fs.Debugf(f, "dir=%q, entries=%v", dir, entries)
return entries, nil
}
// newObjectNative finds the object at the native path passed in
func (f *Fs) newObjectNative(nativePath string) (o fs.Object, err error) {
// get the path and filename
dir, leaf := path.Split(nativePath)
dir = strings.TrimRight(dir, "/")
leaf = strings.Trim(leaf, "/")
// FIXME need to detect directory not found
fis, err := f.sqfs.ReadDir(dir)
if err != nil {
return nil, fs.ErrorObjectNotFound
}
for _, fi := range fis {
if fi.Name() == leaf {
if fi.IsDir() {
return nil, fs.ErrorNotAFile
}
item, ok := fi.(squashfs.FileStat)
if !ok {
return nil, fmt.Errorf("internal error: unexpected type for %q: %T", fi.Name(), fi)
}
o = f.objectFromFileInfo(dir, item)
break
}
}
if o == nil {
return nil, fs.ErrorObjectNotFound
}
return o, nil
}
// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
defer log.Trace(f, "remote=%q", remote)("obj=%v, err=%v", &o, &err)
nativePath, err := f.toNative(remote)
if err != nil {
return nil, err
}
return f.newObjectNative(nativePath)
}
// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
return nil, vfs.EROFS
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None)
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.f
}
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
return f.wrapper
}
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
f.wrapper = wrapper
}
// Object describes an object to be read from the raw squashfs file
type Object struct {
fs *Fs
remote string
size int64
modTime time.Time
item squashfs.FileStat
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Remote()
}
// Turn a squashfs path into a full path for the parent Fs
// func (o *Object) path(remote string) string {
// return path.Join(o.fs.prefix, remote)
// }
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Size returns the size of the file
func (o *Object) Size() int64 {
return o.size
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present it
// uses the LastModified returned in the HTTP headers
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return vfs.EROFS
}
// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
return true
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
remote, err := o.fs.toNative(o.remote)
if err != nil {
return nil, err
}
fs.Debugf(o, "Opening %q", remote)
//fh, err := o.fs.sqfs.OpenFile(remote, os.O_RDONLY)
fh, err := o.item.Open()
if err != nil {
return nil, err
}
// discard data from start as necessary
if offset > 0 {
_, err = fh.Seek(offset, io.SeekStart)
if err != nil {
return nil, err
}
}
// If limited then don't return everything
if limit >= 0 {
fs.Debugf(nil, "limit=%d, offset=%d, options=%v", limit, offset, options)
return readers.NewLimitedReadCloser(fh, limit), nil
}
return fh, nil
}
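// Illustrative usage sketch (assumption, not part of the original source):
// a caller wanting bytes 128..255 of this object could pass a range
// option, e.g.
//
//	rc, err := o.Open(ctx, &fs.RangeOption{Start: 128, End: 255})
//
// which Decode()s above into offset=128, limit=128, so the handle is
// seeked to 128 and wrapped in a LimitedReadCloser of 128 bytes, while
// &fs.SeekOption{Offset: 128} alone would read from 128 to the end.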
// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return vfs.EROFS
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return vfs.EROFS
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
)


@@ -1,385 +0,0 @@
// Package zip implements a zip archiver for the archive backend
package zip
import (
"archive/zip"
"context"
"errors"
"fmt"
"io"
"os"
"path"
"strings"
"time"
"github.com/rclone/rclone/backend/archive/archiver"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/dirtree"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
)
func init() {
archiver.Register(archiver.Archiver{
New: New,
Extension: ".zip",
})
}
// Fs represents a wrapped fs.Fs
type Fs struct {
f fs.Fs
wrapper fs.Fs
name string
features *fs.Features // optional features
vfs *vfs.VFS
node vfs.Node // zip file object - set if reading
remote string // remote of the zip file object
prefix string // position for objects
prefixSlash string // position for objects with a slash on
root string // position to read from within the archive
dt dirtree.DirTree // read from zipfile
}
// New constructs an Fs from the (wrappedFs, remote) with the objects
// prefix with prefix and rooted at root
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) {
// FIXME vfs cache?
// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
fs.Debugf(nil, "Zip: New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
vfsOpt := vfscommon.Opt
vfsOpt.ReadWait = 0
VFS := vfs.New(wrappedFs, &vfsOpt)
node, err := VFS.Stat(remote)
if err != nil {
return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
}
f := &Fs{
f: wrappedFs,
name: path.Join(fs.ConfigString(wrappedFs), remote),
vfs: VFS,
node: node,
remote: remote,
root: root,
prefix: prefix,
prefixSlash: prefix + "/",
}
// Read the contents of the zip file
singleObject, err := f.readZip()
if err != nil {
return nil, fmt.Errorf("failed to open zip file: %w", err)
}
// FIXME
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
//
// FIXME some of these need to be forced on - CanHaveEmptyDirectories
f.features = (&fs.Features{
CaseInsensitive: false,
DuplicateFiles: false,
ReadMimeType: false, // MimeTypes not supported with zip
WriteMimeType: false,
BucketBased: false,
CanHaveEmptyDirectories: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
if singleObject {
return f, fs.ErrorIsFile
}
return f, nil
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("Zip %q", f.name)
}
// readZip the zip file into f
//
// Returns singleObject=true if f.root points to a file
func (f *Fs) readZip() (singleObject bool, err error) {
if f.node == nil {
return singleObject, fs.ErrorDirNotFound
}
size := f.node.Size()
if size < 0 {
return singleObject, errors.New("can't read from zip file with unknown size")
}
r, err := f.node.Open(os.O_RDONLY)
if err != nil {
return singleObject, fmt.Errorf("failed to open zip file: %w", err)
}
zr, err := zip.NewReader(r, size)
if err != nil {
return singleObject, fmt.Errorf("failed to read zip file: %w", err)
}
dt := dirtree.New()
for _, file := range zr.File {
remote := strings.Trim(path.Clean(file.Name), "/")
if remote == "." {
remote = ""
}
remote = path.Join(f.prefix, remote)
if f.root != "" {
// Ignore all files outside the root
if !strings.HasPrefix(remote, f.root) {
continue
}
if remote == f.root {
remote = ""
} else {
remote = strings.TrimPrefix(remote, f.root+"/")
}
}
if strings.HasSuffix(file.Name, "/") {
dir := fs.NewDir(remote, file.Modified)
dt.AddDir(dir)
} else {
if remote == "" {
remote = path.Base(f.root)
singleObject = true
dt = dirtree.New()
}
o := &Object{
f: f,
remote: remote,
fh: &file.FileHeader,
file: file,
}
dt.Add(o)
if singleObject {
break
}
}
}
dt.CheckParents("")
dt.Sort()
f.dt = dt
//fs.Debugf(nil, "dt = %v", dt)
return singleObject, nil
}
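// Illustrative sketch (not part of the original source) of the mapping
// performed above, assuming a hypothetical prefix of "archive.zip":
//
//	zip entry "dir/file.txt", root ""                -> remote "archive.zip/dir/file.txt"
//	zip entry "dir/file.txt", root "archive.zip/dir" -> remote "file.txt"
//	zip entry "dir/file.txt", root "archive.zip/dir/file.txt"
//	    -> remote "file.txt" with singleObject=true, since the root
//	       names a file rather than a directory inside the archive.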
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
defer log.Trace(f, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
// _, err = f.strip(dir)
// if err != nil {
// return nil, err
// }
entries, ok := f.dt[dir]
if !ok {
return nil, fs.ErrorDirNotFound
}
fs.Debugf(f, "dir=%q, entries=%v", dir, entries)
return entries, nil
}
// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
defer log.Trace(f, "remote=%q", remote)("obj=%v, err=%v", &o, &err)
if f.dt == nil {
return nil, fs.ErrorObjectNotFound
}
_, entry := f.dt.Find(remote)
if entry == nil {
return nil, fs.ErrorObjectNotFound
}
o, ok := entry.(*Object)
if !ok {
return nil, fs.ErrorNotAFile
}
return o, nil
}
// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
return nil, vfs.EROFS
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.CRC32)
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.f
}
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
return f.wrapper
}
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
f.wrapper = wrapper
}
// Object describes an object to be read from the raw zip file
type Object struct {
f *Fs
remote string
fh *zip.FileHeader
file *zip.File
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.f
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Remote()
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Size returns the size of the file
func (o *Object) Size() int64 {
return int64(o.fh.UncompressedSize64)
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present it
// uses the LastModified returned in the HTTP headers
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.fh.Modified
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return vfs.EROFS
}
// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
return true
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
if ht == hash.CRC32 {
// FIXME return empty CRC if writing
if o.f.dt == nil {
return "", nil
}
return fmt.Sprintf("%08x", o.fh.CRC32), nil
}
return "", hash.ErrUnsupported
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
rc, err = o.file.Open()
if err != nil {
return nil, err
}
// discard data from start as necessary
if offset > 0 {
_, err = io.CopyN(io.Discard, rc, offset)
if err != nil {
return nil, err
}
}
// If limited then don't return everything
if limit >= 0 {
return readers.NewLimitedReadCloser(rc, limit), nil
}
return rc, nil
}
// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return vfs.EROFS
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return vfs.EROFS
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
)

File diff suppressed because it is too large


@@ -1,151 +1,35 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js,go1.14
package azureblob
import (
"context"
"encoding/base64"
"strings"
"testing"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestBlockIDCreator(t *testing.T) {
// Check creation and random number
bic, err := newBlockIDCreator()
require.NoError(t, err)
bic2, err := newBlockIDCreator()
require.NoError(t, err)
assert.NotEqual(t, bic.random, bic2.random)
assert.NotEqual(t, bic.random, [8]byte{})
// Set random to known value for tests
bic.random = [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
chunkNumber := uint64(0xFEDCBA9876543210)
// Check creation of ID
want := base64.StdEncoding.EncodeToString([]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10, 1, 2, 3, 4, 5, 6, 7, 8})
assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", want)
got := bic.newBlockID(chunkNumber)
assert.Equal(t, want, got)
assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", got)
// Test checkID is working
assert.NoError(t, bic.checkID(chunkNumber, got))
assert.ErrorContains(t, bic.checkID(chunkNumber, "$"+got), "illegal base64")
assert.ErrorContains(t, bic.checkID(chunkNumber, "AAAA"+got), "bad block ID length")
assert.ErrorContains(t, bic.checkID(chunkNumber+1, got), "expecting decoded")
assert.ErrorContains(t, bic2.checkID(chunkNumber, got), "random bytes")
}
func (f *Fs) testFeatures(t *testing.T) {
// Check first feature flags are set on this remote
func (f *Fs) InternalTest(t *testing.T) {
// Check first feature flags are set on this
// remote
enabled := f.Features().SetTier
assert.True(t, enabled)
enabled = f.Features().GetTier
assert.True(t, enabled)
}
type ReadSeekCloser struct {
*strings.Reader
}
func (r *ReadSeekCloser) Close() error {
return nil
}
// Stage a block at remote but don't commit it
func (f *Fs) stageBlockWithoutCommit(ctx context.Context, t *testing.T, remote string) {
var (
containerName, blobPath = f.split(remote)
containerClient = f.cntSVC(containerName)
blobClient = containerClient.NewBlockBlobClient(blobPath)
data = "uncommitted data"
blockID = "1"
blockIDBase64 = base64.StdEncoding.EncodeToString([]byte(blockID))
)
r := &ReadSeekCloser{strings.NewReader(data)}
_, err := blobClient.StageBlock(ctx, blockIDBase64, r, nil)
require.NoError(t, err)
// Verify the block is staged but not committed
blockList, err := blobClient.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
require.NoError(t, err)
found := false
for _, block := range blockList.UncommittedBlocks {
if *block.Name == blockIDBase64 {
found = true
break
}
func TestIncrement(t *testing.T) {
for _, test := range []struct {
in []byte
want []byte
}{
{[]byte{0, 0, 0, 0}, []byte{1, 0, 0, 0}},
{[]byte{0xFE, 0, 0, 0}, []byte{0xFF, 0, 0, 0}},
{[]byte{0xFF, 0, 0, 0}, []byte{0, 1, 0, 0}},
{[]byte{0, 1, 0, 0}, []byte{1, 1, 0, 0}},
{[]byte{0xFF, 0xFF, 0xFF, 0xFE}, []byte{0, 0, 0, 0xFF}},
{[]byte{0xFF, 0xFF, 0xFF, 0xFF}, []byte{0, 0, 0, 0}},
} {
increment(test.in)
assert.Equal(t, test.want, test.in)
}
require.True(t, found, "Block ID not found in uncommitted blocks")
}
// This tests uploading a blob where it has uncommitted blocks with a different ID size.
//
// https://gauravmantri.com/2013/05/18/windows-azure-blob-storage-dealing-with-the-specified-blob-or-block-content-is-invalid-error/
//
// TestIntegration/FsMkdir/FsPutFiles/Internal/WriteUncommittedBlocks
func (f *Fs) testWriteUncommittedBlocks(t *testing.T) {
var (
ctx = context.Background()
remote = "testBlob"
)
// Multipart copy the blob please
oldUseCopyBlob, oldCopyCutoff := f.opt.UseCopyBlob, f.opt.CopyCutoff
f.opt.UseCopyBlob = false
f.opt.CopyCutoff = f.opt.ChunkSize
defer func() {
f.opt.UseCopyBlob, f.opt.CopyCutoff = oldUseCopyBlob, oldCopyCutoff
}()
// Create a blob with uncommitted blocks
f.stageBlockWithoutCommit(ctx, t, remote)
// Now attempt to overwrite the block with a different sized block ID to provoke this error
// Check the object does not exist
_, err := f.NewObject(ctx, remote)
require.Equal(t, fs.ErrorObjectNotFound, err)
// Upload a multipart file over the block with uncommitted chunks of a different ID size
size := 4*int(f.opt.ChunkSize) - 1
contents := random.String(size)
item := fstest.NewItem(remote, contents, fstest.Time("2001-05-06T04:05:06.499Z"))
o := fstests.PutTestContents(ctx, t, f, &item, contents, true)
// Check size
assert.Equal(t, int64(size), o.Size())
// Create a new blob with uncommitted blocks
newRemote := "testBlob2"
f.stageBlockWithoutCommit(ctx, t, newRemote)
// Copy over that block
dst, err := f.Copy(ctx, o, newRemote)
require.NoError(t, err)
// Check basics
assert.Equal(t, int64(size), dst.Size())
assert.Equal(t, newRemote, dst.Remote())
// Check contents
gotContents := fstests.ReadObject(ctx, t, dst, -1)
assert.Equal(t, contents, gotContents)
// Remove the object
require.NoError(t, dst.Remove(ctx))
}
func (f *Fs) InternalTest(t *testing.T) {
t.Run("Features", f.testFeatures)
t.Run("WriteUncommittedBlocks", f.testWriteUncommittedBlocks)
}


@@ -1,50 +1,26 @@
// Test AzureBlob filesystem interface
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js,go1.14
package azureblob
import (
"context"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
name := "TestAzureBlob"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool", "Cold"},
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize,
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "use_copy_blob", Value: "false"},
},
})
}
// TestIntegration2 runs integration tests against the remote
func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
name := "TestAzureBlob"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool", "Cold"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize,
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "directory_markers", Value: "true"},
{Name: name, Key: "use_copy_blob", Value: "false"},
MaxChunkSize: maxChunkSize,
},
})
}
@@ -53,34 +29,36 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setCopyCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetCopyCutoffer = (*Fs)(nil)
)
func TestValidateAccessTier(t *testing.T) {
tests := map[string]struct {
accessTier string
want bool
}{
"hot": {"hot", true},
"HOT": {"HOT", true},
"Hot": {"Hot", true},
"cool": {"cool", true},
"cold": {"cold", true},
"archive": {"archive", true},
"empty": {"", false},
"unknown": {"unknown", false},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
got := validateAccessTier(test.accessTier)
assert.Equal(t, test.want, got)
})
// TestServicePrincipalFileSuccess checks that, given a proper JSON file, we can create a token.
func TestServicePrincipalFileSuccess(t *testing.T) {
ctx := context.TODO()
credentials := `
{
"appId": "my application (client) ID",
"password": "my secret",
"tenant": "my active directory tenant ID"
}
`
tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
if assert.NoError(t, err) {
assert.NotNil(t, tokenRefresher)
}
}
// TestServicePrincipalFileFailure checks that, given a JSON file with a missing secret, it returns an error.
func TestServicePrincipalFileFailure(t *testing.T) {
ctx := context.TODO()
credentials := `
{
"appId": "my application (client) ID",
"tenant": "my active directory tenant ID"
}
`
_, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
assert.Error(t, err)
assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
}


@@ -1,7 +1,6 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || solaris || js
// +build plan9 solaris js !go1.14
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
package azureblob

backend/azureblob/imds.go

@@ -0,0 +1,137 @@
// +build !plan9,!solaris,!js,go1.14
package azureblob
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fshttp"
)
const (
azureResource = "https://storage.azure.com"
imdsAPIVersion = "2018-02-01"
msiEndpointDefault = "http://169.254.169.254/metadata/identity/oauth2/token"
)
// This custom type is used to add the port the test server has bound to
// to the request context.
type testPortKey string
type msiIdentifierType int
const (
msiClientID msiIdentifierType = iota
msiObjectID
msiResourceID
)
type userMSI struct {
Type msiIdentifierType
Value string
}
type httpError struct {
Response *http.Response
}
func (e httpError) Error() string {
return fmt.Sprintf("HTTP error %v (%v)", e.Response.StatusCode, e.Response.Status)
}
// GetMSIToken attempts to obtain an MSI token from the Azure Instance
// Metadata Service.
func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
// Attempt to get an MSI token; silently continue if unsuccessful.
// This code has been lovingly stolen from azcopy's OAuthTokenManager.
result := adal.Token{}
req, err := http.NewRequestWithContext(ctx, "GET", msiEndpointDefault, nil)
if err != nil {
fs.Debugf(nil, "Failed to create request: %v", err)
return result, err
}
params := req.URL.Query()
params.Set("resource", azureResource)
params.Set("api-version", imdsAPIVersion)
// Specify user-assigned identity if requested.
if identity != nil {
switch identity.Type {
case msiClientID:
params.Set("client_id", identity.Value)
case msiObjectID:
params.Set("object_id", identity.Value)
case msiResourceID:
params.Set("mi_res_id", identity.Value)
default:
// If this happens, the calling function and this one don't agree on
// what valid ID types exist.
return result, fmt.Errorf("unknown MSI identity type specified")
}
}
req.URL.RawQuery = params.Encode()
// The Metadata header is required by all calls to IMDS.
req.Header.Set("Metadata", "true")
// If this function is run in a test, query the test server instead of IMDS.
testPort, isTest := ctx.Value(testPortKey("testPort")).(int)
if isTest {
req.URL.Host = fmt.Sprintf("localhost:%d", testPort)
req.Host = req.URL.Host
}
// Send request
httpClient := fshttp.NewClient(ctx)
resp, err := httpClient.Do(req)
if err != nil {
return result, errors.Wrap(err, "MSI is not enabled on this VM")
}
defer func() { // resp and Body should not be nil
_, err = io.Copy(ioutil.Discard, resp.Body)
if err != nil {
fs.Debugf(nil, "Unable to drain IMDS response: %v", err)
}
err = resp.Body.Close()
if err != nil {
fs.Debugf(nil, "Unable to close IMDS response: %v", err)
}
}()
// Check if the status code indicates success
// The request returns 200 currently, add 201 and 202 as well for possible extension.
switch resp.StatusCode {
case 200, 201, 202:
break
default:
body, _ := ioutil.ReadAll(resp.Body)
fs.Errorf(nil, "Couldn't obtain OAuth token from IMDS; server returned status code %d and body: %v", resp.StatusCode, string(body))
return result, httpError{Response: resp}
}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return result, errors.Wrap(err, "Couldn't read IMDS response")
}
// Remove BOM, if any. azcopy does this so I'm following along.
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
// This would be a good place to persist the token if a large number of rclone
// invocations are being made in a short amount of time. If the token is
// persisted, the azureblob code will need to check for expiry before every
// storage API call.
err = json.Unmarshal(b, &result)
if err != nil {
return result, errors.Wrap(err, "Couldn't unmarshal IMDS response")
}
return result, nil
}
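// Illustrative usage sketch (assumption, not part of the original source):
// fetching a token for a user-assigned identity selected by client ID and
// using it as a bearer credential might look like
//
//	token, err := GetMSIToken(ctx, &userMSI{Type: msiClientID, Value: clientID})
//	if err != nil {
//		return err
//	}
//	authHeader := "Bearer " + token.OAuthToken()
//
// while passing identity == nil requests the VM's system-assigned identity.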


@@ -0,0 +1,117 @@
// +build !plan9,!solaris,!js,go1.14
package azureblob
import (
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func handler(t *testing.T, actual *map[string]string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
require.NoError(t, err)
parameters := r.URL.Query()
(*actual)["path"] = r.URL.Path
(*actual)["Metadata"] = r.Header.Get("Metadata")
(*actual)["method"] = r.Method
for paramName := range parameters {
(*actual)[paramName] = parameters.Get(paramName)
}
// Make response.
response := adal.Token{}
responseBytes, err := json.Marshal(response)
require.NoError(t, err)
_, err = w.Write(responseBytes)
require.NoError(t, err)
}
}
func TestManagedIdentity(t *testing.T) {
// test user-assigned identity specifiers to use
testMSIClientID := "d859b29f-5c9c-42f8-a327-ec1bc6408d79"
testMSIObjectID := "9ffeb650-3ca0-4278-962b-5a38d520591a"
testMSIResourceID := "/subscriptions/fe714c49-b8a4-4d49-9388-96a20daa318f/resourceGroups/somerg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/someidentity"
tests := []struct {
identity *userMSI
identityParameterName string
expectedAbsent []string
}{
{&userMSI{msiClientID, testMSIClientID}, "client_id", []string{"object_id", "mi_res_id"}},
{&userMSI{msiObjectID, testMSIObjectID}, "object_id", []string{"client_id", "mi_res_id"}},
{&userMSI{msiResourceID, testMSIResourceID}, "mi_res_id", []string{"object_id", "client_id"}},
{nil, "(default)", []string{"object_id", "client_id", "mi_res_id"}},
}
alwaysExpected := map[string]string{
"path": "/metadata/identity/oauth2/token",
"resource": "https://storage.azure.com",
"Metadata": "true",
"api-version": "2018-02-01",
"method": "GET",
}
for _, test := range tests {
actual := make(map[string]string, 10)
testServer := httptest.NewServer(handler(t, &actual))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, test.identity)
require.NoError(t, err)
// Validate expected query parameters present
expected := make(map[string]string)
for k, v := range alwaysExpected {
expected[k] = v
}
if test.identity != nil {
expected[test.identityParameterName] = test.identity.Value
}
for key := range expected {
value, exists := actual[key]
if assert.Truef(t, exists, "test of %s: query parameter %s was not passed",
test.identityParameterName, key) {
assert.Equalf(t, expected[key], value,
"test of %s: parameter %s has incorrect value", test.identityParameterName, key)
}
}
// Validate unexpected query parameters absent
for _, key := range test.expectedAbsent {
_, exists := actual[key]
assert.Falsef(t, exists, "query parameter %s was unexpectedly passed", key)
}
}
}
func errorHandler(resultCode int) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Test error generated", resultCode)
}
}
func TestIMDSErrors(t *testing.T) {
errorCodes := []int{404, 429, 500}
for _, code := range errorCodes {
testServer := httptest.NewServer(errorHandler(code))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, nil)
require.Error(t, err)
httpErr, ok := err.(httpError)
require.Truef(t, ok, "HTTP error %d did not result in an httpError object", code)
assert.Equalf(t, httpErr.Response.StatusCode, code, "desired error %d but didn't get it", code)
}
}

File diff suppressed because it is too large


@@ -1,69 +0,0 @@
//go:build !plan9 && !js
package azurefiles
import (
"context"
"math/rand"
"strings"
"testing"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
)
func (f *Fs) InternalTest(t *testing.T) {
t.Run("Authentication", f.InternalTestAuth)
}
var _ fstests.InternalTester = (*Fs)(nil)
func (f *Fs) InternalTestAuth(t *testing.T) {
t.Skip("skipping since this requires authentication credentials which are not part of repo")
shareName := "test-rclone-oct-2023"
testCases := []struct {
name string
options *Options
}{
{
name: "ConnectionString",
options: &Options{
ShareName: shareName,
ConnectionString: "",
},
},
{
name: "AccountAndKey",
options: &Options{
ShareName: shareName,
Account: "",
Key: "",
}},
{
name: "SASUrl",
options: &Options{
ShareName: shareName,
SASURL: "",
}},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
fs, err := newFsFromOptions(context.TODO(), "TestAzureFiles", "", tc.options)
assert.NoError(t, err)
dirName := randomString(10)
assert.NoError(t, fs.Mkdir(context.TODO(), dirName))
})
}
}
const chars = "abcdefghijklmnopqrstuvwzyxABCDEFGHIJKLMNOPQRSTUVWZYX"
func randomString(charCount int) string {
strBldr := strings.Builder{}
for range charCount {
randPos := rand.Int63n(52)
strBldr.WriteByte(chars[randPos])
}
return strBldr.String()
}


@@ -1,17 +0,0 @@
//go:build !plan9 && !js
package azurefiles
import (
"testing"
"github.com/rclone/rclone/fstest/fstests"
)
func TestIntegration(t *testing.T) {
var objPtr *Object
fstests.Run(t, &fstests.Opt{
RemoteName: "TestAzureFiles:",
NilObject: objPtr,
})
}


@@ -1,7 +0,0 @@
// Build for azurefiles for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || js
// Package azurefiles provides an interface to Microsoft Azure Files
package azurefiles


@@ -1,13 +1,13 @@
// Package api provides types used by the Backblaze B2 API.
package api
import (
"fmt"
"path"
"strconv"
"strings"
"time"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/version"
)
// Error describes a B2 error response
@@ -33,27 +33,10 @@ var _ fserrors.Fataler = (*Error)(nil)
// Bucket describes a B2 bucket
type Bucket struct {
ID string `json:"bucketId"`
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
}
// LifecycleRule is a single lifecycle rule
type LifecycleRule struct {
DaysFromHidingToDeleting *int `json:"daysFromHidingToDeleting"`
DaysFromUploadingToHiding *int `json:"daysFromUploadingToHiding"`
DaysFromStartingToCancelingUnfinishedLargeFiles *int `json:"daysFromStartingToCancelingUnfinishedLargeFiles"`
FileNamePrefix string `json:"fileNamePrefix"`
}
// ServerSideEncryption is a configuration object for B2 Server-Side Encryption
type ServerSideEncryption struct {
Mode string `json:"mode"`
Algorithm string `json:"algorithm"` // Encryption algorithm to use
CustomerKey string `json:"customerKey"` // User provided Base64 encoded key that is used by the server to encrypt files
CustomerKeyMd5 string `json:"customerKeyMd5"` // An MD5 hash of the decoded key
ID string `json:"bucketId"`
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
}
// Timestamp is a UTC time when this file was uploaded. It is a base
@@ -80,17 +63,16 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
return nil
}
// HasVersion returns true if it looks like the passed filename has a timestamp on it.
//
// Note that the passed filename's timestamp may still be invalid even if this
// function returns true.
func HasVersion(remote string) bool {
return version.Match(remote)
}
const versionFormat = "-v2006-01-02-150405.000"
// AddVersion adds the timestamp as a version string into the filename passed in.
func (t Timestamp) AddVersion(remote string) string {
return version.Add(remote, time.Time(t))
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
s := time.Time(t).Format(versionFormat)
// Replace the '.' with a '-'
s = strings.Replace(s, ".", "-", -1)
return base + s + ext
}
// RemoveVersion removes the timestamp from a filename as a version string.
@@ -98,9 +80,24 @@ func (t Timestamp) AddVersion(remote string) string {
// It returns the new file name and a timestamp, or the old filename
// and a zero timestamp.
func RemoveVersion(remote string) (t Timestamp, newRemote string) {
time, newRemote := version.Remove(remote)
t = Timestamp(time)
return
newRemote = remote
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
if len(base) < len(versionFormat) {
return
}
versionStart := len(base) - len(versionFormat)
// Check it ends in -xxx
if base[len(base)-4] != '-' {
return
}
// Replace with .xxx for parsing
base = base[:len(base)-4] + "." + base[len(base)-3:]
newT, err := time.Parse(versionFormat, base[versionStart:])
if err != nil {
return
}
return Timestamp(newT), base[:versionStart] + ext
}
// IsZero returns true if the timestamp is uninitialized
@@ -138,10 +135,10 @@ type AuthorizeAccountResponse struct {
AbsoluteMinimumPartSize int `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
AccountID string `json:"accountId"` // The identifier for the account.
Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
NamePrefix any `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
NamePrefix interface{} `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
} `json:"allowed"`
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
@@ -223,10 +220,9 @@ type FileInfo struct {
// CreateBucketRequest is used to create a bucket
type CreateBucketRequest struct {
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
}
// DeleteBucketRequest is used to create a bucket
@@ -257,7 +253,7 @@ type GetFileInfoRequest struct {
// If the original source of the file being uploaded has a last
// modified time concept, Backblaze recommends using
// src_last_modified_millis as the name, and a string holding the base
// 10 number of milliseconds since midnight, January 1, 1970
// 10 number of milliseconds since midnight, January 1, 1970
// UTC. This fits in a 64 bit integer such as the type "long" in the
// programming language Java. It is intended to be compatible with
// Java's time long. For example, it can be passed directly into the
@@ -269,22 +265,21 @@ type GetFileInfoRequest struct {
//
// Example: { "src_last_modified_millis" : "1452802803026", "large_file_sha1" : "a3195dc1e7b46a2ff5da4b3c179175b75671e80d", "color": "blue" }
type StartLargeFileRequest struct {
BucketID string `json:"bucketId"` // The ID of the bucket that the file will go in.
Name string `json:"fileName"` // The name of the file. See Files for requirements on file names.
ContentType string `json:"contentType"` // The MIME type of the content of the file, which will be returned in the Content-Type header when downloading the file. Use the Content-Type b2/x-auto to automatically set the stored Content-Type post upload. In the case where a file extension is absent or the lookup fails, the Content-Type is set to application/octet-stream.
Info map[string]string `json:"fileInfo"` // A JSON object holding the name/value pairs for the custom file info.
ServerSideEncryption *ServerSideEncryption `json:"serverSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption
BucketID string `json:"bucketId"` //The ID of the bucket that the file will go in.
Name string `json:"fileName"` // The name of the file. See Files for requirements on file names.
ContentType string `json:"contentType"` // The MIME type of the content of the file, which will be returned in the Content-Type header when downloading the file. Use the Content-Type b2/x-auto to automatically set the stored Content-Type post upload. In the case where a file extension is absent or the lookup fails, the Content-Type is set to application/octet-stream.
Info map[string]string `json:"fileInfo"` // A JSON object holding the name/value pairs for the custom file info.
}
// StartLargeFileResponse is the response to StartLargeFileRequest
type StartLargeFileResponse struct {
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
AccountID string `json:"accountId"` // The identifier for the account.
BucketID string `json:"bucketId"` // The unique ID of the bucket.
ContentType string `json:"contentType"` // The MIME type of the file.
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
UploadTimestamp Timestamp `json:"uploadTimestamp,omitempty"` // This is a UTC time when this file was uploaded.
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
AccountID string `json:"accountId"` // The identifier for the account.
BucketID string `json:"bucketId"` // The unique ID of the bucket.
ContentType string `json:"contentType"` // The MIME type of the file.
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
}
// GetUploadPartURLRequest is passed to b2_get_upload_part_url
@@ -334,31 +329,19 @@ type CancelLargeFileResponse struct {
// CopyFileRequest is as passed to b2_copy_file
type CopyFileRequest struct {
SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
Name string `json:"fileName"` // The name of the new file being created.
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
MetadataDirective string `json:"metadataDirective,omitempty"` // The strategy for how to populate metadata for the new file: COPY or REPLACE
ContentType string `json:"contentType,omitempty"` // The MIME type of the content of the file (REPLACE only)
Info map[string]string `json:"fileInfo,omitempty"` // This field stores the metadata that will be stored with the file. (REPLACE only)
DestBucketID string `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used
SourceServerSideEncryption *ServerSideEncryption `json:"sourceServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the source file
DestinationServerSideEncryption *ServerSideEncryption `json:"destinationServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the destination file
SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
Name string `json:"fileName"` // The name of the new file being created.
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
MetadataDirective string `json:"metadataDirective,omitempty"` // The strategy for how to populate metadata for the new file: COPY or REPLACE
ContentType string `json:"contentType,omitempty"` // The MIME type of the content of the file (REPLACE only)
Info map[string]string `json:"fileInfo,omitempty"` // This field stores the metadata that will be stored with the file. (REPLACE only)
DestBucketID string `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used
}
// CopyPartRequest is the request for b2_copy_part - the response is UploadPartResponse
type CopyPartRequest struct {
SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
LargeFileID string `json:"largeFileId"` // The ID of the large file the part will belong to, as returned by b2_start_large_file.
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
SourceServerSideEncryption *ServerSideEncryption `json:"sourceServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the source file
DestinationServerSideEncryption *ServerSideEncryption `json:"destinationServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the destination file
}
// UpdateBucketRequest describes a request to modify a B2 bucket
type UpdateBucketRequest struct {
ID string `json:"bucketId"`
AccountID string `json:"accountId"`
Type string `json:"bucketType,omitempty"`
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
LargeFileID string `json:"largeFileId"` // The ID of the large file the part will belong to, as returned by b2_start_large_file.
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
}


@@ -13,6 +13,7 @@ import (
var (
emptyT api.Timestamp
t0 = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123456789Z"))
t0r = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123000000Z"))
t1 = api.Timestamp(fstest.Time("2001-02-03T04:05:06.123000000Z"))
)
@@ -35,6 +36,40 @@ func TestTimestampUnmarshalJSON(t *testing.T) {
assert.Equal(t, (time.Time)(t1), (time.Time)(tActual))
}
func TestTimestampAddVersion(t *testing.T) {
for _, test := range []struct {
t api.Timestamp
in string
expected string
}{
{t0, "potato.txt", "potato-v1970-01-01-010101-123.txt"},
{t1, "potato", "potato-v2001-02-03-040506-123"},
{t1, "", "-v2001-02-03-040506-123"},
} {
actual := test.t.AddVersion(test.in)
assert.Equal(t, test.expected, actual, test.in)
}
}
func TestTimestampRemoveVersion(t *testing.T) {
for _, test := range []struct {
in string
expectedT api.Timestamp
expectedRemote string
}{
{"potato.txt", emptyT, "potato.txt"},
{"potato-v1970-01-01-010101-123.txt", t0r, "potato.txt"},
{"potato-v2001-02-03-040506-123", t1, "potato"},
{"-v2001-02-03-040506-123", t1, ""},
{"potato-v2A01-02-03-040506-123", emptyT, "potato-v2A01-02-03-040506-123"},
{"potato-v2001-02-03-040506=123", emptyT, "potato-v2001-02-03-040506=123"},
} {
actualT, actualRemote := api.RemoveVersion(test.in)
assert.Equal(t, test.expectedT, actualT, test.in)
assert.Equal(t, test.expectedRemote, actualRemote, test.in)
}
}
func TestTimestampIsZero(t *testing.T) {
assert.True(t, emptyT.IsZero())
assert.False(t, t0.IsZero())
@@ -42,11 +77,11 @@ func TestTimestampIsZero(t *testing.T) {
}
func TestTimestampEqual(t *testing.T) {
assert.False(t, emptyT.Equal(emptyT)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
assert.False(t, emptyT.Equal(emptyT))
assert.False(t, t0.Equal(emptyT))
assert.False(t, emptyT.Equal(t0))
assert.False(t, t0.Equal(t1))
assert.False(t, t1.Equal(t0))
assert.True(t, t0.Equal(t0)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
assert.True(t, t1.Equal(t1)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
assert.True(t, t0.Equal(t0))
assert.True(t, t1.Equal(t1))
}

File diff suppressed because it is too large


@@ -1,31 +1,14 @@
package b2
import (
"context"
"crypto/sha1"
"fmt"
"path"
"sort"
"strings"
"testing"
"time"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/version"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Test b2 string encoding
// https://www.backblaze.com/docs/cloud-storage-native-api-string-encoding
// https://www.backblaze.com/b2/docs/string_encoding.html
var encodeTest = []struct {
fullyEncoded string
@@ -185,435 +168,3 @@ func TestParseTimeString(t *testing.T) {
}
}
// Return a map of the headers in the options with keys stripped of the "x-bz-info-" prefix
func OpenOptionToMetaData(options []fs.OpenOption) map[string]string {
var headers = make(map[string]string)
for _, option := range options {
k, v := option.Header()
k = strings.ToLower(k)
if strings.HasPrefix(k, headerPrefix) {
headers[k[len(headerPrefix):]] = v
}
}
return headers
}
func (f *Fs) internalTestMetadata(t *testing.T, size string, uploadCutoff string, chunkSize string) {
what := fmt.Sprintf("Size%s/UploadCutoff%s/ChunkSize%s", size, uploadCutoff, chunkSize)
t.Run(what, func(t *testing.T) {
ctx := context.Background()
ss := fs.SizeSuffix(0)
err := ss.Set(size)
require.NoError(t, err)
original := random.String(int(ss))
contents := fstest.Gz(t, original)
mimeType := "text/html"
if chunkSize != "" {
ss := fs.SizeSuffix(0)
err := ss.Set(chunkSize)
require.NoError(t, err)
_, err = f.SetUploadChunkSize(ss)
require.NoError(t, err)
}
if uploadCutoff != "" {
ss := fs.SizeSuffix(0)
err := ss.Set(uploadCutoff)
require.NoError(t, err)
_, err = f.SetUploadCutoff(ss)
require.NoError(t, err)
}
item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499Z"))
btime := time.Now()
metadata := fs.Metadata{
// Just mtime for now - limit to milliseconds since x-bz-info-src_last_modified_millis can't support any more precision
"mtime": "2009-05-06T04:05:06.499Z",
}
// Need to specify HTTP options with the header prefix since they are passed as-is
options := []fs.OpenOption{
&fs.HTTPOption{Key: "X-Bz-Info-a", Value: "1"},
&fs.HTTPOption{Key: "X-Bz-Info-b", Value: "2"},
}
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, mimeType, metadata, options...)
defer func() {
assert.NoError(t, obj.Remove(ctx))
}()
o := obj.(*Object)
gotMetadata, err := o.getMetaData(ctx)
require.NoError(t, err)
// X-Bz-Info-a & X-Bz-Info-b
optMetadata := OpenOptionToMetaData(options)
for k, v := range optMetadata {
got := gotMetadata.Info[k]
assert.Equal(t, v, got, k)
}
assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")
// Modification time from the x-bz-info-src_last_modified_millis header
var mtime api.Timestamp
err = mtime.UnmarshalJSON([]byte(gotMetadata.Info[timeKey]))
if err != nil {
fs.Debugf(o, "Bad "+timeHeader+" header: %v", err)
}
assert.Equal(t, item.ModTime, time.Time(mtime), "Modification time")
// Upload time
gotBtime := time.Time(gotMetadata.UploadTimestamp)
dt := gotBtime.Sub(btime)
assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("btime more than 1 minute out want %v got %v delta %v", btime, gotBtime, dt))
t.Run("GzipEncoding", func(t *testing.T) {
// Test that the gzipped file we uploaded can be
// downloaded
checkDownload := func(wantContents string, wantSize int64, wantHash string) {
gotContents := fstests.ReadObject(ctx, t, o, -1)
assert.Equal(t, wantContents, gotContents)
assert.Equal(t, wantSize, o.Size())
gotHash, err := o.Hash(ctx, hash.SHA1)
require.NoError(t, err)
assert.Equal(t, wantHash, gotHash)
}
t.Run("NoDecompress", func(t *testing.T) {
checkDownload(contents, int64(len(contents)), sha1Sum(t, contents))
})
})
})
}
func (f *Fs) InternalTestMetadata(t *testing.T) {
// 1 kB regular file
f.internalTestMetadata(t, "1kiB", "", "")
// 10 MiB large file
f.internalTestMetadata(t, "10MiB", "6MiB", "6MiB")
}
func sha1Sum(t *testing.T, s string) string {
hash := sha1.Sum([]byte(s))
return fmt.Sprintf("%x", hash)
}
// This is adapted from the s3 equivalent.
func (f *Fs) InternalTestVersions(t *testing.T) {
ctx := context.Background()
// Small pause to make the LastModified different since AWS
// only seems to track them to 1 second granularity
time.Sleep(2 * time.Second)
// Create an object
const dirName = "versions"
const fileName = dirName + "/" + "test-versions.txt"
contents := random.String(100)
item := fstest.NewItem(fileName, contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
defer func() {
assert.NoError(t, obj.Remove(ctx))
}()
objMetadata, err := obj.(*Object).getMetaData(ctx)
require.NoError(t, err)
// Small pause
time.Sleep(2 * time.Second)
// Remove it
assert.NoError(t, obj.Remove(ctx))
// Small pause to make the LastModified different since AWS only seems to track them to 1 second granularity
time.Sleep(2 * time.Second)
// And create it with different size and contents
newContents := random.String(101)
newItem := fstest.NewItem(fileName, newContents, fstest.Time("2002-05-06T04:05:06.499999999Z"))
newObj := fstests.PutTestContents(ctx, t, f, &newItem, newContents, true)
newObjMetadata, err := newObj.(*Object).getMetaData(ctx)
require.NoError(t, err)
t.Run("Versions", func(t *testing.T) {
// Set --b2-versions for this test
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
// Read the contents
entries, err := f.List(ctx, dirName)
require.NoError(t, err)
tests := 0
var fileNameVersion string
for _, entry := range entries {
t.Log(entry)
remote := entry.Remote()
if remote == fileName {
t.Run("ReadCurrent", func(t *testing.T) {
assert.Equal(t, newContents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1))
})
tests++
} else if versionTime, p := version.Remove(remote); !versionTime.IsZero() && p == fileName {
t.Run("ReadVersion", func(t *testing.T) {
assert.Equal(t, contents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1))
})
assert.WithinDuration(t, time.Time(objMetadata.UploadTimestamp), versionTime, time.Second, "object time must be within 1 second of version time")
fileNameVersion = remote
tests++
}
}
assert.Equal(t, 2, tests, "object missing from listing")
// Check we can read the object with a version suffix
t.Run("NewObject", func(t *testing.T) {
o, err := f.NewObject(ctx, fileNameVersion)
require.NoError(t, err)
require.NotNil(t, o)
assert.Equal(t, int64(100), o.Size(), o.Remote())
})
// Check we can make a NewFs from that object with a version suffix
t.Run("NewFs", func(t *testing.T) {
newPath := bucket.Join(fs.ConfigStringFull(f), fileNameVersion)
// Make sure --b2-versions is set in the config of the new remote
fs.Debugf(nil, "oldPath = %q", newPath)
lastColon := strings.LastIndex(newPath, ":")
require.True(t, lastColon >= 0)
newPath = newPath[:lastColon] + ",versions" + newPath[lastColon:]
fs.Debugf(nil, "newPath = %q", newPath)
fNew, err := cache.Get(ctx, newPath)
// This should return pointing to a file
require.Equal(t, fs.ErrorIsFile, err)
require.NotNil(t, fNew)
// With the directory above
assert.Equal(t, dirName, path.Base(fs.ConfigStringFull(fNew)))
})
})
t.Run("VersionAt", func(t *testing.T) {
// We set --b2-version-at for this test so make sure we reset it at the end
defer func() {
f.opt.VersionAt = fs.Time{}
}()
var (
firstObjectTime = time.Time(objMetadata.UploadTimestamp)
secondObjectTime = time.Time(newObjMetadata.UploadTimestamp)
)
for _, test := range []struct {
what string
at time.Time
want []fstest.Item
wantErr error
wantSize int64
}{
{
what: "Before",
at: firstObjectTime.Add(-time.Second),
want: fstests.InternalTestFiles,
wantErr: fs.ErrorObjectNotFound,
},
{
what: "AfterOne",
at: firstObjectTime.Add(time.Second),
want: append([]fstest.Item{item}, fstests.InternalTestFiles...),
wantSize: 100,
},
{
what: "AfterDelete",
at: secondObjectTime.Add(-time.Second),
want: fstests.InternalTestFiles,
wantErr: fs.ErrorObjectNotFound,
},
{
what: "AfterTwo",
at: secondObjectTime.Add(time.Second),
want: append([]fstest.Item{newItem}, fstests.InternalTestFiles...),
wantSize: 101,
},
} {
t.Run(test.what, func(t *testing.T) {
f.opt.VersionAt = fs.Time(test.at)
t.Run("List", func(t *testing.T) {
fstest.CheckListing(t, f, test.want)
})
t.Run("NewObject", func(t *testing.T) {
gotObj, gotErr := f.NewObject(ctx, fileName)
assert.Equal(t, test.wantErr, gotErr)
if gotErr == nil {
assert.Equal(t, test.wantSize, gotObj.Size())
}
})
})
}
})
t.Run("Cleanup", func(t *testing.T) {
t.Run("DryRun", func(t *testing.T) {
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
// Listing should be unchanged after dry run
before := listAllFiles(ctx, t, f, dirName)
ctx, ci := fs.AddConfig(ctx)
ci.DryRun = true
require.NoError(t, f.cleanUp(ctx, true, false, 0))
after := listAllFiles(ctx, t, f, dirName)
assert.Equal(t, before, after)
})
t.Run("RealThing", func(t *testing.T) {
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
// Listing should reflect current state after cleanup
require.NoError(t, f.cleanUp(ctx, true, false, 0))
items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
fstest.CheckListing(t, f, items)
})
})
// Purge gets tested later
}
func (f *Fs) InternalTestCleanupUnfinished(t *testing.T) {
ctx := context.Background()
// B2CleanupHidden tests cleaning up hidden files
t.Run("CleanupUnfinished", func(t *testing.T) {
dirName := "unfinished"
fileCount := 5
expectedFiles := []string{}
for i := 1; i < fileCount; i++ {
fileName := fmt.Sprintf("%s/unfinished-%d", dirName, i)
expectedFiles = append(expectedFiles, fileName)
obj := &Object{
fs: f,
remote: fileName,
}
objInfo := object.NewStaticObjectInfo(fileName, fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil)
_, err := f.newLargeUpload(ctx, obj, nil, objInfo, f.opt.ChunkSize, false, nil)
require.NoError(t, err)
}
checkListing(ctx, t, f, dirName, expectedFiles)
t.Run("DryRun", func(t *testing.T) {
// Listing should not change after dry run
ctx, ci := fs.AddConfig(ctx)
ci.DryRun = true
require.NoError(t, f.cleanUp(ctx, false, true, 0))
checkListing(ctx, t, f, dirName, expectedFiles)
})
t.Run("RealThing", func(t *testing.T) {
// Listing should be empty after real cleanup
require.NoError(t, f.cleanUp(ctx, false, true, 0))
checkListing(ctx, t, f, dirName, []string{})
})
})
}
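// listAllFiles returns a sorted list of the names of all files under dirName in f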
func listAllFiles(ctx context.Context, t *testing.T, f *Fs, dirName string) []string {
bucket, directory := f.split(dirName)
foundFiles := []string{}
require.NoError(t, f.list(ctx, bucket, directory, "", false, true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
if !isDirectory {
foundFiles = append(foundFiles, object.Name)
}
return nil
}))
sort.Strings(foundFiles)
return foundFiles
}
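// checkListing asserts that the files found under dirName exactly match expectedFiles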
func checkListing(ctx context.Context, t *testing.T, f *Fs, dirName string, expectedFiles []string) {
foundFiles := listAllFiles(ctx, t, f, dirName)
sort.Strings(expectedFiles)
assert.Equal(t, expectedFiles, foundFiles)
}
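// InternalTestLifecycleRules tests reading and setting bucket lifecycle rules via the "lifecycle" backend command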
func (f *Fs) InternalTestLifecycleRules(t *testing.T) {
ctx := context.Background()
opt := map[string]string{}
t.Run("InitState", func(t *testing.T) {
// There should be no lifecycle rules at the outset
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
})
t.Run("DryRun", func(t *testing.T) {
// There should still be no lifecycle rules after each dry run operation
ctx, ci := fs.AddConfig(ctx)
ci.DryRun = true
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
delete(opt, "daysFromHidingToDeleting")
opt["daysFromUploadingToHiding"] = "40"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
})
t.Run("RealThing", func(t *testing.T) {
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
delete(opt, "daysFromHidingToDeleting")
opt["daysFromUploadingToHiding"] = "40"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
})
}
// -run TestIntegration/FsMkdir/FsPutFiles/Internal
func (f *Fs) InternalTest(t *testing.T) {
t.Run("Metadata", f.InternalTestMetadata)
t.Run("Versions", f.InternalTestVersions)
t.Run("CleanupUnfinished", f.InternalTestCleanupUnfinished)
t.Run("LifecycleRules", f.InternalTestLifecycleRules)
}
var _ fstests.InternalTester = (*Fs)(nil)


@@ -28,12 +28,7 @@ func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setCopyCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
_ fstests.SetCopyCutoffer = (*Fs)(nil)
)


@@ -1,10 +1,11 @@
// Upload large files for b2
//
// Docs - https://www.backblaze.com/docs/cloud-storage-large-files
// Docs - https://www.backblaze.com/b2/docs/large_files.html
package b2
import (
"bytes"
"context"
"crypto/sha1"
"encoding/hex"
@@ -14,13 +15,12 @@ import (
"strings"
"sync"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/chunksize"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/sync/errgroup"
)
@@ -78,31 +78,38 @@ type largeUpload struct {
wrap accounting.WrapFn // account parts being transferred
id string // ID of the file being uploaded
size int64 // total size
parts int // calculated number of parts, if known
sha1smu sync.Mutex // mutex to protect sha1s
parts int64 // calculated number of parts, if known
sha1s []string // slice of SHA1s for each part
uploadMu sync.Mutex // lock for upload variable
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
chunkSize int64 // chunk size to use
src *Object // if copying, object we are reading from
info *api.FileInfo // final response with info about the object
}
// newLargeUpload starts an upload of object o from in with metadata in src
//
// If newInfo is set then metadata from that will be used instead of reading it from src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File, options ...fs.OpenOption) (up *largeUpload, err error) {
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, chunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
remote := o.remote
size := src.Size()
parts := 0
chunkSize := defaultChunkSize
parts := int64(0)
sha1SliceSize := int64(maxParts)
if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
} else {
chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
parts = int(size / int64(chunkSize))
parts = size / int64(chunkSize)
if size%int64(chunkSize) != 0 {
parts++
}
if parts > maxParts {
return nil, errors.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
}
sha1SliceSize = parts
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_start_large_file",
}
bucket, bucketPath := o.split()
bucketID, err := f.getBucketID(ctx, bucket)
@@ -113,27 +120,12 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
BucketID: bucketID,
Name: f.opt.Enc.FromStandardPath(bucketPath),
}
optionsToSend := make([]fs.OpenOption, 0, len(options))
if newInfo == nil {
modTime, err := o.getModTime(ctx, src, options)
if err != nil {
return nil, err
}
modTime := src.ModTime(ctx)
request.ContentType = fs.MimeType(ctx, src)
request.Info = map[string]string{
timeKey: timeString(modTime),
}
// Custom upload headers - remove header prefix since they are sent in the body
for _, option := range options {
k, v := option.Header()
k = strings.ToLower(k)
if strings.HasPrefix(k, headerPrefix) {
request.Info[k[len(headerPrefix):]] = v
} else {
optionsToSend = append(optionsToSend, option)
}
}
// Set the SHA1 if known
if !o.fs.opt.DisableCheckSum || doCopy {
if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
@@ -144,19 +136,6 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
request.ContentType = newInfo.ContentType
request.Info = newInfo.Info
}
if o.fs.opt.SSECustomerKey != "" && o.fs.opt.SSECustomerKeyMD5 != "" {
request.ServerSideEncryption = &api.ServerSideEncryption{
Mode: "SSE-C",
Algorithm: o.fs.opt.SSECustomerAlgorithm,
CustomerKey: o.fs.opt.SSECustomerKeyBase64,
CustomerKeyMd5: o.fs.opt.SSECustomerKeyMD5,
}
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_start_large_file",
Options: optionsToSend,
}
var response api.StartLargeFileResponse
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
@@ -173,7 +152,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
id: response.ID,
size: size,
parts: parts,
sha1s: make([]string, 0, 16),
sha1s: make([]string, sha1SliceSize),
chunkSize: int64(chunkSize),
}
// unwrap the accounting from the input, we use wrap to put it
@@ -192,26 +171,24 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
// This should be returned with returnUploadURL when finished
func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
up.uploadMu.Lock()
if len(up.uploads) > 0 {
defer up.uploadMu.Unlock()
if len(up.uploads) == 0 {
opts := rest.Opts{
Method: "POST",
Path: "/b2_get_upload_part_url",
}
var request = api.GetUploadPartURLRequest{
ID: up.id,
}
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
return up.f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get upload URL")
}
} else {
upload, up.uploads = up.uploads[0], up.uploads[1:]
up.uploadMu.Unlock()
return upload, nil
}
up.uploadMu.Unlock()
opts := rest.Opts{
Method: "POST",
Path: "/b2_get_upload_part_url",
}
var request = api.GetUploadPartURLRequest{
ID: up.id,
}
err = up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
return up.f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get upload URL: %w", err)
}
return upload, nil
}
@@ -226,39 +203,10 @@ func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
up.uploadMu.Unlock()
}
// Add an sha1 to the being built up sha1s
func (up *largeUpload) addSha1(chunkNumber int, sha1 string) {
up.sha1smu.Lock()
defer up.sha1smu.Unlock()
if len(up.sha1s) < chunkNumber+1 {
up.sha1s = append(up.sha1s, make([]string, chunkNumber+1-len(up.sha1s))...)
}
up.sha1s[chunkNumber] = sha1
}
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (size int64, err error) {
// Only account after the checksum reads have been done
if do, ok := reader.(pool.DelayAccountinger); ok {
// To figure out this number, do a transfer and if the accounted size is 0 or a
// multiple of what it should be, increase or decrease this number.
do.DelayAccounting(1)
}
err = up.f.pacer.Call(func() (bool, error) {
// Discover the size by seeking to the end
size, err = reader.Seek(0, io.SeekEnd)
if err != nil {
return false, err
}
// rewind the reader on retry and after reading size
_, err = reader.Seek(0, io.SeekStart)
if err != nil {
return false, err
}
fs.Debugf(up.o, "Sending chunk %d length %d", chunkNumber, size)
// Transfer a chunk
func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
// Get upload URL
upload, err := up.getUploadURL(ctx)
@@ -266,8 +214,8 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
return false, err
}
in := newHashAppendingReader(reader, sha1.New())
sizeWithHash := size + int64(in.AdditionalLength())
in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
size := int64(len(body)) + int64(in.AdditionalLength())
// Authorization
//
@@ -282,14 +230,14 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
//
// The number of bytes in the file being uploaded. Note that
// this header is required; you cannot leave it out and just
// use chunked encoding. The minimum size of every part but
// the last one is 100 MB (100,000,000 bytes)
// use chunked encoding. The minimum size of every part but
// the last one is 100MB.
//
// X-Bz-Content-Sha1
//
// The SHA1 checksum of the this part of the file. B2 will
// check this when the part is uploaded, to make sure that the
// data arrived correctly. The same SHA1 checksum must be
// data arrived correctly. The same SHA1 checksum must be
// passed to b2_finish_large_file.
opts := rest.Opts{
Method: "POST",
@@ -297,16 +245,10 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
Body: up.wrap(in),
ExtraHeaders: map[string]string{
"Authorization": upload.AuthorizationToken,
"X-Bz-Part-Number": fmt.Sprintf("%d", chunkNumber+1),
"X-Bz-Part-Number": fmt.Sprintf("%d", part),
sha1Header: "hex_digits_at_end",
},
ContentLength: &sizeWithHash,
}
if up.o.fs.opt.SSECustomerKey != "" && up.o.fs.opt.SSECustomerKeyMD5 != "" {
opts.ExtraHeaders[sseAlgorithmHeader] = up.o.fs.opt.SSECustomerAlgorithm
opts.ExtraHeaders[sseKeyHeader] = up.o.fs.opt.SSECustomerKeyBase64
opts.ExtraHeaders[sseMd5Header] = up.o.fs.opt.SSECustomerKeyMD5
ContentLength: &size,
}
var response api.UploadPartResponse
@@ -314,7 +256,7 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
retry, err := up.f.shouldRetry(ctx, resp, err)
if err != nil {
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", chunkNumber, retry, err, err)
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
}
// On retryable error clear PartUploadURL
if retry {
@@ -322,50 +264,39 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
upload = nil
}
up.returnUploadURL(upload)
up.addSha1(chunkNumber, in.HexSum())
up.sha1s[part-1] = in.HexSum()
return retry, err
})
if err != nil {
fs.Debugf(up.o, "Error sending chunk %d: %v", chunkNumber, err)
fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
} else {
fs.Debugf(up.o, "Done sending chunk %d", chunkNumber)
fs.Debugf(up.o, "Done sending chunk %d", part)
}
return size, err
return err
}
// Copy a chunk
func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64) error {
func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64) error {
err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize)
opts := rest.Opts{
Method: "POST",
Path: "/b2_copy_part",
}
offset := int64(part) * up.chunkSize // where we are in the source file
offset := (part - 1) * up.chunkSize // where we are in the source file
var request = api.CopyPartRequest{
SourceID: up.src.id,
LargeFileID: up.id,
PartNumber: int64(part + 1),
PartNumber: part,
Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
}
if up.o.fs.opt.SSECustomerKey != "" && up.o.fs.opt.SSECustomerKeyMD5 != "" {
serverSideEncryptionConfig := api.ServerSideEncryption{
Mode: "SSE-C",
Algorithm: up.o.fs.opt.SSECustomerAlgorithm,
CustomerKey: up.o.fs.opt.SSECustomerKeyBase64,
CustomerKeyMd5: up.o.fs.opt.SSECustomerKeyMD5,
}
request.SourceServerSideEncryption = &serverSideEncryptionConfig
request.DestinationServerSideEncryption = &serverSideEncryptionConfig
}
var response api.UploadPartResponse
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
retry, err := up.f.shouldRetry(ctx, resp, err)
if err != nil {
fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err)
}
up.addSha1(part, response.SHA1)
up.sha1s[part-1] = response.SHA1
return retry, err
})
if err != nil {
@@ -376,8 +307,8 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
return err
}
// Close closes off the large upload
func (up *largeUpload) Close(ctx context.Context) error {
// finish closes off the large upload
func (up *largeUpload) finish(ctx context.Context) error {
fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts)
opts := rest.Opts{
Method: "POST",
@@ -395,12 +326,11 @@ func (up *largeUpload) Close(ctx context.Context) error {
if err != nil {
return err
}
up.info = &response
return nil
return up.o.decodeMetaDataFileInfo(&response)
}
// Abort aborts the large upload
func (up *largeUpload) Abort(ctx context.Context) error {
// cancel aborts the large upload
func (up *largeUpload) cancel(ctx context.Context) error {
fs.Debugf(up.o, "Cancelling large file %s", up.what)
opts := rest.Opts{
Method: "POST",
@@ -425,102 +355,128 @@ func (up *largeUpload) Abort(ctx context.Context) error {
// reaches EOF.
//
// Note that initialUploadBlock must be returned to f.putBuf()
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock *pool.RW) (err error) {
defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
var (
g, gCtx = errgroup.WithContext(ctx)
hasMoreParts = true
)
up.size = initialUploadBlock.Size()
up.parts = 0
for part := 0; hasMoreParts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
var rw *pool.RW
if part == 0 {
rw = initialUploadBlock
} else {
rw = up.f.getRW(false)
}
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
up.f.putRW(rw)
break
}
// Read the chunk
var n int64
if part == 0 {
n = rw.Size()
} else {
n, err = io.CopyN(rw, up.in, up.chunkSize)
if err == io.EOF {
if n == 0 {
fs.Debugf(up.o, "Not sending empty chunk after EOF - ending.")
up.f.putRW(rw)
break
} else {
fs.Debugf(up.o, "Read less than a full chunk %d, making this the last one.", n)
}
hasMoreParts = false
} else if err != nil {
// other kinds of errors indicate failure
up.f.putRW(rw)
return err
up.size = int64(len(initialUploadBlock))
g.Go(func() error {
for part := int64(1); hasMoreParts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
var buf []byte
if part == 1 {
buf = initialUploadBlock
} else {
buf = up.f.getBuf(false)
}
}
// Keep stats up to date
up.parts += 1
up.size += n
if part > maxParts {
up.f.putRW(rw)
return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
}
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
up.f.putBuf(buf, false)
return nil
}
part := part // for the closure
g.Go(func() (err error) {
defer up.f.putRW(rw)
_, err = up.WriteChunk(gCtx, part, rw)
return err
})
}
// Read the chunk
var n int
if part == 1 {
n = len(buf)
} else {
n, err = io.ReadFull(up.in, buf)
if err == io.ErrUnexpectedEOF {
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
buf = buf[:n]
hasMoreParts = false
} else if err == io.EOF {
fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
up.f.putBuf(buf, false)
return nil
} else if err != nil {
// other kinds of errors indicate failure
up.f.putBuf(buf, false)
return err
}
}
// Keep stats up to date
up.parts = part
up.size += int64(n)
if part > maxParts {
up.f.putBuf(buf, false)
return errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
}
part := part // for the closure
g.Go(func() (err error) {
defer up.f.putBuf(buf, false)
return up.transferChunk(gCtx, part, buf)
})
}
return nil
})
err = g.Wait()
if err != nil {
return err
}
return up.Close(ctx)
up.sha1s = up.sha1s[:up.parts]
return up.finish(ctx)
}
// Copy the chunks from the source to the destination
func (up *largeUpload) Copy(ctx context.Context) (err error) {
defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
// Upload uploads the chunks from the input
func (up *largeUpload) Upload(ctx context.Context) (err error) {
defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
var (
g, gCtx = errgroup.WithContext(ctx)
remaining = up.size
)
g.SetLimit(up.f.opt.UploadConcurrency)
for part := range up.parts {
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in copying all the other parts.
if gCtx.Err() != nil {
break
g.Go(func() error {
for part := int64(1); part <= up.parts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
buf := up.f.getBuf(up.doCopy)
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
up.f.putBuf(buf, up.doCopy)
return nil
}
reqSize := remaining
if reqSize >= up.chunkSize {
reqSize = up.chunkSize
}
if !up.doCopy {
// Read the chunk
buf = buf[:reqSize]
_, err = io.ReadFull(up.in, buf)
if err != nil {
up.f.putBuf(buf, up.doCopy)
return err
}
}
part := part // for the closure
g.Go(func() (err error) {
defer up.f.putBuf(buf, up.doCopy)
if !up.doCopy {
err = up.transferChunk(gCtx, part, buf)
} else {
err = up.copyChunk(gCtx, part, reqSize)
}
return err
})
remaining -= reqSize
}
reqSize := min(remaining, up.chunkSize)
part := part // for the closure
g.Go(func() (err error) {
return up.copyChunk(gCtx, part, reqSize)
})
remaining -= reqSize
}
return nil
})
err = g.Wait()
if err != nil {
return err
}
return up.Close(ctx)
return up.finish(ctx)
}
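The parts arithmetic used throughout this file (in newLargeUpload and again in the Stream/Upload loops) is a ceiling division of the object size by the chunk size, checked against B2's maximum part count. A minimal standalone sketch of that arithmetic follows; chunkParts and the example values are illustrative and not taken from this diff.

package main

import "fmt"

// chunkParts performs a ceiling division: how many chunkSize parts are needed
// to hold size bytes, and whether that stays within maxParts.
func chunkParts(size, chunkSize, maxParts int64) (parts int64, ok bool) {
	parts = size / chunkSize
	if size%chunkSize != 0 {
		parts++
	}
	return parts, parts <= maxParts
}

func main() {
	// 250 MiB split into 96 MiB chunks needs 3 parts, well under a 10000 part limit.
	parts, ok := chunkParts(250<<20, 96<<20, 10000)
	fmt.Println(parts, ok) // 3 true
}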


@@ -14,7 +14,7 @@ const (
timeFormat = `"` + time.RFC3339 + `"`
)
// Time represents date and time information for the
// Time represents represents date and time information for the
// box API, by using RFC3339
type Time time.Time
@@ -36,13 +36,13 @@ func (t *Time) UnmarshalJSON(data []byte) error {
// Error is returned from box when things go wrong
type Error struct {
Type string `json:"type"`
Status int `json:"status"`
Code string `json:"code"`
ContextInfo json.RawMessage `json:"context_info"`
HelpURL string `json:"help_url"`
Message string `json:"message"`
RequestID string `json:"request_id"`
Type string `json:"type"`
Status int `json:"status"`
Code string `json:"code"`
ContextInfo json.RawMessage
HelpURL string `json:"help_url"`
Message string `json:"message"`
RequestID string `json:"request_id"`
}
// Error returns a string for the error and satisfies the error interface
@@ -52,7 +52,7 @@ func (e *Error) Error() string {
out += ": " + e.Message
}
if e.ContextInfo != nil {
out += fmt.Sprintf(" (%s)", string(e.ContextInfo))
out += fmt.Sprintf(" (%+v)", e.ContextInfo)
}
return out
}
@@ -61,9 +61,9 @@ func (e *Error) Error() string {
var _ error = (*Error)(nil)
// ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by"
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link"
// Types of things in Item/ItemMini
// Types of things in Item
const (
ItemTypeFolder = "folder"
ItemTypeFile = "file"
@@ -72,41 +72,24 @@ const (
ItemStatusDeleted = "deleted"
)
// ItemMini is a subset of the elements in a full Item returned by some API calls
type ItemMini struct {
Type string `json:"type"`
ID string `json:"id"`
SequenceID int64 `json:"sequence_id,string"`
Etag string `json:"etag"`
SHA1 string `json:"sha1"`
Name string `json:"name"`
}
// Item describes a folder or a file as returned by Get Folder Items and others
type Item struct {
Type string `json:"type"`
ID string `json:"id"`
SequenceID int64 `json:"sequence_id,string"`
Etag string `json:"etag"`
SHA1 string `json:"sha1"`
Name string `json:"name"`
Size float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
CreatedAt Time `json:"created_at"`
ModifiedAt Time `json:"modified_at"`
ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"`
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
Parent ItemMini `json:"parent"`
Type string `json:"type"`
ID string `json:"id"`
SequenceID string `json:"sequence_id"`
Etag string `json:"etag"`
SHA1 string `json:"sha1"`
Name string `json:"name"`
Size float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
CreatedAt Time `json:"created_at"`
ModifiedAt Time `json:"modified_at"`
ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"`
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
SharedLink struct {
URL string `json:"url,omitempty"`
Access string `json:"access,omitempty"`
} `json:"shared_link"`
OwnedBy struct {
Type string `json:"type"`
ID string `json:"id"`
Name string `json:"name"`
Login string `json:"login"`
} `json:"owned_by"`
}
// ModTime returns the modification time of the item
@@ -120,26 +103,14 @@ func (i *Item) ModTime() (t time.Time) {
// FolderItems is returned from the GetFolderItems call
type FolderItems struct {
TotalCount int `json:"total_count"`
Entries []Item `json:"entries"`
Offset int `json:"offset"`
Limit int `json:"limit"`
NextMarker *string `json:"next_marker,omitempty"`
// There is some confusion about how this is actually
// returned. The []struct has worked for many years, but in
// https://github.com/rclone/rclone/issues/8776 box was
// returning it not as a list. We don't actually use
// this so comment it out.
//
// Order struct {
// By string `json:"by"`
// Direction string `json:"direction"`
// } `json:"order"`
//
// Order []struct {
// By string `json:"by"`
// Direction string `json:"direction"`
// } `json:"order"`
TotalCount int `json:"total_count"`
Entries []Item `json:"entries"`
Offset int `json:"offset"`
Limit int `json:"limit"`
Order []struct {
By string `json:"by"`
Direction string `json:"direction"`
} `json:"order"`
}
// Parent defined the ID of the parent directory
@@ -161,26 +132,6 @@ type UploadFile struct {
ContentModifiedAt Time `json:"content_modified_at"`
}
// PreUploadCheck is the request for upload preflight check
type PreUploadCheck struct {
Name string `json:"name"`
Parent Parent `json:"parent"`
Size *int64 `json:"size,omitempty"`
}
// PreUploadCheckResponse is the response from upload preflight check
// if successful
type PreUploadCheckResponse struct {
UploadToken string `json:"upload_token"`
UploadURL string `json:"upload_url"`
}
// PreUploadCheckConflict is returned in the ContextInfo error field
// from PreUploadCheck when the error code is "item_name_in_use"
type PreUploadCheckConflict struct {
Conflicts ItemMini `json:"conflicts"`
}
// UpdateFileModTime is used in Update File Info
type UpdateFileModTime struct {
ContentModifiedAt Time `json:"content_modified_at"`
@@ -282,39 +233,12 @@ type User struct {
ModifiedAt time.Time `json:"modified_at"`
Language string `json:"language"`
Timezone string `json:"timezone"`
SpaceAmount float64 `json:"space_amount"`
SpaceUsed float64 `json:"space_used"`
MaxUploadSize float64 `json:"max_upload_size"`
SpaceAmount int64 `json:"space_amount"`
SpaceUsed int64 `json:"space_used"`
MaxUploadSize int64 `json:"max_upload_size"`
Status string `json:"status"`
JobTitle string `json:"job_title"`
Phone string `json:"phone"`
Address string `json:"address"`
AvatarURL string `json:"avatar_url"`
}
// FileTreeChangeEventTypes are the events that can require cache invalidation
var FileTreeChangeEventTypes = map[string]struct{}{
"ITEM_COPY": {},
"ITEM_CREATE": {},
"ITEM_MAKE_CURRENT_VERSION": {},
"ITEM_MODIFY": {},
"ITEM_MOVE": {},
"ITEM_RENAME": {},
"ITEM_TRASH": {},
"ITEM_UNDELETE_VIA_TRASH": {},
"ITEM_UPLOAD": {},
}
// Event is an array element in the response returned from /events
type Event struct {
EventType string `json:"event_type"`
EventID string `json:"event_id"`
Source Item `json:"source"`
}
// Events is returned from /events
type Events struct {
ChunkSize int64 `json:"chunk_size"`
Entries []Event `json:"entries"`
NextStreamPosition int64 `json:"next_stream_position"`
}
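For illustration, decoding an /events page into the structures above is a plain json.Unmarshal driven by the struct tags. A minimal sketch with an invented payload; the Event and Events definitions here are trimmed copies of the ones above, not the full api types.

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copies of the api types above, just enough to decode an /events page.
type Event struct {
	EventType string `json:"event_type"`
	EventID   string `json:"event_id"`
}

type Events struct {
	ChunkSize          int64   `json:"chunk_size"`
	Entries            []Event `json:"entries"`
	NextStreamPosition int64   `json:"next_stream_position"`
}

func main() {
	payload := []byte(`{"chunk_size":1,"next_stream_position":42,"entries":[{"event_type":"ITEM_UPLOAD","event_id":"e1"}]}`)
	var ev Events
	if err := json.Unmarshal(payload, &ev); err != nil {
		panic(err)
	}
	fmt.Println(ev.NextStreamPosition, ev.Entries[0].EventType) // 42 ITEM_UPLOAD
}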

File diff suppressed because it is too large.


@@ -8,7 +8,6 @@ import (
"crypto/sha1"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
@@ -16,6 +15,7 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -105,7 +105,7 @@ func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api
const defaultDelay = 10
var tries int
outer:
for tries = range maxTries {
for tries = 0; tries < maxTries; tries++ {
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
if err != nil {
@@ -140,7 +140,7 @@ outer:
}
}
default:
return nil, fmt.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
return nil, errors.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
}
}
fs.Debugf(o, "commit multipart upload failed %d/%d - trying again in %d seconds (%s)", tries+1, maxTries, delay, why)
@@ -151,7 +151,7 @@ outer:
}
err = json.Unmarshal(body, &result)
if err != nil {
return nil, fmt.Errorf("couldn't decode commit response: %q: %w", body, err)
return nil, errors.Wrapf(err, "couldn't decode commit response: %q", body)
}
return result, nil
}
@@ -177,7 +177,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
// Create upload session
session, err := o.createUploadSession(ctx, leaf, directoryID, size)
if err != nil {
return fmt.Errorf("multipart upload create session failed: %w", err)
return errors.Wrap(err, "multipart upload create session failed")
}
chunkSize := session.PartSize
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", session.TotalParts, fs.SizeSuffix(chunkSize))
@@ -203,7 +203,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
errs := make(chan error, 1)
var wg sync.WaitGroup
outer:
for part := range session.TotalParts {
for part := 0; part < session.TotalParts; part++ {
// Check any errors
select {
case err = <-errs:
@@ -211,7 +211,10 @@ outer:
default:
}
reqSize := min(remaining, chunkSize)
reqSize := remaining
if reqSize >= chunkSize {
reqSize = chunkSize
}
// Make a block of memory
buf := make([]byte, reqSize)
@@ -219,7 +222,7 @@ outer:
// Read the chunk
_, err = io.ReadFull(in, buf)
if err != nil {
err = fmt.Errorf("multipart upload failed to read source: %w", err)
err = errors.Wrap(err, "multipart upload failed to read source")
break outer
}
@@ -235,7 +238,7 @@ outer:
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap, options...)
if err != nil {
err = fmt.Errorf("multipart upload failed to upload part: %w", err)
err = errors.Wrap(err, "multipart upload failed to upload part")
select {
case errs <- err:
default:
@@ -263,11 +266,11 @@ outer:
// Finalise the upload session
result, err := o.commitUpload(ctx, session.ID, parts, modTime, hash.Sum(nil))
if err != nil {
return fmt.Errorf("multipart upload failed to finalize: %w", err)
return errors.Wrap(err, "multipart upload failed to finalize")
}
if result.TotalCount != 1 || len(result.Entries) != 1 {
return fmt.Errorf("multipart upload failed %v - not sure why", o)
return errors.Errorf("multipart upload failed %v - not sure why", o)
}
return o.setMetaData(&result.Entries[0])
}

backend/cache/cache.go

@@ -1,11 +1,9 @@
//go:build !plan9 && !js
// +build !plan9,!js
// Package cache implements a virtual provider to cache existing remotes.
package cache
import (
"context"
"errors"
"fmt"
"io"
"math"
@@ -20,6 +18,7 @@ import (
"syscall"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
@@ -29,7 +28,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/atexit"
@@ -70,28 +68,26 @@ func init() {
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to cache.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Help: "Remote to cache.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Required: true,
}, {
Name: "plex_url",
Help: "The URL of the Plex server.",
Help: "The URL of the Plex server",
}, {
Name: "plex_username",
Help: "The username of the Plex user.",
Sensitive: true,
Name: "plex_username",
Help: "The username of the Plex user",
}, {
Name: "plex_password",
Help: "The password of the Plex user.",
Help: "The password of the Plex user",
IsPassword: true,
}, {
Name: "plex_token",
Help: "The plex token for authentication - auto set normally.",
Hide: fs.OptionHideBoth,
Advanced: true,
Sensitive: true,
Name: "plex_token",
Help: "The plex token for authentication - auto set normally",
Hide: fs.OptionHideBoth,
Advanced: true,
}, {
Name: "plex_insecure",
Help: "Skip all certificate verification when connecting to the Plex server.",
Help: "Skip all certificate verification when connecting to the Plex server",
Advanced: true,
}, {
Name: "chunk_size",
@@ -102,14 +98,14 @@ changed, any downloaded chunks will be invalid and cache-chunk-path
will need to be cleared or unexpected EOF errors will occur.`,
Default: DefCacheChunkSize,
Examples: []fs.OptionExample{{
Value: "1M",
Help: "1 MiB",
Value: "1m",
Help: "1MB",
}, {
Value: "5M",
Help: "5 MiB",
Help: "5 MB",
}, {
Value: "10M",
Help: "10 MiB",
Help: "10 MB",
}},
}, {
Name: "info_age",
@@ -136,22 +132,22 @@ oldest chunks until it goes under this value.`,
Default: DefCacheTotalChunkSize,
Examples: []fs.OptionExample{{
Value: "500M",
Help: "500 MiB",
Help: "500 MB",
}, {
Value: "1G",
Help: "1 GiB",
Help: "1 GB",
}, {
Value: "10G",
Help: "10 GiB",
Help: "10 GB",
}},
}, {
Name: "db_path",
Default: filepath.Join(config.GetCacheDir(), "cache-backend"),
Help: "Directory to store file structure metadata DB.\n\nThe remote name is used as the DB file name.",
Default: filepath.Join(config.CacheDir, "cache-backend"),
Help: "Directory to store file structure metadata DB.\nThe remote name is used as the DB file name.",
Advanced: true,
}, {
Name: "chunk_path",
Default: filepath.Join(config.GetCacheDir(), "cache-backend"),
Default: filepath.Join(config.CacheDir, "cache-backend"),
Help: `Directory to cache chunk files.
Path to where partial file data (chunks) are stored locally. The remote
@@ -171,7 +167,6 @@ then "--cache-chunk-path" will use the same path as "--cache-db-path".`,
Name: "chunk_clean_interval",
Default: DefCacheChunkCleanInterval,
Help: `How often should the cache perform cleanups of the chunk storage.
The default value should be ok for most people. If you find that the
cache goes over "cache-chunk-total-size" too often then try to lower
this value to force it to perform cleanups more often.`,
@@ -225,7 +220,7 @@ available on the local machine.`,
}, {
Name: "rps",
Default: int(DefCacheRps),
Help: `Limits the number of requests per second to the source FS (-1 to disable).
Help: `Limits the number of requests per second to the source FS (-1 to disable)
This setting places a hard limit on the number of requests per second
that cache will be doing to the cloud provider remote and try to
@@ -246,7 +241,7 @@ still pass.`,
}, {
Name: "writes",
Default: DefCacheWrites,
Help: `Cache file data on writes through the FS.
Help: `Cache file data on writes through the FS
If you need to read files immediately after you upload them through
cache you can enable this flag to have their data stored in the
@@ -267,7 +262,7 @@ provider`,
}, {
Name: "tmp_wait_time",
Default: DefCacheTmpWaitTime,
Help: `How long should files be stored in local cache before being uploaded.
Help: `How long should files be stored in local cache before being uploaded
This is the duration that a file must wait in the temporary location
_cache-tmp-upload-path_ before it is selected for upload.
@@ -278,7 +273,7 @@ to start the upload if a queue formed for this purpose.`,
}, {
Name: "db_wait_time",
Default: DefCacheDbWaitTime,
Help: `How long to wait for the DB to be available - 0 is unlimited.
Help: `How long to wait for the DB to be available - 0 is unlimited
Only one process can have the DB open at any one time, so rclone waits
for this duration for the DB to become available before it gives an
@@ -344,14 +339,8 @@ func parseRootPath(path string) (string, error) {
return strings.Trim(path, "/"), nil
}
var warnDeprecated sync.Once
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
warnDeprecated.Do(func() {
fs.Logf(nil, "WARNING: Cache backend is deprecated and may be removed in future. Please use VFS instead.")
})
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -359,7 +348,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
return nil, err
}
if opt.ChunkTotalSize < opt.ChunkSize*fs.SizeSuffix(opt.TotalWorkers) {
return nil, fmt.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
return nil, errors.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
opt.ChunkTotalSize, opt.ChunkSize, opt.TotalWorkers)
}
@@ -369,13 +358,13 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
rpath, err := parseRootPath(rootPath)
if err != nil {
return nil, fmt.Errorf("failed to clean root path %q: %w", rootPath, err)
return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
}
remotePath := fspath.JoinRootPath(opt.Remote, rootPath)
wrappedFs, wrapErr := cache.Get(ctx, remotePath)
if wrapErr != nil && wrapErr != fs.ErrorIsFile {
return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remotePath, wrapErr)
return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
}
var fsErr error
fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
@@ -397,29 +386,27 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
notifiedRemotes: make(map[string]bool),
}
cache.PinUntilFinalized(f.Fs, f)
rps := rate.Inf
if opt.Rps > 0 {
rps = rate.Limit(float64(opt.Rps))
}
f.rateLimiter = rate.NewLimiter(rps, opt.TotalWorkers)
f.rateLimiter = rate.NewLimiter(rate.Limit(float64(opt.Rps)), opt.TotalWorkers)
f.plexConnector = &plexConnector{}
if opt.PlexURL != "" {
if opt.PlexToken != "" {
f.plexConnector, err = newPlexConnectorWithToken(f, opt.PlexURL, opt.PlexToken, opt.PlexInsecure)
if err != nil {
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
}
} else if opt.PlexPassword != "" && opt.PlexUsername != "" {
decPass, err := obscure.Reveal(opt.PlexPassword)
if err != nil {
decPass = opt.PlexPassword
}
f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
m.Set("plex_token", token)
})
if err != nil {
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
} else {
if opt.PlexPassword != "" && opt.PlexUsername != "" {
decPass, err := obscure.Reveal(opt.PlexPassword)
if err != nil {
decPass = opt.PlexPassword
}
f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
m.Set("plex_token", token)
})
if err != nil {
return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
}
}
}
}
@@ -427,8 +414,8 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
dbPath := f.opt.DbPath
chunkPath := f.opt.ChunkPath
// if the dbPath is non default but the chunk path is default, we overwrite the last to follow the same one as dbPath
if dbPath != filepath.Join(config.GetCacheDir(), "cache-backend") &&
chunkPath == filepath.Join(config.GetCacheDir(), "cache-backend") {
if dbPath != filepath.Join(config.CacheDir, "cache-backend") &&
chunkPath == filepath.Join(config.CacheDir, "cache-backend") {
chunkPath = dbPath
}
if filepath.Ext(dbPath) != "" {
@@ -439,11 +426,11 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
}
err = os.MkdirAll(dbPath, os.ModePerm)
if err != nil {
return nil, fmt.Errorf("failed to create cache directory %v: %w", dbPath, err)
return nil, errors.Wrapf(err, "failed to create cache directory %v", dbPath)
}
err = os.MkdirAll(chunkPath, os.ModePerm)
if err != nil {
return nil, fmt.Errorf("failed to create cache directory %v: %w", chunkPath, err)
return nil, errors.Wrapf(err, "failed to create cache directory %v", chunkPath)
}
dbPath = filepath.Join(dbPath, name+".db")
@@ -455,7 +442,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
DbWaitTime: time.Duration(opt.DbWaitTime),
})
if err != nil {
return nil, fmt.Errorf("failed to start cache db: %w", err)
return nil, errors.Wrapf(err, "failed to start cache db")
}
// Trap SIGINT and SIGTERM to close the DB handle gracefully
c := make(chan os.Signal, 1)
@@ -489,12 +476,12 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
if f.opt.TempWritePath != "" {
err = os.MkdirAll(f.opt.TempWritePath, os.ModePerm)
if err != nil {
return nil, fmt.Errorf("failed to create cache directory %v: %w", f.opt.TempWritePath, err)
return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
}
f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
f.tempFs, err = cache.Get(ctx, f.opt.TempWritePath)
if err != nil {
return nil, fmt.Errorf("failed to create temp fs: %w", err)
return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
}
fs.Infof(name, "Upload Temp Rest Time: %v", f.opt.TempWaitTime)
fs.Infof(name, "Upload Temp FS: %v", f.opt.TempWritePath)
@@ -611,7 +598,7 @@ func (f *Fs) httpStats(ctx context.Context, in rc.Params) (out rc.Params, err er
out = make(rc.Params)
m, err := f.Stats()
if err != nil {
return out, fmt.Errorf("error while getting cache stats")
return out, errors.Errorf("error while getting cache stats")
}
out["status"] = "ok"
out["stats"] = m
@@ -638,7 +625,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
out = make(rc.Params)
remoteInt, ok := in["remote"]
if !ok {
return out, fmt.Errorf("remote is needed")
return out, errors.Errorf("remote is needed")
}
remote := remoteInt.(string)
withData := false
@@ -649,7 +636,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
remote = f.unwrapRemote(remote)
if !f.cache.HasEntry(path.Join(f.Root(), remote)) {
return out, fmt.Errorf("%s doesn't exist in cache", remote)
return out, errors.Errorf("%s doesn't exist in cache", remote)
}
co := NewObject(f, remote)
@@ -658,7 +645,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
cd := NewDirectory(f, remote)
err := f.cache.ExpireDir(cd)
if err != nil {
return out, fmt.Errorf("error expiring directory: %w", err)
return out, errors.WithMessage(err, "error expiring directory")
}
// notify vfs too
f.notifyChangeUpstream(cd.Remote(), fs.EntryDirectory)
@@ -669,7 +656,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
// expire the entry
err = f.cache.ExpireObject(co, withData)
if err != nil {
return out, fmt.Errorf("error expiring file: %w", err)
return out, errors.WithMessage(err, "error expiring file")
}
// notify vfs too
f.notifyChangeUpstream(co.Remote(), fs.EntryObject)
@@ -684,30 +671,30 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
start, end int64
}
parseChunks := func(ranges string) (crs []chunkRange, err error) {
for part := range strings.SplitSeq(ranges, ",") {
for _, part := range strings.Split(ranges, ",") {
var start, end int64 = 0, math.MaxInt64
switch ints := strings.Split(part, ":"); len(ints) {
case 1:
start, err = strconv.ParseInt(ints[0], 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid range: %q", part)
return nil, errors.Errorf("invalid range: %q", part)
}
end = start + 1
case 2:
if ints[0] != "" {
start, err = strconv.ParseInt(ints[0], 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid range: %q", part)
return nil, errors.Errorf("invalid range: %q", part)
}
}
if ints[1] != "" {
end, err = strconv.ParseInt(ints[1], 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid range: %q", part)
return nil, errors.Errorf("invalid range: %q", part)
}
}
default:
return nil, fmt.Errorf("invalid range: %q", part)
return nil, errors.Errorf("invalid range: %q", part)
}
crs = append(crs, chunkRange{start: start, end: end})
}
@@ -762,18 +749,18 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
delete(in, "chunks")
crs, err := parseChunks(s)
if err != nil {
return nil, fmt.Errorf("invalid chunks parameter: %w", err)
return nil, errors.Wrap(err, "invalid chunks parameter")
}
var files [][2]string
for k, v := range in {
if !strings.HasPrefix(k, "file") {
return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
return nil, errors.Errorf("invalid parameter %s=%s", k, v)
}
switch v := v.(type) {
case string:
files = append(files, [2]string{v, f.unwrapRemote(v)})
default:
return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
return nil, errors.Errorf("invalid parameter %s=%s", k, v)
}
}
type fileStatus struct {
@@ -1038,7 +1025,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
fs.Debugf(dir, "list: remove entry: %v", entryRemote)
}
entries = nil //nolint:ineffassign
entries = nil
// and then iterate over the ones from source (temp Objects will override source ones)
var batchDirectories []*Directory
@@ -1087,13 +1074,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return cachedEntries, nil
}
func (f *Fs) recurse(ctx context.Context, dir string, list *list.Helper) error {
func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error {
entries, err := f.List(ctx, dir)
if err != nil {
return err
}
for i := range entries {
for i := 0; i < len(entries); i++ {
innerDir, ok := entries[i].(fs.Directory)
if ok {
err := f.recurse(ctx, innerDir.Remote(), list)
@@ -1129,7 +1116,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
case fs.Directory:
_ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o))
default:
return fmt.Errorf("unknown object type %T", entry)
return errors.Errorf("Unknown object type %T", entry)
}
}
@@ -1139,7 +1126,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
}
// if we're here, we're gonna do a standard recursive traversal and cache everything
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
err = f.recurse(ctx, dir, list)
if err != nil {
return err
@@ -1429,7 +1416,7 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i
}()
// wait until both are done
for range 2 {
for c := 0; c < 2; c++ {
<-done
}
}
@@ -1748,13 +1735,13 @@ func (f *Fs) CleanUp(ctx context.Context) error {
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
do := f.Fs.Features().About
if do == nil {
return nil, errors.New("not supported by underlying remote")
return nil, errors.New("About not supported")
}
return do(ctx)
}
// Stats returns stats about the cache storage
func (f *Fs) Stats() (map[string]map[string]any, error) {
func (f *Fs) Stats() (map[string]map[string]interface{}, error) {
return f.cache.Stats()
}
@@ -1787,7 +1774,7 @@ func (f *Fs) CleanUpCache(ignoreLastTs bool) {
}
}
// StopBackgroundRunners will signal all the runners to stop their work
// StopBackgroundRunners will signall all the runners to stop their work
// can be triggered from a terminate signal or from testing between runs
func (f *Fs) StopBackgroundRunners() {
f.cleanupChan <- false
@@ -1934,7 +1921,7 @@ var commandHelp = []fs.CommandHelp{
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (any, error) {
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
switch name {
case "stats":
return f.Stats()


@@ -1,4 +1,5 @@
//go:build !plan9 && !js && !race
// +build !plan9,!js
// +build !race
package cache_test
@@ -6,20 +7,21 @@ import (
"bytes"
"context"
"encoding/base64"
"errors"
goflag "flag"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
"path"
"path/filepath"
"runtime"
"runtime/debug"
"strings"
"testing"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/cache"
"github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/drive"
@@ -28,11 +30,10 @@ import (
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/testy"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/stretchr/testify/require"
)
@@ -92,7 +93,7 @@ func TestMain(m *testing.M) {
goflag.Parse()
var rc int
fs.Logf(nil, "Running with the following params: \n remote: %v", remoteName)
log.Printf("Running with the following params: \n remote: %v", remoteName)
runInstance = newRun()
rc = m.Run()
os.Exit(rc)
@@ -100,12 +101,14 @@ func TestMain(m *testing.M) {
func TestInternalListRootAndInnerRemotes(t *testing.T) {
id := fmt.Sprintf("tilrair%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
// Instantiate inner fs
innerFolder := "inner"
runInstance.mkdir(t, rootFs, innerFolder)
rootFs2, _ := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil)
rootFs2, boltDb2 := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs2, boltDb2)
runInstance.writeObjectString(t, rootFs2, "one", "content")
listRoot, err := runInstance.list(t, rootFs, "")
@@ -122,10 +125,10 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) {
/* TODO: is this testing something?
func TestInternalVfsCache(t *testing.T) {
vfscommon.Opt.DirCacheTime = time.Second * 30
vfsflags.Opt.DirCacheTime = time.Second * 30
testSize := int64(524288000)
vfscommon.Opt.CacheMode = vfs.CacheModeWrites
vfsflags.Opt.CacheMode = vfs.CacheModeWrites
id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
@@ -163,7 +166,7 @@ func TestInternalVfsCache(t *testing.T) {
li2 := [2]string{path.Join("test", "one"), path.Join("test", "second")}
for _, r := range li2 {
var err error
ci, err := os.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
ci, err := ioutil.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
if err != nil || len(ci) == 0 {
log.Printf("========== '%v' not in cache", r)
} else {
@@ -222,7 +225,8 @@ func TestInternalVfsCache(t *testing.T) {
func TestInternalObjWrapFsFound(t *testing.T) {
id := fmt.Sprintf("tiowff%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -254,7 +258,8 @@ func TestInternalObjWrapFsFound(t *testing.T) {
func TestInternalObjNotFound(t *testing.T) {
id := fmt.Sprintf("tionf%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
obj, err := rootFs.NewObject(context.Background(), "404")
require.Error(t, err)
@@ -264,7 +269,8 @@ func TestInternalObjNotFound(t *testing.T) {
func TestInternalCachedWrittenContentMatches(t *testing.T) {
testy.SkipUnreliable(t)
id := fmt.Sprintf("ticwcm%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -287,11 +293,9 @@ func TestInternalCachedWrittenContentMatches(t *testing.T) {
}
func TestInternalDoubleWrittenContentMatches(t *testing.T) {
if runtime.GOOS == "windows" && runtime.GOARCH == "386" {
t.Skip("Skip test on windows/386")
}
id := fmt.Sprintf("tidwcm%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
// write the object
runInstance.writeRemoteString(t, rootFs, "one", "one content")
@@ -309,7 +313,8 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
func TestInternalCachedUpdatedContentMatches(t *testing.T) {
testy.SkipUnreliable(t)
id := fmt.Sprintf("ticucm%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
var err error
// create some rand test data
@@ -337,8 +342,9 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
func TestInternalWrappedWrittenContentMatches(t *testing.T) {
id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
vfsflags.Opt.DirCacheTime = time.Second
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
if runInstance.rootIsCrypt {
t.Skip("test skipped with crypt remote")
}
@@ -360,15 +366,16 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
require.NoError(t, err)
require.Equal(t, int64(len(checkSample)), o.Size())
for i := range checkSample {
for i := 0; i < len(checkSample); i++ {
require.Equal(t, testData[i], checkSample[i])
}
}
func TestInternalLargeWrittenContentMatches(t *testing.T) {
id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
vfsflags.Opt.DirCacheTime = time.Second
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
if runInstance.rootIsCrypt {
t.Skip("test skipped with crypt remote")
}
@@ -387,14 +394,15 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {
readData, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
require.NoError(t, err)
for i := range readData {
for i := 0; i < len(readData); i++ {
require.Equalf(t, testData[i], readData[i], "at byte %v", i)
}
}
func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
id := fmt.Sprintf("tiwfcns%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -407,7 +415,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
// update in the wrapped fs
originalSize, err := runInstance.size(t, rootFs, "data.bin")
require.NoError(t, err)
fs.Logf(nil, "original size: %v", originalSize)
log.Printf("original size: %v", originalSize)
o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err)
@@ -416,7 +424,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
if runInstance.rootIsCrypt {
data2, err = base64.StdEncoding.DecodeString(cryptedText3Base64)
require.NoError(t, err)
expectedSize++ // FIXME newline gets in, likely test data issue
expectedSize = expectedSize + 1 // FIXME newline gets in, likely test data issue
} else {
data2 = []byte("test content")
}
@@ -424,7 +432,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
err = o.Update(context.Background(), bytes.NewReader(data2), objInfo)
require.NoError(t, err)
require.Equal(t, int64(len(data2)), o.Size())
fs.Logf(nil, "updated size: %v", len(data2))
log.Printf("updated size: %v", len(data2))
// get a new instance from the cache
if runInstance.wrappedIsExternal {
@@ -434,7 +442,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
return err
}
if coSize != expectedSize {
return fmt.Errorf("%v <> %v", coSize, expectedSize)
return errors.Errorf("%v <> %v", coSize, expectedSize)
}
return nil
}, 12, time.Second*10)
@@ -448,7 +456,8 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
func TestInternalMoveWithNotify(t *testing.T) {
id := fmt.Sprintf("timwn%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
if !runInstance.wrappedIsExternal {
t.Skipf("Not external")
}
@@ -484,49 +493,49 @@ func TestInternalMoveWithNotify(t *testing.T) {
err = runInstance.retryBlock(func() error {
li, err := runInstance.list(t, rootFs, "test")
if err != nil {
fs.Logf(nil, "err: %v", err)
log.Printf("err: %v", err)
return err
}
if len(li) != 2 {
fs.Logf(nil, "not expected listing /test: %v", li)
return fmt.Errorf("not expected listing /test: %v", li)
log.Printf("not expected listing /test: %v", li)
return errors.Errorf("not expected listing /test: %v", li)
}
li, err = runInstance.list(t, rootFs, "test/one")
if err != nil {
fs.Logf(nil, "err: %v", err)
log.Printf("err: %v", err)
return err
}
if len(li) != 0 {
fs.Logf(nil, "not expected listing /test/one: %v", li)
return fmt.Errorf("not expected listing /test/one: %v", li)
log.Printf("not expected listing /test/one: %v", li)
return errors.Errorf("not expected listing /test/one: %v", li)
}
li, err = runInstance.list(t, rootFs, "test/second")
if err != nil {
fs.Logf(nil, "err: %v", err)
log.Printf("err: %v", err)
return err
}
if len(li) != 1 {
fs.Logf(nil, "not expected listing /test/second: %v", li)
return fmt.Errorf("not expected listing /test/second: %v", li)
log.Printf("not expected listing /test/second: %v", li)
return errors.Errorf("not expected listing /test/second: %v", li)
}
if fi, ok := li[0].(os.FileInfo); ok {
if fi.Name() != "data.bin" {
fs.Logf(nil, "not expected name: %v", fi.Name())
return fmt.Errorf("not expected name: %v", fi.Name())
log.Printf("not expected name: %v", fi.Name())
return errors.Errorf("not expected name: %v", fi.Name())
}
} else if di, ok := li[0].(fs.DirEntry); ok {
if di.Remote() != "test/second/data.bin" {
fs.Logf(nil, "not expected remote: %v", di.Remote())
return fmt.Errorf("not expected remote: %v", di.Remote())
log.Printf("not expected remote: %v", di.Remote())
return errors.Errorf("not expected remote: %v", di.Remote())
}
} else {
fs.Logf(nil, "unexpected listing: %v", li)
return fmt.Errorf("unexpected listing: %v", li)
log.Printf("unexpected listing: %v", li)
return errors.Errorf("unexpected listing: %v", li)
}
fs.Logf(nil, "complete listing: %v", li)
log.Printf("complete listing: %v", li)
return nil
}, 12, time.Second*10)
require.NoError(t, err)
@@ -534,7 +543,8 @@ func TestInternalMoveWithNotify(t *testing.T) {
func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
id := fmt.Sprintf("tincep%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
if !runInstance.wrappedIsExternal {
t.Skipf("Not external")
}
@@ -576,43 +586,43 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
err = runInstance.retryBlock(func() error {
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
if !found {
fs.Logf(nil, "not found /test")
return fmt.Errorf("not found /test")
log.Printf("not found /test")
return errors.Errorf("not found /test")
}
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
if !found {
fs.Logf(nil, "not found /test/one")
return fmt.Errorf("not found /test/one")
log.Printf("not found /test/one")
return errors.Errorf("not found /test/one")
}
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
if !found {
fs.Logf(nil, "not found /test/one/test2")
return fmt.Errorf("not found /test/one/test2")
log.Printf("not found /test/one/test2")
return errors.Errorf("not found /test/one/test2")
}
li, err := runInstance.list(t, rootFs, "test/one")
if err != nil {
fs.Logf(nil, "err: %v", err)
log.Printf("err: %v", err)
return err
}
if len(li) != 1 {
fs.Logf(nil, "not expected listing /test/one: %v", li)
return fmt.Errorf("not expected listing /test/one: %v", li)
log.Printf("not expected listing /test/one: %v", li)
return errors.Errorf("not expected listing /test/one: %v", li)
}
if fi, ok := li[0].(os.FileInfo); ok {
if fi.Name() != "test2" {
fs.Logf(nil, "not expected name: %v", fi.Name())
return fmt.Errorf("not expected name: %v", fi.Name())
log.Printf("not expected name: %v", fi.Name())
return errors.Errorf("not expected name: %v", fi.Name())
}
} else if di, ok := li[0].(fs.DirEntry); ok {
if di.Remote() != "test/one/test2" {
fs.Logf(nil, "not expected remote: %v", di.Remote())
return fmt.Errorf("not expected remote: %v", di.Remote())
log.Printf("not expected remote: %v", di.Remote())
return errors.Errorf("not expected remote: %v", di.Remote())
}
} else {
fs.Logf(nil, "unexpected listing: %v", li)
return fmt.Errorf("unexpected listing: %v", li)
log.Printf("unexpected listing: %v", li)
return errors.Errorf("unexpected listing: %v", li)
}
fs.Logf(nil, "complete listing /test/one/test2")
log.Printf("complete listing /test/one/test2")
return nil
}, 12, time.Second*10)
require.NoError(t, err)
@@ -620,7 +630,8 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
id := fmt.Sprintf("ticsadcf%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -652,7 +663,8 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
func TestInternalCacheWrites(t *testing.T) {
id := "ticw"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"writes": "true"})
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -669,11 +681,9 @@ func TestInternalCacheWrites(t *testing.T) {
}
func TestInternalMaxChunkSizeRespected(t *testing.T) {
if runtime.GOOS == "windows" && runtime.GOARCH == "386" {
t.Skip("Skip test on windows/386")
}
id := fmt.Sprintf("timcsr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"workers": "1"})
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -688,7 +698,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
co, ok := o.(*cache.Object)
require.True(t, ok)
for i := range 4 { // read first 4
for i := 0; i < 4; i++ { // read first 4
_ = runInstance.readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false)
}
cfs.CleanUpCache(true)
@@ -707,8 +717,9 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
func TestInternalExpiredEntriesRemoved(t *testing.T) {
id := fmt.Sprintf("tieer%v", time.Now().Unix())
vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 4) // needs to be lower than the defined
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, map[string]string{"info_age": "5s"}, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -742,10 +753,12 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
}
func TestInternalBug2117(t *testing.T) {
vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 10)
vfsflags.Opt.DirCacheTime = time.Second * 10
id := fmt.Sprintf("tib2117%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil,
map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
if runInstance.rootIsCrypt {
t.Skipf("skipping crypt")
@@ -770,24 +783,24 @@ func TestInternalBug2117(t *testing.T) {
di, err := runInstance.list(t, rootFs, "test/dir1/dir2")
require.NoError(t, err)
fs.Logf(nil, "len: %v", len(di))
log.Printf("len: %v", len(di))
require.Len(t, di, 1)
time.Sleep(time.Second * 30)
di, err = runInstance.list(t, rootFs, "test/dir1/dir2")
require.NoError(t, err)
fs.Logf(nil, "len: %v", len(di))
log.Printf("len: %v", len(di))
require.Len(t, di, 1)
di, err = runInstance.list(t, rootFs, "test/dir1")
require.NoError(t, err)
fs.Logf(nil, "len: %v", len(di))
log.Printf("len: %v", len(di))
require.Len(t, di, 4)
di, err = runInstance.list(t, rootFs, "test")
require.NoError(t, err)
fs.Logf(nil, "len: %v", len(di))
log.Printf("len: %v", len(di))
require.Len(t, di, 4)
}
@@ -821,14 +834,14 @@ func newRun() *run {
}
if uploadDir == "" {
r.tmpUploadDir, err = os.MkdirTemp("", "rclonecache-tmp")
r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
if err != nil {
panic(fmt.Sprintf("Failed to create temp dir: %v", err))
log.Fatalf("Failed to create temp dir: %v", err)
}
} else {
r.tmpUploadDir = uploadDir
}
fs.Logf(nil, "Temp Upload Dir: %v", r.tmpUploadDir)
log.Printf("Temp Upload Dir: %v", r.tmpUploadDir)
return r
}
@@ -846,11 +859,11 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
return enc
}
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, cfg map[string]string, flags map[string]string) (fs.Fs, *cache.Persistent) {
fstest.Initialise()
remoteExists := false
for _, s := range config.GetRemotes() {
if s.Name == remote {
for _, s := range config.FileSections() {
if s == remote {
remoteExists = true
}
}
@@ -874,12 +887,12 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
cacheRemote := remote
if !remoteExists {
localRemote := remote + "-local"
config.FileSetValue(localRemote, "type", "local")
config.FileSetValue(localRemote, "nounc", "true")
config.FileSet(localRemote, "type", "local")
config.FileSet(localRemote, "nounc", "true")
m.Set("type", "cache")
m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
} else {
remoteType := config.GetValue(remote, "type")
remoteType := config.FileGet(remote, "type")
if remoteType == "" {
t.Skipf("skipped due to invalid remote type for %v", remote)
return nil, nil
@@ -890,14 +903,14 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
m.Set("password", cryptPassword1)
m.Set("password2", cryptPassword2)
}
remoteRemote := config.GetValue(remote, "remote")
remoteRemote := config.FileGet(remote, "remote")
if remoteRemote == "" {
t.Skipf("skipped due to invalid remote wrapper for %v", remote)
return nil, nil
}
remoteRemoteParts := strings.Split(remoteRemote, ":")
remoteWrapping := remoteRemoteParts[0]
remoteType := config.GetValue(remoteWrapping, "type")
remoteType := config.FileGet(remoteWrapping, "type")
if remoteType != "cache" {
t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
return nil, nil
@@ -906,9 +919,9 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
}
}
runInstance.rootIsCrypt = rootIsCrypt
runInstance.dbPath = filepath.Join(config.GetCacheDir(), "cache-backend", cacheRemote+".db")
runInstance.chunkPath = filepath.Join(config.GetCacheDir(), "cache-backend", cacheRemote)
runInstance.vfsCachePath = filepath.Join(config.GetCacheDir(), "vfs", remote)
runInstance.dbPath = filepath.Join(config.CacheDir, "cache-backend", cacheRemote+".db")
runInstance.chunkPath = filepath.Join(config.CacheDir, "cache-backend", cacheRemote)
runInstance.vfsCachePath = filepath.Join(config.CacheDir, "vfs", remote)
boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
require.NoError(t, err)
@@ -934,20 +947,16 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
}
if purge {
_ = operations.Purge(context.Background(), f, "")
_ = f.Features().Purge(context.Background(), "")
require.NoError(t, err)
}
err = f.Mkdir(context.Background(), "")
require.NoError(t, err)
t.Cleanup(func() {
runInstance.cleanupFs(t, f)
})
return f, boltDb
}
func (r *run) cleanupFs(t *testing.T, f fs.Fs) {
err := operations.Purge(context.Background(), f, "")
func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
err := f.Features().Purge(context.Background(), "")
require.NoError(t, err)
cfs, err := r.getCacheFs(f)
require.NoError(t, err)
@@ -968,10 +977,10 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
chunk := int64(1024)
cnt := size / chunk
left := size % chunk
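// For example, size = 2500 splits into cnt = 2 full 1 KiB chunks with left = 452
// bytes remaining.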
f, err := os.CreateTemp("", "rclonecache-tempfile")
f, err := ioutil.TempFile("", "rclonecache-tempfile")
require.NoError(t, err)
for range int(cnt) {
for i := 0; i < int(cnt); i++ {
data := randStringBytes(int(chunk))
_, _ = f.Write(data)
}
@@ -1046,7 +1055,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
if !noLengthCheck && size != int64(len(checkSample)) {
return checkSample, fmt.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
return checkSample, errors.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
}
return checkSample, nil
}
@@ -1085,9 +1094,9 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
return err
}
func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]any, error) {
func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error) {
var err error
var l []any
var l []interface{}
var list fs.DirEntries
list, err = f.List(context.Background(), remote)
for _, ll := range list {
@@ -1096,6 +1105,27 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]any, error) {
return l, err
}
func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer func() {
_ = in.Close()
}()
out, err := os.Create(dst)
if err != nil {
return err
}
defer func() {
_ = out.Close()
}()
_, err = io.Copy(out, in)
return err
}
func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
var err error
@@ -1191,7 +1221,7 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
func (r *run) cleanSize(t *testing.T, size int64) int64 {
if r.rootIsCrypt {
denominator := int64(65536 + 16)
size -= 32
size = size - 32
quotient := size / denominator
remainder := size % denominator
return (quotient*65536 + remainder - 16)
@@ -1215,12 +1245,12 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
var err error
var state cache.BackgroundUploadState
for range 2 {
for i := 0; i < 2; i++ {
select {
case state = <-buCh:
// continue
case <-time.After(maxDuration):
waitCh <- fmt.Errorf("Timed out waiting for background upload: %v", remote)
waitCh <- errors.Errorf("Timed out waiting for background upload: %v", remote)
return
}
checkRemote := state.Remote
@@ -1237,7 +1267,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
return
}
}
waitCh <- fmt.Errorf("Too many attempts to wait for the background upload: %v", remote)
waitCh <- errors.Errorf("Too many attempts to wait for the background upload: %v", remote)
}()
return waitCh
}
@@ -1293,7 +1323,7 @@ func (r *run) completeAllBackgroundUploads(t *testing.T, f fs.Fs, lastRemote str
func (r *run) retryBlock(block func() error, maxRetries int, rate time.Duration) error {
var err error
for range maxRetries {
for i := 0; i < maxRetries; i++ {
err = block()
if err == nil {
return nil


@@ -1,6 +1,7 @@
// Test Cache filesystem interface
//go:build !plan9 && !js && !race
// +build !plan9,!js
// +build !race
package cache_test
@@ -15,11 +16,10 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata", "ListP"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata", "SetMetadata"},
UnimplementableDirectoryMethods: []string{"Metadata", "SetMetadata", "SetModTime"},
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
})
}
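A usage sketch for the test above, assuming a remote named TestCache: has already been configured for testing: rclone integration tests are normally run from the backend directory and pass the remote name through fstest's -remote flag, for example
    go test -v -remote TestCache:
executed inside backend/cache.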


@@ -1,7 +1,6 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || js
// +build plan9 js
// Package cache implements a virtual provider to cache existing remotes.
package cache


@@ -1,4 +1,5 @@
//go:build !plan9 && !js && !race
// +build !plan9,!js
// +build !race
package cache_test
@@ -20,8 +21,10 @@ import (
func TestInternalUploadTempDirCreated(t *testing.T) {
id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
runInstance.newCacheFs(t, remoteName, id, false, true,
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)})
defer runInstance.cleanupFs(t, rootFs, boltDb)
_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
require.NoError(t, err)
@@ -60,7 +63,9 @@ func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltD
func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}
@@ -68,15 +73,19 @@ func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}
func TestInternalUploadMoveExistingFile(t *testing.T) {
id := fmt.Sprintf("tiumef%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "one")
require.NoError(t, err)
@@ -110,8 +119,10 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {
func TestInternalUploadTempPathCleaned(t *testing.T) {
id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "one")
require.NoError(t, err)
@@ -151,19 +162,21 @@ func TestInternalUploadTempPathCleaned(t *testing.T) {
func TestInternalUploadQueueMoreFiles(t *testing.T) {
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "test")
require.NoError(t, err)
minSize := 5242880
maxSize := 10485760
totalFiles := 10
randInstance := rand.New(rand.NewSource(time.Now().Unix()))
rand.Seed(time.Now().Unix())
lastFile := ""
for i := range totalFiles {
size := int64(randInstance.Intn(maxSize-minSize) + minSize)
for i := 0; i < totalFiles; i++ {
size := int64(rand.Intn(maxSize-minSize) + minSize)
testReader := runInstance.randomReader(t, size)
remote := "test/" + strconv.Itoa(i) + ".bin"
runInstance.writeRemoteReader(t, rootFs, remote, testReader)
@@ -200,7 +213,9 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
func TestInternalUploadTempFileOperations(t *testing.T) {
id := "tiutfo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads()
@@ -328,7 +343,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
func TestInternalUploadUploadingFileOperations(t *testing.T) {
id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads()


@@ -1,4 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache


@@ -1,10 +1,9 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
import (
"context"
"errors"
"fmt"
"io"
"path"
@@ -13,6 +12,7 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
)
@@ -118,7 +118,7 @@ func (r *Handle) startReadWorkers() {
r.scaleWorkers(totalWorkers)
}
// scaleWorkers will increase the worker pool count by the provided amount
// scaleOutWorkers will increase the worker pool count by the provided amount
func (r *Handle) scaleWorkers(desired int) {
current := r.workers
if current == desired {
@@ -182,7 +182,7 @@ func (r *Handle) queueOffset(offset int64) {
}
}
for i := range r.workers {
for i := 0; i < r.workers; i++ {
o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
if o < 0 || o >= r.cachedObject.Size() {
continue
@@ -208,7 +208,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)
// we align the start offset of the first chunk to a likely chunk in the storage
chunkStart -= offset
chunkStart = chunkStart - offset
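// For example (a sketch assuming the default 5 MiB chunk_size): a read starting
// at offset 12 MiB has offset = 12 MiB % 5 MiB = 2 MiB, so chunkStart is moved
// back to 10 MiB, the boundary of the chunk that already covers that byte range.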
r.queueOffset(chunkStart)
found := false
@@ -222,7 +222,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
if !found {
// we're gonna give the workers a chance to pickup the chunk
// and retry a couple of times
for i := range r.cacheFs().opt.ReadRetries * 8 {
for i := 0; i < r.cacheFs().opt.ReadRetries*8; i++ {
data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
if err == nil {
found = true
@@ -242,7 +242,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
return nil, io.ErrUnexpectedEOF
}
return nil, fmt.Errorf("chunk not found %v", chunkStart)
return nil, errors.Errorf("chunk not found %v", chunkStart)
}
// first chunk will be aligned with the start
@@ -322,12 +322,12 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
fs.Debugf(r, "moving offset end (%v) from %v to %v", r.cachedObject.Size(), r.offset, r.cachedObject.Size()+offset)
r.offset = r.cachedObject.Size() + offset
default:
err = fmt.Errorf("cache: unimplemented seek whence %v", whence)
err = errors.Errorf("cache: unimplemented seek whence %v", whence)
}
chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
chunkStart -= int64(r.cacheFs().opt.ChunkSize)
chunkStart = chunkStart - int64(r.cacheFs().opt.ChunkSize)
}
r.queueOffset(chunkStart)
@@ -415,8 +415,10 @@ func (w *worker) run() {
continue
}
}
} else if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
continue
} else {
if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
continue
}
}
chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)


@@ -1,15 +1,15 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
import (
"context"
"fmt"
"io"
"path"
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/readers"
@@ -177,14 +177,10 @@ func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
}
if o.isTempFile() {
liveObject, err = o.ParentFs.NewObject(ctx, o.Remote())
if err != nil {
err = fmt.Errorf("in parent fs %v: %w", o.ParentFs, err)
}
err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
} else {
liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote())
if err != nil {
err = fmt.Errorf("in cache fs %v: %w", o.CacheFs.Fs, err)
}
err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
}
if err != nil {
fs.Errorf(o, "error refreshing object in : %v", err)
@@ -256,7 +252,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
defer o.CacheFs.backgroundRunner.play()
// don't allow started uploads
if o.isTempFile() && o.tempFileStartedUpload() {
return fmt.Errorf("%v is currently uploading, can't update", o)
return errors.Errorf("%v is currently uploading, can't update", o)
}
}
fs.Debugf(o, "updating object contents with size %v", src.Size())
@@ -295,7 +291,7 @@ func (o *Object) Remove(ctx context.Context) error {
defer o.CacheFs.backgroundRunner.play()
// don't allow started uploads
if o.isTempFile() && o.tempFileStartedUpload() {
return fmt.Errorf("%v is currently uploading, can't delete", o)
return errors.Errorf("%v is currently uploading, can't delete", o)
}
}
err := o.Object.Remove(ctx)

backend/cache/plex.go

@@ -1,4 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
@@ -7,7 +7,7 @@ import (
"crypto/tls"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strings"
@@ -166,7 +166,7 @@ func (p *plexConnector) listenWebsocket() {
continue
}
var data []byte
data, err = io.ReadAll(resp.Body)
data, err = ioutil.ReadAll(resp.Body)
if err != nil {
continue
}
@@ -209,10 +209,10 @@ func (p *plexConnector) authenticate() error {
if err != nil {
return err
}
var data map[string]any
var data map[string]interface{}
err = json.NewDecoder(resp.Body).Decode(&data)
if err != nil {
return fmt.Errorf("failed to obtain token: %w", err)
return fmt.Errorf("failed to obtain token: %v", err)
}
tokenGen, ok := get(data, "user", "authToken")
if !ok {
@@ -273,11 +273,11 @@ func (p *plexConnector) isPlaying(co *Object) bool {
}
// adapted from: https://stackoverflow.com/a/28878037 (credit)
func get(m any, path ...any) (any, bool) {
func get(m interface{}, path ...interface{}) (interface{}, bool) {
for _, p := range path {
switch idx := p.(type) {
case string:
if mm, ok := m.(map[string]any); ok {
if mm, ok := m.(map[string]interface{}); ok {
if val, found := mm[idx]; found {
m = val
continue
@@ -285,7 +285,7 @@ func get(m any, path ...any) (any, bool) {
}
return nil, false
case int:
if mm, ok := m.([]any); ok {
if mm, ok := m.([]interface{}); ok {
if len(mm) > idx {
m = mm[idx]
continue


@@ -1,14 +1,14 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
import (
"fmt"
"strconv"
"strings"
"time"
cache "github.com/patrickmn/go-cache"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)
@@ -52,7 +52,7 @@ func (m *Memory) GetChunk(cachedObject *Object, offset int64) ([]byte, error) {
return data, nil
}
return nil, fmt.Errorf("couldn't get cached object data at offset %v", offset)
return nil, errors.Errorf("couldn't get cached object data at offset %v", offset)
}
// AddChunk adds a new chunk of a cached object
@@ -75,7 +75,10 @@ func (m *Memory) CleanChunksByAge(chunkAge time.Duration) {
// CleanChunksByNeed will cleanup chunks after the FS passes a specific chunk
func (m *Memory) CleanChunksByNeed(offset int64) {
for key := range m.db.Items() {
var items map[string]cache.Item
items = m.db.Items()
for key := range items {
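// Keys have the form "<object path>-<offset>" (for instance "test/data.bin-1048576",
// an illustrative value), so the chunk offset is everything after the last '-'.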
sepIdx := strings.LastIndex(key, "-")
keyOffset, err := strconv.ParseInt(key[sepIdx+1:], 10, 64)
if err != nil {


@@ -1,4 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
@@ -8,6 +8,7 @@ import (
"encoding/binary"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"strconv"
@@ -15,10 +16,10 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk"
bolt "go.etcd.io/bbolt"
"go.etcd.io/bbolt/errors"
)
// Constants
@@ -118,11 +119,11 @@ func (b *Persistent) connect() error {
err = os.MkdirAll(b.dataPath, os.ModePerm)
if err != nil {
return fmt.Errorf("failed to create a data directory %q: %w", b.dataPath, err)
return errors.Wrapf(err, "failed to create a data directory %q", b.dataPath)
}
b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: b.features.DbWaitTime})
if err != nil {
return fmt.Errorf("failed to open a cache connection to %q: %w", b.dbPath, err)
return errors.Wrapf(err, "failed to open a cache connection to %q", b.dbPath)
}
if b.features.PurgeDb {
b.Purge()
@@ -174,7 +175,7 @@ func (b *Persistent) GetDir(remote string) (*Directory, error) {
err := b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(remote, false, tx)
if bucket == nil {
return fmt.Errorf("couldn't open bucket (%v)", remote)
return errors.Errorf("couldn't open bucket (%v)", remote)
}
data := bucket.Get([]byte("."))
@@ -182,7 +183,7 @@ func (b *Persistent) GetDir(remote string) (*Directory, error) {
return json.Unmarshal(data, cd)
}
return fmt.Errorf("%v not found", remote)
return errors.Errorf("%v not found", remote)
})
return cd, err
@@ -207,7 +208,7 @@ func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error {
bucket = b.getBucket(cachedDirs[0].Dir, true, tx)
}
if bucket == nil {
return fmt.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
return errors.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
}
for _, cachedDir := range cachedDirs {
@@ -224,7 +225,7 @@ func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error {
encoded, err := json.Marshal(cachedDir)
if err != nil {
return fmt.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
return errors.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
}
err = b.Put([]byte("."), encoded)
if err != nil {
@@ -242,17 +243,17 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
err := b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(cachedDir.abs(), false, tx)
if bucket == nil {
return fmt.Errorf("couldn't open bucket (%v)", cachedDir.abs())
return errors.Errorf("couldn't open bucket (%v)", cachedDir.abs())
}
val := bucket.Get([]byte("."))
if val != nil {
err := json.Unmarshal(val, cachedDir)
if err != nil {
return fmt.Errorf("error during unmarshalling obj: %w", err)
return errors.Errorf("error during unmarshalling obj: %v", err)
}
} else {
return fmt.Errorf("missing cached dir: %v", cachedDir)
return errors.Errorf("missing cached dir: %v", cachedDir)
}
c := bucket.Cursor()
@@ -267,7 +268,7 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
// we try to find a cached meta for the dir
currentBucket := c.Bucket().Bucket(k)
if currentBucket == nil {
return fmt.Errorf("couldn't open bucket (%v)", string(k))
return errors.Errorf("couldn't open bucket (%v)", string(k))
}
metaKey := currentBucket.Get([]byte("."))
@@ -316,7 +317,7 @@ func (b *Persistent) RemoveDir(fp string) error {
err = b.db.Update(func(tx *bolt.Tx) error {
bucket := b.getBucket(cleanPath(parentDir), false, tx)
if bucket == nil {
return fmt.Errorf("couldn't open bucket (%v)", fp)
return errors.Errorf("couldn't open bucket (%v)", fp)
}
// delete the cached dir
err := bucket.DeleteBucket([]byte(cleanPath(dirName)))
@@ -376,13 +377,13 @@ func (b *Persistent) GetObject(cachedObject *Object) (err error) {
return b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(cachedObject.Dir, false, tx)
if bucket == nil {
return fmt.Errorf("couldn't open parent bucket for %v", cachedObject.Dir)
return errors.Errorf("couldn't open parent bucket for %v", cachedObject.Dir)
}
val := bucket.Get([]byte(cachedObject.Name))
if val != nil {
return json.Unmarshal(val, cachedObject)
}
return fmt.Errorf("couldn't find object (%v)", cachedObject.Name)
return errors.Errorf("couldn't find object (%v)", cachedObject.Name)
})
}
@@ -391,16 +392,16 @@ func (b *Persistent) AddObject(cachedObject *Object) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket := b.getBucket(cachedObject.Dir, true, tx)
if bucket == nil {
return fmt.Errorf("couldn't open parent bucket for %v", cachedObject)
return errors.Errorf("couldn't open parent bucket for %v", cachedObject)
}
// cache Object Info
encoded, err := json.Marshal(cachedObject)
if err != nil {
return fmt.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
return errors.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
}
err = bucket.Put([]byte(cachedObject.Name), encoded)
if err != nil {
return fmt.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
return errors.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
}
return nil
})
@@ -412,7 +413,7 @@ func (b *Persistent) RemoveObject(fp string) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket := b.getBucket(cleanPath(parentDir), false, tx)
if bucket == nil {
return fmt.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir))
return errors.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir))
}
err := bucket.Delete([]byte(cleanPath(objName)))
if err != nil {
@@ -444,7 +445,7 @@ func (b *Persistent) HasEntry(remote string) bool {
err := b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(dir, false, tx)
if bucket == nil {
return fmt.Errorf("couldn't open parent bucket for %v", remote)
return errors.Errorf("couldn't open parent bucket for %v", remote)
}
if f := bucket.Bucket([]byte(name)); f != nil {
return nil
@@ -453,9 +454,12 @@ func (b *Persistent) HasEntry(remote string) bool {
return nil
}
return fmt.Errorf("couldn't find object (%v)", remote)
return errors.Errorf("couldn't find object (%v)", remote)
})
return err == nil
if err == nil {
return true
}
return false
}
// HasChunk confirms the existence of a single chunk of an object
@@ -472,7 +476,7 @@ func (b *Persistent) GetChunk(cachedObject *Object, offset int64) ([]byte, error
var data []byte
fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10))
data, err := os.ReadFile(fp)
data, err := ioutil.ReadFile(fp)
if err != nil {
return nil, err
}
@@ -485,7 +489,7 @@ func (b *Persistent) AddChunk(fp string, data []byte, offset int64) error {
_ = os.MkdirAll(path.Join(b.dataPath, fp), os.ModePerm)
filePath := path.Join(b.dataPath, fp, strconv.FormatInt(offset, 10))
err := os.WriteFile(filePath, data, os.ModePerm)
err := ioutil.WriteFile(filePath, data, os.ModePerm)
if err != nil {
return err
}
@@ -550,7 +554,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
err := b.db.Update(func(tx *bolt.Tx) error {
dataTsBucket := tx.Bucket([]byte(DataTsBucket))
if dataTsBucket == nil {
return fmt.Errorf("couldn't open (%v) bucket", DataTsBucket)
return errors.Errorf("Couldn't open (%v) bucket", DataTsBucket)
}
// iterate through ts
c := dataTsBucket.Cursor()
@@ -598,7 +602,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
})
if err != nil {
if err == errors.ErrDatabaseNotOpen {
if err == bolt.ErrDatabaseNotOpen {
// we're likely a late janitor and we need to end quietly as there's no guarantee of what exists anymore
return
}
@@ -607,16 +611,16 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
}
// Stats returns a go map with the stats key values
func (b *Persistent) Stats() (map[string]map[string]any, error) {
r := make(map[string]map[string]any)
r["data"] = make(map[string]any)
func (b *Persistent) Stats() (map[string]map[string]interface{}, error) {
r := make(map[string]map[string]interface{})
r["data"] = make(map[string]interface{})
r["data"]["oldest-ts"] = time.Now()
r["data"]["oldest-file"] = ""
r["data"]["newest-ts"] = time.Now()
r["data"]["newest-file"] = ""
r["data"]["total-chunks"] = 0
r["data"]["total-size"] = int64(0)
r["files"] = make(map[string]any)
r["files"] = make(map[string]interface{})
r["files"]["oldest-ts"] = time.Now()
r["files"]["oldest-name"] = ""
r["files"]["newest-ts"] = time.Now()
@@ -728,7 +732,7 @@ func (b *Persistent) GetChunkTs(path string, offset int64) (time.Time, error) {
return nil
}
}
return fmt.Errorf("not found %v-%v", path, offset)
return errors.Errorf("not found %v-%v", path, offset)
})
return t, err
@@ -768,7 +772,7 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return fmt.Errorf("couldn't bucket for %v", tempBucket)
return errors.Errorf("couldn't bucket for %v", tempBucket)
}
tempObj := &tempUploadInfo{
DestPath: destPath,
@@ -779,11 +783,11 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
// cache Object Info
encoded, err := json.Marshal(tempObj)
if err != nil {
return fmt.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
return errors.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
}
err = bucket.Put([]byte(destPath), encoded)
if err != nil {
return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err)
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
}
return nil
@@ -798,7 +802,7 @@ func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (de
err = b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return fmt.Errorf("couldn't bucket for %v", tempBucket)
return errors.Errorf("couldn't bucket for %v", tempBucket)
}
c := bucket.Cursor()
@@ -831,7 +835,7 @@ func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (de
return nil
}
return fmt.Errorf("no pending upload found")
return errors.Errorf("no pending upload found")
})
return destPath, err
@@ -842,14 +846,14 @@ func (b *Persistent) SearchPendingUpload(remote string) (started bool, err error
err = b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(tempBucket))
if bucket == nil {
return fmt.Errorf("couldn't bucket for %v", tempBucket)
return errors.Errorf("couldn't bucket for %v", tempBucket)
}
var tempObj = &tempUploadInfo{}
v := bucket.Get([]byte(remote))
err = json.Unmarshal(v, tempObj)
if err != nil {
return fmt.Errorf("pending upload (%v) not found %v", remote, err)
return errors.Errorf("pending upload (%v) not found %v", remote, err)
}
started = tempObj.Started
@@ -864,7 +868,7 @@ func (b *Persistent) searchPendingUploadFromDir(dir string) (remotes []string, e
err = b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(tempBucket))
if bucket == nil {
return fmt.Errorf("couldn't bucket for %v", tempBucket)
return errors.Errorf("couldn't bucket for %v", tempBucket)
}
c := bucket.Cursor()
@@ -894,22 +898,22 @@ func (b *Persistent) rollbackPendingUpload(remote string) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return fmt.Errorf("couldn't bucket for %v", tempBucket)
return errors.Errorf("couldn't bucket for %v", tempBucket)
}
var tempObj = &tempUploadInfo{}
v := bucket.Get([]byte(remote))
err = json.Unmarshal(v, tempObj)
if err != nil {
return fmt.Errorf("pending upload (%v) not found: %w", remote, err)
return errors.Errorf("pending upload (%v) not found %v", remote, err)
}
tempObj.Started = false
v2, err := json.Marshal(tempObj)
if err != nil {
return fmt.Errorf("pending upload not updated: %w", err)
return errors.Errorf("pending upload not updated %v", err)
}
err = bucket.Put([]byte(tempObj.DestPath), v2)
if err != nil {
return fmt.Errorf("pending upload not updated: %w", err)
return errors.Errorf("pending upload not updated %v", err)
}
return nil
})
@@ -922,7 +926,7 @@ func (b *Persistent) removePendingUpload(remote string) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return fmt.Errorf("couldn't bucket for %v", tempBucket)
return errors.Errorf("couldn't bucket for %v", tempBucket)
}
return bucket.Delete([]byte(remote))
})
@@ -937,17 +941,17 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
return b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return fmt.Errorf("couldn't bucket for %v", tempBucket)
return errors.Errorf("couldn't bucket for %v", tempBucket)
}
var tempObj = &tempUploadInfo{}
v := bucket.Get([]byte(remote))
err = json.Unmarshal(v, tempObj)
if err != nil {
return fmt.Errorf("pending upload (%v) not found %v", remote, err)
return errors.Errorf("pending upload (%v) not found %v", remote, err)
}
if tempObj.Started {
return fmt.Errorf("pending upload already started %v", remote)
return errors.Errorf("pending upload already started %v", remote)
}
err = fn(tempObj)
if err != nil {
@@ -965,11 +969,11 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
}
v2, err := json.Marshal(tempObj)
if err != nil {
return fmt.Errorf("pending upload not updated: %w", err)
return errors.Errorf("pending upload not updated %v", err)
}
err = bucket.Put([]byte(tempObj.DestPath), v2)
if err != nil {
return fmt.Errorf("pending upload not updated: %w", err)
return errors.Errorf("pending upload not updated %v", err)
}
return nil
@@ -1010,11 +1014,11 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) erro
// cache Object Info
encoded, err := json.Marshal(tempObj)
if err != nil {
return fmt.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
return errors.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
}
err = bucket.Put([]byte(destPath), encoded)
if err != nil {
return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err)
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
}
fs.Debugf(cacheFs, "reconciled temporary upload: %v", destPath)
}


@@ -1,5 +1,3 @@
//go:build !plan9 && !js
package cache
import bolt "go.etcd.io/bbolt"


@@ -8,10 +8,10 @@ import (
"crypto/sha1"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
gohash "hash"
"io"
"io/ioutil"
"math/rand"
"path"
"regexp"
@@ -21,6 +21,7 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
@@ -29,9 +30,9 @@ import (
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/encoder"
)
//
// Chunker's composite files have one or more chunks
// and optional metadata object. If it's present,
// meta object is named after the original file.
@@ -64,7 +65,7 @@ import (
// length of 13 decimals it makes a 7-digit base-36 number.
//
// When transactions is set to the norename style, data chunks will
// keep their temporary chunk names (with the transaction identifier
// keep their temporary chunk names (with the transacion identifier
// suffix). To distinguish them from temporary chunks, the txn field
// of the metadata file is set to match the transaction identifier of
// the data chunks.
@@ -78,6 +79,7 @@ import (
// Metadata format v1 does not define any control chunk types,
// they are currently ignored aka reserved.
// In future they can be used to implement resumable uploads etc.
//
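// As a concrete illustration (assuming the default name_format "*.rclone_chunk.###"
// and chunk numbers starting from 1): a composite file "video.avi" is stored on the
// wrapped remote as the metadata object "video.avi" plus the data chunks
// "video.avi.rclone_chunk.001", "video.avi.rclone_chunk.002", and so on.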
const (
ctrlTypeRegStr = `[a-z][a-z0-9]{2,6}`
tempSuffixFormat = `_%04s`
@@ -102,10 +104,8 @@ var (
//
// And still chunker's primary function is to chunk large files
// rather than serve as a generic metadata container.
const (
maxMetadataSize = 1023
maxMetadataSizeWritten = 255
)
const maxMetadataSize = 1023
const maxMetadataSizeWritten = 255
// Current/highest supported metadata format.
const metadataVersion = 2
@@ -150,13 +150,12 @@ func init() {
Name: "remote",
Required: true,
Help: `Remote to chunk/unchunk.
Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
"myremote:bucket" or maybe "myremote:" (not recommended).`,
}, {
Name: "chunk_size",
Advanced: false,
Default: fs.SizeSuffix(2147483648), // 2 GiB
Default: fs.SizeSuffix(2147483648), // 2GB
Help: `Files larger than chunk size will be split in chunks.`,
}, {
Name: "name_format",
@@ -164,7 +163,6 @@ Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
Hide: fs.OptionHideCommandLine,
Default: `*.rclone_chunk.###`,
Help: `String format of chunk file names.
The two placeholders are: base file name (*) and chunk number (#...).
There must be one and only one asterisk and one or more consecutive hash characters.
If chunk number has less digits than the number of hashes, it is left-padded by zeros.
@@ -176,57 +174,48 @@ Possible chunk files are ignored if their name does not match given format.`,
Hide: fs.OptionHideCommandLine,
Default: 1,
Help: `Minimum valid chunk number. Usually 0 or 1.
By default chunk numbers start from 1.`,
}, {
Name: "meta_format",
Advanced: true,
Hide: fs.OptionHideCommandLine,
Default: "simplejson",
Help: `Format of the metadata object or "none".
By default "simplejson".
Help: `Format of the metadata object or "none". By default "simplejson".
Metadata is a small JSON file named after the composite file.`,
Examples: []fs.OptionExample{{
Value: "none",
Help: `Do not use metadata files at all.
Requires hash type "none".`,
Help: `Do not use metadata files at all. Requires hash type "none".`,
}, {
Value: "simplejson",
Help: `Simple JSON supports hash sums and chunk validation.
It has the following fields: ver, size, nchunks, md5, sha1.`,
}},
}, {
Name: "hash_type",
Advanced: false,
Default: "md5",
Help: `Choose how chunker handles hash sums.
All modes but "none" require metadata.`,
Help: `Choose how chunker handles hash sums. All modes but "none" require metadata.`,
Examples: []fs.OptionExample{{
Value: "none",
Help: `Pass any hash supported by wrapped remote for non-chunked files.
Return nothing otherwise.`,
Help: `Pass any hash supported by wrapped remote for non-chunked files, return nothing otherwise`,
}, {
Value: "md5",
Help: `MD5 for composite files.`,
Help: `MD5 for composite files`,
}, {
Value: "sha1",
Help: `SHA1 for composite files.`,
Help: `SHA1 for composite files`,
}, {
Value: "md5all",
Help: `MD5 for all files.`,
Help: `MD5 for all files`,
}, {
Value: "sha1all",
Help: `SHA1 for all files.`,
Help: `SHA1 for all files`,
}, {
Value: "md5quick",
Help: `Copying a file to chunker will request MD5 from the source.
Falling back to SHA1 if unsupported.`,
Help: `Copying a file to chunker will request MD5 from the source falling back to SHA1 if unsupported`,
}, {
Value: "sha1quick",
Help: `Similar to "md5quick" but prefers SHA1 over MD5.`,
Help: `Similar to "md5quick" but prefers SHA1 over MD5`,
}},
}, {
Name: "fail_hard",
@@ -290,13 +279,13 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
baseName, basePath, err := fspath.SplitFs(remote)
if err != nil {
return nil, fmt.Errorf("failed to parse remote %q to wrap: %w", remote, err)
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
}
// Look for a file first
remotePath := fspath.JoinRootPath(basePath, rpath)
baseFs, err := cache.Get(ctx, baseName+remotePath)
if err != fs.ErrorIsFile && err != nil {
return nil, fmt.Errorf("failed to make remote %q to wrap: %w", baseName+remotePath, err)
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", baseName+remotePath)
}
if !operations.CanServerSideMove(baseFs) {
return nil, errors.New("can't use chunker on a backend which doesn't support server-side move or copy")
@@ -308,6 +297,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
root: rpath,
opt: *opt,
}
cache.PinUntilFinalized(f.base, f)
f.dirSort = true // processEntries requires that meta Objects prerun data chunks atm.
if err := f.configure(opt.NameFormat, opt.MetaFormat, opt.HashType, opt.Transactions); err != nil {
@@ -319,45 +309,29 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
// i.e. `rpath` does not exist in the wrapped remote, but chunker
// detects a composite file because it finds the first chunk!
// (yet can't satisfy fstest.CheckListing, will ignore)
if err == nil && !f.useMeta {
if err == nil && !f.useMeta && strings.Contains(rpath, "/") {
firstChunkPath := f.makeChunkName(remotePath, 0, "", "")
newBase, testErr := cache.Get(ctx, baseName+firstChunkPath)
_, testErr := cache.Get(ctx, baseName+firstChunkPath)
if testErr == fs.ErrorIsFile {
f.base = newBase
err = testErr
}
}
cache.PinUntilFinalized(f.base, f)
// Correct root if definitely pointing to a file
if err == fs.ErrorIsFile {
f.root = path.Dir(f.root)
if f.root == "." || f.root == "/" {
f.root = ""
}
}
// Note 1: the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs.
// Note 2: features.Fill() points features.PutStream to our PutStream,
// but features.Mask() will nullify it if wrappedFs does not have it.
f.features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: true,
ReadMimeType: false, // Object.MimeType not supported
WriteMimeType: true,
BucketBased: true,
CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: true,
ReadDirMetadata: true,
WriteDirMetadata: true,
WriteDirSetModTime: true,
UserDirMetadata: true,
DirModTimeUpdatesOnWrite: true,
CaseInsensitive: true,
DuplicateFiles: true,
ReadMimeType: false, // Object.MimeType not supported
WriteMimeType: true,
BucketBased: true,
CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: true,
}).Fill(ctx, f).Mask(ctx, baseFs).WrapsFs(f, baseFs)
f.features.ListR = nil // Recursive listing may cause chunker skip files
f.features.ListP = nil // ListP not supported yet
f.features.Disable("ListR") // Recursive listing may cause chunker skip files
return f, err
}
@@ -401,7 +375,7 @@ type Fs struct {
// configure must be called only from NewFs or by unit tests.
func (f *Fs) configure(nameFormat, metaFormat, hashType, transactionMode string) error {
if err := f.setChunkNameFormat(nameFormat); err != nil {
return fmt.Errorf("invalid name format '%s': %w", nameFormat, err)
return errors.Wrapf(err, "invalid name format '%s'", nameFormat)
}
if err := f.setMetaFormat(metaFormat); err != nil {
return err
@@ -458,10 +432,10 @@ func (f *Fs) setHashType(hashType string) error {
f.hashFallback = true
case "md5all":
f.useMD5 = true
f.hashAll = !f.base.Hashes().Contains(hash.MD5) || f.base.Features().SlowHash
f.hashAll = !f.base.Hashes().Contains(hash.MD5)
case "sha1all":
f.useSHA1 = true
f.hashAll = !f.base.Hashes().Contains(hash.SHA1) || f.base.Features().SlowHash
f.hashAll = !f.base.Hashes().Contains(hash.SHA1)
default:
return fmt.Errorf("unsupported hash type '%s'", hashType)
}
@@ -530,7 +504,7 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
strRegex := regexp.QuoteMeta(pattern)
strRegex = reHashes.ReplaceAllLiteralString(strRegex, reDataOrCtrl)
strRegex = strings.ReplaceAll(strRegex, "\\*", mainNameRegStr)
strRegex = strings.Replace(strRegex, "\\*", mainNameRegStr, -1)
strRegex = fmt.Sprintf("^%s(?:%s|%s)?$", strRegex, tempSuffixRegStr, tempSuffixRegOld)
f.nameRegexp = regexp.MustCompile(strRegex)
@@ -539,7 +513,7 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
if numDigits > 1 {
fmtDigits = fmt.Sprintf("%%0%dd", numDigits)
}
strFmt := strings.ReplaceAll(pattern, "%", "%%")
strFmt := strings.Replace(pattern, "%", "%%", -1)
strFmt = strings.Replace(strFmt, "*", "%s", 1)
f.dataNameFmt = reHashes.ReplaceAllLiteralString(strFmt, fmtDigits)
f.ctrlNameFmt = reHashes.ReplaceAllLiteralString(strFmt, "_%s")
@@ -557,6 +531,7 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
//
// xactID is a transaction identifier. Empty xactID denotes active chunk,
// otherwise temporary chunk name is produced.
//
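// For example, with the default name format an empty xactID gives the active chunk
// name "file.txt.rclone_chunk.001" for the first chunk, while a non-empty xactID
// such as "0abcd" (hypothetical value) produces the temporary name
// "file.txt.rclone_chunk.001_0abcd".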
func (f *Fs) makeChunkName(filePath string, chunkNo int, ctrlType, xactID string) string {
dir, parentName := path.Split(filePath)
var name, tempSuffix string
@@ -633,7 +608,7 @@ func (f *Fs) parseChunkName(filePath string) (parentPath string, chunkNo int, ct
// forbidChunk prints error message or raises error if file is chunk.
// First argument sets log prefix, use `false` to suppress message.
func (f *Fs) forbidChunk(o any, filePath string) error {
func (f *Fs) forbidChunk(o interface{}, filePath string) error {
if parentPath, _, _, _ := f.parseChunkName(filePath); parentPath != "" {
if f.opt.FailHard {
return fmt.Errorf("chunk overlap with %q", parentPath)
@@ -681,7 +656,7 @@ func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err
circleSec := unixSec % closestPrimeZzzzSeconds
first4chars := strconv.FormatInt(circleSec, 36)
for range maxTransactionProbes {
for tries := 0; tries < maxTransactionProbes; tries++ {
f.xactIDMutex.Lock()
randomness := f.xactIDRand.Int63n(maxTwoBase36Digits + 1)
f.xactIDMutex.Unlock()
@@ -722,6 +697,7 @@ func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err
// directory together with dead chunks.
// In future a flag named like `--chunker-list-hidden` may be added to
// rclone that will tell List to reveal hidden chunks.
//
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
entries, err = f.base.List(ctx, dir)
if err != nil {
@@ -831,11 +807,12 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
}
case fs.Directory:
isSubdir[entry.Remote()] = true
wrapDir := fs.NewDirWrapper(entry.Remote(), entry)
wrapDir := fs.NewDirCopy(ctx, entry)
wrapDir.SetRemote(entry.Remote())
tempEntries = append(tempEntries, wrapDir)
default:
if f.opt.FailHard {
return nil, fmt.Errorf("unknown object type %T", entry)
return nil, fmt.Errorf("Unknown object type %T", entry)
}
fs.Debugf(f, "unknown object type %T", entry)
}
@@ -880,6 +857,7 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
// Note that chunker prefers analyzing file names rather than reading
// the content of meta object assuming that directory scans are fast
// but opening even a small file can be slow on some backends.
//
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.scanObject(ctx, remote, false)
}
@@ -889,7 +867,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// ignores non-chunked objects and skips chunk size checks.
func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.Object, error) {
if err := f.forbidChunk(false, remote); err != nil {
return nil, fmt.Errorf("can't access: %w", err)
return nil, errors.Wrap(err, "can't access")
}
var (
@@ -938,7 +916,7 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
case fs.ErrorDirNotFound:
entries = nil
default:
return nil, fmt.Errorf("can't detect composite file: %w", err)
return nil, errors.Wrap(err, "can't detect composite file")
}
if f.useNoRename {
@@ -964,11 +942,6 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
}
if caseInsensitive {
sameMain = strings.EqualFold(mainRemote, remote)
if sameMain && f.base.Features().IsLocal {
// on local, make sure the EqualFold still holds true when accounting for encoding.
// sometimes paths with special characters will only normalize the same way in Standard Encoding.
sameMain = strings.EqualFold(encoder.OS.FromStandardPath(mainRemote), encoder.OS.FromStandardPath(remote))
}
} else {
sameMain = mainRemote == remote
}
@@ -982,13 +955,13 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
}
continue
}
// fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
//fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
if err := o.addChunk(entry, chunkNo); err != nil {
return nil, err
}
}
if o.main == nil && len(o.chunks) == 0 {
if o.main == nil && (o.chunks == nil || len(o.chunks) == 0) {
// Scanning hasn't found data chunks with conforming names.
if f.useMeta || quickScan {
// Metadata is required but absent and there are no chunks.
@@ -1059,7 +1032,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
if err != nil {
return err
}
metadata, err := io.ReadAll(reader)
metadata, err := ioutil.ReadAll(reader)
_ = reader.Close() // ensure file handle is freed on windows
if err != nil {
return err
@@ -1083,7 +1056,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
case ErrMetaTooBig, ErrMetaUnknown:
return err // return these errors unwrapped for unit tests
default:
return fmt.Errorf("invalid metadata: %w", err)
return errors.Wrap(err, "invalid metadata")
}
if o.size != metaInfo.Size() || len(o.chunks) != metaInfo.nChunks {
return errors.New("metadata doesn't match file size")
@@ -1100,7 +1073,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
// readXactID returns the transaction ID stored in the passed metadata object
func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
// if xactID has already been read and cached return it now
// if xactID has already been read and cahced return it now
if o.xIDCached {
return o.xactID, nil
}
@@ -1118,7 +1091,7 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
if err != nil {
return "", err
}
data, err := io.ReadAll(reader)
data, err := ioutil.ReadAll(reader)
_ = reader.Close() // ensure file handle is freed on windows
if err != nil {
return "", err
@@ -1126,7 +1099,7 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
switch o.f.opt.MetaFormat {
case "simplejson":
if len(data) > maxMetadataSizeWritten {
if data != nil && len(data) > maxMetadataSizeWritten {
return "", nil // this was likely not a metadata object, return empty xactID but don't throw error
}
var metadata metaSimpleJSON
@@ -1144,11 +1117,11 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
// put implements Put, PutStream, PutUnchecked, Update
func (f *Fs) put(
ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options []fs.OpenOption,
basePut putFn, action string, target fs.Object,
) (obj fs.Object, err error) {
basePut putFn, action string, target fs.Object) (obj fs.Object, err error) {
// Perform consistency checks
if err := f.forbidChunk(src, remote); err != nil {
return nil, fmt.Errorf("%s refused: %w", action, err)
return nil, errors.Wrap(err, action+" refused")
}
if target == nil {
// Get target object with a quick directory scan
@@ -1162,7 +1135,7 @@ func (f *Fs) put(
obj := target.(*Object)
if err := obj.readMetadata(ctx); err == ErrMetaUnknown {
// refuse to update a file of unsupported format
return nil, fmt.Errorf("refusing to %s: %w", action, err)
return nil, errors.Wrap(err, "refusing to "+action)
}
}
@@ -1190,7 +1163,10 @@ func (f *Fs) put(
}
tempRemote := f.makeChunkName(baseRemote, c.chunkNo, "", xactID)
size := min(c.sizeLeft, c.chunkSize)
size := c.sizeLeft
if size > c.chunkSize {
size = c.chunkSize
}
savedReadCount := c.readCount
// If a single chunk is expected, avoid the extra rename operation
@@ -1238,7 +1214,7 @@ func (f *Fs) put(
// and skips the "EOF" read. Hence, switch to next limit here.
if !(c.chunkLimit == 0 || c.chunkLimit == c.chunkSize || c.sizeTotal == -1 || c.done) {
silentlyRemove(ctx, chunk)
return nil, fmt.Errorf("destination ignored %d data bytes", c.chunkLimit)
return nil, fmt.Errorf("Destination ignored %d data bytes", c.chunkLimit)
}
c.chunkLimit = c.chunkSize
@@ -1247,7 +1223,7 @@ func (f *Fs) put(
// Validate uploaded size
if c.sizeTotal != -1 && c.readCount != c.sizeTotal {
return nil, fmt.Errorf("incorrect upload size %d != %d", c.readCount, c.sizeTotal)
return nil, fmt.Errorf("Incorrect upload size %d != %d", c.readCount, c.sizeTotal)
}
// Check for input that looks like valid metadata
@@ -1284,7 +1260,7 @@ func (f *Fs) put(
sizeTotal += chunk.Size()
}
if sizeTotal != c.readCount {
return nil, fmt.Errorf("incorrect chunks size %d != %d", sizeTotal, c.readCount)
return nil, fmt.Errorf("Incorrect chunks size %d != %d", sizeTotal, c.readCount)
}
// If previous object was chunked, remove its chunks
@@ -1472,10 +1448,13 @@ func (c *chunkingReader) dummyRead(in io.Reader, size int64) error {
c.accountBytes(size)
return nil
}
const bufLen = 1048576 // 1 MiB
const bufLen = 1048576 // 1MB
buf := make([]byte, bufLen)
for size > 0 {
n := min(size, bufLen)
n := size
if n > bufLen {
n = bufLen
}
if _, err := io.ReadFull(in, buf[0:n]); err != nil {
return err
}
@@ -1574,19 +1553,11 @@ func (f *Fs) Hashes() hash.Set {
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
if err := f.forbidChunk(dir, dir); err != nil {
return fmt.Errorf("can't mkdir: %w", err)
return errors.Wrap(err, "can't mkdir")
}
return f.base.Mkdir(ctx, dir)
}
// MkdirMetadata makes the root directory of the Fs object
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
if do := f.base.Features().MkdirMetadata; do != nil {
return do(ctx, dir, metadata)
}
return nil, fs.ErrorNotImplemented
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
@@ -1604,6 +1575,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// This command will chain to `purge` from wrapped remote.
// As a result it removes not only composite chunker files with their
// active chunks but also all hidden temporary chunks in the directory.
//
func (f *Fs) Purge(ctx context.Context, dir string) error {
do := f.base.Features().Purge
if do == nil {
@@ -1645,11 +1617,12 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// Unsupported control chunks will get re-picked by a more recent
// rclone version with unexpected results. This can be helped by
// the `delete hidden` flag above or at least the user has been warned.
//
func (o *Object) Remove(ctx context.Context) (err error) {
if err := o.f.forbidChunk(o, o.Remote()); err != nil {
// operations.Move can still call Remove if chunker's Move refuses
// to corrupt file in hard mode. Hence, refuse to Remove, too.
return fmt.Errorf("refuse to corrupt: %w", err)
return errors.Wrap(err, "refuse to corrupt")
}
if err := o.readMetadata(ctx); err == ErrMetaUnknown {
// Proceed but warn user that unexpected things can happen.
@@ -1677,12 +1650,12 @@ func (o *Object) Remove(ctx context.Context) (err error) {
// copyOrMove implements copy or move
func (f *Fs) copyOrMove(ctx context.Context, o *Object, remote string, do copyMoveFn, md5, sha1, opName string) (fs.Object, error) {
if err := f.forbidChunk(o, remote); err != nil {
return nil, fmt.Errorf("can't %s: %w", opName, err)
return nil, errors.Wrapf(err, "can't %s", opName)
}
if err := o.readMetadata(ctx); err != nil {
// Refuse to copy/move composite files with invalid or future
// metadata format which might involve unsupported chunk types.
return nil, fmt.Errorf("can't %s this file: %w", opName, err)
return nil, errors.Wrapf(err, "can't %s this file", opName)
}
if !o.isComposite() {
fs.Debugf(o, "%s non-chunked object...", opName)
@@ -1820,9 +1793,9 @@ func (f *Fs) okForServerSide(ctx context.Context, src fs.Object, opName string)
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1841,9 +1814,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1861,8 +1834,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// baseMove chains to the wrapped Move or simulates it by Copy+Delete
func (f *Fs) baseMove(ctx context.Context, src fs.Object, remote string, delMode int) (fs.Object, error) {
ctx, ci := fs.AddConfig(ctx)
ci.NameTransform = nil // ensure operations.Move does not double-transform here
var (
dest fs.Object
err error
@@ -1906,14 +1877,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return do(ctx, srcFs.base, srcRemote, dstRemote)
}
// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
if do := f.base.Features().DirSetModTime; do != nil {
return do(ctx, dir, modTime)
}
return fs.ErrorNotImplemented
}
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
@@ -1921,7 +1884,7 @@ func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) e
func (f *Fs) CleanUp(ctx context.Context) error {
do := f.base.Features().CleanUp
if do == nil {
return errors.New("not supported by underlying remote")
return errors.New("can't CleanUp")
}
return do(ctx)
}
@@ -1930,7 +1893,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
do := f.base.Features().About
if do == nil {
return nil, errors.New("not supported by underlying remote")
return nil, errors.New("About not supported")
}
return do(ctx)
}
@@ -1962,7 +1925,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
return
}
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
// fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
//fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
if entryType == fs.EntryObject {
mainPath, _, _, xactID := f.parseChunkName(path)
metaXactID := ""
@@ -2151,6 +2114,7 @@ func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
// file, then tries to read it from metadata. This in theory
// handles the unusual case when a small file has been tampered
// on the level of wrapped remote but chunker is unaware of that.
//
func (o *Object) Hash(ctx context.Context, hashType hash.Type) (string, error) {
if err := o.readMetadata(ctx); err != nil {
return "", err // valid metadata is required to get hash, abort
@@ -2188,7 +2152,7 @@ func (o *Object) UnWrap() fs.Object {
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
if err := o.readMetadata(ctx); err != nil {
// refuse to open unsupported format
return nil, fmt.Errorf("can't open: %w", err)
return nil, errors.Wrap(err, "can't open")
}
if !o.isComposite() {
return o.mainChunk().Open(ctx, options...) // chain to wrapped non-chunked file
@@ -2439,6 +2403,7 @@ type metaSimpleJSON struct {
// - for files larger than chunk size
// - if file contents can be mistaken as meta object
// - if consistent hashing is On but wrapped remote can't provide given hash
//
func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1, xactID string) ([]byte, error) {
version := metadataVersion
if xactID == "" && version == 2 {
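// Illustrative sketch, not part of the diff above: roughly what a simplejson
// metadata object for a composite file looks like when marshalled. The struct
// below is a stand-in for metaSimpleJSON; its JSON field names are assumptions
// made for the example, the authoritative tags live in the backend source.
package main

import (
	"encoding/json"
	"fmt"
)

type exampleMeta struct {
	Version int    `json:"ver"`
	Size    int64  `json:"size"`    // total size of the composite file
	NChunks int    `json:"nchunks"` // number of data chunks
	MD5     string `json:"md5,omitempty"`
	SHA1    string `json:"sha1,omitempty"`
	XactID  string `json:"txn,omitempty"` // only present with metadata version 2
}

func main() {
	m := exampleMeta{Version: 2, Size: 10485760, NChunks: 3, MD5: "0123456789abcdef0123456789abcdef", XactID: "abc12"}
	out, _ := json.Marshal(m)
	fmt.Println(string(out))
}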
@@ -2471,13 +2436,14 @@ func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1,
// New format will have a higher version number and cannot be correctly
// handled by current implementation.
// The version check below will then explicitly ask user to upgrade rclone.
//
func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) {
// Be strict about JSON format
// to reduce possibility that a random small file resembles metadata.
if len(data) > maxMetadataSizeWritten {
if data != nil && len(data) > maxMetadataSizeWritten {
return nil, false, ErrMetaTooBig
}
if len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
return nil, false, errors.New("invalid json")
}
var metadata metaSimpleJSON
@@ -2574,8 +2540,6 @@ var (
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirSetModTimer = (*Fs)(nil)
_ fs.MkdirMetadataer = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)



@@ -5,15 +5,13 @@ import (
"context"
"flag"
"fmt"
"io"
"io/ioutil"
"path"
"regexp"
"strings"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
@@ -35,35 +33,11 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
Path: fmt.Sprintf("chunker-upload-%dk", kilobytes),
Size: int64(kilobytes) * int64(fs.Kibi),
Size: int64(kilobytes) * int64(fs.KibiByte),
})
})
}
type settings map[string]any
func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, path string, opts settings) fs.Fs {
fsName := strings.Split(f.Name(), "{")[0] // strip off hash
configMap := configmap.Simple{}
for key, val := range opts {
configMap[key] = fmt.Sprintf("%v", val)
}
rpath := fspath.JoinRootPath(f.Root(), path)
remote := fmt.Sprintf("%s,%s:%s", fsName, configMap.String(), rpath)
fixFs, err := fs.NewFs(ctx, remote)
require.NoError(t, err)
return fixFs
}
var mtime1 = fstest.Time("2001-02-03T04:05:06.499999999Z")
func testPutFile(ctx context.Context, t *testing.T, f fs.Fs, name, contents, message string, check bool) fs.Object {
item := fstest.Item{Path: name, ModTime: mtime1}
obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
assert.NotNil(t, obj, message)
return obj
}
// test chunk name parser
func testChunkNameFormat(t *testing.T, f *Fs) {
saveOpt := f.opt
@@ -413,7 +387,7 @@ func testSmallFileInternals(t *testing.T, f *Fs) {
if r == nil {
return
}
data, err := io.ReadAll(r)
data, err := ioutil.ReadAll(r)
assert.NoError(t, err)
assert.Equal(t, contents, string(data))
_ = r.Close()
@@ -440,7 +414,7 @@ func testSmallFileInternals(t *testing.T, f *Fs) {
checkSmallFile := func(name, contents string) {
filename := path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
_, put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
assert.NotNil(t, put)
checkSmallFileInternals(put)
checkContents(put, contents)
@@ -489,7 +463,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
newFile := func(name string) fs.Object {
item := fstest.Item{Path: path.Join(dir, name), ModTime: modTime}
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
return obj
}
@@ -538,7 +512,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
assert.NoError(t, err)
var chunkContents []byte
assert.NotPanics(t, func() {
chunkContents, err = io.ReadAll(r)
chunkContents, err = ioutil.ReadAll(r)
_ = r.Close()
})
assert.NoError(t, err)
@@ -573,7 +547,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
r, err = willyChunk.Open(ctx)
assert.NoError(t, err)
assert.NotPanics(t, func() {
_, err = io.ReadAll(r)
_, err = ioutil.ReadAll(r)
_ = r.Close()
})
assert.NoError(t, err)
@@ -599,7 +573,7 @@ func testChunkNumberOverflow(t *testing.T, f *Fs) {
newFile := func(f fs.Fs, name string) (obj fs.Object, filename string, txnID string) {
filename = path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
obj = fstests.PutTestContents(ctx, t, f, &item, contents, true)
_, obj = fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
if chunkObj, isChunkObj := obj.(*Object); isChunkObj {
txnID = chunkObj.xactID
@@ -643,13 +617,22 @@ func testMetadataInput(t *testing.T, f *Fs) {
}()
f.opt.FailHard = false
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
putFile := func(f fs.Fs, name, contents, message string, check bool) fs.Object {
item := fstest.Item{Path: name, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
assert.NotNil(t, obj, message)
return obj
}
runSubtest := func(contents, name string) {
description := fmt.Sprintf("file with %s metadata", name)
filename := path.Join(dir, name)
require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct")
part := testPutFile(ctx, t, f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
_ = testPutFile(ctx, t, f, filename, contents, "upload "+description, false)
part := putFile(f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
_ = putFile(f, filename, contents, "upload "+description, false)
obj, err := f.NewObject(ctx, filename)
assert.NoError(t, err, "access "+description)
@@ -672,7 +655,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
assert.NoError(t, err, "open "+description)
assert.NotNil(t, r, "open stream of "+description)
if err == nil && r != nil {
data, err := io.ReadAll(r)
data, err := ioutil.ReadAll(r)
assert.NoError(t, err, "read all of "+description)
assert.Equal(t, contents, string(data), description+" contents is ok")
_ = r.Close()
@@ -695,7 +678,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
// Test that chunker refuses to change on objects with future/unknown metadata
func testFutureProof(t *testing.T, f *Fs) {
if !f.useMeta {
if f.opt.MetaFormat == "none" {
t.Skip("this test requires metadata support")
}
@@ -716,7 +699,7 @@ func testFutureProof(t *testing.T, f *Fs) {
name = f.makeChunkName(name, part-1, "", "")
}
item := fstest.Item{Path: name, ModTime: modTime}
obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true)
_, obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true)
assert.NotNil(t, obj, msg)
}
@@ -758,8 +741,8 @@ func testFutureProof(t *testing.T, f *Fs) {
assert.Error(t, err)
// Rcat must fail
in := io.NopCloser(bytes.NewBufferString("abc"))
robj, err := operations.Rcat(ctx, f, file, in, modTime, nil)
in := ioutil.NopCloser(bytes.NewBufferString("abc"))
robj, err := operations.Rcat(ctx, f, file, in, modTime)
assert.Nil(t, robj)
assert.NotNil(t, err)
if err != nil {
@@ -790,7 +773,7 @@ func testBackwardsCompatibility(t *testing.T, f *Fs) {
newFile := func(f fs.Fs, name string) (fs.Object, string) {
filename := path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
return obj, filename
}
@@ -844,7 +827,7 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
item := fstest.Item{Path: "movefile", ModTime: modTime}
contents := "abcdef"
file := fstests.PutTestContents(ctx, t, fs1, &item, contents, true)
_, file := fstests.PutTestContents(ctx, t, fs1, &item, contents, true)
dstOverwritten, _ := fs2.NewObject(ctx, "movefile")
dstFile, err := operations.Move(ctx, fs2, dstOverwritten, "movefile", file)
@@ -854,51 +837,13 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
r, err := dstFile.Open(ctx)
assert.NoError(t, err)
assert.NotNil(t, r)
data, err := io.ReadAll(r)
data, err := ioutil.ReadAll(r)
assert.NoError(t, err)
assert.Equal(t, contents, string(data))
_ = r.Close()
_ = operations.Purge(ctx, f.base, dir)
}
// Test that md5all creates metadata even for small files
func testMD5AllSlow(t *testing.T, f *Fs) {
ctx := context.Background()
fsResult := deriveFs(ctx, t, f, "md5all", settings{
"chunk_size": "1P",
"name_format": "*.#",
"hash_type": "md5all",
"transactions": "rename",
"meta_format": "simplejson",
})
chunkFs, ok := fsResult.(*Fs)
require.True(t, ok, "fs must be a chunker remote")
baseFs := chunkFs.base
if !baseFs.Features().SlowHash {
t.Skipf("this test needs a base fs with slow hash, e.g. local")
}
assert.True(t, chunkFs.useMD5, "must use md5")
assert.True(t, chunkFs.hashAll, "must hash all files")
_ = testPutFile(ctx, t, chunkFs, "file", "-", "error", true)
obj, err := chunkFs.NewObject(ctx, "file")
require.NoError(t, err)
sum, err := obj.Hash(ctx, hash.MD5)
assert.NoError(t, err)
assert.Equal(t, "336d5ebc5436534e61d16e63ddfca327", sum)
list, err := baseFs.List(ctx, "")
require.NoError(t, err)
assert.Equal(t, 2, len(list))
_, err = baseFs.NewObject(ctx, "file")
assert.NoError(t, err, "metadata must be created")
_, err = baseFs.NewObject(ctx, "file.1")
assert.NoError(t, err, "first chunk must be created")
require.NoError(t, operations.Purge(ctx, baseFs, ""))
}
// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
t.Run("PutLarge", func(t *testing.T) {
@@ -931,9 +876,6 @@ func (f *Fs) InternalTest(t *testing.T) {
t.Run("ChunkerServerSideMove", func(t *testing.T) {
testChunkerServerSideMove(t, f)
})
t.Run("MD5AllSlow", func(t *testing.T) {
testMD5AllSlow(t, f)
})
}
var _ fstests.InternalTester = (*Fs)(nil)


@@ -35,18 +35,14 @@ func TestIntegration(t *testing.T) {
"MimeType",
"GetTier",
"SetTier",
"Metadata",
"SetMetadata",
},
UnimplementableFsMethods: []string{
"PublicLink",
"OpenWriterAt",
"OpenChunkWriter",
"MergeDirs",
"DirCacheFlush",
"UserInfo",
"Disconnect",
"ListP",
},
}
if *fstest.RemoteName == "" {
@@ -57,7 +53,6 @@ func TestIntegration(t *testing.T) {
{Name: name, Key: "type", Value: "chunker"},
{Name: name, Key: "remote", Value: tempDir},
}
opt.QuickTestOK = true
}
fstests.Run(t, &opt)
}


@@ -1,48 +0,0 @@
// Package api has type definitions for cloudinary
package api
import (
"fmt"
)
// CloudinaryEncoder extends the built-in encoder
type CloudinaryEncoder interface {
// FromStandardPath takes a / separated path in Standard encoding
// and converts it to a / separated path in this encoding.
FromStandardPath(string) string
// FromStandardName takes name in Standard encoding and converts
// it in this encoding.
FromStandardName(string) string
// ToStandardPath takes a / separated path in this encoding
// and converts it to a / separated path in Standard encoding.
ToStandardPath(string) string
// ToStandardName takes name in this encoding and converts
// it in Standard encoding.
ToStandardName(string, string) string
// Encoded root of the remote (as passed into NewFs)
FromStandardFullPath(string) string
}
// UpdateOptions was created to pass options from Update to Put
type UpdateOptions struct {
PublicID string
ResourceType string
DeliveryType string
AssetFolder string
DisplayName string
}
// Header formats the option as a string
func (o *UpdateOptions) Header() (string, string) {
return "UpdateOption", fmt.Sprintf("%s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
}
// Mandatory returns whether the option must be parsed or can be ignored
func (o *UpdateOptions) Mandatory() bool {
return false
}
// String formats the option into human-readable form
func (o *UpdateOptions) String() string {
return fmt.Sprintf("Fully qualified Public ID: %s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
}


@@ -1,754 +0,0 @@
// Package cloudinary provides an interface to the Cloudinary DAM
package cloudinary
import (
"context"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"path"
"slices"
"strconv"
"strings"
"time"
"github.com/cloudinary/cloudinary-go/v2"
SDKApi "github.com/cloudinary/cloudinary-go/v2/api"
"github.com/cloudinary/cloudinary-go/v2/api/admin"
"github.com/cloudinary/cloudinary-go/v2/api/admin/search"
"github.com/cloudinary/cloudinary-go/v2/api/uploader"
"github.com/rclone/rclone/backend/cloudinary/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"github.com/zeebo/blake3"
)
// Cloudinary shouldn't have a trailing dot if there is no path
func cldPathDir(somePath string) string {
if somePath == "" || somePath == "." {
return somePath
}
dir := path.Dir(somePath)
if dir == "." {
return ""
}
return dir
}
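// Quick illustration, not part of the original file: what cldPathDir returns
// for a few inputs, and how it differs from path.Dir, which yields "." for a
// bare file name.
package main

import (
	"fmt"
	"path"
)

func cldPathDir(somePath string) string {
	if somePath == "" || somePath == "." {
		return somePath
	}
	dir := path.Dir(somePath)
	if dir == "." {
		return ""
	}
	return dir
}

func main() {
	fmt.Printf("%q -> %q\n", "folder/sub/file.jpg", cldPathDir("folder/sub/file.jpg")) // "folder/sub"
	fmt.Printf("%q -> %q\n", "file.jpg", cldPathDir("file.jpg"))                       // "" rather than "."
	fmt.Printf("%q -> %q\n", "", cldPathDir(""))                                       // ""
}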
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "cloudinary",
Description: "Cloudinary",
NewFs: NewFs,
Options: []fs.Option{
{
Name: "cloud_name",
Help: "Cloudinary Environment Name",
Required: true,
Sensitive: true,
},
{
Name: "api_key",
Help: "Cloudinary API Key",
Required: true,
Sensitive: true,
},
{
Name: "api_secret",
Help: "Cloudinary API Secret",
Required: true,
Sensitive: true,
},
{
Name: "upload_prefix",
Help: "Specify the API endpoint for environments out of the US",
},
{
Name: "upload_preset",
Help: "Upload Preset to select asset manipulation on upload",
},
{
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.Base | // Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
encoder.EncodeSlash |
encoder.EncodeLtGt |
encoder.EncodeDoubleQuote |
encoder.EncodeQuestion |
encoder.EncodeAsterisk |
encoder.EncodePipe |
encoder.EncodeHash |
encoder.EncodePercent |
encoder.EncodeBackSlash |
encoder.EncodeDel |
encoder.EncodeCtl |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8 |
encoder.EncodeDot),
},
{
Name: "eventually_consistent_delay",
Default: fs.Duration(0),
Advanced: true,
Help: "Wait N seconds for eventual consistency of the databases that support the backend operation",
},
{
Name: "adjust_media_files_extensions",
Default: true,
Advanced: true,
Help: "Cloudinary handles media formats as a file attribute and strips it from the name, which is unlike most other file systems",
},
{
Name: "media_extensions",
Default: []string{
"3ds", "3g2", "3gp", "ai", "arw", "avi", "avif", "bmp", "bw",
"cr2", "cr3", "djvu", "dng", "eps3", "fbx", "flif", "flv", "gif",
"glb", "gltf", "hdp", "heic", "heif", "ico", "indd", "jp2", "jpe",
"jpeg", "jpg", "jxl", "jxr", "m2ts", "mov", "mp4", "mpeg", "mts",
"mxf", "obj", "ogv", "pdf", "ply", "png", "psd", "svg", "tga",
"tif", "tiff", "ts", "u3ma", "usdz", "wdp", "webm", "webp", "wmv"},
Advanced: true,
Help: "Cloudinary supported media extensions",
},
},
})
}
// Options defines the configuration for this backend
type Options struct {
CloudName string `config:"cloud_name"`
APIKey string `config:"api_key"`
APISecret string `config:"api_secret"`
UploadPrefix string `config:"upload_prefix"`
UploadPreset string `config:"upload_preset"`
Enc encoder.MultiEncoder `config:"encoding"`
EventuallyConsistentDelay fs.Duration `config:"eventually_consistent_delay"`
MediaExtensions []string `config:"media_extensions"`
AdjustMediaFilesExtensions bool `config:"adjust_media_files_extensions"`
}
// Fs represents a remote cloudinary server
type Fs struct {
name string
root string
opt Options
features *fs.Features
pacer *fs.Pacer
srv *rest.Client // For downloading assets via the Cloudinary CDN
cld *cloudinary.Cloudinary // API calls are going through the Cloudinary SDK
lastCRUD time.Time
}
// Object describes a cloudinary object
type Object struct {
fs *Fs
remote string
size int64
modTime time.Time
url string
md5sum string
publicID string
resourceType string
deliveryType string
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// Initialize the Cloudinary client
cld, err := cloudinary.NewFromParams(opt.CloudName, opt.APIKey, opt.APISecret)
if err != nil {
return nil, fmt.Errorf("failed to create Cloudinary client: %w", err)
}
cld.Admin.Client = *fshttp.NewClient(ctx)
cld.Upload.Client = *fshttp.NewClient(ctx)
if opt.UploadPrefix != "" {
cld.Config.API.UploadPrefix = opt.UploadPrefix
}
client := fshttp.NewClient(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
cld: cld,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(1000), pacer.MaxSleep(10000), pacer.DecayConstant(2))),
srv: rest.NewClient(client),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
if root != "" {
// Check to see if the root is actually an existing file
remote := path.Base(root)
f.root = cldPathDir(root)
_, err := f.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) {
// File doesn't exist so return the previous root
f.root = root
return f, nil
}
return nil, err
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
// ------------------------------------------------------------
// FromStandardPath implementation of the api.CloudinaryEncoder
func (f *Fs) FromStandardPath(s string) string {
return strings.ReplaceAll(f.opt.Enc.FromStandardPath(s), "&", "\uFF06")
}
// FromStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) FromStandardName(s string) string {
if f.opt.AdjustMediaFilesExtensions {
parsedURL, err := url.Parse(s)
ext := ""
if err != nil {
fs.Logf(nil, "Error parsing URL: %v", err)
} else {
ext = path.Ext(parsedURL.Path)
if slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
s = strings.TrimSuffix(parsedURL.Path, ext)
}
}
}
return strings.ReplaceAll(f.opt.Enc.FromStandardName(s), "&", "\uFF06")
}
// ToStandardPath implementation of the api.CloudinaryEncoder
func (f *Fs) ToStandardPath(s string) string {
return strings.ReplaceAll(f.opt.Enc.ToStandardPath(s), "\uFF06", "&")
}
// ToStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) ToStandardName(s string, assetURL string) string {
ext := ""
if f.opt.AdjustMediaFilesExtensions {
parsedURL, err := url.Parse(assetURL)
if err != nil {
fs.Logf(nil, "Error parsing URL: %v", err)
} else {
ext = path.Ext(parsedURL.Path)
if !slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
ext = ""
}
}
}
return strings.ReplaceAll(f.opt.Enc.ToStandardName(s), "\uFF06", "&") + ext
}
// FromStandardFullPath encodes a full path to Cloudinary standard
func (f *Fs) FromStandardFullPath(dir string) string {
return path.Join(api.CloudinaryEncoder.FromStandardPath(f, f.root), api.CloudinaryEncoder.FromStandardPath(f, dir))
}
// ToAssetFolderAPI encodes folders as expected by the Cloudinary SDK
func (f *Fs) ToAssetFolderAPI(dir string) string {
return strings.ReplaceAll(dir, "%", "%25")
}
// ToDisplayNameElastic encodes a special case of elasticsearch
func (f *Fs) ToDisplayNameElastic(dir string) string {
return strings.ReplaceAll(dir, "!", "\\!")
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// WaitEventuallyConsistent waits till the FS is eventually consistent
func (f *Fs) WaitEventuallyConsistent() {
if f.opt.EventuallyConsistentDelay == fs.Duration(0) {
return
}
delay := time.Duration(f.opt.EventuallyConsistentDelay)
timeSinceLastCRUD := time.Since(f.lastCRUD)
if timeSinceLastCRUD < delay {
time.Sleep(delay - timeSinceLastCRUD)
}
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("Cloudinary root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// List the objects and directories in dir into entries
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
remotePrefix := f.FromStandardFullPath(dir)
if remotePrefix != "" && !strings.HasSuffix(remotePrefix, "/") {
remotePrefix += "/"
}
var entries fs.DirEntries
dirs := make(map[string]struct{})
nextCursor := ""
f.WaitEventuallyConsistent()
for {
// use the folders API to list folders.
folderParams := admin.SubFoldersParams{
Folder: f.ToAssetFolderAPI(remotePrefix),
MaxResults: 500,
}
if nextCursor != "" {
folderParams.NextCursor = nextCursor
}
results, err := f.cld.Admin.SubFolders(ctx, folderParams)
if err != nil {
return nil, fmt.Errorf("failed to list sub-folders: %w", err)
}
if results.Error.Message != "" {
if strings.HasPrefix(results.Error.Message, "Can't find folder with path") {
return nil, fs.ErrorDirNotFound
}
return nil, fmt.Errorf("failed to list sub-folders: %s", results.Error.Message)
}
for _, folder := range results.Folders {
relativePath := api.CloudinaryEncoder.ToStandardPath(f, strings.TrimPrefix(folder.Path, remotePrefix))
parts := strings.Split(relativePath, "/")
// It's a directory
dirName := parts[len(parts)-1]
if _, found := dirs[dirName]; !found {
d := fs.NewDir(path.Join(dir, dirName), time.Time{})
entries = append(entries, d)
dirs[dirName] = struct{}{}
}
}
// Break if there are no more results
if results.NextCursor == "" {
break
}
nextCursor = results.NextCursor
}
for {
// Use the assets.AssetsByAssetFolder API to list assets
assetsParams := admin.AssetsByAssetFolderParams{
AssetFolder: remotePrefix,
MaxResults: 500,
}
if nextCursor != "" {
assetsParams.NextCursor = nextCursor
}
results, err := f.cld.Admin.AssetsByAssetFolder(ctx, assetsParams)
if err != nil {
return nil, fmt.Errorf("failed to list assets: %w", err)
}
for _, asset := range results.Assets {
remote := path.Join(dir, api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName, asset.SecureURL))
o := &Object{
fs: f,
remote: remote,
size: int64(asset.Bytes),
modTime: asset.CreatedAt,
url: asset.SecureURL,
publicID: asset.PublicID,
resourceType: asset.AssetType,
deliveryType: asset.Type,
}
entries = append(entries, o)
}
// Break if there are no more results
if results.NextCursor == "" {
break
}
nextCursor = results.NextCursor
}
return entries, nil
}
// NewObject finds the Object at remote. If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
searchParams := search.Query{
Expression: fmt.Sprintf("asset_folder:\"%s\" AND display_name:\"%s\"",
f.FromStandardFullPath(cldPathDir(remote)),
f.ToDisplayNameElastic(api.CloudinaryEncoder.FromStandardName(f, path.Base(remote)))),
SortBy: []search.SortByField{{"uploaded_at": "desc"}},
MaxResults: 2,
}
var results *admin.SearchResult
f.WaitEventuallyConsistent()
err := f.pacer.Call(func() (bool, error) {
var err1 error
results, err1 = f.cld.Admin.Search(ctx, searchParams)
if err1 == nil && results.TotalCount != len(results.Assets) {
err1 = errors.New("partial response so waiting for eventual consistency")
}
return shouldRetry(ctx, nil, err1)
})
if err != nil {
return nil, fs.ErrorObjectNotFound
}
if results.TotalCount == 0 || len(results.Assets) == 0 {
return nil, fs.ErrorObjectNotFound
}
asset := results.Assets[0]
o := &Object{
fs: f,
remote: remote,
size: int64(asset.Bytes),
modTime: asset.UploadedAt,
url: asset.SecureURL,
md5sum: asset.Etag,
publicID: asset.PublicID,
resourceType: asset.ResourceType,
deliveryType: asset.Type,
}
return o, nil
}
func (f *Fs) getSuggestedPublicID(assetFolder string, displayName string, modTime time.Time) string {
payload := []byte(path.Join(assetFolder, displayName))
hash := blake3.Sum256(payload)
return hex.EncodeToString(hash[:])
}
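// Illustrative sketch, not part of the original file: the same "derive a
// stable public ID from asset folder plus display name" idea, but hashed with
// crypto/sha256 from the standard library instead of the blake3 dependency so
// the snippet runs on its own. The real backend uses blake3.Sum256.
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"path"
)

func suggestedPublicID(assetFolder, displayName string) string {
	payload := []byte(path.Join(assetFolder, displayName))
	sum := sha256.Sum256(payload)
	return hex.EncodeToString(sum[:])
}

func main() {
	// The same folder and name always yield the same ID, so re-uploads can
	// overwrite the asset instead of creating randomly named duplicates.
	fmt.Println(suggestedPublicID("photos/2024", "cat.jpg"))
}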
// Put uploads content to Cloudinary
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if src.Size() == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
params := uploader.UploadParams{
UploadPreset: f.opt.UploadPreset,
}
updateObject := false
var modTime time.Time
for _, option := range options {
if updateOptions, ok := option.(*api.UpdateOptions); ok {
if updateOptions.PublicID != "" {
updateObject = true
params.Overwrite = SDKApi.Bool(true)
params.Invalidate = SDKApi.Bool(true)
params.PublicID = updateOptions.PublicID
params.ResourceType = updateOptions.ResourceType
params.Type = SDKApi.DeliveryType(updateOptions.DeliveryType)
params.AssetFolder = updateOptions.AssetFolder
params.DisplayName = updateOptions.DisplayName
modTime = src.ModTime(ctx)
}
}
}
if !updateObject {
params.AssetFolder = f.FromStandardFullPath(cldPathDir(src.Remote()))
params.DisplayName = api.CloudinaryEncoder.FromStandardName(f, path.Base(src.Remote()))
// We want to conform to the unique asset ID of rclone, which is (asset_folder,display_name,last_modified).
// We also want to enable customers to choose their own public_id, in case duplicate names are not a crucial use case.
// Upload_presets that apply randomness to the public ID would not work well with rclone duplicate assets support.
params.FilenameOverride = f.getSuggestedPublicID(params.AssetFolder, params.DisplayName, src.ModTime(ctx))
}
uploadResult, err := f.cld.Upload.Upload(ctx, in, params)
f.lastCRUD = time.Now()
if err != nil {
return nil, fmt.Errorf("failed to upload to Cloudinary: %w", err)
}
if !updateObject {
modTime = uploadResult.CreatedAt
}
if uploadResult.Error.Message != "" {
return nil, errors.New(uploadResult.Error.Message)
}
o := &Object{
fs: f,
remote: src.Remote(),
size: int64(uploadResult.Bytes),
modTime: modTime,
url: uploadResult.SecureURL,
md5sum: uploadResult.Etag,
publicID: uploadResult.PublicID,
resourceType: uploadResult.ResourceType,
deliveryType: uploadResult.Type,
}
return o, nil
}
// Precision of the remote
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Hashes returns the supported hash sets
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// Mkdir creates empty folders
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
params := admin.CreateFolderParams{Folder: f.ToAssetFolderAPI(f.FromStandardFullPath(dir))}
res, err := f.cld.Admin.CreateFolder(ctx, params)
f.lastCRUD = time.Now()
if err != nil {
return err
}
if res.Error.Message != "" {
return errors.New(res.Error.Message)
}
return nil
}
// Rmdir deletes empty folders
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// Additional test because Cloudinary will delete folders without
// assets, regardless of empty sub-folders
folder := f.ToAssetFolderAPI(f.FromStandardFullPath(dir))
folderParams := admin.SubFoldersParams{
Folder: folder,
MaxResults: 1,
}
results, err := f.cld.Admin.SubFolders(ctx, folderParams)
if err != nil {
return err
}
if results.TotalCount > 0 {
return fs.ErrorDirectoryNotEmpty
}
params := admin.DeleteFolderParams{Folder: folder}
res, err := f.cld.Admin.DeleteFolder(ctx, params)
f.lastCRUD = time.Now()
if err != nil {
return err
}
if res.Error.Message != "" {
if strings.HasPrefix(res.Error.Message, "Can't find folder with path") {
return fs.ErrorDirNotFound
}
return errors.New(res.Error.Message)
}
return nil
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
420, // Too Many Requests (legacy)
429, // Too Many Requests
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
if err != nil {
tryAgain := "Try again on "
if idx := strings.Index(err.Error(), tryAgain); idx != -1 {
layout := "2006-01-02 15:04:05 UTC"
dateStr := err.Error()[idx+len(tryAgain) : idx+len(tryAgain)+len(layout)]
timestamp, err2 := time.Parse(layout, dateStr)
if err2 == nil {
return true, fserrors.NewErrorRetryAfter(time.Until(timestamp))
}
}
fs.Debugf(nil, "Retrying API error %v", err)
return true, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
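// Illustrative sketch, not part of the original file: exercising the
// "Try again on <timestamp>" detection above with a plain error string.
// The fserrors.NewErrorRetryAfter wrapping is omitted so this stays
// standard-library only; a bounds check guards the slice expression.
package main

import (
	"errors"
	"fmt"
	"strings"
	"time"
)

func retryAfter(err error) (time.Duration, bool) {
	const tryAgain = "Try again on "
	const layout = "2006-01-02 15:04:05 UTC"
	msg := err.Error()
	idx := strings.Index(msg, tryAgain)
	if idx == -1 || len(msg) < idx+len(tryAgain)+len(layout) {
		return 0, false
	}
	dateStr := msg[idx+len(tryAgain) : idx+len(tryAgain)+len(layout)]
	ts, parseErr := time.Parse(layout, dateStr)
	if parseErr != nil {
		return 0, false
	}
	return time.Until(ts), true
}

func main() {
	err := errors.New("Rate limit reached. Try again on 2030-01-01 00:00:00 UTC.")
	if d, ok := retryAfter(err); ok {
		fmt.Printf("retry after roughly %s\n", d.Round(time.Minute))
	}
}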
// ------------------------------------------------------------
// Hash returns the MD5 of an object
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
if ty != hash.MD5 {
return "", hash.ErrUnsupported
}
return o.md5sum, nil
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// Size of object in bytes
func (o *Object) Size() int64 {
return o.size
}
// Storable returns if this object is storable
func (o *Object) Storable() bool {
return true
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return fs.ErrorCantSetModTime
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
var resp *http.Response
opts := rest.Opts{
Method: "GET",
RootURL: o.url,
Options: options,
}
var offset int64
var count int64
var key string
var value string
fs.FixRangeOption(options, o.size)
for _, option := range options {
switch x := option.(type) {
case *fs.RangeOption:
offset, count = x.Decode(o.size)
if count < 0 {
count = o.size - offset
}
key, value = option.Header()
case *fs.SeekOption:
offset = x.Offset
count = o.size - offset
key, value = option.Header()
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
if key != "" && value != "" {
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders[key] = value
}
// Make sure that the asset is fully available
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
if err == nil {
cl, clErr := strconv.Atoi(resp.Header.Get("content-length"))
if clErr == nil && count == int64(cl) {
return false, nil
}
}
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed download of \"%s\": %w", o.url, err)
}
return resp.Body, err
}
// Update the object with the contents of the io.Reader
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
options = append(options, &api.UpdateOptions{
PublicID: o.publicID,
ResourceType: o.resourceType,
DeliveryType: o.deliveryType,
DisplayName: api.CloudinaryEncoder.FromStandardName(o.fs, path.Base(o.Remote())),
AssetFolder: o.fs.FromStandardFullPath(cldPathDir(o.Remote())),
})
updatedObj, err := o.fs.Put(ctx, in, src, options...)
if err != nil {
return err
}
if uo, ok := updatedObj.(*Object); ok {
o.size = uo.size
o.modTime = time.Now() // Skipping uo.modTime because the API returns the create time
o.url = uo.url
o.md5sum = uo.md5sum
o.publicID = uo.publicID
o.resourceType = uo.resourceType
o.deliveryType = uo.deliveryType
}
return nil
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
params := uploader.DestroyParams{
PublicID: o.publicID,
ResourceType: o.resourceType,
Type: o.deliveryType,
}
res, dErr := o.fs.cld.Upload.Destroy(ctx, params)
o.fs.lastCRUD = time.Now()
if dErr != nil {
return dErr
}
if res.Error.Message != "" {
return errors.New(res.Error.Message)
}
if res.Result != "ok" {
return errors.New(res.Result)
}
return nil
}


@@ -1,23 +0,0 @@
// Test Cloudinary filesystem interface
package cloudinary_test
import (
"testing"
"github.com/rclone/rclone/backend/cloudinary"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
name := "TestCloudinary"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*cloudinary.Object)(nil),
SkipInvalidUTF8: true,
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "eventually_consistent_delay", Value: "7"},
},
})
}

File diff suppressed because it is too large


@@ -1,94 +0,0 @@
package combine
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestAdjustmentDo(t *testing.T) {
for _, test := range []struct {
root string
mountpoint string
in string
want string
wantErr error
}{
{
root: "",
mountpoint: "mountpoint",
in: "path/to/file.txt",
want: "mountpoint/path/to/file.txt",
},
{
root: "mountpoint",
mountpoint: "mountpoint",
in: "path/to/file.txt",
want: "path/to/file.txt",
},
{
root: "mountpoint/path",
mountpoint: "mountpoint",
in: "path/to/file.txt",
want: "to/file.txt",
},
{
root: "mountpoint/path",
mountpoint: "mountpoint",
in: "wrongpath/to/file.txt",
want: "",
wantErr: errNotUnderRoot,
},
} {
what := fmt.Sprintf("%+v", test)
a := newAdjustment(test.root, test.mountpoint)
got, gotErr := a.do(test.in)
assert.Equal(t, test.wantErr, gotErr)
assert.Equal(t, test.want, got, what)
}
}
func TestAdjustmentUndo(t *testing.T) {
for _, test := range []struct {
root string
mountpoint string
in string
want string
wantErr error
}{
{
root: "",
mountpoint: "mountpoint",
in: "mountpoint/path/to/file.txt",
want: "path/to/file.txt",
},
{
root: "mountpoint",
mountpoint: "mountpoint",
in: "path/to/file.txt",
want: "path/to/file.txt",
},
{
root: "mountpoint/path",
mountpoint: "mountpoint",
in: "to/file.txt",
want: "path/to/file.txt",
},
{
root: "wrongmountpoint/path",
mountpoint: "mountpoint",
in: "to/file.txt",
want: "",
wantErr: errNotUnderRoot,
},
} {
what := fmt.Sprintf("%+v", test)
a := newAdjustment(test.root, test.mountpoint)
got, gotErr := a.undo(test.in)
assert.Equal(t, test.wantErr, gotErr)
assert.Equal(t, test.want, got, what)
}
}
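// Illustrative sketch, not part of the original file: a minimal adjustment
// type reproducing the do/undo behaviour exercised by the tests above. The
// real implementation lives in combine.go; this stand-in only shows the
// "join one prefix, then require and strip the other" idea.
package main

import (
	"errors"
	"fmt"
	"path"
	"strings"
)

var errNotUnderRoot = errors.New("file not under root")

type adjustment struct {
	root       string
	mountpoint string
}

func newAdjustment(root, mountpoint string) adjustment {
	return adjustment{root: root, mountpoint: mountpoint}
}

// strip removes prefix+"/" from p or returns errNotUnderRoot
func strip(p, prefix string) (string, error) {
	if prefix == "" {
		return p, nil
	}
	if p == prefix {
		return "", nil
	}
	if !strings.HasPrefix(p, prefix+"/") {
		return "", errNotUnderRoot
	}
	return p[len(prefix)+1:], nil
}

// do maps an upstream path to a path as seen through the combine root
func (a adjustment) do(s string) (string, error) {
	return strip(path.Join(a.mountpoint, s), a.root)
}

// undo maps a combine-root path back to the upstream path
func (a adjustment) undo(s string) (string, error) {
	return strip(path.Join(a.root, s), a.mountpoint)
}

func main() {
	a := newAdjustment("mountpoint/path", "mountpoint")
	fmt.Println(a.do("path/to/file.txt"))      // to/file.txt <nil>
	fmt.Println(a.undo("to/file.txt"))         // path/to/file.txt <nil>
	fmt.Println(a.do("wrongpath/to/file.txt")) // "" file not under root
}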


@@ -1,92 +0,0 @@
// Test Combine filesystem interface
package combine_test
import (
"testing"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/memory"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
var (
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "OpenChunkWriter"}
unimplementableObjectMethods = []string{}
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
t.Skip("Skipping as -remote not set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}
func TestLocal(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := MakeTestDirs(t, 3)
upstreams := "dir1=" + dirs[0] + " dir2=" + dirs[1] + " dir3=" + dirs[2]
name := "TestCombineLocal"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":dir1",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}
func TestMemory(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
upstreams := "dir1=:memory:dir1 dir2=:memory:dir2 dir3=:memory:dir3"
name := "TestCombineMemory"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":dir1",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}
func TestMixed(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := MakeTestDirs(t, 2)
upstreams := "dir1=" + dirs[0] + " dir2=" + dirs[1] + " dir3=:memory:dir3"
name := "TestCombineMixed"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":dir1",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}
// MakeTestDirs makes directories in /tmp for testing
func MakeTestDirs(t *testing.T, n int) (dirs []string) {
for i := 1; i <= n; i++ {
dir := t.TempDir()
dirs = append(dirs, dir)
}
return dirs
}

File diff suppressed because it is too large


@@ -14,26 +14,23 @@ import (
"github.com/rclone/rclone/fstest/fstests"
)
var defaultOpt = fstests.Opt{
RemoteName: "TestCompress:",
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"OpenChunkWriter",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
UnimplementableObjectMethods: []string{},
}
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &defaultOpt)
opt := fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
UnimplementableObjectMethods: []string{}}
fstests.Run(t, &opt)
}
// TestRemoteGzip tests GZIP compression
@@ -43,33 +40,26 @@ func TestRemoteGzip(t *testing.T) {
}
tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip")
name := "TestCompressGzip"
opt := defaultOpt
opt.RemoteName = name + ":"
opt.ExtraConfig = []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "compress"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "mode", Value: "gzip"},
{Name: name, Key: "level", Value: "-1"},
}
opt.QuickTestOK = true
fstests.Run(t, &opt)
}
// TestRemoteZstd tests ZSTD compression
func TestRemoteZstd(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-zstd")
name := "TestCompressZstd"
opt := defaultOpt
opt.RemoteName = name + ":"
opt.ExtraConfig = []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "compress"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "mode", Value: "zstd"},
{Name: name, Key: "level", Value: "2"},
}
opt.QuickTestOK = true
fstests.Run(t, &opt)
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
UnimplementableObjectMethods: []string{
"GetTier",
"SetTier",
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "compress"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "compression_mode", Value: "gzip"},
},
})
}


@@ -1,207 +0,0 @@
package compress
import (
"bufio"
"bytes"
"context"
"crypto/md5"
"encoding/hex"
"errors"
"io"
"github.com/buengese/sgzip"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/chunkedreader"
"github.com/rclone/rclone/fs/hash"
)
// gzipModeHandler implements compressionModeHandler for gzip
type gzipModeHandler struct{}
// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
// the configured threshold
func (g *gzipModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
var b bytes.Buffer
var n int64
w, err := sgzip.NewWriterLevel(&b, sgzip.DefaultCompression)
if err != nil {
return false, err
}
n, err = io.Copy(w, r)
if err != nil {
return false, err
}
err = w.Close()
if err != nil {
return false, err
}
ratio := float64(n) / float64(b.Len())
return ratio > minCompressionRatio, nil
}
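// Illustrative sketch, not part of the original file: the same compression
// ratio test using compress/gzip from the standard library instead of sgzip,
// with an example threshold of 1.05 standing in for minCompressionRatio.
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"strings"
)

func isCompressible(r io.Reader, minRatio float64) (bool, error) {
	var b bytes.Buffer
	w := gzip.NewWriter(&b)
	n, err := io.Copy(w, r)
	if err != nil {
		return false, err
	}
	if err := w.Close(); err != nil {
		return false, err
	}
	ratio := float64(n) / float64(b.Len())
	return ratio > minRatio, nil
}

func main() {
	text := strings.Repeat("all work and no play makes jack a dull boy\n", 200)
	ok, err := isCompressible(strings.NewReader(text), 1.05)
	fmt.Println(ok, err) // repetitive text compresses well -> true <nil>
}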
// newObjectGetOriginalSize returns the original file size from the metadata
func (g *gzipModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
if meta.CompressionMetadataGzip == nil {
return 0, errors.New("missing gzip metadata")
}
return meta.CompressionMetadataGzip.Size, nil
}
// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
func (g *gzipModeHandler) openGetReadCloser(
ctx context.Context,
o *Object,
offset int64,
limit int64,
cr chunkedreader.ChunkedReader,
closer io.Closer,
options ...fs.OpenOption,
) (rc io.ReadCloser, err error) {
var file io.Reader
if offset != 0 {
file, err = sgzip.NewReaderAt(cr, o.meta.CompressionMetadataGzip, offset)
} else {
file, err = sgzip.NewReader(cr)
}
if err != nil {
return nil, err
}
var fileReader io.Reader
if limit != -1 {
fileReader = io.LimitReader(file, limit)
} else {
fileReader = file
}
// Return a ReadCloser
return ReadCloserWrapper{Reader: fileReader, Closer: closer}, nil
}
// processFileNameGetFileExtension returns the file extension for the given compression mode
func (g *gzipModeHandler) processFileNameGetFileExtension(compressionMode int) string {
if compressionMode == Gzip {
return gzFileExt
}
return ""
}
// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
func (g *gzipModeHandler) putCompress(
ctx context.Context,
f *Fs,
in io.Reader,
src fs.ObjectInfo,
options []fs.OpenOption,
mimeType string,
) (fs.Object, *ObjectMetadata, error) {
// Unwrap reader accounting
in, wrap := accounting.UnWrap(in)
// Add the metadata hasher
metaHasher := md5.New()
in = io.TeeReader(in, metaHasher)
// Compress the file
pipeReader, pipeWriter := io.Pipe()
resultsGzip := make(chan compressionResult[sgzip.GzipMetadata])
go func() {
gz, err := sgzip.NewWriterLevel(pipeWriter, f.opt.CompressionLevel)
if err != nil {
resultsGzip <- compressionResult[sgzip.GzipMetadata]{err: err, meta: sgzip.GzipMetadata{}}
close(resultsGzip)
return
}
_, err = io.Copy(gz, in)
gzErr := gz.Close()
if gzErr != nil && err == nil {
err = gzErr
}
closeErr := pipeWriter.Close()
if closeErr != nil && err == nil {
err = closeErr
}
resultsGzip <- compressionResult[sgzip.GzipMetadata]{err: err, meta: gz.MetaData()}
close(resultsGzip)
}()
wrappedIn := wrap(bufio.NewReaderSize(pipeReader, bufferSize)) // Probably no longer needed as sgzip has its own buffering
// Find a hash the destination supports to compute a hash of
// the compressed data.
ht := f.Fs.Hashes().GetOne()
var hasher *hash.MultiHasher
var err error
if ht != hash.None {
// unwrap the accounting again
wrappedIn, wrap = accounting.UnWrap(wrappedIn)
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
if err != nil {
return nil, nil, err
}
// add the hasher and re-wrap the accounting
wrappedIn = io.TeeReader(wrappedIn, hasher)
wrappedIn = wrap(wrappedIn)
}
// Transfer the data
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
if err != nil {
if o != nil {
if removeErr := o.Remove(ctx); removeErr != nil {
fs.Errorf(o, "Failed to remove partially transferred object: %v", removeErr)
}
}
return nil, nil, err
}
// Check whether we got an error during compression
result := <-resultsGzip
if result.err != nil {
if o != nil {
if removeErr := o.Remove(ctx); removeErr != nil {
fs.Errorf(o, "Failed to remove partially compressed object: %v", removeErr)
}
}
return nil, nil, result.err
}
// Generate metadata
meta := g.newMetadata(result.meta.Size, f.mode, result.meta, hex.EncodeToString(metaHasher.Sum(nil)), mimeType)
// Check the hashes of the compressed data if we were comparing them
if ht != hash.None && hasher != nil {
err = f.verifyObjectHash(ctx, o, hasher, ht)
if err != nil {
return nil, nil, err
}
}
return o, meta, nil
}
// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
func (g *gzipModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
return o, g.newMetadata(o.Size(), mode, sgzip.GzipMetadata{}, hex.EncodeToString(sum), mimeType), nil
}
// newMetadata builds an ObjectMetadata from an sgzip.GzipMetadata value.
// It panics if cmeta is not of type sgzip.GzipMetadata.
func (g *gzipModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
meta, ok := cmeta.(sgzip.GzipMetadata)
if !ok {
panic("invalid cmeta type: expected sgzip.GzipMetadata")
}
objMeta := new(ObjectMetadata)
objMeta.Size = size
objMeta.Mode = mode
objMeta.CompressionMetadataGzip = &meta
objMeta.CompressionMetadataZstd = nil
objMeta.MD5 = md5
objMeta.MimeType = mimeType
return objMeta
}
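
// The compressibility probe at the top of this handler is easy to reproduce
// outside the backend. The sketch below mirrors it with only the standard
// library's compress/gzip; the 1.05 threshold and the looksCompressible name
// are illustrative assumptions, not values taken from the backend.
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"strings"
)

func looksCompressible(r io.Reader, minRatio float64) (bool, error) {
	var b bytes.Buffer
	w, err := gzip.NewWriterLevel(&b, gzip.DefaultCompression)
	if err != nil {
		return false, err
	}
	n, err := io.Copy(w, r)
	if err != nil {
		return false, err
	}
	if err := w.Close(); err != nil {
		return false, err
	}
	// Same heuristic as isCompressible: uncompressed size over compressed size.
	return float64(n)/float64(b.Len()) > minRatio, nil
}

func main() {
	in := strings.NewReader(strings.Repeat("rclone ", 1024))
	ok, err := looksCompressible(in, 1.05)
	fmt.Println(ok, err) // repetitive text compresses well, so this prints: true <nil>
}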


@@ -1,327 +0,0 @@
package compress
import (
"context"
"errors"
"io"
"runtime"
"sync"
szstd "github.com/a1ex3/zstd-seekable-format-go/pkg"
"github.com/klauspost/compress/zstd"
)
const szstdChunkSize int = 1 << 20 // 1 MiB chunk size
// SzstdMetadata holds metadata for szstd compressed files.
type SzstdMetadata struct {
BlockSize int // BlockSize is the size of the blocks in the zstd file
Size int64 // Size is the uncompressed size of the file
BlockData []uint32 // BlockData is the block data for the zstd file, used for seeking
}
// SzstdWriter is a writer that compresses data in szstd format.
type SzstdWriter struct {
enc *zstd.Encoder
w szstd.ConcurrentWriter
metadata SzstdMetadata
mu sync.Mutex
}
// NewWriterSzstd creates a new szstd writer with the specified options.
// It initializes the szstd writer with a zstd encoder and returns a pointer to the SzstdWriter.
// The writer can be used to write data in chunks, and it will automatically handle block sizes and metadata.
func NewWriterSzstd(w io.Writer, opts ...zstd.EOption) (*SzstdWriter, error) {
encoder, err := zstd.NewWriter(nil, opts...)
if err != nil {
return nil, err
}
sw, err := szstd.NewWriter(w, encoder)
if err != nil {
if err := encoder.Close(); err != nil {
return nil, err
}
return nil, err
}
return &SzstdWriter{
enc: encoder,
w: sw,
metadata: SzstdMetadata{
BlockSize: szstdChunkSize,
Size: 0,
},
}, nil
}
// Write writes data to the szstd writer in chunks of szstdChunkSize.
// It handles the block size and metadata updates automatically.
func (w *SzstdWriter) Write(p []byte) (int, error) {
if len(p) == 0 {
return 0, nil
}
if w.metadata.BlockData == nil {
numBlocks := (len(p) + w.metadata.BlockSize - 1) / w.metadata.BlockSize
w.metadata.BlockData = make([]uint32, 1, numBlocks+1)
w.metadata.BlockData[0] = 0
}
start := 0
total := len(p)
var writerFunc szstd.FrameSource = func() ([]byte, error) {
if start >= total {
return nil, nil
}
end := min(start+w.metadata.BlockSize, total)
chunk := p[start:end]
size := end - start
w.mu.Lock()
w.metadata.Size += int64(size)
w.mu.Unlock()
start = end
return chunk, nil
}
// write sizes of compressed blocks in the callback
err := w.w.WriteMany(context.Background(), writerFunc,
szstd.WithWriteCallback(func(size uint32) {
w.mu.Lock()
lastOffset := w.metadata.BlockData[len(w.metadata.BlockData)-1]
w.metadata.BlockData = append(w.metadata.BlockData, lastOffset+size)
w.mu.Unlock()
}),
)
if err != nil {
return 0, err
}
return total, nil
}
// Close closes the SzstdWriter and its underlying encoder.
func (w *SzstdWriter) Close() error {
if err := w.w.Close(); err != nil {
return err
}
if err := w.enc.Close(); err != nil {
return err
}
return nil
}
// GetMetadata returns the metadata of the szstd writer.
func (w *SzstdWriter) GetMetadata() SzstdMetadata {
return w.metadata
}
// SzstdReaderAt is a reader that allows random access in szstd compressed data.
type SzstdReaderAt struct {
r szstd.Reader
decoder *zstd.Decoder
metadata *SzstdMetadata
pos int64
mu sync.Mutex
}
// NewReaderAtSzstd creates a new SzstdReaderAt reading from rs, positioned at the given uncompressed offset.
func NewReaderAtSzstd(rs io.ReadSeeker, meta *SzstdMetadata, offset int64, opts ...zstd.DOption) (*SzstdReaderAt, error) {
decoder, err := zstd.NewReader(nil, opts...)
if err != nil {
return nil, err
}
r, err := szstd.NewReader(rs, decoder)
if err != nil {
decoder.Close()
return nil, err
}
sr := &SzstdReaderAt{
r: r,
decoder: decoder,
metadata: meta,
pos: 0,
}
// Set initial position to the provided offset
if _, err := sr.Seek(offset, io.SeekStart); err != nil {
if err := sr.Close(); err != nil {
return nil, err
}
return nil, err
}
return sr, nil
}
// Seek sets the offset for the next Read.
func (s *SzstdReaderAt) Seek(offset int64, whence int) (int64, error) {
s.mu.Lock()
defer s.mu.Unlock()
pos, err := s.r.Seek(offset, whence)
if err == nil {
s.pos = pos
}
return pos, err
}
func (s *SzstdReaderAt) Read(p []byte) (int, error) {
s.mu.Lock()
defer s.mu.Unlock()
n, err := s.r.Read(p)
if err == nil {
s.pos += int64(n)
}
return n, err
}
// ReadAt reads data at the specified offset.
func (s *SzstdReaderAt) ReadAt(p []byte, off int64) (int, error) {
if off < 0 {
return 0, errors.New("invalid offset")
}
if off >= s.metadata.Size {
return 0, io.EOF
}
endOff := min(off+int64(len(p)), s.metadata.Size)
// Find all blocks covered by the range
type blockInfo struct {
index int // Block index
offsetInBlock int64 // Offset within the block for starting reading
bytesToRead int64 // How many bytes to read from this block
}
var blocks []blockInfo
uncompressedOffset := int64(0)
currentOff := off
for i := 0; i < len(s.metadata.BlockData)-1; i++ {
blockUncompressedEnd := min(uncompressedOffset+int64(s.metadata.BlockSize), s.metadata.Size)
if currentOff < blockUncompressedEnd && endOff > uncompressedOffset {
offsetInBlock := max(0, currentOff-uncompressedOffset)
bytesToRead := min(blockUncompressedEnd-uncompressedOffset-offsetInBlock, endOff-currentOff)
blocks = append(blocks, blockInfo{
index: i,
offsetInBlock: offsetInBlock,
bytesToRead: bytesToRead,
})
currentOff += bytesToRead
if currentOff >= endOff {
break
}
}
uncompressedOffset = blockUncompressedEnd
}
if len(blocks) == 0 {
return 0, io.EOF
}
// Parallel block decoding
type decodeResult struct {
index int
data []byte
err error
}
resultCh := make(chan decodeResult, len(blocks))
var wg sync.WaitGroup
sem := make(chan struct{}, runtime.NumCPU())
for _, block := range blocks {
wg.Add(1)
go func(block blockInfo) {
defer wg.Done()
sem <- struct{}{}
defer func() { <-sem }()
startOffset := int64(s.metadata.BlockData[block.index])
endOffset := int64(s.metadata.BlockData[block.index+1])
compressedSize := endOffset - startOffset
compressed := make([]byte, compressedSize)
_, err := s.r.ReadAt(compressed, startOffset)
if err != nil && err != io.EOF {
resultCh <- decodeResult{index: block.index, err: err}
return
}
decoded, err := s.decoder.DecodeAll(compressed, nil)
if err != nil {
resultCh <- decodeResult{index: block.index, err: err}
return
}
resultCh <- decodeResult{index: block.index, data: decoded, err: nil}
}(block)
}
go func() {
wg.Wait()
close(resultCh)
}()
// Collect results in block index order
totalRead := 0
results := make(map[int]decodeResult)
expected := len(blocks)
minIndex := blocks[0].index
for res := range resultCh {
results[res.index] = res
for {
if result, ok := results[minIndex]; ok {
if result.err != nil {
return 0, result.err
}
// find the corresponding blockInfo
var blk blockInfo
for _, b := range blocks {
if b.index == result.index {
blk = b
break
}
}
start := blk.offsetInBlock
end := start + blk.bytesToRead
copy(p[totalRead:totalRead+int(blk.bytesToRead)], result.data[start:end])
totalRead += int(blk.bytesToRead)
minIndex++
if minIndex-blocks[0].index >= len(blocks) {
break
}
} else {
break
}
}
if len(results) == expected && minIndex-blocks[0].index >= len(blocks) {
break
}
}
return totalRead, nil
}
// Close closes the SzstdReaderAt and underlying decoder.
func (s *SzstdReaderAt) Close() error {
if err := s.r.Close(); err != nil {
return err
}
s.decoder.Close()
return nil
}
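
// A sketch of how the writer and reader above fit together. It is written as
// if it lived in this package so that NewWriterSzstd and NewReaderAtSzstd can
// be called directly; it additionally needs "bytes", "fmt" and "io" in the
// import block, and the payload size and 1 MiB offset are illustrative only.
func exampleSzstdRoundTrip() error {
	var buf bytes.Buffer
	w, err := NewWriterSzstd(&buf)
	if err != nil {
		return err
	}
	payload := bytes.Repeat([]byte("0123456789abcdef"), 1<<17) // 2 MiB of sample data
	if _, err := w.Write(payload); err != nil {
		return err
	}
	if err := w.Close(); err != nil {
		return err
	}
	meta := w.GetMetadata()
	// Reopen the compressed bytes and read 16 bytes starting at uncompressed offset 1 MiB.
	r, err := NewReaderAtSzstd(bytes.NewReader(buf.Bytes()), &meta, 1<<20)
	if err != nil {
		return err
	}
	defer func() { _ = r.Close() }()
	out := make([]byte, 16)
	if _, err := io.ReadFull(r, out); err != nil {
		return err
	}
	fmt.Printf("data at offset 1 MiB: %q\n", out)
	return nil
}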


@@ -1,65 +0,0 @@
package compress
import (
"context"
"fmt"
"io"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/chunkedreader"
)
// uncompressedModeHandler implements compressionModeHandler for uncompressed files
type uncompressedModeHandler struct{}
// isCompressible always returns false: data is never compressed in uncompressed mode
func (u *uncompressedModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
return false, nil
}
// newObjectGetOriginalSize returns the original file size from the metadata
func (u *uncompressedModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
return 0, nil
}
// openGetReadCloser opens the underlying object directly since the data is stored uncompressed
func (u *uncompressedModeHandler) openGetReadCloser(
ctx context.Context,
o *Object,
offset int64,
limit int64,
cr chunkedreader.ChunkedReader,
closer io.Closer,
options ...fs.OpenOption,
) (rc io.ReadCloser, err error) {
return o.Object.Open(ctx, options...)
}
// processFileNameGetFileExtension returns the file extension for the given compression mode
func (u *uncompressedModeHandler) processFileNameGetFileExtension(compressionMode int) string {
return ""
}
// putCompress is not supported in uncompressed mode and always returns an error
func (u *uncompressedModeHandler) putCompress(
ctx context.Context,
f *Fs,
in io.Reader,
src fs.ObjectInfo,
options []fs.OpenOption,
mimeType string,
) (fs.Object, *ObjectMetadata, error) {
return nil, nil, fmt.Errorf("unsupported compression mode %d", f.mode)
}
// putUncompressGetNewMetadata is not supported in uncompressed mode and always returns an error
func (u *uncompressedModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
return nil, nil, fmt.Errorf("unsupported compression mode %d", Uncompressed)
}
// newMetadata returns nil: uncompressed objects carry no compression metadata
func (u *uncompressedModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
return nil
}


@@ -1,65 +0,0 @@
package compress
import (
"context"
"fmt"
"io"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/chunkedreader"
)
// unknownModeHandler implements compressionModeHandler for unknown compression types
type unknownModeHandler struct{}
// isCompressible always returns an error for an unknown compression mode
func (unk *unknownModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
return false, fmt.Errorf("unknown compression mode %d", compressionMode)
}
// newObjectGetOriginalSize returns the original file size from the metadata
func (unk *unknownModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
return 0, nil
}
// openGetReadCloser returns an error for an unknown compression mode
func (unk *unknownModeHandler) openGetReadCloser(
ctx context.Context,
o *Object,
offset int64,
limit int64,
cr chunkedreader.ChunkedReader,
closer io.Closer,
options ...fs.OpenOption,
) (rc io.ReadCloser, err error) {
return nil, fmt.Errorf("unknown compression mode %d", o.meta.Mode)
}
// processFileNameGetFileExtension returns the file extension for the given compression mode
func (unk *unknownModeHandler) processFileNameGetFileExtension(compressionMode int) string {
return ""
}
// putCompress returns an error for an unknown compression mode
func (unk *unknownModeHandler) putCompress(
ctx context.Context,
f *Fs,
in io.Reader,
src fs.ObjectInfo,
options []fs.OpenOption,
mimeType string,
) (fs.Object, *ObjectMetadata, error) {
return nil, nil, fmt.Errorf("unknown compression mode %d", f.mode)
}
// putUncompressGetNewMetadata returns an error for an unknown compression mode
func (unk *unknownModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
return nil, nil, fmt.Errorf("unknown compression mode")
}
// newMetadata returns nil: no metadata can be built for an unknown compression mode
func (unk *unknownModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
return nil
}


@@ -1,192 +0,0 @@
package compress
import (
"bufio"
"bytes"
"context"
"crypto/md5"
"encoding/hex"
"errors"
"io"
"github.com/klauspost/compress/zstd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/chunkedreader"
"github.com/rclone/rclone/fs/hash"
)
// zstdModeHandler implements compressionModeHandler for zstd
type zstdModeHandler struct{}
// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
// the configured threshold
func (z *zstdModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
var b bytes.Buffer
var n int64
w, err := NewWriterSzstd(&b, zstd.WithEncoderLevel(zstd.SpeedDefault))
if err != nil {
return false, err
}
n, err = io.Copy(w, r)
if err != nil {
return false, err
}
err = w.Close()
if err != nil {
return false, err
}
ratio := float64(n) / float64(b.Len())
return ratio > minCompressionRatio, nil
}
// newObjectGetOriginalSize returns the original file size from the metadata
func (z *zstdModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
if meta.CompressionMetadataZstd == nil {
return 0, errors.New("missing zstd metadata")
}
return meta.CompressionMetadataZstd.Size, nil
}
// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
func (z *zstdModeHandler) openGetReadCloser(
ctx context.Context,
o *Object,
offset int64,
limit int64,
cr chunkedreader.ChunkedReader,
closer io.Closer,
options ...fs.OpenOption,
) (rc io.ReadCloser, err error) {
var file io.Reader
if offset != 0 {
file, err = NewReaderAtSzstd(cr, o.meta.CompressionMetadataZstd, offset)
} else {
file, err = zstd.NewReader(cr)
}
if err != nil {
return nil, err
}
var fileReader io.Reader
if limit != -1 {
fileReader = io.LimitReader(file, limit)
} else {
fileReader = file
}
// Return a ReadCloser
return ReadCloserWrapper{Reader: fileReader, Closer: closer}, nil
}
// processFileNameGetFileExtension returns the file extension for the given compression mode
func (z *zstdModeHandler) processFileNameGetFileExtension(compressionMode int) string {
if compressionMode == Zstd {
return zstdFileExt
}
return ""
}
// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
func (z *zstdModeHandler) putCompress(
ctx context.Context,
f *Fs,
in io.Reader,
src fs.ObjectInfo,
options []fs.OpenOption,
mimeType string,
) (fs.Object, *ObjectMetadata, error) {
// Unwrap reader accounting
in, wrap := accounting.UnWrap(in)
// Add the metadata hasher
metaHasher := md5.New()
in = io.TeeReader(in, metaHasher)
// Compress the file
pipeReader, pipeWriter := io.Pipe()
resultsZstd := make(chan compressionResult[SzstdMetadata])
go func() {
writer, err := NewWriterSzstd(pipeWriter, zstd.WithEncoderLevel(zstd.EncoderLevel(f.opt.CompressionLevel)))
if err != nil {
resultsZstd <- compressionResult[SzstdMetadata]{err: err}
close(resultsZstd)
return
}
_, err = io.Copy(writer, in)
if wErr := writer.Close(); wErr != nil && err == nil {
err = wErr
}
if cErr := pipeWriter.Close(); cErr != nil && err == nil {
err = cErr
}
resultsZstd <- compressionResult[SzstdMetadata]{err: err, meta: writer.GetMetadata()}
close(resultsZstd)
}()
wrappedIn := wrap(bufio.NewReaderSize(pipeReader, bufferSize))
ht := f.Fs.Hashes().GetOne()
var hasher *hash.MultiHasher
var err error
if ht != hash.None {
wrappedIn, wrap = accounting.UnWrap(wrappedIn)
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
if err != nil {
return nil, nil, err
}
wrappedIn = io.TeeReader(wrappedIn, hasher)
wrappedIn = wrap(wrappedIn)
}
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
if err != nil {
return nil, nil, err
}
result := <-resultsZstd
if result.err != nil {
if o != nil {
_ = o.Remove(ctx)
}
return nil, nil, result.err
}
// Build metadata using uncompressed size for filename
meta := z.newMetadata(result.meta.Size, f.mode, result.meta, hex.EncodeToString(metaHasher.Sum(nil)), mimeType)
if ht != hash.None && hasher != nil {
err = f.verifyObjectHash(ctx, o, hasher, ht)
if err != nil {
return nil, nil, err
}
}
return o, meta, nil
}
// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
func (z *zstdModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
return o, z.newMetadata(o.Size(), mode, SzstdMetadata{}, hex.EncodeToString(sum), mimeType), nil
}
// newMetadata builds an ObjectMetadata from an SzstdMetadata value.
// It panics if cmeta is not of type SzstdMetadata.
func (z *zstdModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
meta, ok := cmeta.(SzstdMetadata)
if !ok {
panic("invalid cmeta type: expected SzstdMetadata")
}
objMeta := new(ObjectMetadata)
objMeta.Size = size
objMeta.Mode = mode
objMeta.CompressionMetadataGzip = nil
objMeta.CompressionMetadataZstd = &meta
objMeta.MD5 = md5
objMeta.MimeType = mimeType
return objMeta
}
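
// Both putCompress implementations above share the same shape: compress into
// an io.Pipe from a goroutine, stream the read side to the remote, then wait
// on a channel for the compressor's result. The sketch below shows that
// pattern on its own, using the standard library's "compress/gzip" and "io"
// packages and a hypothetical upload callback; the names and error handling
// are illustrative, not the backend's exact behaviour.
func uploadCompressed(in io.Reader, upload func(io.Reader) error) error {
	pr, pw := io.Pipe()
	errCh := make(chan error, 1)
	go func() {
		gz := gzip.NewWriter(pw)
		_, err := io.Copy(gz, in)
		if cErr := gz.Close(); err == nil {
			err = cErr
		}
		// Closing the write side lets the uploader see EOF (or the error).
		_ = pw.CloseWithError(err)
		errCh <- err
	}()
	uploadErr := upload(pr)
	if uploadErr != nil {
		// Unblock the compressor if the upload stopped reading early.
		_ = pr.CloseWithError(uploadErr)
	}
	if compressErr := <-errCh; uploadErr == nil {
		return compressErr
	}
	return uploadErr
}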


@@ -7,22 +7,17 @@ import (
gocipher "crypto/cipher"
"crypto/rand"
"encoding/base32"
"encoding/base64"
"errors"
"fmt"
"io"
"strconv"
"strings"
"sync"
"time"
"unicode/utf8"
"github.com/Max-Sum/base32768"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/version"
"github.com/rfjakob/eme"
"golang.org/x/crypto/nacl/secretbox"
"golang.org/x/crypto/scrypt"
@@ -38,6 +33,7 @@ const (
blockHeaderSize = secretbox.Overhead
blockDataSize = 64 * 1024
blockSize = blockHeaderSize + blockDataSize
encryptedSuffix = ".bin" // when file name encryption is off we add this suffix to make sure the cloud provider doesn't process the file
)
// Errors returned by cipher
@@ -53,9 +49,8 @@ var (
ErrorEncryptedBadBlock = errors.New("failed to authenticate decrypted block - bad password?")
ErrorBadBase32Encoding = errors.New("bad base32 filename encoding")
ErrorFileClosed = errors.New("file already closed")
ErrorNotAnEncryptedFile = errors.New("not an encrypted file - does not match suffix")
ErrorNotAnEncryptedFile = errors.New("not an encrypted file - no \"" + encryptedSuffix + "\" suffix")
ErrorBadSeek = errors.New("Seek beyond end of file")
ErrorSuffixMissingDot = errors.New("suffix config setting should include a '.'")
defaultSalt = []byte{0xA8, 0x0D, 0xF4, 0x3A, 0x8F, 0xBD, 0x03, 0x08, 0xA7, 0xCA, 0xB8, 0x3E, 0x58, 0x1F, 0x86, 0xB1}
obfuscQuoteRune = '!'
)
@@ -97,12 +92,12 @@ func NewNameEncryptionMode(s string) (mode NameEncryptionMode, err error) {
case "obfuscate":
mode = NameEncryptionObfuscated
default:
err = fmt.Errorf("unknown file name encryption mode %q", s)
err = errors.Errorf("Unknown file name encryption mode %q", s)
}
return mode, err
}
// String turns mode into a human-readable string
// String turns mode into a human readable string
func (mode NameEncryptionMode) String() (out string) {
switch mode {
case NameEncryptionOff:
@@ -117,83 +112,27 @@ func (mode NameEncryptionMode) String() (out string) {
return out
}
// fileNameEncoding are the encoding methods dealing with encrypted file names
type fileNameEncoding interface {
EncodeToString(src []byte) string
DecodeString(s string) ([]byte, error)
}
// caseInsensitiveBase32Encoding defines a file name encoding
// using a modified version of standard base32 as described in
// RFC4648
//
// The standard encoding is modified in two ways
// - it becomes lower case (no-one likes upper case filenames!)
// - we strip the padding character `=`
type caseInsensitiveBase32Encoding struct{}
// EncodeToString encodes a string using the modified version of
// base32 encoding.
func (caseInsensitiveBase32Encoding) EncodeToString(src []byte) string {
encoded := base32.HexEncoding.EncodeToString(src)
encoded = strings.TrimRight(encoded, "=")
return strings.ToLower(encoded)
}
// DecodeString decodes a string as encoded by EncodeToString
func (caseInsensitiveBase32Encoding) DecodeString(s string) ([]byte, error) {
if strings.HasSuffix(s, "=") {
return nil, ErrorBadBase32Encoding
}
// First figure out how many padding characters to add
roundUpToMultipleOf8 := (len(s) + 7) &^ 7
equals := roundUpToMultipleOf8 - len(s)
s = strings.ToUpper(s) + "========"[:equals]
return base32.HexEncoding.DecodeString(s)
}
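// A quick sanity check of the padding arithmetic above: base32 always emits
// characters in groups of 8, so rounding the unpadded length up to the next
// multiple of 8 recovers the number of '=' characters that were stripped.
// For example, a 9-byte name encodes to 15 base32 characters, (15+7)&^7 == 16,
// so exactly one '=' must be re-added before decoding.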
// NewNameEncoding creates a NameEncoding from a string
func NewNameEncoding(s string) (enc fileNameEncoding, err error) {
s = strings.ToLower(s)
switch s {
case "base32":
enc = caseInsensitiveBase32Encoding{}
case "base64":
enc = base64.RawURLEncoding
case "base32768":
enc = base32768.SafeEncoding
default:
err = fmt.Errorf("unknown file name encoding mode %q", s)
}
return enc, err
}
// Cipher defines an encoding and decoding cipher for the crypt backend
type Cipher struct {
dataKey [32]byte // Key for secretbox
nameKey [32]byte // 16,24 or 32 bytes
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
block gocipher.Block
mode NameEncryptionMode
fileNameEnc fileNameEncoding
buffers sync.Pool // encrypt/decrypt buffers
cryptoRand io.Reader // read crypto random numbers from here
dirNameEncrypt bool
passBadBlocks bool // if set passed bad blocks as zeroed blocks
encryptedSuffix string
dataKey [32]byte // Key for secretbox
nameKey [32]byte // 16,24 or 32 bytes
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
block gocipher.Block
mode NameEncryptionMode
buffers sync.Pool // encrypt/decrypt buffers
cryptoRand io.Reader // read crypto random numbers from here
dirNameEncrypt bool
}
// newCipher initialises the cipher. If salt is "" then it uses a built in salt val
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool, enc fileNameEncoding) (*Cipher, error) {
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*Cipher, error) {
c := &Cipher{
mode: mode,
fileNameEnc: enc,
cryptoRand: rand.Reader,
dirNameEncrypt: dirNameEncrypt,
encryptedSuffix: ".bin",
mode: mode,
cryptoRand: rand.Reader,
dirNameEncrypt: dirNameEncrypt,
}
c.buffers.New = func() any {
return new([blockSize]byte)
c.buffers.New = func() interface{} {
return make([]byte, blockSize)
}
err := c.Key(password, salt)
if err != nil {
@@ -202,29 +141,11 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
return c, nil
}
// setEncryptedSuffix set suffix, or an empty string
func (c *Cipher) setEncryptedSuffix(suffix string) {
if strings.EqualFold(suffix, "none") {
c.encryptedSuffix = ""
return
}
if !strings.HasPrefix(suffix, ".") {
fs.Errorf(nil, "crypt: bad suffix: %v", ErrorSuffixMissingDot)
suffix = "." + suffix
}
c.encryptedSuffix = suffix
}
// Call to set bad block pass through
func (c *Cipher) setPassBadBlocks(passBadBlocks bool) {
c.passBadBlocks = passBadBlocks
}
// Key creates all the internal keys from the password passed in using
// scrypt.
//
// If salt is "" we use a fixed salt just to make attackers lives
// slightly harder than using no salt.
// slighty harder than using no salt.
//
// Note that empty password makes all 0x00 keys which is used in the
// tests.
@@ -252,18 +173,45 @@ func (c *Cipher) Key(password, salt string) (err error) {
}
// getBlock gets a block from the pool of size blockSize
func (c *Cipher) getBlock() *[blockSize]byte {
return c.buffers.Get().(*[blockSize]byte)
func (c *Cipher) getBlock() []byte {
return c.buffers.Get().([]byte)
}
// putBlock returns a block to the pool of size blockSize
func (c *Cipher) putBlock(buf *[blockSize]byte) {
func (c *Cipher) putBlock(buf []byte) {
if len(buf) != blockSize {
panic("bad blocksize returned to pool")
}
c.buffers.Put(buf)
}
// encodeFileName encodes a filename using a modified version of
// standard base32 as described in RFC4648
//
// The standard encoding is modified in two ways
// * it becomes lower case (no-one likes upper case filenames!)
// * we strip the padding character `=`
func encodeFileName(in []byte) string {
encoded := base32.HexEncoding.EncodeToString(in)
encoded = strings.TrimRight(encoded, "=")
return strings.ToLower(encoded)
}
// decodeFileName decodes a filename as encoded by encodeFileName
func decodeFileName(in string) ([]byte, error) {
if strings.HasSuffix(in, "=") {
return nil, ErrorBadBase32Encoding
}
// First figure out how many padding characters to add
roundUpToMultipleOf8 := (len(in) + 7) &^ 7
equals := roundUpToMultipleOf8 - len(in)
in = strings.ToUpper(in) + "========"[:equals]
return base32.HexEncoding.DecodeString(in)
}
// encryptSegment encrypts a path segment
//
// This uses EME with AES.
// This uses EME with AES
//
// EME (ECB-Mix-ECB) is a wide-block encryption mode presented in the
// 2003 paper "A Parallelizable Enciphering Mode" by Halevi and
@@ -273,15 +221,15 @@ func (c *Cipher) putBlock(buf *[blockSize]byte) {
// same filename must encrypt to the same thing.
//
// This means that
// - filenames with the same name will encrypt the same
// - filenames which start the same won't have a common prefix
// * filenames with the same name will encrypt the same
// * filenames which start the same won't have a common prefix
func (c *Cipher) encryptSegment(plaintext string) string {
if plaintext == "" {
return ""
}
paddedPlaintext := pkcs7.Pad(nameCipherBlockSize, []byte(plaintext))
ciphertext := eme.Transform(c.block, c.nameTweak[:], paddedPlaintext, eme.DirectionEncrypt)
return c.fileNameEnc.EncodeToString(ciphertext)
return encodeFileName(ciphertext)
}
// decryptSegment decrypts a path segment
@@ -289,7 +237,7 @@ func (c *Cipher) decryptSegment(ciphertext string) (string, error) {
if ciphertext == "" {
return "", nil
}
rawCiphertext, err := c.fileNameEnc.DecodeString(ciphertext)
rawCiphertext, err := decodeFileName(ciphertext)
if err != nil {
return "", err
}
@@ -329,14 +277,14 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
for _, runeValue := range plaintext {
dir += int(runeValue)
}
dir %= 256
dir = dir % 256
// We'll use this number to store in the result filename...
var result bytes.Buffer
_, _ = result.WriteString(strconv.Itoa(dir) + ".")
// but we'll augment it with the nameKey for real calculation
for i := range len(c.nameKey) {
for i := 0; i < len(c.nameKey); i++ {
dir += int(c.nameKey[i])
}
@@ -418,7 +366,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
}
// add the nameKey to get the real rotate distance
for i := range len(c.nameKey) {
for i := 0; i < len(c.nameKey); i++ {
dir += int(c.nameKey[i])
}
@@ -450,7 +398,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
if pos >= 26 {
pos -= 6
}
pos -= thisdir
pos = pos - thisdir
if pos < 0 {
pos += 52
}
@@ -494,32 +442,11 @@ func (c *Cipher) encryptFileName(in string) string {
if !c.dirNameEncrypt && i != (len(segments)-1) {
continue
}
// Strip version string so that only the non-versioned part
// of the file name gets encrypted/obfuscated
hasVersion := false
var t time.Time
if i == (len(segments)-1) && version.Match(segments[i]) {
var s string
t, s = version.Remove(segments[i])
// version.Remove can fail, in which case it returns segments[i]
if s != segments[i] {
segments[i] = s
hasVersion = true
}
}
if c.mode == NameEncryptionStandard {
segments[i] = c.encryptSegment(segments[i])
} else {
segments[i] = c.obfuscateSegment(segments[i])
}
// Add back a version to the encrypted/obfuscated
// file name, if we stripped it off earlier
if hasVersion {
segments[i] = version.Add(segments[i], t)
}
}
return strings.Join(segments, "/")
}
@@ -527,7 +454,7 @@ func (c *Cipher) encryptFileName(in string) string {
// EncryptFileName encrypts a file path
func (c *Cipher) EncryptFileName(in string) string {
if c.mode == NameEncryptionOff {
return in + c.encryptedSuffix
return in + encryptedSuffix
}
return c.encryptFileName(in)
}
@@ -550,21 +477,6 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
if !c.dirNameEncrypt && i != (len(segments)-1) {
continue
}
// Strip version string so that only the non-versioned part
// of the file name gets decrypted/deobfuscated
hasVersion := false
var t time.Time
if i == (len(segments)-1) && version.Match(segments[i]) {
var s string
t, s = version.Remove(segments[i])
// version.Remove can fail, in which case it returns segments[i]
if s != segments[i] {
segments[i] = s
hasVersion = true
}
}
if c.mode == NameEncryptionStandard {
segments[i], err = c.decryptSegment(segments[i])
} else {
@@ -574,12 +486,6 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
if err != nil {
return "", err
}
// Add back a version to the decrypted/deobfuscated
// file name, if we stripped it off earlier
if hasVersion {
segments[i] = version.Add(segments[i], t)
}
}
return strings.Join(segments, "/"), nil
}
@@ -587,19 +493,11 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
// DecryptFileName decrypts a file path
func (c *Cipher) DecryptFileName(in string) (string, error) {
if c.mode == NameEncryptionOff {
remainingLength := len(in) - len(c.encryptedSuffix)
if remainingLength == 0 || !strings.HasSuffix(in, c.encryptedSuffix) {
return "", ErrorNotAnEncryptedFile
remainingLength := len(in) - len(encryptedSuffix)
if remainingLength > 0 && strings.HasSuffix(in, encryptedSuffix) {
return in[:remainingLength], nil
}
decrypted := in[:remainingLength]
if version.Match(decrypted) {
_, unversioned := version.Remove(decrypted)
if unversioned == "" {
return "", ErrorNotAnEncryptedFile
}
}
// Leave the version string on, if it was there
return decrypted, nil
return "", ErrorNotAnEncryptedFile
}
return c.decryptFileName(in)
}
@@ -628,9 +526,9 @@ func (n *nonce) pointer() *[fileNonceSize]byte {
// fromReader fills the nonce from an io.Reader - normally the OSes
// crypto random number generator
func (n *nonce) fromReader(in io.Reader) error {
read, err := readers.ReadFill(in, (*n)[:])
read, err := io.ReadFull(in, (*n)[:])
if read != fileNonceSize {
return fmt.Errorf("short read of nonce: %w", err)
return errors.Wrap(err, "short read of nonce")
}
return nil
}
@@ -664,7 +562,7 @@ func (n *nonce) increment() {
// add a uint64 to the nonce
func (n *nonce) add(x uint64) {
carry := uint16(0)
for i := range 8 {
for i := 0; i < 8; i++ {
digit := (*n)[i]
xDigit := byte(x)
x >>= 8
@@ -683,8 +581,8 @@ type encrypter struct {
in io.Reader
c *Cipher
nonce nonce
buf *[blockSize]byte
readBuf *[blockSize]byte
buf []byte
readBuf []byte
bufIndex int
bufSize int
err error
@@ -709,9 +607,9 @@ func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
}
}
// Copy magic into buffer
copy((*fh.buf)[:], fileMagicBytes)
copy(fh.buf, fileMagicBytes)
// Copy nonce into buffer
copy((*fh.buf)[fileMagicSize:], fh.nonce[:])
copy(fh.buf[fileMagicSize:], fh.nonce[:])
return fh, nil
}
@@ -726,20 +624,22 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
if fh.bufIndex >= fh.bufSize {
// Read data
// FIXME should overlap the reads with a go-routine and 2 buffers?
readBuf := (*fh.readBuf)[:blockDataSize]
n, err = readers.ReadFill(fh.in, readBuf)
readBuf := fh.readBuf[:blockDataSize]
n, err = io.ReadFull(fh.in, readBuf)
if n == 0 {
// err can't be nil since:
// n == len(buf) if and only if err == nil.
return fh.finish(err)
}
// possibly err != nil here, but we will process the
// data and the next call to ReadFill will return 0, err
// data and the next call to ReadFull will return 0, err
// Encrypt the block using the nonce
secretbox.Seal((*fh.buf)[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
fh.bufIndex = 0
fh.bufSize = blockHeaderSize + n
fh.nonce.increment()
}
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufSize])
n = copy(p, fh.buf[fh.bufIndex:fh.bufSize])
fh.bufIndex += n
return n, nil
}
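// Worked example of the framing implemented by Read above: every blockDataSize
// (64 KiB) of plaintext becomes one secretbox-sealed block of
// blockHeaderSize+blockDataSize bytes, written after a single file header of
// fileMagicSize+fileNonceSize bytes. A 150 KiB plaintext is therefore carried
// as the header plus two full 64 KiB blocks and one final 22 KiB block, each
// block adding blockHeaderSize bytes of overhead.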
@@ -780,8 +680,8 @@ type decrypter struct {
nonce nonce
initialNonce nonce
c *Cipher
buf *[blockSize]byte
readBuf *[blockSize]byte
buf []byte
readBuf []byte
bufIndex int
bufSize int
err error
@@ -799,12 +699,12 @@ func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
limit: -1,
}
// Read file header (magic + nonce)
readBuf := (*fh.readBuf)[:fileHeaderSize]
n, err := readers.ReadFill(fh.rc, readBuf)
if n < fileHeaderSize && err == io.EOF {
readBuf := fh.readBuf[:fileHeaderSize]
_, err := io.ReadFull(fh.rc, readBuf)
if err == io.EOF || err == io.ErrUnexpectedEOF {
// This read from 0..fileHeaderSize-1 bytes
return nil, fh.finishAndClose(ErrorEncryptedFileTooShort)
} else if err != io.EOF && err != nil {
} else if err != nil {
return nil, fh.finishAndClose(err)
}
// check the magic
@@ -862,8 +762,10 @@ func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offse
func (fh *decrypter) fillBuffer() (err error) {
// FIXME should overlap the reads with a go-routine and 2 buffers?
readBuf := fh.readBuf
n, err := readers.ReadFill(fh.rc, (*readBuf)[:])
n, err := io.ReadFull(fh.rc, readBuf)
if n == 0 {
// err can't be nil since:
// n == len(buf) if and only if err == nil.
return err
}
// possibly err != nil here, but we will process the data and
@@ -871,25 +773,18 @@ func (fh *decrypter) fillBuffer() (err error) {
// Check header + 1 byte exists
if n <= blockHeaderSize {
if err != nil && err != io.EOF {
if err != nil {
return err // return pending error as it is likely more accurate
}
return ErrorEncryptedFileBadHeader
}
// Decrypt the block using the nonce
_, ok := secretbox.Open((*fh.buf)[:0], (*readBuf)[:n], fh.nonce.pointer(), &fh.c.dataKey)
_, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
if !ok {
if err != nil && err != io.EOF {
if err != nil {
return err // return pending error as it is likely more accurate
}
if !fh.c.passBadBlocks {
return ErrorEncryptedBadBlock
}
fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
// Zero out the bad block and continue
for i := range (*fh.buf)[:n] {
fh.buf[i] = 0
}
return ErrorEncryptedBadBlock
}
fh.bufIndex = 0
fh.bufSize = n - blockHeaderSize
@@ -915,7 +810,7 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
if fh.limit >= 0 && fh.limit < int64(toCopy) {
toCopy = int(fh.limit)
}
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufIndex+toCopy])
n = copy(p, fh.buf[fh.bufIndex:fh.bufIndex+toCopy])
fh.bufIndex += n
if fh.limit >= 0 {
fh.limit -= int64(n)
@@ -926,8 +821,9 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
return n, nil
}
// calculateUnderlying converts an (offset, limit) in an encrypted file
// into an (underlyingOffset, underlyingLimit) for the underlying file.
// calculateUnderlying converts an (offset, limit) in a crypted file
// into an (underlyingOffset, underlyingLimit) for the underlying
// file.
//
// It also returns number of bytes to discard after reading the first
// block and number of blocks this is from the start so the nonce can
@@ -1008,7 +904,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
// Re-open the underlying object with the offset given
rc, err := fh.open(ctx, underlyingOffset, underlyingLimit)
if err != nil {
return 0, fh.finish(fmt.Errorf("couldn't reopen file with offset and limit: %w", err))
return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit"))
}
// Set the file handle
@@ -1106,7 +1002,7 @@ func (c *Cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
// DecryptDataSeek decrypts the data stream from offset
//
// The open function must return a ReadCloser opened to the offset supplied.
// The open function must return a ReadCloser opened to the offset supplied
//
// You must use this form of DecryptData if you might want to Seek the file handle
func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {

File diff suppressed because it is too large


@@ -3,13 +3,13 @@ package crypt
import (
"context"
"errors"
"fmt"
"io"
"path"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
@@ -18,7 +18,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
)
// Globals
@@ -29,12 +28,9 @@ func init() {
Description: "Encrypt/Decrypt a remote",
NewFs: NewFs,
CommandHelp: commandHelp,
MetadataInfo: &fs.MetadataInfo{
Help: `Any metadata supported by the underlying remote is read and written.`,
},
Options: []fs.Option{{
Name: "remote",
Help: "Remote to encrypt/decrypt.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Required: true,
}, {
Name: "filename_encryption",
@@ -43,13 +39,13 @@ func init() {
Examples: []fs.OptionExample{
{
Value: "standard",
Help: "Encrypt the filenames.\nSee the docs for the details.",
Help: "Encrypt the filenames see the docs for the details.",
}, {
Value: "obfuscate",
Help: "Very simple filename obfuscation.",
}, {
Value: "off",
Help: "Don't encrypt the file names.\nAdds a \".bin\", or \"suffix\" extension only.",
Help: "Don't encrypt the file names. Adds a \".bin\" extension only.",
},
},
}, {
@@ -75,14 +71,12 @@ NB If filename_encryption is "off" then this option will do nothing.`,
Required: true,
}, {
Name: "password2",
Help: "Password or pass phrase for salt.\n\nOptional but recommended.\nShould be different to the previous password.",
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
IsPassword: true,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Deprecated: use --server-side-across-configs instead.
Allow server-side operations (e.g. copy) to work across different crypt configs.
Help: `Allow server-side operations (e.g. copy) to work across different crypt configs.
Normally this option is not what you want, but if you have two crypts
pointing to the same backend you can use it.
@@ -122,56 +116,6 @@ names, or for debugging purposes.`,
Help: "Encrypt file data.",
},
},
}, {
Name: "pass_bad_blocks",
Help: `If set this will pass bad blocks through as all 0.
This should not be set in normal operation, it should only be set if
trying to recover an encrypted file with errors and it is desired to
recover as much of the file as possible.`,
Default: false,
Advanced: true,
}, {
Name: "strict_names",
Help: `If set, this will raise an error when crypt comes across a filename that can't be decrypted.
(By default, rclone will just log a NOTICE and continue as normal.)
This can happen if encrypted and unencrypted files are stored in the same
directory (which is not recommended.) It may also indicate a more serious
problem that should be investigated.`,
Default: false,
Advanced: true,
}, {
Name: "filename_encoding",
Help: `How to encode the encrypted filename to text string.
This option could help with shortening the encrypted filename. The
suitable option would depend on the way your remote count the filename
length and if it's case sensitive.`,
Default: "base32",
Examples: []fs.OptionExample{
{
Value: "base32",
Help: "Encode using base32. Suitable for all remote.",
},
{
Value: "base64",
Help: "Encode using base64. Suitable for case sensitive remote.",
},
{
Value: "base32768",
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive, Dropbox)",
},
},
Advanced: true,
}, {
Name: "suffix",
Help: `If this is set it will override the default suffix of ".bin".
Setting suffix to "none" will result in an empty suffix. This may be useful
when the path length is critical.`,
Default: ".bin",
Advanced: true,
}},
})
}
@@ -187,25 +131,19 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
}
password, err := obscure.Reveal(opt.Password)
if err != nil {
return nil, fmt.Errorf("failed to decrypt password: %w", err)
return nil, errors.Wrap(err, "failed to decrypt password")
}
var salt string
if opt.Password2 != "" {
salt, err = obscure.Reveal(opt.Password2)
if err != nil {
return nil, fmt.Errorf("failed to decrypt password2: %w", err)
return nil, errors.Wrap(err, "failed to decrypt password2")
}
}
enc, err := NewNameEncoding(opt.FilenameEncoding)
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "failed to make cipher")
}
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption, enc)
if err != nil {
return nil, fmt.Errorf("failed to make cipher: %w", err)
}
cipher.setEncryptedSuffix(opt.Suffix)
cipher.setPassBadBlocks(opt.PassBadBlocks)
return cipher, nil
}
@@ -254,7 +192,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
}
}
if err != fs.ErrorIsFile && err != nil {
return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remote, err)
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remote)
}
f := &Fs{
Fs: wrappedFs,
@@ -264,39 +202,20 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
cipher: cipher,
}
cache.PinUntilFinalized(f.Fs, f)
// Correct root if definitely pointing to a file
if err == fs.ErrorIsFile {
f.root = path.Dir(f.root)
if f.root == "." || f.root == "/" {
f.root = ""
}
}
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
f.features = (&fs.Features{
CaseInsensitive: !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff,
DuplicateFiles: true,
ReadMimeType: false, // MimeTypes not supported with crypt
WriteMimeType: false,
BucketBased: true,
CanHaveEmptyDirectories: true,
SetTier: true,
GetTier: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
ReadDirMetadata: true,
WriteDirMetadata: true,
WriteDirSetModTime: true,
UserDirMetadata: true,
DirModTimeUpdatesOnWrite: true,
PartialUploads: true,
CaseInsensitive: cipher.NameEncryptionMode() == NameEncryptionOff,
DuplicateFiles: true,
ReadMimeType: false, // MimeTypes not supported with crypt
WriteMimeType: false,
BucketBased: true,
CanHaveEmptyDirectories: true,
SetTier: true,
GetTier: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
// Enable ListP always
f.features.ListP = f.ListP
return f, err
}
@@ -310,10 +229,6 @@ type Options struct {
Password2 string `config:"password2"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
ShowMapping bool `config:"show_mapping"`
PassBadBlocks bool `config:"pass_bad_blocks"`
FilenameEncoding string `config:"filename_encoding"`
Suffix string `config:"suffix"`
StrictNames bool `config:"strict_names"`
}
// Fs represents a wrapped fs.Fs
@@ -348,64 +263,45 @@ func (f *Fs) String() string {
}
// Encrypt an object file name to entries.
func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) error {
func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
remote := obj.Remote()
decryptedRemote, err := f.cipher.DecryptFileName(remote)
if err != nil {
if f.opt.StrictNames {
return fmt.Errorf("%s: undecryptable file name detected: %v", remote, err)
}
fs.Logf(remote, "Skipping undecryptable file name: %v", err)
return nil
fs.Debugf(remote, "Skipping undecryptable file name: %v", err)
return
}
if f.opt.ShowMapping {
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
}
*entries = append(*entries, f.newObject(obj))
return nil
}
// Encrypt a directory file name to entries.
func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) error {
func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) {
remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil {
if f.opt.StrictNames {
return fmt.Errorf("%s: undecryptable dir name detected: %v", remote, err)
}
fs.Logf(remote, "Skipping undecryptable dir name: %v", err)
return nil
fs.Debugf(remote, "Skipping undecryptable dir name: %v", err)
return
}
if f.opt.ShowMapping {
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
}
*entries = append(*entries, f.newDir(ctx, dir))
return nil
}
// Encrypt some directory entries. This alters entries returning it as newEntries.
func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
newEntries = entries[:0] // in place filter
errors := 0
var firsterr error
for _, entry := range entries {
switch x := entry.(type) {
case fs.Object:
err = f.add(&newEntries, x)
f.add(&newEntries, x)
case fs.Directory:
err = f.addDir(ctx, &newEntries, x)
f.addDir(ctx, &newEntries, x)
default:
return nil, fmt.Errorf("unknown object type %T", entry)
return nil, errors.Errorf("Unknown object type %T", entry)
}
if err != nil {
errors++
if firsterr == nil {
firsterr = err
}
}
}
if firsterr != nil {
return nil, fmt.Errorf("there were %v undecryptable name errors. first error: %v", errors, firsterr)
}
return newEntries, nil
}
@@ -420,40 +316,11 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
wrappedCallback := func(entries fs.DirEntries) error {
entries, err := f.encryptEntries(ctx, entries)
if err != nil {
return err
}
return callback(entries)
entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir))
if err != nil {
return nil, err
}
listP := f.Fs.Features().ListP
encryptedDir := f.cipher.EncryptDirName(dir)
if listP == nil {
entries, err := f.Fs.List(ctx, encryptedDir)
if err != nil {
return err
}
return wrappedCallback(entries)
}
return listP(ctx, encryptedDir, wrappedCallback)
return f.encryptEntries(ctx, entries)
}
// ListR lists the objects and directories of the Fs starting
@@ -495,14 +362,8 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
// put implements Put or PutStream
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
ci := fs.GetConfig(ctx)
if f.opt.NoDataEncryption {
o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
if err == nil && o != nil {
o = f.newObject(o)
}
return o, err
return put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
}
// Encrypt the data into wrappedIn
@@ -514,9 +375,6 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
// Find a hash the destination supports to compute a hash of
// the encrypted data
ht := f.Fs.Hashes().GetOne()
if ci.IgnoreChecksum {
ht = hash.None
}
var hasher *hash.MultiHasher
if ht != hash.None {
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
@@ -544,7 +402,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
var dstHash string
dstHash, err = o.Hash(ctx, ht)
if err != nil {
return nil, fmt.Errorf("failed to read destination hash: %w", err)
return nil, errors.Wrap(err, "failed to read destination hash")
}
if srcHash != "" && dstHash != "" {
if srcHash != dstHash {
@@ -553,7 +411,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return nil, fmt.Errorf("corrupted on transfer: %v encrypted hashes differ src(%s) %q vs dst(%s) %q", ht, f.Fs, srcHash, o.Fs(), dstHash)
return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
}
fs.Debugf(src, "%v = %s OK", ht, srcHash)
}
@@ -588,37 +446,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return f.Fs.Mkdir(ctx, f.cipher.EncryptDirName(dir))
}
// MkdirMetadata makes the root directory of the Fs object
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
do := f.Fs.Features().MkdirMetadata
if do == nil {
return nil, fs.ErrorNotImplemented
}
newDir, err := do(ctx, f.cipher.EncryptDirName(dir), metadata)
if err != nil {
return nil, err
}
var entries = make(fs.DirEntries, 0, 1)
err = f.addDir(ctx, &entries, newDir)
if err != nil {
return nil, err
}
newDir, ok := entries[0].(fs.Directory)
if !ok {
return nil, fmt.Errorf("internal error: expecting %T to be fs.Directory", entries[0])
}
return newDir, nil
}
// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
do := f.Fs.Features().DirSetModTime
if do == nil {
return fs.ErrorNotImplemented
}
return do(ctx, f.cipher.EncryptDirName(dir), modTime)
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
@@ -642,9 +469,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -667,9 +494,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -738,7 +565,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
func (f *Fs) CleanUp(ctx context.Context) error {
do := f.Fs.Features().CleanUp
if do == nil {
return errors.New("not supported by underlying remote")
return errors.New("can't CleanUp")
}
return do(ctx)
}
@@ -747,7 +574,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
do := f.Fs.Features().About
if do == nil {
return nil, errors.New("not supported by underlying remote")
return nil, errors.New("About not supported")
}
return do(ctx)
}
@@ -785,24 +612,24 @@ func (f *Fs) computeHashWithNonce(ctx context.Context, nonce nonce, src fs.Objec
// Open the src for input
in, err := src.Open(ctx)
if err != nil {
return "", fmt.Errorf("failed to open src: %w", err)
return "", errors.Wrap(err, "failed to open src")
}
defer fs.CheckClose(in, &err)
// Now encrypt the src with the nonce
out, err := f.cipher.newEncrypter(in, &nonce)
if err != nil {
return "", fmt.Errorf("failed to make encrypter: %w", err)
return "", errors.Wrap(err, "failed to make encrypter")
}
// pipe into hash
m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
if err != nil {
return "", fmt.Errorf("failed to make hasher: %w", err)
return "", errors.Wrap(err, "failed to make hasher")
}
_, err = io.Copy(m, out)
if err != nil {
return "", fmt.Errorf("failed to hash data: %w", err)
return "", errors.Wrap(err, "failed to hash data")
}
return m.Sums()[hashType], nil
@@ -821,12 +648,12 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
// use a limited read so we only read the header
in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
if err != nil {
return "", fmt.Errorf("failed to open object to read nonce: %w", err)
return "", errors.Wrap(err, "failed to open object to read nonce")
}
d, err := f.cipher.newDecrypter(in)
if err != nil {
_ = in.Close()
return "", fmt.Errorf("failed to open object to read nonce: %w", err)
return "", errors.Wrap(err, "failed to open object to read nonce")
}
nonce := d.nonce
// fs.Debugf(o, "Read nonce % 2x", nonce)
@@ -845,7 +672,7 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
// Close d (and hence in) once we have read the nonce
err = d.Close()
if err != nil {
return "", fmt.Errorf("failed to close nonce read: %w", err)
return "", errors.Wrap(err, "failed to close nonce read")
}
return f.computeHashWithNonce(ctx, nonce, src, hashType)
@@ -860,7 +687,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
}
out := make([]fs.Directory, len(dirs))
for i, dir := range dirs {
out[i] = fs.NewDirWrapper(f.cipher.EncryptDirName(dir.Remote()), dir)
out[i] = fs.NewDirCopy(ctx, dir).SetRemote(f.cipher.EncryptDirName(dir.Remote()))
}
return do(ctx, out)
}
@@ -923,30 +750,28 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
var commandHelp = []fs.CommandHelp{
{
Name: "encode",
Short: "Encode the given filename(s).",
Short: "Encode the given filename(s)",
Long: `This encodes the filenames given as arguments returning a list of
strings of the encoded results.
Usage examples:
Usage Example:
` + "```console" + `
rclone backend encode crypt: file1 [file2...]
rclone rc backend/command command=encode fs=crypt: file1 [file2...]
` + "```",
rclone backend encode crypt: file1 [file2...]
rclone rc backend/command command=encode fs=crypt: file1 [file2...]
`,
},
{
Name: "decode",
Short: "Decode the given filename(s).",
Short: "Decode the given filename(s)",
Long: `This decodes the filenames given as arguments returning a list of
strings of the decoded results. It will return an error if any of the
inputs are invalid.
Usage examples:
Usage Example:
` + "```console" + `
rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
` + "```",
rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
`,
},
}
@@ -959,14 +784,14 @@ rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "decode":
out := make([]string, 0, len(arg))
for _, encryptedFileName := range arg {
fileName, err := f.DecryptFileName(encryptedFileName)
if err != nil {
return out, fmt.Errorf("failed to decrypt: %s: %w", encryptedFileName, err)
return out, errors.Wrap(err, fmt.Sprintf("Failed to decrypt : %s", encryptedFileName))
}
out = append(out, fileName)
}
@@ -1098,14 +923,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// newDir returns a dir with the Name decrypted
func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
newDir := fs.NewDirCopy(ctx, dir)
remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil {
fs.Debugf(remote, "Undecryptable dir name: %v", err)
} else {
remote = decryptedRemote
newDir.SetRemote(decryptedRemote)
}
newDir := fs.NewDirWrapper(remote, dir)
return newDir
}
@@ -1170,9 +995,6 @@ func (o *ObjectInfo) Size() int64 {
if size < 0 {
return size
}
if o.f.opt.NoDataEncryption {
return size
}
return o.f.cipher.EncryptedSize(size)
}
@@ -1184,11 +1006,10 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
// Get the underlying object if there is one
if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
// Prefer direct interface assertion
} else if do, ok := o.ObjectInfo.(*fs.OverrideRemote); ok {
// Unwrap if it is an operations.OverrideRemote
} else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok {
// Otherwise likely is an operations.OverrideRemote
srcObj = do.UnWrap()
} else {
// Otherwise don't unwrap any further
return "", nil
}
// if this is wrapping a local object then we work out the hash
@@ -1200,50 +1021,6 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
return "", nil
}
// GetTier returns storage tier or class of the Object
func (o *ObjectInfo) GetTier() string {
do, ok := o.ObjectInfo.(fs.GetTierer)
if !ok {
return ""
}
return do.GetTier()
}
// ID returns the ID of the Object if known, or "" if not
func (o *ObjectInfo) ID() string {
do, ok := o.ObjectInfo.(fs.IDer)
if !ok {
return ""
}
return do.ID()
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *ObjectInfo) Metadata(ctx context.Context) (fs.Metadata, error) {
do, ok := o.ObjectInfo.(fs.Metadataer)
if !ok {
return nil, nil
}
return do.Metadata(ctx)
}
// MimeType returns the content type of the Object if
// known, or "" if not
//
// This is deliberately unsupported so we don't leak mime type info by
// default.
func (o *ObjectInfo) MimeType(ctx context.Context) string {
return ""
}
// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *ObjectInfo) UnWrap() fs.Object {
return fs.UnWrapObjectInfo(o.ObjectInfo)
}
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
do, ok := o.Object.(fs.IDer)
@@ -1272,37 +1049,6 @@ func (o *Object) GetTier() string {
return do.GetTier()
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
do, ok := o.Object.(fs.Metadataer)
if !ok {
return nil, nil
}
return do.Metadata(ctx)
}
// SetMetadata sets metadata for an Object
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
do, ok := o.Object.(fs.SetMetadataer)
if !ok {
return fs.ErrorNotImplemented
}
return do.SetMetadata(ctx, metadata)
}
// MimeType returns the content type of the Object if
// known, or "" if not
//
// This is deliberately unsupported so we don't leak mime type info by
// default.
func (o *Object) MimeType(ctx context.Context) string {
return ""
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@@ -1319,14 +1065,16 @@ var (
_ fs.Abouter = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.DirSetModTimer = (*Fs)(nil)
_ fs.MkdirMetadataer = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.FullObjectInfo = (*ObjectInfo)(nil)
_ fs.FullObject = (*Object)(nil)
_ fs.ObjectInfo = (*ObjectInfo)(nil)
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
_ fs.SetTierer = (*Object)(nil)
_ fs.GetTierer = (*Object)(nil)
)


@@ -17,28 +17,41 @@ import (
"github.com/stretchr/testify/require"
)
type testWrapper struct {
fs.ObjectInfo
}
// UnWrap returns the Object that this Object is wrapping or nil if it
// isn't wrapping anything
func (o testWrapper) UnWrap() fs.Object {
if o, ok := o.ObjectInfo.(fs.Object); ok {
return o
}
return nil
}
// Create a temporary local fs to upload things from
func makeTempLocalFs(t *testing.T) (localFs fs.Fs) {
func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
localFs, err := fs.TemporaryLocalFs(context.Background())
require.NoError(t, err)
t.Cleanup(func() {
cleanup = func() {
require.NoError(t, localFs.Rmdir(context.Background(), ""))
})
return localFs
}
return localFs, cleanup
}
// Upload a file to a remote
func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object) {
func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object, cleanup func()) {
inBuf := bytes.NewBufferString(contents)
t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC)
upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil)
obj, err := f.Put(context.Background(), inBuf, upSrc)
require.NoError(t, err)
t.Cleanup(func() {
cleanup = func() {
require.NoError(t, obj.Remove(context.Background()))
})
return obj
}
return obj, cleanup
}
// Test the ObjectInfo
@@ -52,9 +65,11 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
path = "_wrap"
}
localFs := makeTempLocalFs(t)
localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
obj := uploadFile(t, localFs, path, contents)
obj, cleanupObj := uploadFile(t, localFs, path, contents)
defer cleanupObj()
// encrypt the data
inBuf := bytes.NewBufferString(contents)
@@ -68,7 +83,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
var oi fs.ObjectInfo = obj
if wrap {
// wrap the object in an fs.ObjectUnwrapper if required
oi = fs.NewOverrideRemote(oi, "new_remote")
oi = testWrapper{oi}
}
// wrap the object in a crypt for upload using the nonce we
@@ -76,9 +91,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
src := f.newObjectInfo(oi, nonce)
// Test ObjectInfo methods
if !f.opt.NoDataEncryption {
assert.Equal(t, int64(outBuf.Len()), src.Size())
}
assert.Equal(t, int64(outBuf.Len()), src.Size())
assert.Equal(t, f, src.Fs())
assert.NotEqual(t, path, src.Remote())
@@ -101,13 +114,16 @@ func testComputeHash(t *testing.T, f *Fs) {
t.Skipf("%v: does not support hashes", f.Fs)
}
localFs := makeTempLocalFs(t)
localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
// Upload a file to localFs as a test object
localObj := uploadFile(t, localFs, path, contents)
localObj, cleanupLocalObj := uploadFile(t, localFs, path, contents)
defer cleanupLocalObj()
// Upload the same data to the remote Fs also
remoteObj := uploadFile(t, f, path, contents)
remoteObj, cleanupRemoteObj := uploadFile(t, f, path, contents)
defer cleanupRemoteObj()
// Calculate the expected Hash of the remote object
computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType)


@@ -4,7 +4,6 @@ package crypt_test
import (
"os"
"path/filepath"
"runtime"
"testing"
"github.com/rclone/rclone/backend/crypt"
@@ -24,13 +23,13 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*crypt.Object)(nil),
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}
// TestStandard runs integration tests against the remote
func TestStandardBase32(t *testing.T) {
func TestStandard(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
@@ -45,53 +44,8 @@ func TestStandardBase32(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"},
},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
func TestStandardBase64(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
name := "TestCrypt"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*crypt.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "crypt"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base64"},
},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
func TestStandardBase32768(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
name := "TestCrypt"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*crypt.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "crypt"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base32768"},
},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -111,9 +65,8 @@ func TestOff(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
{Name: name, Key: "filename_encryption", Value: "off"},
},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -122,9 +75,6 @@ func TestObfuscate(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
if runtime.GOOS == "darwin" {
t.Skip("Skipping on macOS as obfuscating control characters makes filenames macOS can't cope with")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
name := "TestCrypt3"
fstests.Run(t, &fstests.Opt{
@@ -137,9 +87,8 @@ func TestObfuscate(t *testing.T) {
{Name: name, Key: "filename_encryption", Value: "obfuscate"},
},
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -148,9 +97,6 @@ func TestNoDataObfuscate(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
if runtime.GOOS == "darwin" {
t.Skip("Skipping on macOS as obfuscating control characters makes filenames macOS can't cope with")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
name := "TestCrypt4"
fstests.Run(t, &fstests.Opt{
@@ -164,8 +110,7 @@ func TestNoDataObfuscate(t *testing.T) {
{Name: name, Key: "no_data_encryption", Value: "true"},
},
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}


@@ -4,15 +4,15 @@
// buffers which are a multiple of an underlying crypto block size.
package pkcs7
import "errors"
import "github.com/pkg/errors"
// Errors Unpad can return
var (
ErrorPaddingNotFound = errors.New("bad PKCS#7 padding - not padded")
ErrorPaddingNotAMultiple = errors.New("bad PKCS#7 padding - not a multiple of blocksize")
ErrorPaddingTooLong = errors.New("bad PKCS#7 padding - too long")
ErrorPaddingTooShort = errors.New("bad PKCS#7 padding - too short")
ErrorPaddingNotAllTheSame = errors.New("bad PKCS#7 padding - not all the same")
ErrorPaddingNotFound = errors.New("Bad PKCS#7 padding - not padded")
ErrorPaddingNotAMultiple = errors.New("Bad PKCS#7 padding - not a multiple of blocksize")
ErrorPaddingTooLong = errors.New("Bad PKCS#7 padding - too long")
ErrorPaddingTooShort = errors.New("Bad PKCS#7 padding - too short")
ErrorPaddingNotAllTheSame = errors.New("Bad PKCS#7 padding - not all the same")
)
// Pad buf using PKCS#7 to a multiple of n.
@@ -25,7 +25,7 @@ func Pad(n int, buf []byte) []byte {
}
length := len(buf)
padding := n - (length % n)
for range padding {
for i := 0; i < padding; i++ {
buf = append(buf, byte(padding))
}
if (len(buf) % n) != 0 {
@@ -54,7 +54,7 @@ func Unpad(n int, buf []byte) ([]byte, error) {
if padding == 0 {
return nil, ErrorPaddingTooShort
}
for i := range padding {
for i := 0; i < padding; i++ {
if buf[length-1-i] != byte(padding) {
return nil, ErrorPaddingNotAllTheSame
}


@@ -1,38 +0,0 @@
// Type definitions specific to Dataverse
package api
// DataverseDatasetResponse is returned by the Dataverse dataset API
type DataverseDatasetResponse struct {
Status string `json:"status"`
Data DataverseDataset `json:"data"`
}
// DataverseDataset is the representation of a dataset
type DataverseDataset struct {
LatestVersion DataverseDatasetVersion `json:"latestVersion"`
}
// DataverseDatasetVersion is the representation of a dataset version
type DataverseDatasetVersion struct {
LastUpdateTime string `json:"lastUpdateTime"`
Files []DataverseFile `json:"files"`
}
// DataverseFile is the representation of a file found in a dataset
type DataverseFile struct {
DirectoryLabel string `json:"directoryLabel"`
DataFile DataverseDataFile `json:"dataFile"`
}
// DataverseDataFile represents file metadata details
type DataverseDataFile struct {
ID int64 `json:"id"`
Filename string `json:"filename"`
ContentType string `json:"contentType"`
FileSize int64 `json:"filesize"`
OriginalFileFormat string `json:"originalFileFormat"`
OriginalFileSize int64 `json:"originalFileSize"`
OriginalFileName string `json:"originalFileName"`
MD5 string `json:"md5"`
}


@@ -1,33 +0,0 @@
// Type definitions specific to InvenioRDM
package api
// InvenioRecordResponse is the representation of a record stored in InvenioRDM
type InvenioRecordResponse struct {
Links InvenioRecordResponseLinks `json:"links"`
}
// InvenioRecordResponseLinks represents a record's links
type InvenioRecordResponseLinks struct {
Self string `json:"self"`
}
// InvenioFilesResponse is the representation of a record's files
type InvenioFilesResponse struct {
Entries []InvenioFilesResponseEntry `json:"entries"`
}
// InvenioFilesResponseEntry is the representation of a file entry
type InvenioFilesResponseEntry struct {
Key string `json:"key"`
Checksum string `json:"checksum"`
Size int64 `json:"size"`
Updated string `json:"updated"`
MimeType string `json:"mimetype"`
Links InvenioFilesResponseEntryLinks `json:"links"`
}
// InvenioFilesResponseEntryLinks represents file links details
type InvenioFilesResponseEntryLinks struct {
Content string `json:"content"`
}


@@ -1,26 +0,0 @@
// Package api has general type definitions for doi
package api
// DoiResolverResponse is returned by the DOI resolver API
//
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
type DoiResolverResponse struct {
ResponseCode int `json:"responseCode"`
Handle string `json:"handle"`
Values []DoiResolverResponseValue `json:"values"`
}
// DoiResolverResponseValue is a single handle record value
type DoiResolverResponseValue struct {
Index int `json:"index"`
Type string `json:"type"`
Data DoiResolverResponseValueData `json:"data"`
TTL int `json:"ttl"`
Timestamp string `json:"timestamp"`
}
// DoiResolverResponseValueData is the data held in a handle value
type DoiResolverResponseValueData struct {
Format string `json:"format"`
Value any `json:"value"`
}


@@ -1,112 +0,0 @@
// Implementation for Dataverse
package doi
import (
"context"
"fmt"
"net/http"
"net/url"
"path"
"strings"
"time"
"github.com/rclone/rclone/backend/doi/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/rest"
)
// Returns true if resolvedURL is likely a DOI hosted on a Dataverse installation
func activateDataverse(resolvedURL *url.URL) (isActive bool) {
queryValues := resolvedURL.Query()
persistentID := queryValues.Get("persistentId")
return persistentID != ""
}
// Resolve the main API endpoint for a DOI hosted on a Dataverse installation
func resolveDataverseEndpoint(resolvedURL *url.URL) (provider Provider, endpoint *url.URL, err error) {
queryValues := resolvedURL.Query()
persistentID := queryValues.Get("persistentId")
query := url.Values{}
query.Add("persistentId", persistentID)
endpointURL := resolvedURL.ResolveReference(&url.URL{Path: "/api/datasets/:persistentId/", RawQuery: query.Encode()})
return Dataverse, endpointURL, nil
}
// dataverseProvider implements the doiProvider interface for Dataverse installations
type dataverseProvider struct {
f *Fs
}
// ListEntries returns the full list of entries found at the remote, regardless of root
func (dp *dataverseProvider) ListEntries(ctx context.Context) (entries []*Object, err error) {
// Use the cache if populated
cachedEntries, found := dp.f.cache.GetMaybe("files")
if found {
parsedEntries, ok := cachedEntries.([]Object)
if ok {
for _, entry := range parsedEntries {
newEntry := entry
entries = append(entries, &newEntry)
}
return entries, nil
}
}
filesURL := dp.f.endpoint
var res *http.Response
var result api.DataverseDatasetResponse
opts := rest.Opts{
Method: "GET",
Path: strings.TrimLeft(filesURL.EscapedPath(), "/"),
Parameters: filesURL.Query(),
}
err = dp.f.pacer.Call(func() (bool, error) {
res, err = dp.f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, fmt.Errorf("readDir failed: %w", err)
}
modTime, modTimeErr := time.Parse(time.RFC3339, result.Data.LatestVersion.LastUpdateTime)
if modTimeErr != nil {
fs.Logf(dp.f, "error: could not parse last update time %v", modTimeErr)
modTime = timeUnset
}
for _, file := range result.Data.LatestVersion.Files {
contentURLPath := fmt.Sprintf("/api/access/datafile/%d", file.DataFile.ID)
query := url.Values{}
query.Add("format", "original")
contentURL := dp.f.endpoint.ResolveReference(&url.URL{Path: contentURLPath, RawQuery: query.Encode()})
entry := &Object{
fs: dp.f,
remote: path.Join(file.DirectoryLabel, file.DataFile.Filename),
contentURL: contentURL.String(),
size: file.DataFile.FileSize,
modTime: modTime,
md5: file.DataFile.MD5,
contentType: file.DataFile.ContentType,
}
if file.DataFile.OriginalFileName != "" {
entry.remote = path.Join(file.DirectoryLabel, file.DataFile.OriginalFileName)
entry.size = file.DataFile.OriginalFileSize
entry.contentType = file.DataFile.OriginalFileFormat
}
entries = append(entries, entry)
}
// Populate the cache
cacheEntries := []Object{}
for _, entry := range entries {
cacheEntries = append(cacheEntries, *entry)
}
dp.f.cache.Put("files", cacheEntries)
return entries, nil
}
func newDataverseProvider(f *Fs) doiProvider {
return &dataverseProvider{
f: f,
}
}


@@ -1,653 +0,0 @@
// Package doi provides a filesystem interface for digital objects identified by DOIs.
//
// See: https://www.doi.org/the-identifier/what-is-a-doi/
package doi
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"path"
"strings"
"time"
"github.com/rclone/rclone/backend/doi/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/cache"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
const (
// the URL of the DOI resolver
//
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
doiResolverAPIURL = "https://doi.org/api"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
var (
errorReadOnly = errors.New("doi remotes are read only")
timeUnset = time.Unix(0, 0)
)
func init() {
fsi := &fs.RegInfo{
Name: "doi",
Description: "DOI datasets",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "doi",
Help: "The DOI or the doi.org URL.",
Required: true,
}, {
Name: fs.ConfigProvider,
Help: `DOI provider.
The DOI provider can be set when rclone does not automatically recognize a supported DOI provider.`,
Examples: []fs.OptionExample{
{
Value: "auto",
Help: "Auto-detect provider",
},
{
Value: string(Zenodo),
Help: "Zenodo",
}, {
Value: string(Dataverse),
Help: "Dataverse",
}, {
Value: string(Invenio),
Help: "Invenio",
}},
Required: false,
Advanced: true,
}, {
Name: "doi_resolver_api_url",
Help: `The URL of the DOI resolver API to use.
The DOI resolver can be set for testing or for cases when the canonical DOI resolver API cannot be used.
Defaults to "https://doi.org/api".`,
Required: false,
Advanced: true,
}},
}
fs.Register(fsi)
}
// Provider defines the type of provider hosting the DOI
type Provider string
const (
// Zenodo provider, see https://zenodo.org
Zenodo Provider = "zenodo"
// Dataverse provider, see https://dataverse.harvard.edu
Dataverse Provider = "dataverse"
// Invenio provider, see https://inveniordm.docs.cern.ch
Invenio Provider = "invenio"
)
// Options defines the configuration for this backend
type Options struct {
Doi string `config:"doi"` // The DOI, a digital identifier of an object, usually a dataset
Provider string `config:"provider"` // The DOI provider
DoiResolverAPIURL string `config:"doi_resolver_api_url"` // The URL of the DOI resolver API to use.
}
// Fs stores the interface to the remote HTTP files
type Fs struct {
name string // name of this remote
root string // the path we are working on
provider Provider // the DOI provider
doiProvider doiProvider // the interface used to interact with the DOI provider
features *fs.Features // optional features
opt Options // options for this backend
ci *fs.ConfigInfo // global config
endpoint *url.URL // the main API endpoint for this remote
endpointURL string // endpoint as a string
srv *rest.Client // the connection to the server
pacer *fs.Pacer // pacer for API calls
cache *cache.Cache // a cache for the remote metadata
}
// Object is a remote object that has been stat'd (so it exists, but is not necessarily open for reading)
type Object struct {
fs *Fs // what this object is part of
remote string // the remote path
contentURL string // the URL where the contents of the file can be downloaded
size int64 // size of the object
modTime time.Time // modification time of the object
contentType string // content type of the object
md5 string // MD5 hash of the object content
}
// doiProvider is the interface used to list objects in a DOI
type doiProvider interface {
// ListEntries returns the full list of entries found at the remote, regardless of root
ListEntries(ctx context.Context) (entries []*Object, err error)
}
// Parse the input string as a DOI
// Examples:
// 10.1000/182 -> 10.1000/182
// https://doi.org/10.1000/182 -> 10.1000/182
// doi:10.1000/182 -> 10.1000/182
func parseDoi(doi string) string {
doiURL, err := url.Parse(doi)
if err != nil {
return doi
}
if doiURL.Scheme == "doi" {
return strings.TrimLeft(strings.TrimPrefix(doi, "doi:"), "/")
}
if strings.HasSuffix(doiURL.Hostname(), "doi.org") {
return strings.TrimLeft(doiURL.Path, "/")
}
return doi
}
// Resolve a DOI to a URL
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
func resolveDoiURL(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (doiURL *url.URL, err error) {
resolverURL := opt.DoiResolverAPIURL
if resolverURL == "" {
resolverURL = doiResolverAPIURL
}
var result api.DoiResolverResponse
params := url.Values{}
params.Add("index", "1")
opts := rest.Opts{
Method: "GET",
RootURL: resolverURL,
Path: "/handles/" + opt.Doi,
Parameters: params,
}
err = pacer.Call(func() (bool, error) {
res, err := srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, err
}
if result.ResponseCode != 1 {
return nil, fmt.Errorf("could not resolve DOI (error code %d)", result.ResponseCode)
}
resolvedURLStr := ""
for _, value := range result.Values {
if value.Type == "URL" && value.Data.Format == "string" {
valueStr, ok := value.Data.Value.(string)
if !ok {
return nil, fmt.Errorf("could not resolve DOI (incorrect response format)")
}
resolvedURLStr = valueStr
}
}
resolvedURL, err := url.Parse(resolvedURLStr)
if err != nil {
return nil, err
}
return resolvedURL, nil
}
// Resolve the passed configuration into a provider and endpoint
func resolveEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (provider Provider, endpoint *url.URL, err error) {
resolvedURL, err := resolveDoiURL(ctx, srv, pacer, opt)
if err != nil {
return "", nil, err
}
switch opt.Provider {
case string(Dataverse):
return resolveDataverseEndpoint(resolvedURL)
case string(Invenio):
return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
case string(Zenodo):
return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
}
hostname := strings.ToLower(resolvedURL.Hostname())
if hostname == "dataverse.harvard.edu" || activateDataverse(resolvedURL) {
return resolveDataverseEndpoint(resolvedURL)
}
if hostname == "zenodo.org" || strings.HasSuffix(hostname, ".zenodo.org") {
return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
}
if activateInvenio(ctx, srv, pacer, resolvedURL) {
return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
}
return "", nil, fmt.Errorf("provider '%s' is not supported", resolvedURL.Hostname())
}
// Make the http connection from the passed options
func (f *Fs) httpConnection(ctx context.Context, opt *Options) (isFile bool, err error) {
provider, endpoint, err := resolveEndpoint(ctx, f.srv, f.pacer, opt)
if err != nil {
return false, err
}
// Update f with the new parameters
f.srv.SetRoot(endpoint.ResolveReference(&url.URL{Path: "/"}).String())
f.endpoint = endpoint
f.endpointURL = endpoint.String()
f.provider = provider
f.opt.Provider = string(provider)
switch f.provider {
case Dataverse:
f.doiProvider = newDataverseProvider(f)
case Invenio, Zenodo:
f.doiProvider = newInvenioProvider(f)
default:
return false, fmt.Errorf("provider type '%s' not supported", f.provider)
}
// Determine if the root is a file
entries, err := f.doiProvider.ListEntries(ctx)
if err != nil {
return false, err
}
for _, entry := range entries {
if entry.remote == f.root {
isFile = true
break
}
}
return isFile, nil
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this res and err
// deserve to be retried. It returns the err as a convenience.
func shouldRetry(ctx context.Context, res *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(res, retryErrorCodes), err
}
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
root = strings.Trim(root, "/")
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
opt.Doi = parseDoi(opt.Doi)
client := fshttp.NewClient(ctx)
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
ci: ci,
srv: rest.NewClient(client),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
cache: cache.New(),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
isFile, err := f.httpConnection(ctx, opt)
if err != nil {
return nil, err
}
if isFile {
// return an error with an fs which points to the parent
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
}
f.root = newRoot
return f, fs.ErrorIsFile
}
return f, nil
}
// Name returns the configured name of the file system
func (f *Fs) Name() string {
return f.name
}
// Root returns the root for the filesystem
func (f *Fs) Root() string {
return f.root
}
// String returns the URL for the filesystem
func (f *Fs) String() string {
return fmt.Sprintf("DOI %s", f.opt.Doi)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Precision is the remote http file system's modtime precision, which we have no way of knowing. We estimate at 1s
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Hashes returns the hash types supported by the remote (MD5 only)
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return errorReadOnly
}
// Remove a remote http file object
func (o *Object) Remove(ctx context.Context) error {
return errorReadOnly
}
// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return errorReadOnly
}
// NewObject creates a new remote http file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
entries, err := f.doiProvider.ListEntries(ctx)
if err != nil {
return nil, err
}
remoteFullPath := remote
if f.root != "" {
remoteFullPath = path.Join(f.root, remote)
}
for _, entry := range entries {
if entry.Remote() == remoteFullPath {
return entry, nil
}
}
return nil, fs.ErrorObjectNotFound
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
fileEntries, err := f.doiProvider.ListEntries(ctx)
if err != nil {
return nil, fmt.Errorf("error listing %q: %w", dir, err)
}
fullDir := path.Join(f.root, dir)
if fullDir != "" {
fullDir += "/"
}
dirPaths := map[string]bool{}
for _, entry := range fileEntries {
// First, filter out files not in `fullDir`
if !strings.HasPrefix(entry.remote, fullDir) {
continue
}
// Then, find entries in subfolders
remotePath := entry.remote
if fullDir != "" {
remotePath = strings.TrimLeft(strings.TrimPrefix(remotePath, fullDir), "/")
}
parts := strings.SplitN(remotePath, "/", 2)
if len(parts) == 1 {
newEntry := *entry
newEntry.remote = path.Join(dir, remotePath)
entries = append(entries, &newEntry)
} else {
dirPaths[path.Join(dir, parts[0])] = true
}
}
for dirPath := range dirPaths {
entry := fs.NewDir(dirPath, time.Time{})
entries = append(entries, entry)
}
return entries, nil
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return nil, errorReadOnly
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return nil, errorReadOnly
}
// Fs is the filesystem this remote http file object is located within
func (o *Object) Fs() fs.Info {
return o.fs
}
// String returns the URL to the remote HTTP file
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the name of the remote HTTP file, relative to the fs root
func (o *Object) Remote() string {
return o.remote
}
// Hash returns "" since HTTP (in Go or OpenSSH) doesn't support remote calculation of hashes
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
return o.md5, nil
}
// Size returns the size in bytes of the remote http file
func (o *Object) Size() int64 {
return o.size
}
// ModTime returns the modification time of the remote http file
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// SetModTime sets the modification and access time to the specified time
//
// it also updates the info field
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return errorReadOnly
}
// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
func (o *Object) Storable() bool {
return true
}
// Open a remote http file object for reading. Seek is supported
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
fs.FixRangeOption(options, o.size)
opts := rest.Opts{
Method: "GET",
RootURL: o.contentURL,
Options: options,
}
var res *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, fmt.Errorf("Open failed: %w", err)
}
// Handle non-compliant redirects
if res.Header.Get("Location") != "" {
newURL, err := res.Location()
if err == nil {
opts.RootURL = newURL.String()
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, fmt.Errorf("Open failed: %w", err)
}
}
}
return res.Body, nil
}
// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return errorReadOnly
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
return o.contentType
}
var commandHelp = []fs.CommandHelp{{
Name: "metadata",
Short: "Show metadata about the DOI.",
Long: `This command returns a JSON object with some information about the DOI.
Usage example:
` + "```console" + `
rclone backend metadata doi:
` + "```" + `
It returns a JSON object representing metadata about the DOI.`,
}, {
Name: "set",
Short: "Set command for updating the config parameters.",
Long: `This set command can be used to update the config parameters
for a running doi backend.
Usage examples:
` + "```console" + `
rclone backend set doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=doi: -o doi=NEW_DOI
` + "```" + `
The option keys are named as they are in the config file.
This rebuilds the connection to the doi backend when it is called with
the new parameters. Only new parameters need be passed as the values
will default to those currently in use.
It doesn't return anything.`,
}}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
switch name {
case "metadata":
return f.ShowMetadata(ctx)
case "set":
newOpt := f.opt
err := configstruct.Set(configmap.Simple(opt), &newOpt)
if err != nil {
return nil, fmt.Errorf("reading config: %w", err)
}
_, err = f.httpConnection(ctx, &newOpt)
if err != nil {
return nil, fmt.Errorf("updating session: %w", err)
}
f.opt = newOpt
keys := []string{}
for k := range opt {
keys = append(keys, k)
}
fs.Logf(f, "Updated config values: %s", strings.Join(keys, ", "))
return nil, nil
default:
return nil, fs.ErrorCommandNotFound
}
}
// ShowMetadata returns some metadata about the corresponding DOI
func (f *Fs) ShowMetadata(ctx context.Context) (metadata any, err error) {
doiURL, err := url.Parse("https://doi.org/" + f.opt.Doi)
if err != nil {
return nil, err
}
info := map[string]any{}
info["DOI"] = f.opt.Doi
info["URL"] = doiURL.String()
info["metadataURL"] = f.endpointURL
info["provider"] = f.provider
return info, nil
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Commander = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
)


@@ -1,260 +0,0 @@
package doi
import (
"context"
"crypto/md5"
"encoding/hex"
"encoding/json"
"io"
"net/http"
"net/http/httptest"
"net/url"
"sort"
"strings"
"testing"
"time"
"github.com/rclone/rclone/backend/doi/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/hash"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var remoteName = "TestDoi"
func TestParseDoi(t *testing.T) {
// 10.1000/182 -> 10.1000/182
doi := "10.1000/182"
parsed := parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
// https://doi.org/10.1000/182 -> 10.1000/182
doi = "https://doi.org/10.1000/182"
parsed = parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
// https://dx.doi.org/10.1000/182 -> 10.1000/182
doi = "https://dxdoi.org/10.1000/182"
parsed = parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
// doi:10.1000/182 -> 10.1000/182
doi = "doi:10.1000/182"
parsed = parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
// doi://10.1000/182 -> 10.1000/182
doi = "doi://10.1000/182"
parsed = parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
}
// prepareMockDoiResolverServer prepares a test server to resolve DOIs
func prepareMockDoiResolverServer(t *testing.T, resolvedURL string) (doiResolverAPIURL string) {
mux := http.NewServeMux()
// Handle requests for resolving DOIs
mux.HandleFunc("GET /api/handles/{handle...}", func(w http.ResponseWriter, r *http.Request) {
// Check that we are resolving a DOI
handle := strings.TrimPrefix(r.URL.Path, "/api/handles/")
assert.NotEmpty(t, handle)
index := r.URL.Query().Get("index")
assert.Equal(t, "1", index)
// Return the most basic response
result := api.DoiResolverResponse{
ResponseCode: 1,
Handle: handle,
Values: []api.DoiResolverResponseValue{
{
Index: 1,
Type: "URL",
Data: api.DoiResolverResponseValueData{
Format: "string",
Value: resolvedURL,
},
},
},
}
resultBytes, err := json.Marshal(result)
require.NoError(t, err)
w.Header().Add("Content-Type", "application/json")
_, err = w.Write(resultBytes)
require.NoError(t, err)
})
// Make the test server
ts := httptest.NewServer(mux)
// Close the server at the end of the test
t.Cleanup(ts.Close)
return ts.URL + "/api"
}
func md5Sum(text string) string {
hash := md5.Sum([]byte(text))
return hex.EncodeToString(hash[:])
}
// prepareMockZenodoServer prepares a test server that mocks Zenodo.org
func prepareMockZenodoServer(t *testing.T, files map[string]string) *httptest.Server {
mux := http.NewServeMux()
// Handle requests for a single record
mux.HandleFunc("GET /api/records/{recordID...}", func(w http.ResponseWriter, r *http.Request) {
// Check that we are returning data about a single record
recordID := strings.TrimPrefix(r.URL.Path, "/api/records/")
assert.NotEmpty(t, recordID)
// Return the most basic response
selfURL, err := url.Parse("http://" + r.Host)
require.NoError(t, err)
selfURL = selfURL.JoinPath(r.URL.String())
result := api.InvenioRecordResponse{
Links: api.InvenioRecordResponseLinks{
Self: selfURL.String(),
},
}
resultBytes, err := json.Marshal(result)
require.NoError(t, err)
w.Header().Add("Content-Type", "application/json")
_, err = w.Write(resultBytes)
require.NoError(t, err)
})
// Handle requests for listing files in a record
mux.HandleFunc("GET /api/records/{record}/files", func(w http.ResponseWriter, r *http.Request) {
// Return the most basic response
filesBaseURL, err := url.Parse("http://" + r.Host)
require.NoError(t, err)
filesBaseURL = filesBaseURL.JoinPath("/api/files/")
entries := []api.InvenioFilesResponseEntry{}
for filename, contents := range files {
entries = append(entries,
api.InvenioFilesResponseEntry{
Key: filename,
Checksum: md5Sum(contents),
Size: int64(len(contents)),
Updated: time.Now().UTC().Format(time.RFC3339),
MimeType: "text/plain; charset=utf-8",
Links: api.InvenioFilesResponseEntryLinks{
Content: filesBaseURL.JoinPath(filename).String(),
},
},
)
}
result := api.InvenioFilesResponse{
Entries: entries,
}
resultBytes, err := json.Marshal(result)
require.NoError(t, err)
w.Header().Add("Content-Type", "application/json")
_, err = w.Write(resultBytes)
require.NoError(t, err)
})
// Handle requests for file contents
mux.HandleFunc("/api/files/{file}", func(w http.ResponseWriter, r *http.Request) {
// Check that we are returning the contents of a file
filename := strings.TrimPrefix(r.URL.Path, "/api/files/")
assert.NotEmpty(t, filename)
contents, found := files[filename]
if !found {
w.WriteHeader(404)
return
}
// Return the most basic response
_, err := w.Write([]byte(contents))
require.NoError(t, err)
})
// Make the test server
ts := httptest.NewServer(mux)
// Close the server at the end of the test
t.Cleanup(ts.Close)
return ts
}
func TestZenodoRemote(t *testing.T) {
recordID := "2600782"
doi := "10.5281/zenodo.2600782"
// The files in the dataset
files := map[string]string{
"README.md": "This is a dataset.",
"data.txt": "Some data",
}
ts := prepareMockZenodoServer(t, files)
resolvedURL := ts.URL + "/record/" + recordID
doiResolverAPIURL := prepareMockDoiResolverServer(t, resolvedURL)
testConfig := configmap.Simple{
"type": "doi",
"doi": doi,
"provider": "zenodo",
"doi_resolver_api_url": doiResolverAPIURL,
}
f, err := NewFs(context.Background(), remoteName, "", testConfig)
require.NoError(t, err)
// Test listing the DOI files
entries, err := f.List(context.Background(), "")
require.NoError(t, err)
sort.Sort(entries)
require.Equal(t, len(files), len(entries))
e := entries[0]
assert.Equal(t, "README.md", e.Remote())
assert.Equal(t, int64(18), e.Size())
_, ok := e.(*Object)
assert.True(t, ok)
e = entries[1]
assert.Equal(t, "data.txt", e.Remote())
assert.Equal(t, int64(9), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
// Test reading the DOI files
o, err := f.NewObject(context.Background(), "README.md")
require.NoError(t, err)
assert.Equal(t, int64(18), o.Size())
md5Hash, err := o.Hash(context.Background(), hash.MD5)
require.NoError(t, err)
assert.Equal(t, "464352b1cab5240e44528a56fda33d9d", md5Hash)
fd, err := o.Open(context.Background())
require.NoError(t, err)
data, err := io.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, []byte(files["README.md"]), data)
do, ok := o.(fs.MimeTyper)
require.True(t, ok)
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
o, err = f.NewObject(context.Background(), "data.txt")
require.NoError(t, err)
assert.Equal(t, int64(9), o.Size())
md5Hash, err = o.Hash(context.Background(), hash.MD5)
require.NoError(t, err)
assert.Equal(t, "5b82f8bf4df2bfb0e66ccaa7306fd024", md5Hash)
fd, err = o.Open(context.Background())
require.NoError(t, err)
data, err = io.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, []byte(files["data.txt"]), data)
do, ok = o.(fs.MimeTyper)
require.True(t, ok)
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
}


@@ -1,16 +0,0 @@
// Test DOI filesystem interface
package doi
import (
"testing"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestDoi:",
NilObject: (*Object)(nil),
})
}
