mirror of https://github.com/rclone/rclone.git synced 2025-12-06 00:03:32 +00:00

Compare commits


12 Commits

Author SHA1 Message Date
Nick Craig-Wood
486e713337 Version v1.61.1 2022-12-23 17:05:30 +00:00
Nick Craig-Wood
46e96918dc docs: fix unescaped HTML 2022-12-23 16:55:03 +00:00
Nick Craig-Wood
639b61de95 lib/http: shutdown all servers on exit to remove unix socket
Before this change only serve http was shutting down its server, which
was causing other servers such as serve restic to leave behind their
unix sockets.

This change moves the finalisation to lib/http so all servers have it
and removes it from serve http.

Fixes #6648
2022-12-23 12:32:46 +00:00
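The pattern described is easy to picture: the serving library keeps a registry of every server it starts and closes them all when the command finishes, which also unlinks any unix socket the listener created. A minimal sketch in Go — all names are invented for illustration and this is not rclone's actual lib/http API:

```go
package main

import (
	"context"
	"log"
	"net"
	"net/http"
	"sync"
	"time"
)

var (
	mu      sync.Mutex
	servers []*http.Server // every server started via startServer
)

// startServer begins serving on l and records the server so that
// shutdownAll can finalise it later.
func startServer(l net.Listener, h http.Handler) {
	s := &http.Server{Handler: h}
	mu.Lock()
	servers = append(servers, s)
	mu.Unlock()
	go func() { _ = s.Serve(l) }()
}

// shutdownAll closes every registered server. Closing a unix listener
// removes its socket file, which is the cleanup this commit restores.
func shutdownAll() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	mu.Lock()
	defer mu.Unlock()
	for _, s := range servers {
		_ = s.Shutdown(ctx)
	}
}

func main() {
	l, err := net.Listen("unix", "/tmp/demo.sock")
	if err != nil {
		log.Fatal(err)
	}
	startServer(l, http.NotFoundHandler())
	defer shutdownAll() // runs for every server, not just one command
}
```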
Nick Craig-Wood
b03ee4e9e7 serve webdav: fix running duplicate Serve call
Before this change we were starting the server twice for webdav, which
was inefficient and caused problems at exit.
2022-12-23 12:32:46 +00:00
Nick Craig-Wood
176af2b217 serve restic: don't serve via http if serving via --stdio
Before this change, we started the http listener even if --stdio was
supplied.

This also moves the log message so the user won't see the serving-via-HTTP
message unless they are really serving via HTTP.

Fixes #6646
2022-12-23 12:32:46 +00:00
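In outline the fix is a guard before the listener is created, so both the listener and its log line only happen in the non-stdio case. A hedged sketch with invented names, not rclone's real code:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
)

type options struct {
	stdio bool
	addr  string
}

func serveRestic(opt options, h http.Handler) error {
	if opt.stdio {
		// Serve a single session over stdin/stdout: no TCP listener,
		// and no "serving via HTTP" log message.
		return serveStdio(os.Stdin, os.Stdout, h)
	}
	// Only reached (and only logged) when really serving via HTTP.
	log.Printf("Serving restic REST API on %s", opt.addr)
	return http.ListenAndServe(opt.addr, h)
}

// serveStdio is a stand-in for the real stdio transport.
func serveStdio(r io.Reader, w io.Writer, h http.Handler) error {
	fmt.Fprintln(w, "serving over stdio")
	return nil
}

func main() {
	_ = serveRestic(options{stdio: true}, http.NotFoundHandler())
}
```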
Nick Craig-Wood
6be0644178 serve restic: fix immediate exit when not using stdio
In the lib/http refactor

    52443c2444 restic: refactor to use lib/http

We forgot to serve the data and wait for the server to finish. This is
not tested in the unit tests as it is part of the command line
handler.

Fixes #6644 Fixes #6647
2022-12-23 12:32:46 +00:00
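The shape of the bug: the refactored command constructed the server but never blocked on it, so the process fell off the end of the command handler and exited at once. A generic Go sketch of the corrected flow, not rclone's exact code:

```go
package main

import (
	"log"
	"net"
	"net/http"
)

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:8080")
	if err != nil {
		log.Fatal(err)
	}
	s := &http.Server{Handler: http.NotFoundHandler()}

	errc := make(chan error, 1)
	go func() { errc <- s.Serve(l) }() // start serving the data

	// Without this wait the command returns immediately and the
	// process exits before handling a single request - the bug above.
	if err := <-errc; err != http.ErrServerClosed {
		log.Fatal(err)
	}
}
```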
Nick Craig-Wood
0ce5e57c30 serve webdav: fix --baseurl handling after lib/http refactor
The webdav library was confused by the Path manipulation done by
lib/http when stripping the prefix.

This patch adds the prefix back before calling it.

Fixes #6650
2022-12-23 12:32:46 +00:00
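What the patch does, in outline: lib/http strips the --baseurl prefix from the request path before the handler runs, but golang.org/x/net/webdav computes its own paths from the request URL, so the handler re-adds the prefix before delegating. A sketch under those assumptions, with invented field names:

```go
package main

import (
	"net/http"

	"golang.org/x/net/webdav"
)

type server struct {
	prefix string          // the --baseurl prefix stripped upstream
	dav    *webdav.Handler // the webdav library strips Prefix itself
}

func (s *server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// Restore the prefix the surrounding mux removed, so the webdav
	// library sees the path it expects.
	r.URL.Path = s.prefix + r.URL.Path
	s.dav.ServeHTTP(w, r)
}

func main() {
	h := &server{
		prefix: "/base",
		dav: &webdav.Handler{
			Prefix:     "/base",
			FileSystem: webdav.Dir("."),
			LockSystem: webdav.NewMemLS(),
		},
	}
	_ = http.ListenAndServe("127.0.0.1:8081", h)
}
```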
Nick Craig-Wood
bc214291d5 azureblob: fix "409 Public access is not permitted on this storage account"
This error was caused by rclone supplying an empty
`x-ms-blob-public-access:` header when creating a container for
private access, rather than omitting it completely.

This is a valid way of specifying that containers should be private, but if
the storage account has the flag "Blob public access" unset then it
gives "409 Public access is not permitted on this storage account".

This patch fixes the problem by only supplying the header if the
access is set.

Fixes #6645
2022-12-23 12:32:46 +00:00
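Sketched loosely against the Azure SDK for Go (sdk/storage/azblob), without claiming this is rclone's exact code: the point is that a nil Access option omits the x-ms-blob-public-access header entirely, while an empty string would send the header with an empty value and trip the 409.

```go
package blobdemo

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
)

// createContainer only sets the public-access option when the user
// configured one; leaving opts.Access nil omits the header.
func createContainer(ctx context.Context, client *container.Client, access string) error {
	opts := &container.CreateOptions{}
	if access != "" {
		a := container.PublicAccessType(access)
		opts.Access = &a
	}
	_, err := client.Create(ctx, opts)
	return err
}
```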
Kaloyan Raev
d3e09d86e0 s3/storj: update endpoints
Storj switched to a single global s3 endpoint backed by BGP routing.
We want to stop advertising the former regional endpoints and have the
global one as the only option.
2022-12-23 12:32:46 +00:00
Anagh Kumar Baranwal
5a9706ab61 rc: set url to the first value of rc-addr since it has now been converted to an array of strings -- fixes #6641
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2022-12-23 12:32:46 +00:00
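A small sketch of the change as described: --rc-addr now parses into a slice, so the code that builds the rc URL takes the first element. Names are illustrative, not rclone's actual config structs:

```go
package main

import "fmt"

type rcOptions struct {
	ListenAddr []string // --rc-addr is now an array of strings
}

// rcURL derives the URL the rc client should talk to from the first
// configured listen address.
func rcURL(opt rcOptions) string {
	addr := "localhost:5572" // assumed default rc address
	if len(opt.ListenAddr) > 0 {
		addr = opt.ListenAddr[0]
	}
	return "http://" + addr + "/"
}

func main() {
	fmt.Println(rcURL(rcOptions{ListenAddr: []string{"127.0.0.1:5572", ":5573"}}))
}
```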
albertony
cce4340d48 docs: show only significant parts of version number in version introduced label 2022-12-23 12:32:46 +00:00
Nick Craig-Wood
577693e501 Start v1.61.1-DEV development 2022-12-23 12:31:46 +00:00
1895 changed files with 120267 additions and 386163 deletions

.gitattributes (4 changes)

@@ -1,7 +1,3 @@
-# Go writes go.mod and go.sum with lf even on windows
-go.mod text eol=lf
-go.sum text eol=lf
-
 # Ignore generated files in GitHub language statistics and diffs
 /MANUAL.* linguist-generated=true
 /rclone.1 linguist-generated=true

.github/FUNDING.yml (new file, 4 changes)

@@ -0,0 +1,4 @@
+github: [ncw]
+patreon: njcw
+liberapay: ncw
+custom: ["https://rclone.org/donate/"]

.github/dependabot.yml (deleted)

@@ -1,6 +0,0 @@
-version: 2
-updates:
-  - package-ecosystem: "github-actions"
-    directory: "/"
-    schedule:
-      interval: "daily"

.github/workflows/build.yml

@@ -8,33 +8,29 @@ name: build
 on:
   push:
     branches:
-      - '**'
+      - '*'
     tags:
-      - '**'
+      - '*'
   pull_request:
   workflow_dispatch:
     inputs:
       manual:
-        description: Manual run (bypass default conditions)
-        type: boolean
+        required: true
         default: true
 jobs:
   build:
-    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
+    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
     timeout-minutes: 60
-    defaults:
-      run:
-        shell: bash
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.24']
+        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.17', 'go1.18']
         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '>=1.25.0-rc.1'
+            go: '1.19'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -45,14 +41,14 @@ jobs:
           - job_name: linux_386
             os: ubuntu-latest
-            go: '>=1.25.0-rc.1'
+            go: '1.19'
             goarch: 386
             gotags: cmount
             quicktest: true
           - job_name: mac_amd64
-            os: macos-latest
-            go: '>=1.25.0-rc.1'
+            os: macos-11
+            go: '1.19'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -60,15 +56,15 @@ jobs:
             deploy: true
           - job_name: mac_arm64
-            os: macos-latest
-            go: '>=1.25.0-rc.1'
+            os: macos-11
+            go: '1.19'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true
           - job_name: windows
             os: windows-latest
-            go: '>=1.25.0-rc.1'
+            go: '1.19'
             gotags: cmount
             cgo: '0'
             build_flags: '-include "^windows/"'
@@ -78,14 +74,20 @@ jobs:
           - job_name: other_os
             os: ubuntu-latest
-            go: '>=1.25.0-rc.1'
+            go: '1.19'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true
-          - job_name: go1.24
+          - job_name: go1.17
             os: ubuntu-latest
-            go: '1.24'
+            go: '1.17'
+            quicktest: true
+            racequicktest: true
+          - job_name: go1.18
+            os: ubuntu-latest
+            go: '1.18'
             quicktest: true
             racequicktest: true
@@ -95,17 +97,18 @@ jobs:
     steps:
       - name: Checkout
-        uses: actions/checkout@v5
+        uses: actions/checkout@v3
         with:
           fetch-depth: 0
       - name: Install Go
-        uses: actions/setup-go@v6
+        uses: actions/setup-go@v3
         with:
           go-version: ${{ matrix.go }}
           check-latest: true
       - name: Set environment variables
+        shell: bash
         run: |
          echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
          echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
@@ -114,25 +117,20 @@ jobs:
          if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi
       - name: Install Libraries on Linux
+        shell: bash
         run: |
          sudo modprobe fuse
          sudo chmod 666 /dev/fuse
          sudo chown root:$USER /etc/fuse.conf
-          sudo apt-get update
-          sudo apt-get install -y fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
+          sudo apt-get install fuse libfuse-dev rpm pkg-config
         if: matrix.os == 'ubuntu-latest'
       - name: Install Libraries on macOS
+        shell: bash
         run: |
-          # https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788
-          # https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008
-          unset HOMEBREW_NO_INSTALL_FROM_API
-          brew untap --force homebrew/core
-          brew untap --force homebrew/cask
          brew update
          brew install --cask macfuse
-          brew install git-annex git-annex-remote-rclone
-        if: matrix.os == 'macos-latest'
+        if: matrix.os == 'macos-11'
       - name: Install Libraries on Windows
         shell: powershell
@@ -151,6 +149,7 @@ jobs:
         if: matrix.os == 'windows-latest'
       - name: Print Go version and environment
+        shell: bash
         run: |
          printf "Using go at: $(which go)\n"
          printf "Go version: $(go version)\n"
@@ -161,25 +160,38 @@ jobs:
          printf "\n\nSystem environment:\n\n"
          env
+      - name: Go module cache
+        uses: actions/cache@v3
+        with:
+          path: ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
       - name: Build rclone
+        shell: bash
         run: |
          make
       - name: Rclone version
+        shell: bash
         run: |
          rclone version
       - name: Run tests
+        shell: bash
         run: |
          make quicktest
         if: matrix.quicktest
       - name: Race test
+        shell: bash
         run: |
          make racequicktest
         if: matrix.racequicktest
       - name: Run librclone tests
+        shell: bash
         run: |
          make -C librclone/ctest test
          make -C librclone/ctest clean
@@ -187,94 +199,46 @@ jobs:
         if: matrix.librclonetest
       - name: Compile all architectures test
+        shell: bash
         run: |
          make
          make compile_all
         if: matrix.compile_all
       - name: Deploy built binaries
+        shell: bash
         run: |
          if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi
+          if [[ "${{ matrix.os }}" == "windows-latest" ]]; then make release_dep_windows ; fi
          make ci_beta
         env:
           RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
         # working-directory: '$(modulePath)'
         # Deploy binaries if enabled in config && not a PR && not a fork
-        if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
+        if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
   lint:
-    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
+    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
     timeout-minutes: 30
     name: "lint"
     runs-on: ubuntu-latest
     steps:
-      - name: Get runner parameters
-        id: get-runner-parameters
-        run: |
-          echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT
-          echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT
       - name: Checkout
-        uses: actions/checkout@v5
-        with:
-          fetch-depth: 0
+        uses: actions/checkout@v3
+      - name: Code quality test
+        uses: golangci/golangci-lint-action@v3
+        with:
+          # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
+          version: latest
+      # Run govulncheck on the latest go version, the one we build binaries with
       - name: Install Go
-        id: setup-go
-        uses: actions/setup-go@v6
+        uses: actions/setup-go@v3
         with:
-          go-version: '>=1.24.0-rc.1'
+          go-version: 1.19
           check-latest: true
-          cache: false
-      - name: Cache
-        uses: actions/cache@v4
-        with:
-          path: |
-            ~/go/pkg/mod
-            ~/.cache/go-build
-            ~/.cache/golangci-lint
-          key: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-${{ hashFiles('go.sum') }}
-          restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-
-      - name: Code quality test (Linux)
-        uses: golangci/golangci-lint-action@v8
-        with:
-          version: latest
-          skip-cache: true
-      - name: Code quality test (Windows)
-        uses: golangci/golangci-lint-action@v8
-        env:
-          GOOS: "windows"
-        with:
-          version: latest
-          skip-cache: true
-      - name: Code quality test (macOS)
-        uses: golangci/golangci-lint-action@v8
-        env:
-          GOOS: "darwin"
-        with:
-          version: latest
-          skip-cache: true
-      - name: Code quality test (FreeBSD)
-        uses: golangci/golangci-lint-action@v8
-        env:
-          GOOS: "freebsd"
-        with:
-          version: latest
-          skip-cache: true
-      - name: Code quality test (OpenBSD)
-        uses: golangci/golangci-lint-action@v8
-        env:
-          GOOS: "openbsd"
-        with:
-          version: latest
-          skip-cache: true
       - name: Install govulncheck
         run: go install golang.org/x/vuln/cmd/govulncheck@latest
@@ -282,40 +246,34 @@ jobs:
       - name: Scan for vulnerabilities
         run: govulncheck ./...
-      - name: Check Markdown format
-        uses: DavidAnson/markdownlint-cli2-action@v20
-        with:
-          globs: |
-            CONTRIBUTING.md
-            MAINTAINERS.md
-            README.md
-            RELEASE.md
-            CODE_OF_CONDUCT.md
-            docs/content/{authors,bugs,changelog,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md
-      - name: Scan edits of autogenerated files
-        run: bin/check_autogenerated_edits.py 'origin/${{ github.base_ref }}'
-        if: github.event_name == 'pull_request'
   android:
-    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
+    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
     timeout-minutes: 30
     name: "android-all"
     runs-on: ubuntu-latest
     steps:
      - name: Checkout
-        uses: actions/checkout@v5
+        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      # Upgrade together with NDK version
      - name: Set up Go
-        uses: actions/setup-go@v6
+        uses: actions/setup-go@v3
        with:
-          go-version: '>=1.25.0-rc.1'
+          go-version: 1.19
+      - name: Go module cache
+        uses: actions/cache@v3
+        with:
+          path: ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
      - name: Set global environment variables
+        shell: bash
        run: |
          echo "VERSION=$(make version)" >> $GITHUB_ENV
@@ -334,6 +292,7 @@ jobs:
        run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
      - name: arm-v7a Set environment variables
+        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
@@ -347,6 +306,7 @@ jobs:
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a .
      - name: arm64-v8a Set environment variables
+        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
@@ -359,6 +319,7 @@ jobs:
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .
      - name: x86 Set environment variables
+        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
@@ -371,6 +332,7 @@ jobs:
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 .
      - name: x64 Set environment variables
+        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
@@ -388,4 +350,4 @@ jobs:
        env:
          RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
        # Upload artifacts if not a PR && not a fork
-        if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone'
+        if: github.head_ref == '' && github.repository == 'rclone/rclone'

.github/workflows/build_publish_docker_image.yml

@@ -1,294 +1,26 @@
----
-# Github Actions release for rclone
-# -*- compile-command: "yamllint -f parsable build_publish_docker_image.yml" -*-
-name: Build & Push Docker Images
-# Trigger the workflow on push or pull request
+name: Docker beta build
 on:
   push:
     branches:
-      - '**'
-    tags:
-      - '**'
-  workflow_dispatch:
-    inputs:
-      manual:
-        description: Manual run (bypass default conditions)
-        type: boolean
-        default: true
+      - master
 jobs:
-  build-image:
-    if: inputs.manual || (github.repository == 'rclone/rclone' && github.event_name != 'pull_request')
-    timeout-minutes: 60
-    strategy:
-      fail-fast: false
-      matrix:
-        include:
-          - platform: linux/amd64
-            runs-on: ubuntu-24.04
-          - platform: linux/386
-            runs-on: ubuntu-24.04
-          - platform: linux/arm64
-            runs-on: ubuntu-24.04-arm
-          - platform: linux/arm/v7
-            runs-on: ubuntu-24.04-arm
-          - platform: linux/arm/v6
-            runs-on: ubuntu-24.04-arm
-    name: Build Docker Image for ${{ matrix.platform }}
-    runs-on: ${{ matrix.runs-on }}
-    steps:
-      - name: Free Space
-        shell: bash
-        run: |
-          df -h .
-          # Remove android SDK
-          sudo rm -rf /usr/local/lib/android || true
-          # Remove .net runtime
-          sudo rm -rf /usr/share/dotnet || true
-          df -h .
-      - name: Checkout Repository
-        uses: actions/checkout@v5
-        with:
-          fetch-depth: 0
-      - name: Set REPO_NAME Variable
-        run: |
-          echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}
-      - name: Set PLATFORM Variable
-        run: |
-          platform=${{ matrix.platform }}
-          echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV
-      - name: Set CACHE_NAME Variable
-        shell: python
-        run: |
-          import os, re
-          def slugify(input_string, max_length=63):
-              slug = input_string.lower()
-              slug = re.sub(r'[^a-z0-9 -]', ' ', slug)
-              slug = slug.strip()
-              slug = re.sub(r'\s+', '-', slug)
-              slug = re.sub(r'-+', '-', slug)
-              slug = slug[:max_length]
-              slug = re.sub(r'[-]+$', '', slug)
-              return slug
-          ref_name_slug = "cache"
-          if os.environ.get("GITHUB_REF_NAME") and os.environ['GITHUB_EVENT_NAME'] == "pull_request":
-              ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME'])
-          with open(os.environ['GITHUB_ENV'], 'a') as env:
-              env.write(f"CACHE_NAME={ref_name_slug}\n")
-      - name: Get ImageOS
-        # There's no way around this, because "ImageOS" is only available to
-        # processes, but the setup-go action uses it in its key.
-        id: imageos
-        uses: actions/github-script@v8
-        with:
-          result-encoding: string
-          script: |
-            return process.env.ImageOS
-      - name: Extract Metadata (tags, labels) for Docker
-        id: meta
-        uses: docker/metadata-action@v5
-        env:
-          DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,manifest-descriptor # Important for digest annotation (used by Github packages)
-        with:
-          images: |
-            ghcr.io/${{ env.REPO_NAME }}
-          labels: |
-            org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
-            org.opencontainers.image.vendor=${{ github.repository_owner }}
-            org.opencontainers.image.authors=rclone <https://github.com/rclone>
-            org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
-            org.opencontainers.image.revision=${{ github.sha }}
-          tags: |
-            type=sha
-            type=ref,event=pr
-            type=ref,event=branch
-            type=semver,pattern={{version}}
-            type=semver,pattern={{major}}
-            type=semver,pattern={{major}}.{{minor}}
-            type=raw,value=beta,enable={{is_default_branch}}
-      - name: Setup QEMU
-        uses: docker/setup-qemu-action@v3
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
-      - name: Load Go Build Cache for Docker
-        id: go-cache
-        uses: actions/cache@v4
-        with:
-          key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
-          # Cache only the go builds, the module download is cached via the docker layer caching
-          path: |
-            go-build-cache
-      - name: Inject Go Build Cache into Docker
-        uses: reproducible-containers/buildkit-cache-dance@v3
-        with:
-          cache-map: |
-            {
-              "go-build-cache": "/root/.cache/go-build"
-            }
-          skip-extraction: ${{ steps.go-cache.outputs.cache-hit }}
-      - name: Login to GitHub Container Registry
-        uses: docker/login-action@v3
-        with:
-          registry: ghcr.io
-          # This is the user that triggered the Workflow. In this case, it will
-          # either be the user whom created the Release or manually triggered
-          # the workflow_dispatch.
-          username: ${{ github.actor }}
-          password: ${{ secrets.GITHUB_TOKEN }}
-      - name: Build and Publish Image Digest
-        id: build
-        uses: docker/build-push-action@v6
-        with:
-          file: Dockerfile
-          context: .
-          provenance: false
-          # don't specify 'tags' here (error "get can't push tagged ref by digest")
-          # tags: ${{ steps.meta.outputs.tags }}
-          labels: ${{ steps.meta.outputs.labels }}
-          annotations: ${{ steps.meta.outputs.annotations }}
-          platforms: ${{ matrix.platform }}
-          outputs: |
-            type=image,name=ghcr.io/${{ env.REPO_NAME }},push-by-digest=true,name-canonical=true,push=true
-          cache-from: |
-            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
-          cache-to: |
-            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }},image-manifest=true,mode=max,compression=zstd
-      - name: Export Image Digest
-        run: |
-          mkdir -p /tmp/digests
-          digest="${{ steps.build.outputs.digest }}"
-          touch "/tmp/digests/${digest#sha256:}"
-      - name: Upload Image Digest
-        uses: actions/upload-artifact@v5
-        with:
-          name: digests-${{ env.PLATFORM }}
-          path: /tmp/digests/*
-          retention-days: 1
-          if-no-files-found: error
-  merge-image:
-    name: Merge & Push Final Docker Image
-    runs-on: ubuntu-24.04
-    needs:
-      - build-image
-    steps:
-      - name: Download Image Digests
-        uses: actions/download-artifact@v6
-        with:
-          path: /tmp/digests
-          pattern: digests-*
-          merge-multiple: true
-      - name: Set REPO_NAME Variable
-        run: |
-          echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}
-      - name: Extract Metadata (tags, labels) for Docker
-        id: meta
-        uses: docker/metadata-action@v5
-        env:
-          DOCKER_METADATA_ANNOTATIONS_LEVELS: index
-        with:
-          images: |
-            ${{ env.REPO_NAME }}
-            ghcr.io/${{ env.REPO_NAME }}
-          labels: |
-            org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
-            org.opencontainers.image.vendor=${{ github.repository_owner }}
-            org.opencontainers.image.authors=rclone <https://github.com/rclone>
-            org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
-            org.opencontainers.image.revision=${{ github.sha }}
-          tags: |
-            type=sha
-            type=ref,event=pr
-            type=ref,event=branch
-            type=semver,pattern={{version}}
-            type=semver,pattern={{major}}
-            type=semver,pattern={{major}}.{{minor}}
-            type=raw,value=beta,enable={{is_default_branch}}
-      - name: Extract Tags
-        shell: python
-        run: |
-          import json, os
-          metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
-          metadata = json.loads(metadata_json)
-          tags = [f"--tag '{tag}'" for tag in metadata["tags"]]
-          tags_string = " ".join(tags)
-          with open(os.environ['GITHUB_ENV'], 'a') as env:
-              env.write(f"TAGS={tags_string}\n")
-      - name: Extract Annotations
-        shell: python
-        run: |
-          import json, os
-          metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
-          metadata = json.loads(metadata_json)
-          annotations = [f"--annotation '{annotation}'" for annotation in metadata["annotations"]]
-          annotations_string = " ".join(annotations)
-          with open(os.environ['GITHUB_ENV'], 'a') as env:
-              env.write(f"ANNOTATIONS={annotations_string}\n")
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
-      - name: Login to Docker Hub
-        uses: docker/login-action@v3
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - name: Login to GitHub Container Registry
-        uses: docker/login-action@v3
-        with:
-          registry: ghcr.io
-          # This is the user that triggered the Workflow. In this case, it will
-          # either be the user whom created the Release or manually triggered
-          # the workflow_dispatch.
-          username: ${{ github.actor }}
-          password: ${{ secrets.GITHUB_TOKEN }}
-      - name: Create & Push Manifest List
-        working-directory: /tmp/digests
-        run: |
-          docker buildx imagetools create \
-            ${{ env.TAGS }} \
-            ${{ env.ANNOTATIONS }} \
-            $(printf 'ghcr.io/${{ env.REPO_NAME }}@sha256:%s ' *)
-      - name: Inspect and Run Multi-Platform Image
-        run: |
-          docker buildx imagetools inspect --raw ${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
-          docker buildx imagetools inspect --raw ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
-          docker run --rm ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }} version
+  build:
+    if: github.repository == 'rclone/rclone'
+    runs-on: ubuntu-latest
+    name: Build image job
+    steps:
+      - name: Checkout master
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+      - name: Build and publish image
+        uses: ilteoood/docker_buildx@1.1.0
+        with:
+          tag: beta
+          imageName: rclone/rclone
+          platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
+          publish: true
+          dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
+          dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}

.github/workflows/build_publish_docker_plugin.yml (deleted)

@@ -1,49 +0,0 @@
----
-# Github Actions release for rclone
-# -*- compile-command: "yamllint -f parsable build_publish_docker_plugin.yml" -*-
-name: Release Build for Docker Plugin
-on:
-  release:
-    types: [published]
-  workflow_dispatch:
-    inputs:
-      manual:
-        description: Manual run (bypass default conditions)
-        type: boolean
-        default: true
-jobs:
-  build_docker_volume_plugin:
-    if: inputs.manual || github.repository == 'rclone/rclone'
-    name: Build docker plugin job
-    runs-on: ubuntu-latest
-    steps:
-      - name: Free some space
-        shell: bash
-        run: |
-          df -h .
-          # Remove android SDK
-          sudo rm -rf /usr/local/lib/android || true
-          # Remove .net runtime
-          sudo rm -rf /usr/share/dotnet || true
-          df -h .
-      - name: Checkout master
-        uses: actions/checkout@v5
-        with:
-          fetch-depth: 0
-      - name: Build and publish docker plugin
-        shell: bash
-        run: |
-          VER=${GITHUB_REF#refs/tags/}
-          PLUGIN_USER=rclone
-          docker login --username ${{ secrets.DOCKER_HUB_USER }} \
-            --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
-          for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
-            export PLUGIN_USER PLUGIN_ARCH
-            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
-            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
-          done
-          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
-          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}

@@ -0,0 +1,59 @@
+name: Docker release build
+on:
+  release:
+    types: [published]
+jobs:
+  build:
+    if: github.repository == 'rclone/rclone'
+    runs-on: ubuntu-latest
+    name: Build image job
+    steps:
+      - name: Checkout master
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+      - name: Get actual patch version
+        id: actual_patch_version
+        run: echo ::set-output name=ACTUAL_PATCH_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g')
+      - name: Get actual minor version
+        id: actual_minor_version
+        run: echo ::set-output name=ACTUAL_MINOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1,2)
+      - name: Get actual major version
+        id: actual_major_version
+        run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
+      - name: Build and publish image
+        uses: ilteoood/docker_buildx@1.1.0
+        with:
+          tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
+          imageName: rclone/rclone
+          platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
+          publish: true
+          dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
+          dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
+  build_docker_volume_plugin:
+    if: github.repository == 'rclone/rclone'
+    needs: build
+    runs-on: ubuntu-latest
+    name: Build docker plugin job
+    steps:
+      - name: Checkout master
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+      - name: Build and publish docker plugin
+        shell: bash
+        run: |
+          VER=${GITHUB_REF#refs/tags/}
+          PLUGIN_USER=rclone
+          docker login --username ${{ secrets.DOCKER_HUB_USER }} \
+            --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
+          for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
+            export PLUGIN_USER PLUGIN_ARCH
+            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
+            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
+          done
+          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
+          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}

@@ -1,15 +0,0 @@
-name: Notify users based on issue labels
-on:
-  issues:
-    types: [labeled]
-jobs:
-  notify:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: jenschelkopf/issue-label-notification-action@1.3
-        with:
-          token: ${{ secrets.NOTIFY_ACTION_TOKEN }}
-          recipients: |
-            Support Contract=@rclone/support

@@ -1,14 -0,0 @@
-name: Publish to Winget
-on:
-  release:
-    types: [released]
-jobs:
-  publish:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: vedantmgoyal2009/winget-releaser@v2
-        with:
-          identifier: Rclone.Rclone
-          installers-regex: '-windows-\w+\.zip$'
-          token: ${{ secrets.WINGET_TOKEN }}

.gitignore (9 changes)

@@ -3,20 +3,15 @@ _junk/
 rclone
 rclone.exe
 build
-/docs/public/
-/docs/.hugo_build.lock
-/docs/static/img/logos/
+docs/public
 rclone.iml
 .idea
 .history
-.vscode
 *.test
-*.log
 *.iml
 fuzz-build.zip
 *.orig
 *.rej
 Thumbs.db
 __pycache__
-.DS_Store
-resource_windows_*.syso
-.devcontainer

.golangci.yml

@@ -1,151 +1,30 @@
-version: "2"
+# golangci-lint configuration options
 linters:
-  # Configure the linter set. To avoid unexpected results the implicit default
-  # set is ignored and all the ones to use are explicitly enabled.
-  default: none
   enable:
-    # Default
+    - deadcode
     - errcheck
-    - govet
-    - ineffassign
-    - staticcheck
-    - unused
-    # Additional
-    - gocritic
-    - misspell
-    #- prealloc # TODO
-    - revive
-    - unconvert
-  # Configure checks. Mostly using defaults but with some commented exceptions.
-  settings:
-    govet:
-      enable-all: true
-      disable:
-        - fieldalignment
-        - shadow
-    staticcheck:
-      # With staticcheck there is only one setting, so to extend the implicit
-      # default value it must be explicitly included.
-      checks:
-        # Default
-        - all
-        - -ST1000
-        - -ST1003
-        - -ST1016
-        - -ST1020
-        - -ST1021
-        - -ST1022
-        # Disable quickfix checks
-        - -QF*
-    gocritic:
-      # With gocritic there are different settings, but since enabled-checks
-      # and disabled-checks cannot both be set, for full customization the
-      # alternative is to disable all defaults and explicitly enable the ones
-      # to use.
-      disable-all: true
-      enabled-checks:
-        #- appendAssign # Skip default
-        - argOrder
-        - assignOp
-        - badCall
-        - badCond
-        #- captLocal # Skip default
-        - caseOrder
-        - codegenComment
-        #- commentFormatting # Skip default
-        - defaultCaseOrder
-        - deprecatedComment
-        - dupArg
-        - dupBranchBody
-        - dupCase
-        - dupSubExpr
-        - elseif
-        #- exitAfterDefer # Skip default
-        - flagDeref
-        - flagName
-        #- ifElseChain # Skip default
-        - mapKey
-        - newDeref
-        - offBy1
-        - regexpMust
-        - ruleguard # Enable additional check that are not enabled by default
-        #- singleCaseSwitch # Skip default
-        - sloppyLen
-        - sloppyTypeAssert
-        - switchTrue
-        - typeSwitchVar
-        - underef
-        - unlambda
-        - unslice
-        - valSwap
-        - wrapperFunc
-      settings:
-        ruleguard:
-          rules: ${base-path}/bin/rules.go
-    revive:
-      # With revive there is in reality only one setting, and when at least one
-      # rule are specified then only these rules will be considered, defaults
-      # and all others are then implicitly disabled, so must explicitly enable
-      # all rules to be used.
-      rules:
-        - name: blank-imports
-          disabled: false
-        - name: context-as-argument
-          disabled: false
-        - name: context-keys-type
-          disabled: false
-        - name: dot-imports
-          disabled: false
-        #- name: empty-block # Skip default
-        #  disabled: true
-        - name: error-naming
-          disabled: false
-        - name: error-return
-          disabled: false
-        - name: error-strings
-          disabled: false
-        - name: errorf
-          disabled: false
-        - name: exported
-          disabled: false
-        #- name: increment-decrement # Skip default
-        #  disabled: true
-        - name: indent-error-flow
-          disabled: false
-        - name: package-comments
-          disabled: false
-        - name: range
-          disabled: false
-        - name: receiver-naming
-          disabled: false
-        #- name: redefines-builtin-id # Skip default
-        #  disabled: true
-        #- name: superfluous-else # Skip default
-        #  disabled: true
-        - name: time-naming
-          disabled: false
-        - name: unexported-return
-          disabled: false
-        #- name: unreachable-code # Skip default
-        #  disabled: true
-        #- name: unused-parameter # Skip default
-        #  disabled: true
-        - name: var-declaration
-          disabled: false
-        - name: var-naming
-          disabled: false
-formatters:
-  enable:
     - goimports
+    - revive
+    - ineffassign
+    - structcheck
+    - varcheck
+    - govet
+    - unconvert
+    #- prealloc
+    #- maligned
+  disable-all: true
 issues:
+  # Enable some lints excluded by default
+  exclude-use-default: false
   # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
   max-issues-per-linter: 0
   # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
   max-same-issues: 0
 run:
-  # Timeout for total work, e.g. 30s, 5m, 5m30s. Default is 0 (disabled).
+  # timeout for analysis, e.g. 30s, 5m, default is 1m
   timeout: 10m

@@ -1,43 +0,0 @@
-default: true
-# Use specific styles, to be consistent accross all documents.
-# Default is to accept any as long as it is consistent within the same document.
-heading-style: # MD003
-  style: atx
-ul-style: # MD004
-  style: dash
-hr-style: # MD035
-  style: ---
-code-block-style: # MD046
-  style: fenced
-code-fence-style: # MD048
-  style: backtick
-emphasis-style: # MD049
-  style: asterisk
-strong-style: # MD050
-  style: asterisk
-# Allow multiple headers with same text as long as they are not siblings.
-no-duplicate-heading: # MD024
-  siblings_only: true
-# Allow long lines in code blocks and tables.
-line-length: # MD013
-  code_blocks: false
-  tables: false
-# The Markdown files used to generated docs with Hugo contain a top level
-# header, even though the YAML front matter has a title property (which is
-# used for the HTML document title only). Suppress Markdownlint warning:
-# Multiple top-level headings in the same document.
-single-title: # MD025
-  level: 1
-  front_matter_title:
-# The HTML docs generated by Hugo from Markdown files may have slightly
-# different header anchors than GitHub rendered Markdown, e.g. Hugo trims
-# leading dashes so "--config string" becomes "#config-string" while it is
-# "#--config-string" in GitHub preview. When writing links to headers in the
-# Markdown files we must use whatever works in the final HTML generated docs.
-# Suppress Markdownlint warning: Link fragments should be valid.
-link-fragments: false # MD051

CODE_OF_CONDUCT.md (deleted)

@@ -1,80 +0,0 @@
-# Rclone Code of Conduct
-Like the technical community as a whole, the Rclone team and community
-is made up of a mixture of professionals and volunteers from all over
-the world, working on every aspect of the mission - including
-mentorship, teaching, and connecting people.
-Diversity is one of our huge strengths, but it can also lead to
-communication issues and unhappiness. To that end, we have a few
-ground rules that we ask people to adhere to. This code applies
-equally to founders, mentors and those seeking help and guidance.
-This isn't an exhaustive list of things that you can't do. Rather,
-take it in the spirit in which it's intended - a guide to make it
-easier to enrich all of us and the technical communities in which we
-participate.
-This code of conduct applies to all spaces managed by the Rclone
-project or Rclone Services Ltd. This includes the issue tracker, the
-forum, the GitHub site, the wiki, any other online services or
-in-person events. In addition, violations of this code outside these
-spaces may affect a person's ability to participate within them.
-- **Be friendly and patient.**
-- **Be welcoming.** We strive to be a community that welcomes and
-  supports people of all backgrounds and identities. This includes,
-  but is not limited to members of any race, ethnicity, culture,
-  national origin, colour, immigration status, social and economic
-  class, educational level, sex, sexual orientation, gender identity
-  and expression, age, size, family status, political belief,
-  religion, and mental and physical ability.
-- **Be considerate.** Your work will be used by other people, and you
-  in turn will depend on the work of others. Any decision you take
-  will affect users and colleagues, and you should take those
-  consequences into account when making decisions. Remember that we're
-  a world-wide community, so you might not be communicating in someone
-  else's primary language.
-- **Be respectful.** Not all of us will agree all the time, but
-  disagreement is no excuse for poor behavior and poor manners. We
-  might all experience some frustration now and then, but we cannot
-  allow that frustration to turn into a personal attack. It's
-  important to remember that a community where people feel
-  uncomfortable or threatened is not a productive one. Members of the
-  Rclone community should be respectful when dealing with other
-  members as well as with people outside the Rclone community.
-- **Be careful in the words that you choose.** We are a community of
-  professionals, and we conduct ourselves professionally. Be kind to
-  others. Do not insult or put down other participants. Harassment and
-  other exclusionary behavior aren't acceptable. This includes, but is
-  not limited to:
-  - Violent threats or language directed against another person.
-  - Discriminatory jokes and language.
-  - Posting sexually explicit or violent material.
-  - Posting (or threatening to post) other people's personally
-    identifying information ("doxing").
-  - Personal insults, especially those using racist or sexist terms.
-  - Unwelcome sexual attention.
-  - Advocating for, or encouraging, any of the above behavior.
-  - Repeated harassment of others. In general, if someone asks you to
-    stop, then stop.
-- **When we disagree, try to understand why.** Disagreements, both
-  social and technical, happen all the time and Rclone is no
-  exception. It is important that we resolve disagreements and
-  differing views constructively. Remember that we're different. The
-  strength of Rclone comes from its varied community, people from a
-  wide range of backgrounds. Different people have different
-  perspectives on issues. Being unable to understand why someone holds
-  a viewpoint doesn't mean that they're wrong. Don't forget that it is
-  human to err and blaming each other doesn't get us anywhere.
-  Instead, focus on helping to resolve issues and learning from
-  mistakes.
-If you believe someone is violating the code of conduct, we ask that
-you report it by emailing [info@rclone.com](mailto:info@rclone.com).
-Original text courtesy of the [Speak Up! project](http://web.archive.org/web/20141109123859/http://speakup.io/coc.html).
-## Questions?
-If you have questions, please feel free to [contact us](mailto:info@rclone.com).

View File

@@ -1,8 +1,8 @@
# Contributing to rclone # Contributing to rclone #
This is a short guide on how to contribute things to rclone. This is a short guide on how to contribute things to rclone.
## Reporting a bug ## Reporting a bug ##
If you've just got a question or aren't sure if you've found a bug If you've just got a question or aren't sure if you've found a bug
then please use the [rclone forum](https://forum.rclone.org/) instead then please use the [rclone forum](https://forum.rclone.org/) instead
@@ -12,227 +12,163 @@ When filing an issue, please include the following information if
possible as well as a description of the problem. Make sure you test possible as well as a description of the problem. Make sure you test
with the [latest beta of rclone](https://beta.rclone.org/): with the [latest beta of rclone](https://beta.rclone.org/):
- Rclone version (e.g. output from `rclone version`) * Rclone version (e.g. output from `rclone version`)
- Which OS you are using and how many bits (e.g. Windows 10, 64 bit) * Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
- The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`) * The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
- A log of the command with the `-vv` flag (e.g. output from * A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
`rclone -vv copy /tmp remote:tmp`) * if the log contains secrets then edit the file with a text editor first to obscure them
- if the log contains secrets then edit the file with a text editor first to
obscure them
## Submitting a new feature or bug fix ## Submitting a new feature or bug fix ##
If you find a bug that you'd like to fix, or a new feature that you'd If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via GitHub. like to implement then please submit a pull request via GitHub.
If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues) If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues) first so it can be discussed.
first so it can be discussed.
To prepare your pull request first press the fork button on [rclone's GitHub To prepare your pull request first press the fork button on [rclone's GitHub
page](https://github.com/rclone/rclone). page](https://github.com/rclone/rclone).
Then [install Git](https://git-scm.com/downloads) and set your public contribution Then [install Git](https://git-scm.com/downloads) and set your public contribution [name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git) and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).
[name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git)
and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).
Next open your terminal, change directory to your preferred folder and initialise Next open your terminal, change directory to your preferred folder and initialise your local rclone project:
your local rclone project:
```sh git clone https://github.com/rclone/rclone.git
git clone https://github.com/rclone/rclone.git cd rclone
cd rclone git remote rename origin upstream
git remote rename origin upstream # if you have SSH keys setup in your GitHub account:
# if you have SSH keys setup in your GitHub account: git remote add origin git@github.com:YOURUSER/rclone.git
git remote add origin git@github.com:YOURUSER/rclone.git # otherwise:
# otherwise: git remote add origin https://github.com/YOURUSER/rclone.git
git remote add origin https://github.com/YOURUSER/rclone.git
```
Note that most of the terminal commands in the rest of this guide must be Note that most of the terminal commands in the rest of this guide must be executed from the rclone folder created above.
executed from the rclone folder created above.
Now [install Go](https://golang.org/doc/install) and verify your installation: Now [install Go](https://golang.org/doc/install) and verify your installation:
```sh go version
go version
```
Great, you can now compile and execute your own version of rclone: Great, you can now compile and execute your own version of rclone:
```sh go build
go build ./rclone version
./rclone version
```
(Note that you can also replace `go build` with `make`, which will include a (Note that you can also replace `go build` with `make`, which will include a
more accurate version number in the executable as well as enable you to specify more accurate version number in the executable as well as enable you to specify
more build options.) Finally make a branch to add your new feature more build options.) Finally make a branch to add your new feature
```sh git checkout -b my-new-feature
git checkout -b my-new-feature
```
And get hacking. And get hacking.
You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins) You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins) and a quick view on the rclone [code organisation](#code-organisation).
and a quick view on the rclone [code organisation](#code-organisation).
When ready - test the affected functionality and run the unit tests for the When ready - test the affected functionality and run the unit tests for the code you changed
code you changed
```sh cd folder/with/changed/files
cd folder/with/changed/files go test -v
go test -v
```
Note that you may need to make a test remote, e.g. `TestSwift` for some Note that you may need to make a test remote, e.g. `TestSwift` for some
of the unit tests. of the unit tests.
This is typically enough if you made a simple bug fix, otherwise please read This is typically enough if you made a simple bug fix, otherwise please read the rclone [testing](#testing) section too.
the rclone [testing](#testing) section too.
Make sure you Make sure you
- Add [unit tests](#testing) for a new feature. * Add [unit tests](#testing) for a new feature.
- Add [documentation](#writing-documentation) for a new feature. * Add [documentation](#writing-documentation) for a new feature.
- [Commit your changes](#committing-your-changes) using the [commit message guidelines](#commit-messages). * [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).
When you are done with that push your changes to GitHub: When you are done with that push your changes to GitHub:
```sh git push -u origin my-new-feature
git push -u origin my-new-feature
```
and open the GitHub website to [create your pull and open the GitHub website to [create your pull
request](https://help.github.com/articles/creating-a-pull-request/). request](https://help.github.com/articles/creating-a-pull-request/).
Your changes will then get reviewed and you might get asked to fix some stuff. Your changes will then get reviewed and you might get asked to fix some stuff. If so, then make the changes in the same branch, commit and push your updates to GitHub.
If so, then make the changes in the same branch, commit and push your updates to
GitHub.
You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
or [squash your commits](#squashing-your-commits).
## Using Git and GitHub ## Using Git and GitHub ##
### Committing your changes ### Committing your changes ###
Follow the guideline for [commit messages](#commit-messages) and then: Follow the guideline for [commit messages](#commit-messages) and then:
```sh git checkout my-new-feature # To switch to your branch
git checkout my-new-feature # To switch to your branch git status # To see the new and changed files
git status # To see the new and changed files git add FILENAME # To select FILENAME for the commit
git add FILENAME # To select FILENAME for the commit git status # To verify the changes to be committed
git status # To verify the changes to be committed git commit # To do the commit
git commit # To do the commit git log # To verify the commit. Use q to quit the log
git log # To verify the commit. Use q to quit the log
```
You can modify the message or changes in the latest commit using: You can modify the message or changes in the latest commit using:
```sh git commit --amend
git commit --amend
```
If you amend to commits that have been pushed to GitHub, then you will have to If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
[replace your previously pushed commits](#replacing-your-previously-pushed-commits).
### Replacing your previously pushed commits ### Replacing your previously pushed commits ###
Note that you are about to rewrite the GitHub history of your branch. It is good Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.
practice to involve your collaborators before modifying commits that have been
pushed to GitHub.
Your previously pushed commits are replaced by: Your previously pushed commits are replaced by:
```sh git push --force origin my-new-feature
git push --force origin my-new-feature
```
### Basing your changes on the latest master ### Basing your changes on the latest master ###
To base your changes on the latest version of the To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
[rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
```sh git checkout master
git checkout master git fetch upstream
git fetch upstream git merge --ff-only
git merge --ff-only git push origin --follow-tags # optional update of your fork in GitHub
git push origin --follow-tags # optional update of your fork in GitHub git checkout my-new-feature
git checkout my-new-feature git rebase master
git rebase master
```
If you rebase commits that have been pushed to GitHub, then you will have to If you rebase commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
[replace your previously pushed commits](#replacing-your-previously-pushed-commits).
### Squashing your commits ### Squashing your commits ###
To combine your commits into one commit: To combine your commits into one commit:
```sh git log # To count the commits to squash, e.g. the last 2
git log # To count the commits to squash, e.g. the last 2 git reset --soft HEAD~2 # To undo the 2 latest commits
git reset --soft HEAD~2 # To undo the 2 latest commits git status # To check everything is as expected
git status # To check everything is as expected
```
If everything is fine, then make the new combined commit: If everything is fine, then make the new combined commit:
```sh git commit # To commit the undone commits as one
git commit # To commit the undone commits as one
```
otherwise, you may roll back using: otherwise, you may roll back using:
```sh git reflog # To check that HEAD{1} is your previous state
git reflog # To check that HEAD{1} is your previous state git reset --soft 'HEAD@{1}' # To roll back to your previous state
git reset --soft 'HEAD@{1}' # To roll back to your previous state
```
If you squash commits that have been pushed to GitHub, then you will have to If you squash commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
[replace your previously pushed commits](#replacing-your-previously-pushed-commits).
Tip: You may like to use `git rebase -i master` if you are experienced or have a Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.
more complex situation.
### GitHub Continuous Integration ### GitHub Continuous Integration ###
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
to build and test the project, which should be automatically available for your
fork too from the `Actions` tab in your repository.
## Testing ## Testing ##
### Code quality tests ### Quick testing ###
If you install [golangci-lint](https://github.com/golangci/golangci-lint) then
you can run the same tests as get run in the CI which can be very helpful.
You can run them with `make check` or with `golangci-lint run ./...`.
Using these tests ensures that the rclone codebase all uses the same coding
standards. These tests also check for easy mistakes to make (like forgetting
to check an error return).
### Quick testing
rclone's tests are run from the go testing framework, so at the top rclone's tests are run from the go testing framework, so at the top
level you can run this to run all the tests. level you can run this to run all the tests.
```sh go test -v ./...
go test -v ./...
```
You can also use `make`, if supported by your platform You can also use `make`, if supported by your platform
```sh make quicktest
make quicktest
```
The quicktest is [automatically run by GitHub](#github-continuous-integration) The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.
when you push your branch to GitHub.
### Backend testing ### Backend testing ###
rclone contains a mixture of unit tests and integration tests. rclone contains a mixture of unit tests and integration tests.
Because it is difficult (and in some respects pointless) to test cloud Because it is difficult (and in some respects pointless) to test cloud
@@ -246,137 +182,94 @@ need to make a remote called `TestDrive`.
You can then run the unit tests in the drive directory. These tests You can then run the unit tests in the drive directory. These tests
are skipped if `TestDrive:` isn't defined. are skipped if `TestDrive:` isn't defined.
```sh cd backend/drive
cd backend/drive go test -v
go test -v
```
You can then run the integration tests which test all of rclone's
operations. Normally these get run against the local file system,
but they can be run against any of the remotes.

```sh
cd fs/sync
go test -v -remote TestDrive:
go test -v -remote TestDrive: -fast-list

cd fs/operations
go test -v -remote TestDrive:
```

If you want to use the integration test framework to run these tests
all together with an HTML report and test retries then from the
project root:

```sh
go install github.com/rclone/rclone/fstest/test_all
test_all -backends drive
```
### Full integration testing

If you want to run all the integration tests against all the remotes,
then change into the project root and run

```sh
make check
make test
```

The commands may require some extra go packages which you can install with

```sh
make build_dep
```

The full integration tests are run daily on the integration test server. You can
find the results at <https://pub.rclone.org/integration-tests/>
## Code Organisation

Rclone code is organised into a small number of top level directories
with modules beneath.

- backend - the rclone backends for interfacing to cloud providers -
  - all - import this to load all the cloud providers
  - ...providers
- bin - scripts for use while building or maintaining rclone
- cmd - the rclone commands
  - all - import this to load all the commands
  - ...commands
- cmdtest - end-to-end tests of commands, flags, environment variables,...
- docs - the documentation and website
  - content - adjust these docs only, except those marked autogenerated
    or portions marked autogenerated where the corresponding .go file must be
    edited instead, and everything else is autogenerated
    - commands - these are auto-generated, edit the corresponding .go file
- fs - main rclone definitions - minimal amount of code
  - accounting - bandwidth limiting and statistics
  - asyncreader - an io.Reader which reads ahead
  - config - manage the config file and flags
  - driveletter - detect if a name is a drive letter
  - filter - implements include/exclude filtering
  - fserrors - rclone specific error handling
  - fshttp - http handling for rclone
  - fspath - path handling for rclone
  - hash - defines rclone's hash types and functions
  - list - list a remote
  - log - logging facilities
  - march - iterates directories in lock step
  - object - in memory Fs objects
  - operations - primitives for sync, e.g. Copy, Move
  - sync - sync directories
  - walk - walk a directory
- fstest - provides integration test framework
  - fstests - integration tests for the backends
  - mockdir - mocks an fs.Directory
  - mockobject - mocks an fs.Object
  - test_all - Runs integration tests for everything
- graphics - the images used in the website, etc.
- lib - libraries used by the backend
  - atexit - register functions to run when rclone exits
  - dircache - directory ID to name caching
  - oauthutil - helpers for using oauth
  - pacer - retries with backoff and paces operations
  - readers - a selection of useful io.Readers
  - rest - a thin abstraction over net/http for REST
- librclone - in memory interface to rclone's API for embedding rclone
- vfs - Virtual FileSystem layer for implementing rclone mount and similar
## Writing Documentation

If you are adding a new feature then please update the documentation.
The documentation sources are generally in Markdown format, in conformance
with the CommonMark specification and compatible with GitHub Flavored
Markdown (GFM). The markdown format is checked as part of the lint operation
that runs automatically on pull requests, to enforce standards and consistency.
This is based on the [markdownlint](https://github.com/DavidAnson/markdownlint)
tool, which can also be integrated into editors so you can perform the same
checks while writing.
HTML pages, served as the website <rclone.org>, are generated from the Markdown,
using [Hugo](https://gohugo.io). Note that when generating the HTML pages, a
different algorithm is currently used for generating header anchors than what
GitHub uses for its Markdown rendering. For example, in the HTML docs generated
by Hugo any leading `-` characters are ignored, which means when linking to a
header with text `--config string` we therefore need to use the link
`#config-string` in our Markdown source, which will not work in GitHub's
preview where `#--config-string` would be the correct link.
Most of the documentation is written directly in text files with extension
`.md`, mainly within folder `docs/content`. Note that several such files
are autogenerated (e.g. the command documentation under `docs/content/commands`,
and `docs/content/flags.md`), or contain autogenerated portions (e.g. the
backend documentation). These are marked with an `autogenerated` comment.
The sources of the autogenerated text are usually Markdown formatted text
embedded as string values in the Go source code, so you need to locate these
and edit the `.go` file instead. The `MANUAL.*`, `rclone.1` and other text
files in the root of the repository are also autogenerated. The autogeneration
of files, and the website, will be done during the release process. See the
`make doc` and `make website` targets in the Makefile if you are interested in
how. You don't need to run these when adding a feature.
If you add a new general flag (not for a backend), then document it in
`docs/content/docs.md` - the flags there are supposed to be in
alphabetical order.
@@ -384,48 +277,47 @@ alphabetical order.
If you add a new backend option/flag, then it should be documented in
the source file in the `Help:` field (a sketch follows the list below).

- Start with the most important information about the option,
  as a single sentence on a single line.
  - This text will be used for the command-line flag help.
  - It will be combined with other information, such as any default value,
    and the result will look odd if not written as a single sentence.
  - It should end with a period/full stop character, which will be shown
    in docs but automatically removed when producing the flag help.
  - Try to keep it below 80 characters, to reduce text wrapping in the terminal.
- More details can be added in a new paragraph, after an empty line (`"\n\n"`).
  - Like with docs generated from Markdown, a single line break is ignored
    and two line breaks create a new paragraph.
  - This text will be shown to the user in `rclone config`
    and in the docs (where it will be added by `make backenddocs`,
    normally run some time before the next release).
- To create options of enumeration type use the `Examples:` field.
  - Each example value has its own `Help:` field, but they are treated
    a bit differently than the main option help text. They will be shown
    as an unordered list, therefore a single line break is enough to
    create a new list item. Also, for enumeration texts like names of
    countries, it looks better without an ending period/full stop character.
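As a concrete illustration, here is a minimal sketch of such a declaration,
loosely modelled on `fs.Option` and `fs.Register` from the rclone source - the
backend name `remote`, the `region` option and its values are invented for
this example:

```go
// A minimal sketch of a backend option following the guidance above.
// The backend, option and values are made up for illustration.
package remote

import (
	"context"
	"errors"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

func init() {
	fs.Register(&fs.RegInfo{
		Name:        "remote",
		Description: "Example Provider",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name: "region",
			// One sentence on one line ending with a full stop, then a
			// blank line ("\n\n") before the more detailed help.
			Help: "Region to connect to.\n\nLeave blank to use the provider's default region.",
			// Enumerated values - note no trailing full stop in their help.
			Examples: []fs.OptionExample{{
				Value: "eu",
				Help:  "Europe",
			}, {
				Value: "us",
				Help:  "United States",
			}},
		}},
	})
}

// NewFs is a placeholder so the sketch is self-contained - a real backend
// would construct and return its Fs here.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	return nil, errors.New("not implemented")
}
```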
When writing documentation for an entirely new backend,
see [backend documentation](#backend-documentation).
If you are updating documentation for a command, you must do that in the
command source code, e.g. `cmd/ls/ls.go`. Write flag help strings as a single
sentence on a single line, without a period/full stop character at the end,
as it will be combined unmodified with other information (such as any default
value).
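For orientation, here is a compressed sketch of that pattern, following the
shape of `cmd/ls/ls.go` - the help texts and stubbed body are illustrative
only:

```go
// A compressed sketch of a sub-command carrying its own documentation.
package ls

import (
	"github.com/rclone/rclone/cmd"
	"github.com/spf13/cobra"
)

func init() {
	cmd.Root.AddCommand(commandDefinition)
}

var commandDefinition = &cobra.Command{
	Use: "ls remote:path",
	// Short and Long hold the documentation shown by `rclone help ls`
	// and used when generating the command docs.
	Short: `List the objects in the path with size and path`,
	Long:  `Lists the objects in the source path to standard output.`,
	Run: func(command *cobra.Command, args []string) {
		// the real implementation lives here
	},
}
```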
Note that you can use
[GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
for small changes in the docs which makes it very easy. Just remember the
caveat when linking to header anchors, noted above, which means that GitHub's
Markdown preview may not be an entirely reliable verification of the results.
## Making a release

There are separate instructions for making a release in the RELEASE.md
file.

## Commit messages

Please make the first line of your commit message a summary of the
change that a user (not a developer) of rclone would like to read, and
@@ -449,13 +341,13 @@ change will get linked into the issue.
Here is an example of a short commit message:

```text
drive: add team drive support - fixes #885
```

And here is an example of a longer one:

```text
mount: fix hang on errored upload

In certain circumstances, if an upload failed then the mount could hang
@@ -466,7 +358,7 @@ error fixing the hang.
Fixes #1498
```
## Adding a dependency

rclone uses the [go
modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
@@ -478,9 +370,7 @@ To add a dependency `github.com/ncw/new_dependency` see the
instructions below. These will fetch the dependency and add it to
`go.mod` and `go.sum`.

```sh
go get github.com/ncw/new_dependency
```
You can add constraints on that package when doing `go get` (see the
go docs linked above), but don't unless you really need to.
@@ -488,17 +378,15 @@ go docs linked above), but don't unless you really need to.
Please check in the changes generated by `go mod` including `go.mod`
and `go.sum` in the same commit as your other changes.
## Updating a dependency

If you need to update a dependency then run

```sh
go get golang.org/x/crypto
```

Check in a single commit as above.
## Updating all the dependencies

In order to update all the dependencies then run `make update`. This
just uses the go modules to update all the modules to their latest
@@ -507,7 +395,7 @@ stable release. Check in the changes in a single commit as above.
This should be done early in the release cycle to pick up new versions
of packages in time for them to get some testing.
## Updating a backend

If you update a backend then please run the unit tests and the
integration tests for that backend.
@@ -522,154 +410,105 @@ integration tests.
The next section goes into more detail about the tests.
## Writing a new backend

Choose a name. The docs here will use `remote` as an example.

Note that in rclone terminology a file system backend is called a
remote or an fs.

### Research

- Look at the interfaces defined in `fs/types.go`
- Study one or more of the existing remotes

### Getting going

- Create `backend/remote/remote.go` (copy this from a similar remote)
  - box is a good one to start from if you have a directory-based remote (and
    shows how to use the directory cache)
  - b2 is a good one to start from if you have a bucket-based remote
- Add your remote to the imports in `backend/all/all.go`
- HTTP based remotes are easiest to maintain if they use rclone's
  [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but
  if there is a really good Go SDK from the provider then use that instead.
- Try to implement as many optional methods as possible as it makes the remote
  more usable.
- Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to
  make sure we can encode any path name and `rclone info` to help determine the
  encodings needed
  - `rclone purge -v TestRemote:rclone-info`
  - `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
  - `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
  - open `remote.csv` in a spreadsheet and examine
### Guidelines for a speedy merge

- **Do** use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest)
  if you are implementing a REST like backend and parsing XML/JSON in the
  backend (see the sketch after this list).
- **Do** use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp)
  if your backend is HTTP based - this adds features like `--dump bodies`,
  `--tpslimit`, `--user-agent` without you having to code anything!
- **Do** follow your example backend exactly - use the same code order, function
  names, layout, structure. **Don't** move stuff around and **Don't** delete the
  comments.
- **Do not** split your backend up into `fs.go` and `object.go` (there are a few
  backends like that - don't follow them!)
- **Do** put your API type definitions in a separate file - by preference `api/types.go`
- **Remember** we have >50 backends to maintain so keeping them as similar as
  possible to each other is a high priority!
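As a sketch of what the first two points look like in practice (the endpoint
and response shape here are invented, and a real backend would also route its
calls through a pacer):

```go
// A minimal sketch of calling a JSON API via lib/rest. Building the
// http.Client with fshttp.NewClient is what makes --dump bodies,
// --tpslimit, --user-agent etc. work without extra code.
package remote

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/lib/rest"
)

// listResponse models a hypothetical JSON reply from the provider.
type listResponse struct {
	Items []struct {
		Name string `json:"name"`
		Size int64  `json:"size"`
	} `json:"items"`
}

func listExample(ctx context.Context) error {
	srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot("https://api.example.com/v1")
	opts := rest.Opts{
		Method: "GET",
		Path:   "/items",
	}
	var result listResponse
	_, err := srv.CallJSON(ctx, &opts, nil, &result)
	if err != nil {
		return fmt.Errorf("list failed: %w", err)
	}
	for _, item := range result.Items {
		fmt.Println(item.Name, item.Size)
	}
	return nil
}
```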
### Unit tests

- Create a config entry called `TestRemote` for the unit tests to use
- Create a `backend/remote/remote_test.go` - copy and adjust your example
  remote (a sketch is shown below)
- Make sure all tests pass with `go test -v`
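The test file is conventionally just a thin shim around the standard suite -
a sketch, with `remote` standing in for your backend name:

```go
// A sketch of backend/remote/remote_test.go following the usual pattern.
package remote_test

import (
	"testing"

	"github.com/rclone/rclone/backend/remote"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs the standard backend test suite against TestRemote:
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestRemote:",
		NilObject:  (*remote.Object)(nil),
	})
}
```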
### Integration tests
- Add your backend to `fstest/test_all/config.yaml`
- Once you've done that then you can use the integration test framework from
  the project root:
  - go install ./...
  - test_all -backends remote
Or if you want to run the integration tests manually:

- Make sure integration tests pass with
  - `cd fs/operations`
  - `go test -v -remote TestRemote:`
  - `cd fs/sync`
  - `go test -v -remote TestRemote:`
- If your remote defines `ListR` check with this also
  - `go test -v -remote TestRemote: -fast-list`

See the [testing](#testing) section for more information on integration tests.
### Backend documentation

Add your backend to the docs - you'll need to pick an icon for it from
[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
alphabetical order of full name of remote (e.g. `drive` is ordered as
`Google Drive`) but with the local file system last.

- `README.md` - main GitHub page
- `docs/content/remote.md` - main docs page (note the backend options are
  automatically added to this file with `make backenddocs`)
  - make sure this has the `autogenerated options` comments in (see your
    reference backend docs)
  - update them in your backend with `bin/make_backend_docs.py remote`
- `docs/content/overview.md` - overview docs - add an entry into the Features
  table and the Optional Features table.
- `docs/content/docs.md` - list of remotes in config section
- `docs/content/_index.md` - front page of rclone.org
- `docs/layouts/chrome/navbar.html` - add it to the website navigation
- `bin/make_manual.py` - add the page to the `docs` constant

Once you've written the docs, run `make serve` and check they look OK
in the web browser and the links (internal and external) all work.
## Adding a new s3 provider

[Please see the guide in the S3 backend directory](backend/s3/README.md).
## Writing a plugin

New features (backends, commands) can also be added "out-of-tree", through Go
plugins. Changes will be kept in a dynamically loaded file instead of being
compiled into the main binary. This is useful if you can't merge your changes
upstream or don't want to maintain a fork of rclone.
### Usage

- Naming
  - Plugin names must have the pattern `librcloneplugin_KIND_NAME.so`.
  - `KIND` should be one of `backend`, `command` or `bundle`.
  - Example: A plugin with backend support for PiFS would be called
    `librcloneplugin_backend_pifs.so`.
- Loading
  - Supported on macOS & Linux as of now. ([Go issue for Windows support](https://github.com/golang/go/issues/19282))
  - Supported on rclone v1.50 or greater.
  - All plugins in the folder specified by variable `$RCLONE_PLUGIN_PATH` are loaded.
  - If this variable doesn't exist, plugin support is disabled.
  - Plugins must be compiled against the exact version of rclone to work.
    (The rclone used during building the plugin must be the same as the source
    of rclone)
### Building
To turn your existing additions into a Go plugin, move them to an external repository
and change the top-level package name to `main`.

Check `rclone --version` and make sure that the plugin's rclone dependency and
host Go version match.

Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.

[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)
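A plugin's main package can be as small as a blank import of your backend -
a sketch, with a hypothetical module path:

```go
// A minimal sketch of a plugin main package. Importing the backend for its
// side effects runs its init(), which registers it with rclone when
// librcloneplugin_backend_pifs.so is loaded. The module path is made up.
package main

import (
	_ "example.com/you/pifs"
)
```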
## Keeping a backend or command out of tree
Rclone was designed to be modular so it is very easy to keep a backend
or a command out of the main rclone source tree.
So for example if you had a backend which accessed your proprietary
systems or a command which was specialised for your needs you could
add them out of tree.
This may be easier than using a plugin and is supported on all
platforms not just macOS and Linux.
This is explained further in <https://github.com/rclone/rclone_out_of_tree_example>
which has an example of an out of tree backend `ram` (which is a
renamed version of the `memory` backend).
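A sketch of what such an out-of-tree main package can look like, along the
lines of that example repository (the module path for the extra backend is
hypothetical):

```go
// A sketch of an out-of-tree rclone: stock backends and commands plus your
// own backend, compiled into a normal binary on any platform.
package main

import (
	_ "example.com/you/rclone-ram/backend/ram" // hypothetical out-of-tree backend

	_ "github.com/rclone/rclone/backend/all" // all the stock backends
	"github.com/rclone/rclone/cmd"
	_ "github.com/rclone/rclone/cmd/all" // all the stock commands
)

func main() {
	cmd.Main()
}
```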
Dockerfile
@@ -1,47 +1,18 @@
FROM golang:alpine AS builder

ARG CGO_ENABLED=0

WORKDIR /go/src/github.com/rclone/rclone/

RUN echo "**** Set Go Environment Variables ****" && \
    go env -w GOCACHE=/root/.cache/go-build

RUN echo "**** Install Dependencies ****" && \
    apk add --no-cache \
        make \
        bash \
        gawk \
        git

COPY go.mod .
COPY go.sum .

RUN echo "**** Download Go Dependencies ****" && \
    go mod download -x

RUN echo "**** Verify Go Dependencies ****" && \
    go mod verify

COPY . .

RUN --mount=type=cache,target=/root/.cache/go-build,sharing=locked \
    echo "**** Build Binary ****" && \
    make

RUN echo "**** Print Version Binary ****" && \
    ./rclone version

# Begin final image
FROM alpine:latest

RUN echo "**** Install Dependencies ****" && \
    apk add --no-cache \
        ca-certificates \
        fuse3 \
        tzdata && \
    echo "Enable user_allow_other in fuse" && \
    echo "user_allow_other" >> /etc/fuse.conf

COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
MAINTAINERS.md
@@ -1,4 +1,4 @@
# Maintainers guide for rclone

Current active maintainers of rclone are:
@@ -16,116 +16,81 @@ Current active maintainers of rclone are:
| Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend |
| Caleb Case | @calebcase | storj backend |
| wiserain | @wiserain | pikpak backend |
| albertony | @albertony | |
| Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
| Hideo Aoyama | @boukendesho | snap packaging |
| nielash | @nielash | bisync |
| Dan McArdle | @dmcardle | gitannex |
| Sam Harrison | @childish-sambino | filescom |
## This is a work in progress draft

This is a guide for how to be an rclone maintainer. This is mostly a write-up
of what I (@ncw) attempt to do.

## Triaging Tickets

When a ticket comes in it should be triaged. This means it should be classified
by adding labels and placed into a milestone. Quite a lot of tickets need a bit
of back and forth to determine whether it is a valid ticket so tickets may
remain without labels or milestone for a while.

Rclone uses the labels like this:

- `bug` - a definitely verified bug
- `can't reproduce` - a problem which we can't reproduce
- `doc fix` - a bug in the documentation - if users need help understanding the
  docs add this label
- `duplicate` - normally close these and ask the user to subscribe to the original
- `enhancement: new remote` - a new rclone backend
- `enhancement` - a new feature
- `FUSE` - to do with `rclone mount` command
- `good first issue` - mark these if you find a small self-contained issue -
  these get shown to new visitors to the project
- `help` wanted - mark these if you find a self-contained issue - these get
  shown to new visitors to the project
- `IMPORTANT` - note to maintainers not to forget to fix this for the release
- `maintenance` - internal enhancement, code re-organisation, etc.
- `Needs Go 1.XX` - waiting for that version of Go to be released
- `question` - not a `bug` or `enhancement` - direct to the forum for next time
- `Remote: XXX` - which rclone backend this affects
- `thinking` - not decided on the course of action yet

If it turns out to be a bug or an enhancement it should be tagged as such, with
the appropriate other tags. Don't forget the "good first issue" tag to give new
contributors something easy to do to get going.

When a ticket is tagged it should be added to a milestone, either the next
release, the one after, Soon or Help Wanted. Bugs can be added to the
"Known Bugs" milestone if they aren't planned to be fixed or need to wait for
something (e.g. the next go release).

The milestones have these meanings:

- v1.XX - stuff we would like to fit into this release
- v1.XX+1 - stuff we are leaving until the next release
- Soon - stuff we think is a good idea - waiting to be scheduled for a release
- Help wanted - blue sky stuff that might get moved up, or someone could help with
- Known bugs - bugs waiting on external factors or we aren't going to fix for
  the moment

Tickets [with no milestone](https://github.com/rclone/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile)
are good candidates for ones that have slipped between the gaps and need
following up.

## Closing Tickets

Close tickets as soon as you can - make sure they are tagged with a release.
Post a link to a beta in the ticket with the fix in, asking for feedback.

## Pull requests

Try to process pull requests promptly!

Merging pull requests on GitHub itself works quite well nowadays so you can
squash and rebase or rebase pull requests. rclone doesn't use merge commits.
Use the squash and rebase option if you need to edit the commit message.

After merging the commit, in your local master branch, do `git pull` then run
`bin/update-authors.py` to update the authors file then `git push`.

Sometimes pull requests need to be left open for a while - this is especially
true of contributions of new backends which take a long time to get right.

## Merges

If you are merging a branch locally then do `git merge --ff-only branch-name` to
avoid a merge commit. You'll need to rebase the branch if it doesn't merge cleanly.

## Release cycle

Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer
if there is something big to merge that didn't stabilize properly or for personal
reasons.

High impact regressions should be fixed before the next release.

Near the start of the release cycle, the dependencies should be updated with
`make update` to give time for bugs to surface.

Towards the end of the release cycle try not to merge anything too big, to let
things settle down.

Follow the instructions in RELEASE.md for making the release. Note that the
testing part is the most time-consuming, often needing several rounds of test
and fix depending on exactly how many new features rclone has gained.

## Mailing list

There is now an invite-only mailing list for rclone developers `rclone-dev` on
google groups.

## TODO

I should probably make a <dev@rclone.org> to register with cloud providers.
MANUAL.html (generated) - diff suppressed because it is too large

MANUAL.md (generated) - diff suppressed because it is too large

MANUAL.txt (generated) - diff suppressed because it is too large
Makefile
@@ -30,37 +30,29 @@ ifdef RELEASE_TAG
TAG := $(RELEASE_TAG)
endif
GO_VERSION := $(shell go version)
GO_OS := $(shell go env GOOS)
ifdef BETA_SUBDIR
BETA_SUBDIR := /$(BETA_SUBDIR)
endif
BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR)
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
BETA_UPLOAD_ROOT := beta.rclone.org:
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
# Pass in GOTAGS=xyz on the make command line to set build tags
ifdef GOTAGS
BUILDTAGS=-tags "$(GOTAGS)"
LINTTAGS=--build-tags "$(GOTAGS)"
endif
LDFLAGS=--ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)"
.PHONY: rclone test_all vars version
rclone:
ifeq ($(GO_OS),windows)
	go run bin/resource_windows.go -version $(TAG) -syso resource_windows_`go env GOARCH`.syso
endif
	go build -v $(LDFLAGS) $(BUILDTAGS) $(BUILD_ARGS)
ifeq ($(GO_OS),windows)
	rm resource_windows_`go env GOARCH`.syso
endif
	mkdir -p `go env GOPATH`/bin/
	cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
	mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
test_all:
	go install $(LDFLAGS) $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all
vars:
	@echo SHELL="'$(SHELL)'"
@@ -74,10 +66,6 @@ btest:
@echo "[$(TAG)]($(BETA_URL)) on branch [$(BRANCH)](https://github.com/rclone/rclone/tree/$(BRANCH)) (uploaded in 15-30 mins)" | xclip -r -sel clip @echo "[$(TAG)]($(BETA_URL)) on branch [$(BRANCH)](https://github.com/rclone/rclone/tree/$(BRANCH)) (uploaded in 15-30 mins)" | xclip -r -sel clip
@echo "Copied markdown of beta release to clip board" @echo "Copied markdown of beta release to clip board"
btesth:
@echo "<a href="$(BETA_URL)">$(TAG)</a> on branch <a href="https://github.com/rclone/rclone/tree/$(BRANCH)">$(BRANCH)</a> (uploaded in 15-30 mins)" | xclip -r -sel clip -t text/html
@echo "Copied beta release in HTML to clip board"
version: version:
@echo '$(TAG)' @echo '$(TAG)'
@@ -88,47 +76,50 @@ test: rclone test_all
# Quick test
quicktest:
	RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) ./...
racequicktest:
	RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -cpu=2 -race ./...
compiletest:
	RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -run XXX ./...
# Do source code quality checks
check: rclone
	@echo "-- START CODE QUALITY REPORT -------------------------------"
	@golangci-lint run $(LINTTAGS) ./...
	@bin/markdown-lint
	@echo "-- END CODE QUALITY REPORT ---------------------------------"
# Get the build dependencies
build_dep:
	go run bin/get-github-release.go -use-api -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
# Get the release dependencies we only install on linux
release_dep_linux:
	go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest
# Get the release dependencies we only install on Windows
release_dep_windows:
	GOOS="" GOARCH="" go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@latest
# Update dependencies
showupdates:
	@echo "*** Direct dependencies that could be updated ***"
	@go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null
# Update direct dependencies only
updatedirect:
	go get $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
	go mod tidy
# Update direct and indirect dependencies and test dependencies
update:
	go get -u -t ./...
	go mod tidy
# Tidy the module dependencies
tidy:
	go mod tidy
doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
@@ -145,23 +136,17 @@ MANUAL.txt: MANUAL.md
	pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt
commanddocs: rclone
	go generate ./lib/transform
	-@rmdir -p '$$HOME/.config/rclone'
	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
	@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
	go run bin/make_bisync_docs.go ./docs/content/
backenddocs: rclone bin/make_backend_docs.py
	-@rmdir -p '$$HOME/.config/rclone'
	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
	@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
rcdocs: rclone
	bin/make_rc_docs.sh
install: rclone
	install -d ${DESTDIR}/usr/bin
	install ${GOPATH}/bin/rclone ${DESTDIR}/usr/bin
clean:
	go clean ./...
@@ -175,7 +160,7 @@ website:
@if grep -R "raw HTML omitted" docs/public ; then echo "ERROR: found unescaped HTML - fix the markdown source" ; fi @if grep -R "raw HTML omitted" docs/public ; then echo "ERROR: found unescaped HTML - fix the markdown source" ; fi
upload_website: website upload_website: website
rclone -v sync docs/public www.rclone.org: rclone -v sync docs/public memstore:www-rclone-org
upload_test_website: website upload_test_website: website
rclone -P sync docs/public test-rclone-org: rclone -P sync docs/public test-rclone-org:
@@ -202,8 +187,8 @@ check_sign:
cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c
upload: upload:
rclone -P copy build/ downloads.rclone.org:/$(TAG) rclone -P copy build/ memstore:downloads-rclone-org/$(TAG)
rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "downloads.rclone.org:/$(TAG)/$$i" "downloads.rclone.org:/$$j"' rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "memstore:downloads-rclone-org/$(TAG)/$$i" "memstore:downloads-rclone-org/$$j"'
upload_github: upload_github:
./bin/upload-github $(TAG) ./bin/upload-github $(TAG)
@@ -213,7 +198,7 @@ cross: doc
beta:
	go run bin/cross-compile.go $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
	rclone -v copy build/ pub.rclone.org:/$(TAG)
	@echo Beta release ready at https://pub.rclone.org/$(TAG)/
log_since_last_release:
@@ -226,18 +211,18 @@ ci_upload:
	sudo chown -R $$USER build
	find build -type l -delete
	gzip -r9v build
	./rclone --no-check-dest --config bin/ci.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
	./rclone --no-check-dest --config bin/ci.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
endif
	@echo Beta release ready at $(BETA_URL)/testbuilds
ci_beta:
	git log $(LAST_TAG).. > /tmp/git-log.txt
	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
	rclone --no-check-dest --config bin/ci.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
	rclone --no-check-dest --config bin/ci.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
endif
	@echo Beta release ready at $(BETA_URL)
@@ -246,7 +231,7 @@ fetch_binaries:
	rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/
serve: website
	cd docs && hugo server --logLevel info -w --disableFastRender --ignoreCache
tag: retag doc
	bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new
README.md
@@ -1,6 +1,4 @@
<!-- markdownlint-disable-next-line first-line-heading no-inline-html -->
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only) [<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
<!-- markdownlint-disable-next-line no-inline-html -->
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only) [<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
[Website](https://rclone.org) | [Website](https://rclone.org) |
@@ -18,111 +16,76 @@
# Rclone

Rclone *("rsync for cloud storage")* is a command-line program to sync files and
directories to and from different cloud storage providers.

## Storage providers

- 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
- Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
- Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
- Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
- ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
- Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
- Box [:page_facing_up:](https://rclone.org/box/)
- Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
- China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
- Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
- Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
- Cubbit DS3 [:page_facing_up:](https://rclone.org/s3/#Cubbit)
- DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
- Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
- Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
- Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
- Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
- Exaba [:page_facing_up:](https://rclone.org/s3/#exaba)
- Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
- FileLu [:page_facing_up:](https://rclone.org/filelu/)
- Files.com [:page_facing_up:](https://rclone.org/filescom/)
- FlashBlade [:page_facing_up:](https://rclone.org/s3/#pure-storage-flashblade)
- FTP [:page_facing_up:](https://rclone.org/ftp/)
- GoFile [:page_facing_up:](https://rclone.org/gofile/)
- Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
- Google Drive [:page_facing_up:](https://rclone.org/drive/)
- Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
- HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
- Hetzner Object Storage [:page_facing_up:](https://rclone.org/s3/#hetzner)
- Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
- HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
- HTTP [:page_facing_up:](https://rclone.org/http/)
- Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
- iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/)
- ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
- Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
- Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
- IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
- Intercolo Object Storage [:page_facing_up:](https://rclone.org/s3/#intercolo)
- IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
- Koofr [:page_facing_up:](https://rclone.org/koofr/)
- Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
- Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
- Linkbox [:page_facing_up:](https://rclone.org/linkbox)
- Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode)
- Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu)
- Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
- Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
- MEGA [:page_facing_up:](https://rclone.org/mega/)
- MEGA S4 Object Storage [:page_facing_up:](https://rclone.org/s3/#mega)
- Memory [:page_facing_up:](https://rclone.org/memory/)
- Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
- Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/)
- Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
- Minio [:page_facing_up:](https://rclone.org/s3/#minio)
- Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
- Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
- OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
- OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
- Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
- Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
- Outscale [:page_facing_up:](https://rclone.org/s3/#outscale)
- OVHcloud Object Storage (Swift) [:page_facing_up:](https://rclone.org/swift/)
- OVHcloud Object Storage (S3-compatible) [:page_facing_up:](https://rclone.org/s3/#ovhcloud)
- ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
- pCloud [:page_facing_up:](https://rclone.org/pcloud/)
- Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
- PikPak [:page_facing_up:](https://rclone.org/pikpak/)
- Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/)
- premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
- put.io [:page_facing_up:](https://rclone.org/putio/)
- Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
- QingStor [:page_facing_up:](https://rclone.org/qingstor/)
- Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
- Rabata Cloud Storage [:page_facing_up:](https://rclone.org/s3/#Rabata)
- Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
- Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
- RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
- rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
- Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
- Seafile [:page_facing_up:](https://rclone.org/seafile/)
- Seagate Lyve Cloud [:page_facing_up:](https://rclone.org/s3/#lyve)
- SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
- Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
- Servercore Object Storage [:page_facing_up:](https://rclone.org/s3/#servercore)
- SFTP [:page_facing_up:](https://rclone.org/sftp/)
- SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
- Spectra Logic [:page_facing_up:](https://rclone.org/s3/#spectralogic)
- StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
- Storj [:page_facing_up:](https://rclone.org/storj/)
- SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
- Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
- Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
- Uloz.to [:page_facing_up:](https://rclone.org/ulozto/)
- Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
- WebDAV [:page_facing_up:](https://rclone.org/webdav/)
- Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
- Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
- Zata.ai [:page_facing_up:](https://rclone.org/s3/#Zata)
- The local filesystem [:page_facing_up:](https://rclone.org/local/)
Please see [the full list of all storage providers and their features](https://rclone.org/overview/) Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
@@ -130,55 +93,49 @@ Please see [the full list of all storage providers and their features](https://r
These backends adapt or modify other storage providers

- Alias: rename existing remotes [:page_facing_up:](https://rclone.org/alias/)
- Archive: read archive files [:page_facing_up:](https://rclone.org/archive/)
- Cache: cache remotes (DEPRECATED) [:page_facing_up:](https://rclone.org/cache/)
- Chunker: split large files [:page_facing_up:](https://rclone.org/chunker/)
- Combine: combine multiple remotes into a directory tree [:page_facing_up:](https://rclone.org/combine/)
- Compress: compress files [:page_facing_up:](https://rclone.org/compress/)
- Crypt: encrypt files [:page_facing_up:](https://rclone.org/crypt/)
- Hasher: hash files [:page_facing_up:](https://rclone.org/hasher/)
- Union: join multiple remotes to work together [:page_facing_up:](https://rclone.org/union/)

## Features

- MD5/SHA-1 hashes checked at all times for file integrity
- Timestamps preserved on files
- Partial syncs supported on a whole file basis
- [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
- [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
- [Bisync](https://rclone.org/bisync/) (two way) to keep two directories in sync bidirectionally
- [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
- Can sync to and from network, e.g. two different cloud accounts
- Optional large file chunking ([Chunker](https://rclone.org/chunker/))
- Optional transparent compression ([Compress](https://rclone.org/compress/))
- Optional encryption ([Crypt](https://rclone.org/crypt/))
- Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
- Multi-threaded downloads to local disk
- Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDAV/FTP/SFTP/DLNA
## Installation & documentation

Please see the [rclone website](https://rclone.org/) for:

- [Installation](https://rclone.org/install/)
- [Documentation & configuration](https://rclone.org/docs/)
- [Changelog](https://rclone.org/changelog/)
- [FAQ](https://rclone.org/faq/)
- [Storage providers](https://rclone.org/overview/)
- [Forum](https://forum.rclone.org/)
- ...and more

## Downloads

- <https://rclone.org/downloads/>

## License

This is free software under the terms of the MIT license (check the
[COPYING file](/COPYING) included in this package).


@@ -4,88 +4,48 @@ This file describes how to make the various kinds of releases
## Extra required software for making a release

- [gh the github cli](https://github.com/cli/cli) for uploading packages
- pandoc for making the html and man pages

## Making a release

- git checkout master # see below for stable branch
- git pull # IMPORTANT
- git status - make sure everything is checked in
- Check GitHub actions build for master is Green
- make test # see integration test server or run locally
- make tag
- edit docs/content/changelog.md # make sure to remove duplicate logs from point releases
- make tidy
- make doc
- git status - to check for new man pages - git add them
- git commit -a -v -m "Version v1.XX.0"
- make retag
- git push origin # without --follow-tags so it doesn't push the tag if it fails
- git push --follow-tags origin
- \# Wait for the GitHub builds to complete then...
- make fetch_binaries
- make tarball
- make vendorball
- make sign_upload
- make check_sign
- make upload
- make upload_website
- make upload_github
- make startdev # make startstable for stable branch
- \# announce with forum post, twitter post, patreon post

## Update dependencies

Early in the next release cycle update the dependencies.

- Review any pinned packages in go.mod and remove if possible
- `make updatedirect`
- `make GOTAGS=cmount`
- `make compiletest`
- Fix anything which doesn't compile at this point and commit changes here
- `git commit -a -v -m "build: update all dependencies"`

If the `make updatedirect` upgrades the version of go in the `go.mod`
```text
go 1.22.0
```
then go to manual mode. `go1.22` here is the lowest supported version
in the `go.mod`.
If `make updatedirect` added a `toolchain` directive then remove it.
We don't want to force a toolchain on our users. Linux packagers are
often using a version of Go that is a few versions out of date.
```sh
go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
go get -d $(cat /tmp/potential-upgrades)
go mod tidy -go=1.22 -compat=1.22
```
If the `go mod tidy` fails use the output from it to remove the
package which can't be upgraded from `/tmp/potential-upgrades` when
done
```sh
git co go.mod go.sum
```
And try again.
Optionally upgrade the direct and indirect dependencies. This is very
likely to fail if the manual method was used above - in that case
ignore it as it is too time consuming to fix.
- `make update`
- `make GOTAGS=cmount`
- `make compiletest`
- roll back any updates which didn't compile
- `git commit -a -v --amend`
- **NB** watch out for this changing the default go version in `go.mod`
Note that `make update` updates all direct and indirect dependencies
and there can occasionally be forwards compatibility problems with
@@ -93,28 +53,11 @@ doing that so it may be necessary to roll back dependencies to the
version specified by `make updatedirect` in order to get rclone to
build.
Once it compiles locally, push it on a test branch and commit fixes
until the tests pass.
### Major versions
The above procedure will not upgrade major versions, so v2 to v3.
However this tool can show which major versions might need to be
upgraded:
```sh
go run github.com/icholy/gomajor@latest list -major
```
Expect API breakage when updating major versions.
## Tidy beta

At some point after the release run

```sh
bin/tidy-beta v1.55
```

where the version number is that of a couple of releases ago, to remove old beta binaries.
@@ -124,86 +67,57 @@ If rclone needs a point release due to some horrendous bug:
Set vars

- BASE_TAG=v1.XX # e.g. v1.52
- NEW_TAG=${BASE_TAG}.Y # e.g. v1.52.1
- echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1

First make the release branch. If this is a second point release then
this will be done already.

- git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
- make startstable

Now

- git co ${BASE_TAG}-stable
- git cherry-pick any fixes
- make startstable
- Do the steps as above
- git co master
- `#` cherry pick the changes to the changelog - check the diff to make sure it is correct
- git checkout ${BASE_TAG}-stable docs/content/changelog.md
- git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
- git push
## Sponsor logos
If updating the website note that the sponsor logos have been moved out of the
main repository.
You will need to checkout `/docs/static/img/logos` from <https://github.com/rclone/third-party-logos>
which is a private repo containing artwork from sponsors.
## Update the website between releases
Create an update website branch based off the last release
```sh
git co -b update-website
```
If the branch already exists, double check there are no commits that need saving.
Now reset the branch to the last release
```sh
git reset --hard v1.64.0
```
Create the changes, check them in, test with `make serve` then
```sh
make upload_test_website
```
Check out <https://test.rclone.org> and when happy
```sh
make upload_website
```
Cherry pick any changes back to master and the stable branch if it is active.
## Making a manual build of docker

To do a basic build of rclone's docker image to debug builds locally:

```sh
docker buildx build --load -t rclone/rclone:testing --progress=plain .
docker run --rm rclone/rclone:testing version
```

To test the multiplatform build

```sh
docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 .
```

To make a full build then set the tags correctly and add `--push`

Note that you can't only build one architecture - you need to build them all.

```sh
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
```


@@ -1 +1 @@
v1.72.0


@@ -23,8 +23,8 @@ func prepare(t *testing.T, root string) {
	configfile.Install()

	// Configure the remote
	config.FileSetValue(remoteName, "type", "alias")
	config.FileSetValue(remoteName, "remote", root)
}

func TestNewFS(t *testing.T) {
@@ -81,12 +81,10 @@ func TestNewFS(t *testing.T) {
		for i, gotEntry := range gotEntries {
			what := fmt.Sprintf("%s, entry=%d", what, i)
			wantEntry := test.entries[i]
			_, isDir := gotEntry.(fs.Directory)
			require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
			if !isDir {
				require.Equal(t, wantEntry.size, gotEntry.Size(), what)
			}
			require.Equal(t, wantEntry.isDir, isDir, what)
		}
	}
}
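
The newer side of this hunk only compares `Size()` for file entries, since a directory's reported size varies by backend. A minimal sketch of the same `fs.Directory` type-assertion pattern, using the real rclone `fs` interfaces (the `entries` package and `describe` helper are hypothetical, shown only for illustration):

```go
package entries

import (
	"fmt"

	"github.com/rclone/rclone/fs"
)

// describe renders a directory entry for logging. fs.DirEntry is
// implemented by both fs.Object (files) and fs.Directory, so a type
// assertion distinguishes the two; sizes are only reported for files
// because a directory's Size() is backend-dependent.
func describe(entry fs.DirEntry) string {
	if _, isDir := entry.(fs.Directory); isDir {
		return "dir  " + entry.Remote()
	}
	return fmt.Sprintf("file %s (%d bytes)", entry.Remote(), entry.Size())
}
```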


@@ -4,38 +4,29 @@ package all
import (
	// Active file systems
	_ "github.com/rclone/rclone/backend/alias"
	_ "github.com/rclone/rclone/backend/archive"
	_ "github.com/rclone/rclone/backend/azureblob"
	_ "github.com/rclone/rclone/backend/azurefiles"
	_ "github.com/rclone/rclone/backend/b2"
	_ "github.com/rclone/rclone/backend/box"
	_ "github.com/rclone/rclone/backend/cache"
	_ "github.com/rclone/rclone/backend/chunker"
	_ "github.com/rclone/rclone/backend/cloudinary"
	_ "github.com/rclone/rclone/backend/combine"
	_ "github.com/rclone/rclone/backend/compress"
	_ "github.com/rclone/rclone/backend/crypt"
	_ "github.com/rclone/rclone/backend/doi"
	_ "github.com/rclone/rclone/backend/drive"
	_ "github.com/rclone/rclone/backend/dropbox"
	_ "github.com/rclone/rclone/backend/fichier"
	_ "github.com/rclone/rclone/backend/filefabric"
	_ "github.com/rclone/rclone/backend/filelu"
	_ "github.com/rclone/rclone/backend/filescom"
	_ "github.com/rclone/rclone/backend/ftp"
	_ "github.com/rclone/rclone/backend/gofile"
	_ "github.com/rclone/rclone/backend/googlecloudstorage"
	_ "github.com/rclone/rclone/backend/googlephotos"
	_ "github.com/rclone/rclone/backend/hasher"
	_ "github.com/rclone/rclone/backend/hdfs"
	_ "github.com/rclone/rclone/backend/hidrive"
	_ "github.com/rclone/rclone/backend/http"
	_ "github.com/rclone/rclone/backend/iclouddrive"
	_ "github.com/rclone/rclone/backend/imagekit"
	_ "github.com/rclone/rclone/backend/internetarchive"
	_ "github.com/rclone/rclone/backend/jottacloud"
	_ "github.com/rclone/rclone/backend/koofr"
	_ "github.com/rclone/rclone/backend/linkbox"
	_ "github.com/rclone/rclone/backend/local"
	_ "github.com/rclone/rclone/backend/mailru"
	_ "github.com/rclone/rclone/backend/mega"
@@ -45,13 +36,9 @@ import (
_ "github.com/rclone/rclone/backend/opendrive" _ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/oracleobjectstorage" _ "github.com/rclone/rclone/backend/oracleobjectstorage"
_ "github.com/rclone/rclone/backend/pcloud" _ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/pikpak"
_ "github.com/rclone/rclone/backend/pixeldrain"
_ "github.com/rclone/rclone/backend/premiumizeme" _ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/protondrive"
_ "github.com/rclone/rclone/backend/putio" _ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor" _ "github.com/rclone/rclone/backend/qingstor"
_ "github.com/rclone/rclone/backend/quatrix"
_ "github.com/rclone/rclone/backend/s3" _ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/seafile" _ "github.com/rclone/rclone/backend/seafile"
_ "github.com/rclone/rclone/backend/sftp" _ "github.com/rclone/rclone/backend/sftp"
@@ -61,7 +48,6 @@ import (
_ "github.com/rclone/rclone/backend/storj" _ "github.com/rclone/rclone/backend/storj"
_ "github.com/rclone/rclone/backend/sugarsync" _ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift" _ "github.com/rclone/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/ulozto"
_ "github.com/rclone/rclone/backend/union" _ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/uptobox" _ "github.com/rclone/rclone/backend/uptobox"
_ "github.com/rclone/rclone/backend/webdav" _ "github.com/rclone/rclone/backend/webdav"


@@ -0,0 +1,21 @@
// Test AmazonCloudDrive filesystem interface
//go:build acd
// +build acd
package amazonclouddrive_test
import (
"testing"
"github.com/rclone/rclone/backend/amazonclouddrive"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
fstests.RemoteName = "TestAmazonCloudDrive:"
fstests.Run(t)
}


@@ -1,679 +0,0 @@
//go:build !plan9
// Package archive implements a backend to access archive files in a remote
package archive
// FIXME factor common code between backends out - eg VFS initialization
// FIXME can we generalize the VFS handle caching and use it in zip backend
// Factor more stuff out if possible
// Odd stats which are probably coming from the VFS
// * tensorflow.sqfs: 0% /3.074Gi, 204.426Ki/s, 4h22m46s
// FIXME this will perform poorly for unpacking as the VFS Reader is bad
// at multiple streams - need cache mode setting?
import (
"context"
"errors"
"fmt"
"io"
"path"
"strings"
"sync"
"time"
// Import all the required archivers here
_ "github.com/rclone/rclone/backend/archive/squashfs"
_ "github.com/rclone/rclone/backend/archive/zip"
"github.com/rclone/rclone/backend/archive/archiver"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
)
// Register with Fs
func init() {
fsi := &fs.RegInfo{
Name: "archive",
Description: "Read archives",
NewFs: NewFs,
MetadataInfo: &fs.MetadataInfo{
Help: `Any metadata supported by the underlying remote is read and written.`,
},
Options: []fs.Option{{
Name: "remote",
Help: `Remote to wrap to read archives from.
Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
"myremote:bucket" or "myremote:".
If this is left empty, then the archive backend will use the root as
the remote.
This means that you can use :archive:remote:path and it will be
equivalent to setting remote="remote:path".
`,
Required: false,
}},
}
fs.Register(fsi)
}
// Options defines the configuration for this backend
type Options struct {
Remote string `config:"remote"`
}
// Fs represents an archive of upstreams
type Fs struct {
name string // name of this remote
features *fs.Features // optional features
opt Options // options for this Fs
root string // the path we are working on
f fs.Fs // remote we are wrapping
wrapper fs.Fs // fs that wraps us
mu sync.Mutex // protects the below
archives map[string]*archive // the archives we have, by path
}
// A single open archive
type archive struct {
archiver archiver.Archiver // archiver responsible
remote string // path to the archive
prefix string // prefix to add on to listings
root string // root of the archive to remove from listings
mu sync.Mutex // protects the following variables
f fs.Fs // the archive Fs, may be nil
}
// If remote is an archive then return it otherwise return nil
func findArchive(remote string) *archive {
// FIXME use something faster than linear search?
for _, archiver := range archiver.Archivers {
if strings.HasSuffix(remote, archiver.Extension) {
return &archive{
archiver: archiver,
remote: remote,
prefix: remote,
root: "",
}
}
}
return nil
}
// Find an archive buried in remote
func subArchive(remote string) *archive {
archive := findArchive(remote)
if archive != nil {
return archive
}
parent := path.Dir(remote)
if parent == "/" || parent == "." {
return nil
}
return subArchive(parent)
}
// If remote is an archive then return it otherwise return nil
func (f *Fs) findArchive(remote string) (archive *archive) {
archive = findArchive(remote)
if archive != nil {
f.mu.Lock()
f.archives[remote] = archive
f.mu.Unlock()
}
return archive
}
// Instantiate archive if it hasn't been instantiated yet
//
// This is done lazily so that we can list a directory full of
// archives without opening them all.
func (a *archive) init(ctx context.Context, f fs.Fs) (fs.Fs, error) {
a.mu.Lock()
defer a.mu.Unlock()
if a.f != nil {
return a.f, nil
}
newFs, err := a.archiver.New(ctx, f, a.remote, a.prefix, a.root)
if err != nil && err != fs.ErrorIsFile {
return nil, fmt.Errorf("failed to create archive %q: %w", a.remote, err)
}
a.f = newFs
return a.f, nil
}
// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs.Fs, err error) {
// defer log.Trace(nil, "name=%q, root=%q, m=%v", name, root, m)("f=%+v, err=%v", &outFs, &err)
// Parse config into Options struct
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
remote := opt.Remote
origRoot := root
// If remote is empty, use the root instead
if remote == "" {
remote = root
root = ""
}
isDirectory := strings.HasSuffix(remote, "/")
remote = strings.TrimRight(remote, "/")
if remote == "" {
remote = "/"
}
if strings.HasPrefix(remote, name+":") {
		return nil, errors.New("can't point archive remote at itself - check the value of the remote setting")
}
_ = isDirectory
foundArchive := subArchive(remote)
if foundArchive != nil {
fs.Debugf(nil, "Found archiver for %q remote %q", foundArchive.archiver.Extension, foundArchive.remote)
// Archive path
foundArchive.root = strings.Trim(remote[len(foundArchive.remote):], "/")
// Path to the archive
archiveRemote := remote[:len(foundArchive.remote)]
// Remote is archive leaf name
foundArchive.remote = path.Base(archiveRemote)
foundArchive.prefix = ""
// Point remote to archive file
remote = archiveRemote
}
// Make sure to remove trailing . referring to the current dir
if path.Base(root) == "." {
root = strings.TrimSuffix(root, ".")
}
remotePath := fspath.JoinRootPath(remote, root)
wrappedFs, err := cache.Get(ctx, remotePath)
if err != fs.ErrorIsFile && err != nil {
return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remote, err)
}
f := &Fs{
name: name,
//root: path.Join(remotePath, root),
root: origRoot,
opt: *opt,
f: wrappedFs,
archives: make(map[string]*archive),
}
cache.PinUntilFinalized(f.f, f)
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
f.features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: false,
ReadMimeType: true,
WriteMimeType: true,
CanHaveEmptyDirectories: true,
BucketBased: true,
SetTier: true,
GetTier: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
if foundArchive != nil {
fs.Debugf(f, "Root is an archive")
if err != fs.ErrorIsFile {
return nil, fmt.Errorf("expecting to find a file at %q", remote)
}
return foundArchive.init(ctx, f.f)
}
// Correct root if definitely pointing to a file
if err == fs.ErrorIsFile {
f.root = path.Dir(f.root)
if f.root == "." || f.root == "/" {
f.root = ""
}
}
return f, err
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("archive root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.f.Rmdir(ctx, dir)
}
// Hashes returns hash.HashNone to indicate remote hashing is unavailable
func (f *Fs) Hashes() hash.Set {
return f.f.Hashes()
}
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return f.f.Mkdir(ctx, dir)
}
// Purge all files in the directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
do := f.f.Features().Purge
if do == nil {
return fs.ErrorCantPurge
}
return do(ctx, dir)
}
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
do := f.f.Features().Copy
if do == nil {
return nil, fs.ErrorCantCopy
}
// FIXME
// o, ok := src.(*Object)
// if !ok {
// return nil, fs.ErrorCantCopy
// }
return do(ctx, src, remote)
}
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
do := f.f.Features().Move
if do == nil {
return nil, fs.ErrorCantMove
}
// FIXME
// o, ok := src.(*Object)
// if !ok {
// return nil, fs.ErrorCantMove
// }
return do(ctx, src, remote)
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
do := f.f.Features().DirMove
if do == nil {
return fs.ErrorCantDirMove
}
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
return do(ctx, srcFs.f, srcRemote, dstRemote)
}
// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
// At least one value will be written to the channel,
// specifying the initial value and updated values might
// follow. A 0 Duration should pause the polling.
// The ChangeNotify implementation must empty the channel
// regularly. When the channel gets closed, the implementation
// should stop polling and release resources.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), ch <-chan time.Duration) {
do := f.f.Features().ChangeNotify
if do == nil {
return
}
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
// fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
notifyFunc(path, entryType)
}
do(ctx, wrappedNotifyFunc, ch)
}
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
do := f.f.Features().DirCacheFlush
if do != nil {
do()
}
}
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) {
var o fs.Object
var err error
if stream {
o, err = f.f.Features().PutStream(ctx, in, src, options...)
} else {
o, err = f.f.Put(ctx, in, src, options...)
}
if err != nil {
return nil, err
}
return o, nil
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return o, o.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
return f.put(ctx, in, src, false, options...)
default:
return nil, err
}
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return o, o.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
return f.put(ctx, in, src, true, options...)
default:
return nil, err
}
}
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
do := f.f.Features().About
if do == nil {
return nil, errors.New("not supported by underlying remote")
}
return do(ctx)
}
// Find the Fs for the directory
func (f *Fs) findFs(ctx context.Context, dir string) (subFs fs.Fs, err error) {
f.mu.Lock()
defer f.mu.Unlock()
subFs = f.f
// FIXME should do this with a better datastructure like a prefix tree
// FIXME want to find the longest first otherwise nesting won't work
dirSlash := dir + "/"
for archiverRemote, archive := range f.archives {
subRemote := archiverRemote + "/"
if strings.HasPrefix(dirSlash, subRemote) {
subFs, err = archive.init(ctx, f.f)
if err != nil {
return nil, err
}
break
}
}
return subFs, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
subFs, err := f.findFs(ctx, dir)
if err != nil {
return nil, err
}
entries, err = subFs.List(ctx, dir)
if err != nil {
return nil, err
}
for i, entry := range entries {
// Can only unarchive files
if o, ok := entry.(fs.Object); ok {
remote := o.Remote()
archive := f.findArchive(remote)
if archive != nil {
// Overwrite entry with directory
entries[i] = fs.NewDir(remote, o.ModTime(ctx))
}
}
}
return entries, nil
}
// NewObject creates a new remote archive file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
dir := path.Dir(remote)
if dir == "/" || dir == "." {
dir = ""
}
subFs, err := f.findFs(ctx, dir)
if err != nil {
return nil, err
}
o, err := subFs.NewObject(ctx, remote)
if err != nil {
return nil, err
}
return o, nil
}
// Precision is the greatest precision of all the archivers
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
if do := f.f.Features().Shutdown; do != nil {
return do(ctx)
}
return nil
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
do := f.f.Features().PublicLink
if do == nil {
return "", errors.New("PublicLink not supported")
}
return do(ctx, remote, expire, unlink)
}
// PutUnchecked in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
//
// May create duplicates or return errors if src already
// exists.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
do := f.f.Features().PutUnchecked
if do == nil {
return nil, errors.New("can't PutUnchecked")
}
o, err := do(ctx, in, src, options...)
if err != nil {
return nil, err
}
return o, nil
}
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
if len(dirs) == 0 {
return nil
}
do := f.f.Features().MergeDirs
if do == nil {
return errors.New("MergeDirs not supported")
}
return do(ctx, dirs)
}
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
func (f *Fs) CleanUp(ctx context.Context) error {
do := f.f.Features().CleanUp
if do == nil {
return errors.New("not supported by underlying remote")
}
return do(ctx)
}
// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
do := f.f.Features().OpenWriterAt
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx, remote, size)
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.f
}
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
return f.wrapper
}
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
f.wrapper = wrapper
}
// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
do := f.f.Features().OpenChunkWriter
if do == nil {
return info, nil, fs.ErrorNotImplemented
}
return do(ctx, remote, src, options...)
}
// UserInfo returns info about the connected user
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
do := f.f.Features().UserInfo
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx)
}
// Disconnect the current user
func (f *Fs) Disconnect(ctx context.Context) error {
do := f.f.Features().Disconnect
if do == nil {
return fs.ErrorNotImplemented
}
return do(ctx)
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.OpenWriterAter = (*Fs)(nil)
_ fs.OpenChunkWriter = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
// FIXME _ fs.FullObject = (*Object)(nil)
)


@@ -1,221 +0,0 @@
//go:build !plan9
package archive
import (
"bytes"
"context"
"fmt"
"os"
"os/exec"
"path"
"path/filepath"
"strconv"
"strings"
"testing"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// FIXME need to test Open with seek
// run - run a shell command
func run(t *testing.T, args ...string) {
cmd := exec.Command(args[0], args[1:]...)
fs.Debugf(nil, "run args = %v", args)
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf(`
----------------------------
Failed to run %v: %v
Command output was:
%s
----------------------------
`, args, err, out)
}
}
// check the dst and src are identical
func checkTree(ctx context.Context, name string, t *testing.T, dstArchive, src string, expectedCount int) {
t.Run(name, func(t *testing.T) {
fs.Debugf(nil, "check %q vs %q", dstArchive, src)
Farchive, err := cache.Get(ctx, dstArchive)
if err != fs.ErrorIsFile {
require.NoError(t, err)
}
Fsrc, err := cache.Get(ctx, src)
if err != fs.ErrorIsFile {
require.NoError(t, err)
}
var matches bytes.Buffer
opt := operations.CheckOpt{
Fdst: Farchive,
Fsrc: Fsrc,
Match: &matches,
}
for _, action := range []string{"Check", "Download"} {
t.Run(action, func(t *testing.T) {
matches.Reset()
if action == "Download" {
assert.NoError(t, operations.CheckDownload(ctx, &opt))
} else {
assert.NoError(t, operations.Check(ctx, &opt))
}
if expectedCount > 0 {
assert.Equal(t, expectedCount, strings.Count(matches.String(), "\n"))
}
})
}
t.Run("NewObject", func(t *testing.T) {
// Check we can run NewObject on all files and read them
assert.NoError(t, operations.ListFn(ctx, Fsrc, func(srcObj fs.Object) {
if t.Failed() {
return
}
remote := srcObj.Remote()
archiveObj, err := Farchive.NewObject(ctx, remote)
require.NoError(t, err, remote)
assert.Equal(t, remote, archiveObj.Remote(), remote)
// Test that the contents are the same
archiveBuf := fstests.ReadObject(ctx, t, archiveObj, -1)
srcBuf := fstests.ReadObject(ctx, t, srcObj, -1)
assert.Equal(t, srcBuf, archiveBuf)
if len(srcBuf) < 81 {
return
}
// Tests that Open works with SeekOption
assert.Equal(t, srcBuf[50:], fstests.ReadObject(ctx, t, archiveObj, -1, &fs.SeekOption{Offset: 50}), "contents differ after seek")
// Tests that Open works with RangeOption
for _, test := range []struct {
ro fs.RangeOption
wantStart, wantEnd int
}{
{fs.RangeOption{Start: 5, End: 15}, 5, 16},
{fs.RangeOption{Start: 80, End: -1}, 80, len(srcBuf)},
{fs.RangeOption{Start: 81, End: 100000}, 81, len(srcBuf)},
{fs.RangeOption{Start: -1, End: 20}, len(srcBuf) - 20, len(srcBuf)}, // if start is omitted this means get the final bytes
// {fs.RangeOption{Start: -1, End: -1}, 0, len(srcBuf)}, - this seems to work but the RFC doesn't define it
} {
got := fstests.ReadObject(ctx, t, archiveObj, -1, &test.ro)
foundAt := strings.Index(srcBuf, got)
help := fmt.Sprintf("%#v failed want [%d:%d] got [%d:%d]", test.ro, test.wantStart, test.wantEnd, foundAt, foundAt+len(got))
assert.Equal(t, srcBuf[test.wantStart:test.wantEnd], got, help)
}
// Test that the modtimes are correct
fstest.AssertTimeEqualWithPrecision(t, remote, srcObj.ModTime(ctx), archiveObj.ModTime(ctx), Farchive.Precision())
// Test that the sizes are correct
assert.Equal(t, srcObj.Size(), archiveObj.Size())
// Test that Strings are OK
assert.Equal(t, srcObj.String(), archiveObj.String())
}))
})
// t.Logf("Fdst ------------- %v", Fdst)
// operations.List(ctx, Fdst, os.Stdout)
// t.Logf("Fsrc ------------- %v", Fsrc)
// operations.List(ctx, Fsrc, os.Stdout)
})
}
// test creating and reading back some archives
//
// Note that this uses rclone and zip as external binaries.
func testArchive(t *testing.T, archiveName string, archiveFn func(t *testing.T, output, input string)) {
ctx := context.Background()
checkFiles := 1000
// create random test input files
inputRoot := t.TempDir()
input := filepath.Join(inputRoot, archiveName)
require.NoError(t, os.Mkdir(input, 0777))
run(t, "rclone", "test", "makefiles", "--files", strconv.Itoa(checkFiles), "--ascii", input)
// Create the archive
output := t.TempDir()
zipFile := path.Join(output, archiveName)
archiveFn(t, zipFile, input)
// Check the archive itself
checkTree(ctx, "Archive", t, ":archive:"+zipFile, input, checkFiles)
// Now check a subdirectory
fis, err := os.ReadDir(input)
require.NoError(t, err)
subDir := "NOT FOUND"
aFile := "NOT FOUND"
for _, fi := range fis {
if fi.IsDir() {
subDir = fi.Name()
} else {
aFile = fi.Name()
}
}
checkTree(ctx, "SubDir", t, ":archive:"+zipFile+"/"+subDir, filepath.Join(input, subDir), 0)
// Now check a single file
fiCtx, fi := filter.AddConfig(ctx)
require.NoError(t, fi.AddRule("+ "+aFile))
require.NoError(t, fi.AddRule("- *"))
checkTree(fiCtx, "SingleFile", t, ":archive:"+zipFile+"/"+aFile, filepath.Join(input, aFile), 0)
// Now check the level above
checkTree(ctx, "Root", t, ":archive:"+output, inputRoot, checkFiles)
// run(t, "cp", "-a", inputRoot, output, "/tmp/test-"+archiveName)
}
// Make sure we have the executable named
func skipIfNoExe(t *testing.T, exeName string) {
_, err := exec.LookPath(exeName)
if err != nil {
t.Skipf("%s executable not installed", exeName)
}
}
// Test creating and reading back some archives
//
// Note that this uses rclone and zip as external binaries.
func TestArchiveZip(t *testing.T) {
fstest.Initialise()
skipIfNoExe(t, "zip")
skipIfNoExe(t, "rclone")
testArchive(t, "test.zip", func(t *testing.T, output, input string) {
oldcwd, err := os.Getwd()
require.NoError(t, err)
require.NoError(t, os.Chdir(input))
defer func() {
require.NoError(t, os.Chdir(oldcwd))
}()
run(t, "zip", "-9r", output, ".")
})
}
// Test creating and reading back some archives
//
// Note that this uses rclone and squashfs as external binaries.
func TestArchiveSquashfs(t *testing.T) {
fstest.Initialise()
skipIfNoExe(t, "mksquashfs")
skipIfNoExe(t, "rclone")
testArchive(t, "test.sqfs", func(t *testing.T, output, input string) {
run(t, "mksquashfs", input, output)
})
}


@@ -1,67 +0,0 @@
//go:build !plan9
// Test Archive filesystem interface
package archive_test
import (
"testing"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/memory"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
var (
unimplementableFsMethods = []string{"ListR", "ListP", "MkdirMetadata", "DirSetModTime"}
// In these tests we receive objects from the underlying remote which don't implement these methods
unimplementableObjectMethods = []string{"GetTier", "ID", "Metadata", "MimeType", "SetTier", "UnWrap", "SetMetadata"}
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
t.Skip("Skipping as -remote not set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}
func TestLocal(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
remote := t.TempDir()
name := "TestArchiveLocal"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "archive"},
{Name: name, Key: "remote", Value: remote},
},
QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}
func TestMemory(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
remote := ":memory:"
name := "TestArchiveMemory"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "archive"},
{Name: name, Key: "remote", Value: remote},
},
QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}


@@ -1,7 +0,0 @@
// Build for archive for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9
// Package archive implements a backend to access archive files in a remote
package archive


@@ -1,24 +0,0 @@
// Package archiver registers all the archivers
package archiver
import (
"context"
"github.com/rclone/rclone/fs"
)
// Archiver describes an archive package
type Archiver struct {
// New constructs an Fs from the (wrappedFs, remote) with the objects
// prefix with prefix and rooted at root
New func(ctx context.Context, f fs.Fs, remote, prefix, root string) (fs.Fs, error)
Extension string
}
// Archivers is a slice of all registered archivers
var Archivers []Archiver
// Register adds the archivers provided to the list of known archivers
func Register(as ...Archiver) {
Archivers = append(Archivers, as...)
}
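
For context, a sketch of how an archive format might hook into this registry: the squashfs and zip packages blank-imported by the archive backend above presumably register themselves this way from their `init` functions. The `tarfs` package and `newTarFs` constructor below are hypothetical, shown only to illustrate the pattern:

```go
package tarfs

import (
	"context"

	"github.com/rclone/rclone/backend/archive/archiver"
	"github.com/rclone/rclone/fs"
)

// init registers this archiver so that the archive backend routes
// paths ending in .tar to it.
func init() {
	archiver.Register(archiver.Archiver{
		New:       newTarFs,
		Extension: ".tar",
	})
}

// newTarFs would open the archive at remote within f and present its
// contents as an fs.Fs rooted at root, prefixing listings with prefix.
func newTarFs(ctx context.Context, f fs.Fs, remote, prefix, root string) (fs.Fs, error) {
	return nil, fs.ErrorNotImplemented // placeholder: decode the tar here
}
```

Registering from `init` means a blank import, as in the archive backend's import block, is all that is needed to enable a format.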


@@ -1,233 +0,0 @@
// Package base is a base archive Fs
package base
import (
"context"
"errors"
"fmt"
"io"
"path"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/vfs"
)
// Fs represents a wrapped fs.Fs
type Fs struct {
f fs.Fs
wrapper fs.Fs
name string
features *fs.Features // optional features
vfs *vfs.VFS
node vfs.Node // archive object
remote string // remote of the archive object
prefix string // position for objects
prefixSlash string // position for objects with a slash on
root string // position to read from within the archive
}
var errNotImplemented = errors.New("internal error: method not implemented in archiver")
// New constructs an Fs from the (wrappedFs, remote) with the objects
// prefix with prefix and rooted at root
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (*Fs, error) {
// FIXME vfs cache?
// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
fs.Debugf(nil, "New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
VFS := vfs.New(wrappedFs, nil)
node, err := VFS.Stat(remote)
if err != nil {
return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
}
f := &Fs{
f: wrappedFs,
name: path.Join(fs.ConfigString(wrappedFs), remote),
vfs: VFS,
node: node,
remote: remote,
root: root,
prefix: prefix,
prefixSlash: prefix + "/",
}
// FIXME
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
//
// FIXME some of these need to be forced on - CanHaveEmptyDirectories
f.features = (&fs.Features{
CaseInsensitive: false,
DuplicateFiles: false,
ReadMimeType: false, // MimeTypes not supported with gzip
WriteMimeType: false,
BucketBased: false,
CanHaveEmptyDirectories: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
return f, nil
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// String returns a description of the FS
func (f *Fs) String() string {
return f.name
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return nil, errNotImplemented
}
// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
return nil, errNotImplemented
}
// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
return nil, vfs.EROFS
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None)
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.f
}
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
return f.wrapper
}
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
f.wrapper = wrapper
}
// Object describes an object to be read from the raw zip file
type Object struct {
f *Fs
remote string
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.f
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Remote()
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Size returns the size of the file
func (o *Object) Size() int64 {
return -1
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
return time.Now()
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return vfs.EROFS
}
// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
return true
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
return nil, errNotImplemented
}
// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return vfs.EROFS
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return vfs.EROFS
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
)


@@ -1,165 +0,0 @@
package squashfs
// Could just be using bare object Open with RangeRequest which
// would transfer the minimum amount of data but may be slower.
import (
"errors"
"fmt"
"io/fs"
"os"
"sync"
"github.com/diskfs/go-diskfs/backend"
"github.com/rclone/rclone/vfs"
)
// Cache file handles for accessing the file
type cache struct {
node vfs.Node
fhsMu sync.Mutex
fhs []cacheHandle
}
// A cached file handle
type cacheHandle struct {
offset int64
fh vfs.Handle
}
// Make a new cache
func newCache(node vfs.Node) *cache {
return &cache{
node: node,
}
}
// Get a vfs.Handle from the pool or open one
//
// This tries to find an open file handle which doesn't require seeking.
func (c *cache) open(off int64) (fh vfs.Handle, err error) {
c.fhsMu.Lock()
defer c.fhsMu.Unlock()
if len(c.fhs) > 0 {
// Look for exact match first
for i, cfh := range c.fhs {
if cfh.offset == off {
// fs.Debugf(nil, "CACHE MATCH")
c.fhs = append(c.fhs[:i], c.fhs[i+1:]...)
return cfh.fh, nil
}
}
// fs.Debugf(nil, "CACHE MISS")
// Just take the first one if not found
cfh := c.fhs[0]
c.fhs = c.fhs[1:]
return cfh.fh, nil
}
fh, err = c.node.Open(os.O_RDONLY)
if err != nil {
return nil, fmt.Errorf("failed to open squashfs archive: %w", err)
}
return fh, nil
}
// Close a vfs.Handle or return it to the pool
//
// off should be the offset the file handle would read from without seeking
func (c *cache) close(fh vfs.Handle, off int64) {
c.fhsMu.Lock()
defer c.fhsMu.Unlock()
c.fhs = append(c.fhs, cacheHandle{
offset: off,
fh: fh,
})
}
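// Illustrative use of the pool (example values, not from the source):
// a sequential reader hands its handle back with the offset it has
// reached, and the next open at that offset reuses it without seeking:
//
//	fh, _ := c.open(0)         // no pooled handle yet: opens a new one
//	n, _ := fh.ReadAt(buf, 0)  // reads len(buf) bytes from the start
//	c.close(fh, int64(n))      // pooled, keyed by the next offset n
//	fh2, _ := c.open(int64(n)) // exact offset match: reused, no seek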
// ReadAt reads len(p) bytes into p starting at offset off in the underlying
// input source. It returns the number of bytes read (0 <= n <= len(p)) and any
// error encountered.
//
// When ReadAt returns n < len(p), it returns a non-nil error explaining why
// more bytes were not returned. In this respect, ReadAt is stricter than Read.
//
// Even if ReadAt returns n < len(p), it may use all of p as scratch
// space during the call. If some data is available but not len(p) bytes,
// ReadAt blocks until either all the data is available or an error occurs.
// In this respect ReadAt is different from Read.
//
// If the n = len(p) bytes returned by ReadAt are at the end of the input
// source, ReadAt may return either err == EOF or err == nil.
//
// If ReadAt is reading from an input source with a seek offset, ReadAt should
// not affect nor be affected by the underlying seek offset.
//
// Clients of ReadAt can execute parallel ReadAt calls on the same input
// source.
//
// Implementations must not retain p.
func (c *cache) ReadAt(p []byte, off int64) (n int, err error) {
fh, err := c.open(off)
if err != nil {
return n, err
}
defer func() {
c.close(fh, off+int64(len(p)))
}()
// fs.Debugf(nil, "ReadAt(p[%d], off=%d, fh=%p)", len(p), off, fh)
return fh.ReadAt(p, off)
}
var errCacheNotImplemented = errors.New("internal error: squashfs cache doesn't implement method")
// WriteAt method dummy stub to satisfy interface
func (c *cache) WriteAt(p []byte, off int64) (n int, err error) {
return 0, errCacheNotImplemented
}
// Seek method dummy stub to satisfy interface
func (c *cache) Seek(offset int64, whence int) (int64, error) {
return 0, errCacheNotImplemented
}
// Read method dummy stub to satisfy interface
func (c *cache) Read(p []byte) (n int, err error) {
return 0, errCacheNotImplemented
}
func (c *cache) Stat() (fs.FileInfo, error) {
return nil, errCacheNotImplemented
}
// Close the file
func (c *cache) Close() (err error) {
c.fhsMu.Lock()
defer c.fhsMu.Unlock()
// Close any open file handles
for i := range c.fhs {
fh := &c.fhs[i]
newErr := fh.fh.Close()
if err == nil {
err = newErr
}
}
c.fhs = nil
return err
}
// Sys returns OS-specific file for ioctl calls via fd
func (c *cache) Sys() (*os.File, error) {
return nil, errCacheNotImplemented
}
// Writable returns file for read-write operations
func (c *cache) Writable() (backend.WritableFile, error) {
return nil, errCacheNotImplemented
}
// check interfaces
var _ backend.Storage = (*cache)(nil)


@@ -1,446 +0,0 @@
// Package squashfs implements a squashfs archiver for the archive backend
package squashfs
import (
"context"
"fmt"
"io"
"path"
"strings"
"time"
"github.com/diskfs/go-diskfs/filesystem/squashfs"
"github.com/rclone/rclone/backend/archive/archiver"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
)
func init() {
archiver.Register(archiver.Archiver{
New: New,
Extension: ".sqfs",
})
}
// Fs represents a wrapped fs.Fs
type Fs struct {
f fs.Fs
wrapper fs.Fs
name string
features *fs.Features // optional features
vfs *vfs.VFS
sqfs *squashfs.FileSystem // interface to the squashfs
c *cache
node vfs.Node // squashfs file object - set if reading
remote string // remote of the squashfs file object
prefix string // position for objects
prefixSlash string // position for objects with a slash on
root string // position to read from within the archive
}
// New constructs an Fs from the (wrappedFs, remote) with the objects
// prefixed with prefix and rooted at root
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) {
// FIXME vfs cache?
// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
fs.Debugf(nil, "Squashfs: New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
vfsOpt := vfscommon.Opt
vfsOpt.ReadWait = 0
VFS := vfs.New(wrappedFs, &vfsOpt)
node, err := VFS.Stat(remote)
if err != nil {
return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
}
c := newCache(node)
// FIXME blocksize
sqfs, err := squashfs.Read(c, node.Size(), 0, 1024*1024)
if err != nil {
return nil, fmt.Errorf("failed to read squashfs: %w", err)
}
f := &Fs{
f: wrappedFs,
name: path.Join(fs.ConfigString(wrappedFs), remote),
vfs: VFS,
node: node,
sqfs: sqfs,
c: c,
remote: remote,
root: strings.Trim(root, "/"),
prefix: prefix,
prefixSlash: prefix + "/",
}
if prefix == "" {
f.prefixSlash = ""
}
singleObject := false
// Find the directory the root points to
if f.root != "" && !strings.HasSuffix(root, "/") {
native, err := f.toNative("")
if err == nil {
native = strings.TrimRight(native, "/")
_, err := f.newObjectNative(native)
if err == nil {
// If it pointed to a file, flag the single object case
// and find the directory above
singleObject = true
f.root = path.Dir(f.root)
if f.root == "." || f.root == "/" {
f.root = ""
}
}
}
}
// FIXME
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
//
// FIXME some of these need to be forced on - CanHaveEmptyDirectories
f.features = (&fs.Features{
CaseInsensitive: false,
DuplicateFiles: false,
ReadMimeType: false, // MimeTypes not supported with squashfs
WriteMimeType: false,
BucketBased: false,
CanHaveEmptyDirectories: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
if singleObject {
return f, fs.ErrorIsFile
}
return f, nil
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("Squashfs %q", f.name)
}
// This turns a remote into a native path in the squashfs starting with a /
func (f *Fs) toNative(remote string) (string, error) {
native := strings.Trim(remote, "/")
if f.prefix == "" {
native = "/" + native
} else if native == f.prefix {
native = "/"
} else if !strings.HasPrefix(native, f.prefixSlash) {
return "", fmt.Errorf("internal error: %q doesn't start with prefix %q", native, f.prefixSlash)
} else {
native = native[len(f.prefix):]
}
if f.root != "" {
native = "/" + f.root + native
}
return native, nil
}
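// Worked example (hypothetical values): with prefix "data.sqfs" and
// root "sub", the remote "data.sqfs/a/b.txt" first has the prefix
// stripped, giving "/a/b.txt", and then the root prepended, giving the
// native squashfs path "/sub/a/b.txt".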
// Turn a (nativeDir, leaf) into a remote
func (f *Fs) fromNative(nativeDir string, leaf string) string {
// fs.Debugf(nil, "nativeDir = %q, leaf = %q, root=%q", nativeDir, leaf, f.root)
dir := nativeDir
if f.root != "" {
dir = strings.TrimPrefix(dir, "/"+f.root)
}
remote := f.prefixSlash + strings.Trim(path.Join(dir, leaf), "/")
// fs.Debugf(nil, "dir = %q, remote=%q", dir, remote)
return remote
}
// Convert a FileInfo into an Object from native dir
func (f *Fs) objectFromFileInfo(nativeDir string, item squashfs.FileStat) *Object {
return &Object{
fs: f,
remote: f.fromNative(nativeDir, item.Name()),
size: item.Size(),
modTime: item.ModTime(),
item: item,
}
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
defer log.Trace(f, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
nativeDir, err := f.toNative(dir)
if err != nil {
return nil, err
}
items, err := f.sqfs.ReadDir(nativeDir)
if err != nil {
return nil, fmt.Errorf("read squashfs: couldn't read directory: %w", err)
}
entries = make(fs.DirEntries, 0, len(items))
for _, fi := range items {
item, ok := fi.(squashfs.FileStat)
if !ok {
return nil, fmt.Errorf("internal error: unexpected type for %q: %T", fi.Name(), fi)
}
// fs.Debugf(item.Name(), "entry = %#v", item)
var entry fs.DirEntry
if item.IsDir() {
var remote = f.fromNative(nativeDir, item.Name())
entry = fs.NewDir(remote, item.ModTime())
} else {
if item.Mode().IsRegular() {
entry = f.objectFromFileInfo(nativeDir, item)
} else {
fs.Debugf(item.Name(), "FIXME Not regular file - skipping")
continue
}
}
entries = append(entries, entry)
}
// fs.Debugf(f, "dir=%q, entries=%v", dir, entries)
return entries, nil
}
// newObjectNative finds the object at the native path passed in
func (f *Fs) newObjectNative(nativePath string) (o fs.Object, err error) {
// get the path and filename
dir, leaf := path.Split(nativePath)
dir = strings.TrimRight(dir, "/")
leaf = strings.Trim(leaf, "/")
// FIXME need to detect directory not found
fis, err := f.sqfs.ReadDir(dir)
if err != nil {
return nil, fs.ErrorObjectNotFound
}
for _, fi := range fis {
if fi.Name() == leaf {
if fi.IsDir() {
return nil, fs.ErrorNotAFile
}
item, ok := fi.(squashfs.FileStat)
if !ok {
return nil, fmt.Errorf("internal error: unexpected type for %q: %T", fi.Name(), fi)
}
o = f.objectFromFileInfo(dir, item)
break
}
}
if o == nil {
return nil, fs.ErrorObjectNotFound
}
return o, nil
}
// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
defer log.Trace(f, "remote=%q", remote)("obj=%v, err=%v", &o, &err)
nativePath, err := f.toNative(remote)
if err != nil {
return nil, err
}
return f.newObjectNative(nativePath)
}
// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
return nil, vfs.EROFS
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None)
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.f
}
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
return f.wrapper
}
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
f.wrapper = wrapper
}
// Object describes an object to be read from the raw squashfs file
type Object struct {
fs *Fs
remote string
size int64
modTime time.Time
item squashfs.FileStat
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Remote()
}
// Turn a squashfs path into a full path for the parent Fs
// func (o *Object) path(remote string) string {
// return path.Join(o.fs.prefix, remote)
// }
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Size returns the size of the file
func (o *Object) Size() int64 {
return o.size
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return vfs.EROFS
}
// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
return true
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
remote, err := o.fs.toNative(o.remote)
if err != nil {
return nil, err
}
fs.Debugf(o, "Opening %q", remote)
//fh, err := o.fs.sqfs.OpenFile(remote, os.O_RDONLY)
fh, err := o.item.Open()
if err != nil {
return nil, err
}
// discard data from start as necessary
if offset > 0 {
_, err = fh.Seek(offset, io.SeekStart)
if err != nil {
return nil, err
}
}
// If limited then don't return everything
if limit >= 0 {
fs.Debugf(nil, "limit=%d, offset=%d, options=%v", limit, offset, options)
return readers.NewLimitedReadCloser(fh, limit), nil
}
return fh, nil
}
// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return vfs.EROFS
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return vfs.EROFS
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
)


@@ -1,385 +0,0 @@
// Package zip implements a zip archiver for the archive backend
package zip
import (
"archive/zip"
"context"
"errors"
"fmt"
"io"
"os"
"path"
"strings"
"time"
"github.com/rclone/rclone/backend/archive/archiver"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/dirtree"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
)
func init() {
archiver.Register(archiver.Archiver{
New: New,
Extension: ".zip",
})
}
// Fs represents a wrapped fs.Fs
type Fs struct {
f fs.Fs
wrapper fs.Fs
name string
features *fs.Features // optional features
vfs *vfs.VFS
node vfs.Node // zip file object - set if reading
remote string // remote of the zip file object
prefix string // position for objects
prefixSlash string // position for objects with a slash on
root string // position to read from within the archive
dt dirtree.DirTree // read from zipfile
}
// New constructs an Fs from the (wrappedFs, remote) with the objects
// prefixed with prefix and rooted at root
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) {
// FIXME vfs cache?
// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
fs.Debugf(nil, "Zip: New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
vfsOpt := vfscommon.Opt
vfsOpt.ReadWait = 0
VFS := vfs.New(wrappedFs, &vfsOpt)
node, err := VFS.Stat(remote)
if err != nil {
return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
}
f := &Fs{
f: wrappedFs,
name: path.Join(fs.ConfigString(wrappedFs), remote),
vfs: VFS,
node: node,
remote: remote,
root: root,
prefix: prefix,
prefixSlash: prefix + "/",
}
// Read the contents of the zip file
singleObject, err := f.readZip()
if err != nil {
return nil, fmt.Errorf("failed to open zip file: %w", err)
}
// FIXME
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
//
// FIXME some of these need to be forced on - CanHaveEmptyDirectories
f.features = (&fs.Features{
CaseInsensitive: false,
DuplicateFiles: false,
ReadMimeType: false, // MimeTypes not supported with zip
WriteMimeType: false,
BucketBased: false,
CanHaveEmptyDirectories: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
if singleObject {
return f, fs.ErrorIsFile
}
return f, nil
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("Zip %q", f.name)
}
// readZip the zip file into f
//
// Returns singleObject=true if f.root points to a file
func (f *Fs) readZip() (singleObject bool, err error) {
if f.node == nil {
return singleObject, fs.ErrorDirNotFound
}
size := f.node.Size()
if size < 0 {
return singleObject, errors.New("can't read from zip file with unknown size")
}
r, err := f.node.Open(os.O_RDONLY)
if err != nil {
return singleObject, fmt.Errorf("failed to open zip file: %w", err)
}
zr, err := zip.NewReader(r, size)
if err != nil {
return singleObject, fmt.Errorf("failed to read zip file: %w", err)
}
dt := dirtree.New()
for _, file := range zr.File {
remote := strings.Trim(path.Clean(file.Name), "/")
if remote == "." {
remote = ""
}
remote = path.Join(f.prefix, remote)
if f.root != "" {
// Ignore all files outside the root
if !strings.HasPrefix(remote, f.root) {
continue
}
if remote == f.root {
remote = ""
} else {
remote = strings.TrimPrefix(remote, f.root+"/")
}
}
if strings.HasSuffix(file.Name, "/") {
dir := fs.NewDir(remote, file.Modified)
dt.AddDir(dir)
} else {
if remote == "" {
remote = path.Base(f.root)
singleObject = true
dt = dirtree.New()
}
o := &Object{
f: f,
remote: remote,
fh: &file.FileHeader,
file: file,
}
dt.Add(o)
if singleObject {
break
}
}
}
dt.CheckParents("")
dt.Sort()
f.dt = dt
//fs.Debugf(nil, "dt = %v", dt)
return singleObject, nil
}
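// Worked example of the root handling above (names invented): reading
// "backup.zip" rooted at "docs", the member "docs/a.txt" is listed as
// "a.txt" and "other/b.txt" is skipped; if the root pointed directly
// at "docs/a.txt" then that file becomes the single object and the
// rest of the archive is ignored.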
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
defer log.Trace(f, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
// _, err = f.strip(dir)
// if err != nil {
// return nil, err
// }
entries, ok := f.dt[dir]
if !ok {
return nil, fs.ErrorDirNotFound
}
fs.Debugf(f, "dir=%q, entries=%v", dir, entries)
return entries, nil
}
// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
defer log.Trace(f, "remote=%q", remote)("obj=%v, err=%v", &o, &err)
if f.dt == nil {
return nil, fs.ErrorObjectNotFound
}
_, entry := f.dt.Find(remote)
if entry == nil {
return nil, fs.ErrorObjectNotFound
}
o, ok := entry.(*Object)
if !ok {
return nil, fs.ErrorNotAFile
}
return o, nil
}
// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
return nil, vfs.EROFS
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.CRC32)
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.f
}
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
return f.wrapper
}
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
f.wrapper = wrapper
}
// Object describes an object to be read from the raw zip file
type Object struct {
f *Fs
remote string
fh *zip.FileHeader
file *zip.File
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.f
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Remote()
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Size returns the size of the file
func (o *Object) Size() int64 {
return int64(o.fh.UncompressedSize64)
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.fh.Modified
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return vfs.EROFS
}
// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
return true
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
if ht == hash.CRC32 {
// FIXME return empty CRC if writing
if o.f.dt == nil {
return "", nil
}
return fmt.Sprintf("%08x", o.fh.CRC32), nil
}
return "", hash.ErrUnsupported
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
rc, err = o.file.Open()
if err != nil {
return nil, err
}
// discard data from start as necessary
if offset > 0 {
_, err = io.CopyN(io.Discard, rc, offset)
if err != nil {
return nil, err
}
}
// If limited then don't return everything
if limit >= 0 {
return readers.NewLimitedReadCloser(rc, limit), nil
}
return rc, nil
}
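// Note on the io.CopyN above: a zip member is a compressed stream, so
// unlike the squashfs backend there is no cheap Seek; satisfying a
// SeekOption or RangeOption means decompressing and discarding the
// first offset bytes before handing the reader back.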
// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return vfs.EROFS
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return vfs.EROFS
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
)

File diff suppressed because it is too large


@@ -1,151 +1,36 @@
-//go:build !plan9 && !solaris && !js
+//go:build !plan9 && !solaris && !js && go1.18
+// +build !plan9,!solaris,!js,go1.18
 
 package azureblob
 
 import (
-    "context"
-    "encoding/base64"
-    "strings"
     "testing"
 
-    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
-    "github.com/rclone/rclone/fs"
-    "github.com/rclone/rclone/fstest"
-    "github.com/rclone/rclone/fstest/fstests"
-    "github.com/rclone/rclone/lib/random"
     "github.com/stretchr/testify/assert"
-    "github.com/stretchr/testify/require"
 )
 
-func TestBlockIDCreator(t *testing.T) {
-    // Check creation and random number
-    bic, err := newBlockIDCreator()
-    require.NoError(t, err)
-    bic2, err := newBlockIDCreator()
-    require.NoError(t, err)
-    assert.NotEqual(t, bic.random, bic2.random)
-    assert.NotEqual(t, bic.random, [8]byte{})
-
-    // Set random to known value for tests
-    bic.random = [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
-    chunkNumber := uint64(0xFEDCBA9876543210)
-
-    // Check creation of ID
-    want := base64.StdEncoding.EncodeToString([]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10, 1, 2, 3, 4, 5, 6, 7, 8})
-    assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", want)
-    got := bic.newBlockID(chunkNumber)
-    assert.Equal(t, want, got)
-    assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", got)
-
-    // Test checkID is working
-    assert.NoError(t, bic.checkID(chunkNumber, got))
-    assert.ErrorContains(t, bic.checkID(chunkNumber, "$"+got), "illegal base64")
-    assert.ErrorContains(t, bic.checkID(chunkNumber, "AAAA"+got), "bad block ID length")
-    assert.ErrorContains(t, bic.checkID(chunkNumber+1, got), "expecting decoded")
-    assert.ErrorContains(t, bic2.checkID(chunkNumber, got), "random bytes")
-}
-
-func (f *Fs) testFeatures(t *testing.T) {
-    // Check first feature flags are set on this remote
+func (f *Fs) InternalTest(t *testing.T) {
+    // Check first feature flags are set on this
+    // remote
     enabled := f.Features().SetTier
     assert.True(t, enabled)
     enabled = f.Features().GetTier
     assert.True(t, enabled)
 }
 
-type ReadSeekCloser struct {
-    *strings.Reader
-}
-
-func (r *ReadSeekCloser) Close() error {
-    return nil
-}
-
-// Stage a block at remote but don't commit it
-func (f *Fs) stageBlockWithoutCommit(ctx context.Context, t *testing.T, remote string) {
-    var (
-        containerName, blobPath = f.split(remote)
-        containerClient         = f.cntSVC(containerName)
-        blobClient              = containerClient.NewBlockBlobClient(blobPath)
-        data                    = "uncommitted data"
-        blockID                 = "1"
-        blockIDBase64           = base64.StdEncoding.EncodeToString([]byte(blockID))
-    )
-    r := &ReadSeekCloser{strings.NewReader(data)}
-    _, err := blobClient.StageBlock(ctx, blockIDBase64, r, nil)
-    require.NoError(t, err)
-
-    // Verify the block is staged but not committed
-    blockList, err := blobClient.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
-    require.NoError(t, err)
-    found := false
-    for _, block := range blockList.UncommittedBlocks {
-        if *block.Name == blockIDBase64 {
-            found = true
-            break
-        }
-    }
-    require.True(t, found, "Block ID not found in uncommitted blocks")
-}
-
-// This tests uploading a blob where it has uncommitted blocks with a different ID size.
-//
-// https://gauravmantri.com/2013/05/18/windows-azure-blob-storage-dealing-with-the-specified-blob-or-block-content-is-invalid-error/
-//
-// TestIntegration/FsMkdir/FsPutFiles/Internal/WriteUncommittedBlocks
-func (f *Fs) testWriteUncommittedBlocks(t *testing.T) {
-    var (
-        ctx    = context.Background()
-        remote = "testBlob"
-    )
-    // Multipart copy the blob please
-    oldUseCopyBlob, oldCopyCutoff := f.opt.UseCopyBlob, f.opt.CopyCutoff
-    f.opt.UseCopyBlob = false
-    f.opt.CopyCutoff = f.opt.ChunkSize
-    defer func() {
-        f.opt.UseCopyBlob, f.opt.CopyCutoff = oldUseCopyBlob, oldCopyCutoff
-    }()
-    // Create a blob with uncommitted blocks
-    f.stageBlockWithoutCommit(ctx, t, remote)
-    // Now attempt to overwrite the block with a different sized block ID to provoke this error
-    // Check the object does not exist
-    _, err := f.NewObject(ctx, remote)
-    require.Equal(t, fs.ErrorObjectNotFound, err)
-    // Upload a multipart file over the block with uncommitted chunks of a different ID size
-    size := 4*int(f.opt.ChunkSize) - 1
-    contents := random.String(size)
-    item := fstest.NewItem(remote, contents, fstest.Time("2001-05-06T04:05:06.499Z"))
-    o := fstests.PutTestContents(ctx, t, f, &item, contents, true)
-    // Check size
-    assert.Equal(t, int64(size), o.Size())
-    // Create a new blob with uncommitted blocks
-    newRemote := "testBlob2"
-    f.stageBlockWithoutCommit(ctx, t, newRemote)
-    // Copy over that block
-    dst, err := f.Copy(ctx, o, newRemote)
-    require.NoError(t, err)
-    // Check basics
-    assert.Equal(t, int64(size), dst.Size())
-    assert.Equal(t, newRemote, dst.Remote())
-    // Check contents
-    gotContents := fstests.ReadObject(ctx, t, dst, -1)
-    assert.Equal(t, contents, gotContents)
-    // Remove the object
-    require.NoError(t, dst.Remove(ctx))
-}
-
-func (f *Fs) InternalTest(t *testing.T) {
-    t.Run("Features", f.testFeatures)
-    t.Run("WriteUncommittedBlocks", f.testWriteUncommittedBlocks)
-}
+func TestIncrement(t *testing.T) {
+    for _, test := range []struct {
+        in   []byte
+        want []byte
+    }{
+        {[]byte{0, 0, 0, 0}, []byte{1, 0, 0, 0}},
+        {[]byte{0xFE, 0, 0, 0}, []byte{0xFF, 0, 0, 0}},
+        {[]byte{0xFF, 0, 0, 0}, []byte{0, 1, 0, 0}},
+        {[]byte{0, 1, 0, 0}, []byte{1, 1, 0, 0}},
+        {[]byte{0xFF, 0xFF, 0xFF, 0xFE}, []byte{0, 0, 0, 0xFF}},
+        {[]byte{0xFF, 0xFF, 0xFF, 0xFF}, []byte{0, 0, 0, 0}},
+    } {
+        increment(test.in)
+        assert.Equal(t, test.want, test.in)
+    }
+}
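// For reference, a minimal sketch of the increment helper exercised by
// TestIncrement above (inferred from the test cases, not code shown in
// this diff): the slice is treated as a little-endian counter and the
// loop stops as soon as a byte doesn't wrap:
//
//	func increment(xs []byte) {
//		for i, digit := range xs {
//			newDigit := digit + 1
//			xs[i] = newDigit
//			if newDigit >= digit {
//				break // no carry into the next byte
//			}
//		}
//	}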


@@ -1,6 +1,7 @@
 // Test AzureBlob filesystem interface
 
-//go:build !plan9 && !solaris && !js
+//go:build !plan9 && !solaris && !js && go1.18
+// +build !plan9,!solaris,!js,go1.18
 
 package azureblob
@@ -8,44 +9,19 @@ import (
"testing" "testing"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) { func TestIntegration(t *testing.T) {
name := "TestAzureBlob"
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
RemoteName: name + ":", RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil), NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool", "Cold"}, TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{ ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize, MinChunkSize: defaultChunkSize,
}, },
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "use_copy_blob", Value: "false"},
},
})
}
// TestIntegration2 runs integration tests against the remote
func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
name := "TestAzureBlob"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool", "Cold"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize,
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "directory_markers", Value: "true"},
{Name: name, Key: "use_copy_blob", Value: "false"},
},
}) })
} }
@@ -53,13 +29,8 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
     return f.setUploadChunkSize(cs)
 }
 
-func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
-    return f.setCopyCutoff(cs)
-}
-
 var (
     _ fstests.SetUploadChunkSizer = (*Fs)(nil)
-    _ fstests.SetCopyCutoffer     = (*Fs)(nil)
 )
 
 func TestValidateAccessTier(t *testing.T) {
@@ -71,7 +42,6 @@ func TestValidateAccessTier(t *testing.T) {
"HOT": {"HOT", true}, "HOT": {"HOT", true},
"Hot": {"Hot", true}, "Hot": {"Hot", true},
"cool": {"cool", true}, "cool": {"cool", true},
"cold": {"cold", true},
"archive": {"archive", true}, "archive": {"archive", true},
"empty": {"", false}, "empty": {"", false},
"unknown": {"unknown", false}, "unknown": {"unknown", false},


@@ -1,7 +1,7 @@
 // Build for azureblob for unsupported platforms to stop go complaining
 // about "no buildable Go source files "
 
-//go:build plan9 || solaris || js
+//go:build plan9 || solaris || js || !go1.18
+// +build plan9 solaris js !go1.18
 
-// Package azureblob provides an interface to the Microsoft Azure blob object storage system
 package azureblob

File diff suppressed because it is too large


@@ -1,69 +0,0 @@
//go:build !plan9 && !js
package azurefiles
import (
"context"
"math/rand"
"strings"
"testing"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
)
func (f *Fs) InternalTest(t *testing.T) {
t.Run("Authentication", f.InternalTestAuth)
}
var _ fstests.InternalTester = (*Fs)(nil)
func (f *Fs) InternalTestAuth(t *testing.T) {
t.Skip("skipping since this requires authentication credentials which are not part of repo")
shareName := "test-rclone-oct-2023"
testCases := []struct {
name string
options *Options
}{
{
name: "ConnectionString",
options: &Options{
ShareName: shareName,
ConnectionString: "",
},
},
{
name: "AccountAndKey",
options: &Options{
ShareName: shareName,
Account: "",
Key: "",
}},
{
name: "SASUrl",
options: &Options{
ShareName: shareName,
SASURL: "",
}},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
fs, err := newFsFromOptions(context.TODO(), "TestAzureFiles", "", tc.options)
assert.NoError(t, err)
dirName := randomString(10)
assert.NoError(t, fs.Mkdir(context.TODO(), dirName))
})
}
}
const chars = "abcdefghijklmnopqrstuvwzyxABCDEFGHIJKLMNOPQRSTUVWZYX"
func randomString(charCount int) string {
strBldr := strings.Builder{}
for range charCount {
randPos := rand.Int63n(52)
strBldr.WriteByte(chars[randPos])
}
return strBldr.String()
}


@@ -1,17 +0,0 @@
//go:build !plan9 && !js
package azurefiles
import (
"testing"
"github.com/rclone/rclone/fstest/fstests"
)
func TestIntegration(t *testing.T) {
var objPtr *Object
fstests.Run(t, &fstests.Opt{
RemoteName: "TestAzureFiles:",
NilObject: objPtr,
})
}


@@ -1,7 +0,0 @@
// Build for azurefiles for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || js
// Package azurefiles provides an interface to Microsoft Azure Files
package azurefiles


@@ -33,19 +33,10 @@ var _ fserrors.Fataler = (*Error)(nil)
 // Bucket describes a B2 bucket
 type Bucket struct {
     ID        string `json:"bucketId"`
     AccountID string `json:"accountId"`
     Name      string `json:"bucketName"`
     Type      string `json:"bucketType"`
-    LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
-}
-
-// LifecycleRule is a single lifecycle rule
-type LifecycleRule struct {
-    DaysFromHidingToDeleting                        *int   `json:"daysFromHidingToDeleting"`
-    DaysFromUploadingToHiding                       *int   `json:"daysFromUploadingToHiding"`
-    DaysFromStartingToCancelingUnfinishedLargeFiles *int   `json:"daysFromStartingToCancelingUnfinishedLargeFiles"`
-    FileNamePrefix                                  string `json:"fileNamePrefix"`
 }
 
 // Timestamp is a UTC time when this file was uploaded. It is a base
@@ -130,10 +121,10 @@ type AuthorizeAccountResponse struct {
     AbsoluteMinimumPartSize int    `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
     AccountID               string `json:"accountId"`               // The identifier for the account.
     Allowed                 struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
         BucketID     string   `json:"bucketId"`     // When present, access is restricted to one bucket.
         BucketName   string   `json:"bucketName"`   // When present, name of bucket - may be empty
         Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
-        NamePrefix   any      `json:"namePrefix"`   // When present, access is restricted to files whose names start with the prefix
+        NamePrefix   interface{} `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
     } `json:"allowed"`
     APIURL             string `json:"apiUrl"`             // The base URL to use for all API calls except for uploading and downloading files.
     AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
@@ -215,10 +206,9 @@ type FileInfo struct {
 // CreateBucketRequest is used to create a bucket
 type CreateBucketRequest struct {
     AccountID string `json:"accountId"`
     Name      string `json:"bucketName"`
     Type      string `json:"bucketType"`
-    LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
 }
 
 // DeleteBucketRequest is used to create a bucket
@@ -341,11 +331,3 @@ type CopyPartRequest struct {
     PartNumber int64  `json:"partNumber"`      // Which part this is (starting from 1)
     Range      string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
 }
-
-// UpdateBucketRequest describes a request to modify a B2 bucket
-type UpdateBucketRequest struct {
-    ID             string          `json:"bucketId"`
-    AccountID      string          `json:"accountId"`
-    Type           string          `json:"bucketType,omitempty"`
-    LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
-}
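// Illustrative JSON for the removed UpdateBucketRequest above (all
// values invented), as it would be posted to b2_update_bucket:
//
//	{
//	  "bucketId": "4a48fe8875c6214145260818",
//	  "accountId": "010203040506",
//	  "lifecycleRules": [
//	    {"daysFromHidingToDeleting": 30, "fileNamePrefix": "backup/"}
//	  ]
//	}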


@@ -42,11 +42,11 @@ func TestTimestampIsZero(t *testing.T) {
 }
 
 func TestTimestampEqual(t *testing.T) {
-    assert.False(t, emptyT.Equal(emptyT)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
+    assert.False(t, emptyT.Equal(emptyT))
     assert.False(t, t0.Equal(emptyT))
     assert.False(t, emptyT.Equal(t0))
     assert.False(t, t0.Equal(t1))
     assert.False(t, t1.Equal(t0))
-    assert.True(t, t0.Equal(t0)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
-    assert.True(t, t1.Equal(t1)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
+    assert.True(t, t0.Equal(t0))
+    assert.True(t, t1.Equal(t1))
 }

File diff suppressed because it is too large


@@ -1,31 +1,14 @@
 package b2
 
 import (
-    "context"
-    "crypto/sha1"
-    "fmt"
-    "path"
-    "sort"
-    "strings"
     "testing"
     "time"
 
-    "github.com/rclone/rclone/backend/b2/api"
-    "github.com/rclone/rclone/fs"
-    "github.com/rclone/rclone/fs/cache"
-    "github.com/rclone/rclone/fs/hash"
-    "github.com/rclone/rclone/fs/object"
     "github.com/rclone/rclone/fstest"
-    "github.com/rclone/rclone/fstest/fstests"
-    "github.com/rclone/rclone/lib/bucket"
-    "github.com/rclone/rclone/lib/random"
-    "github.com/rclone/rclone/lib/version"
-    "github.com/stretchr/testify/assert"
-    "github.com/stretchr/testify/require"
 )
 
 // Test b2 string encoding
-// https://www.backblaze.com/docs/cloud-storage-native-api-string-encoding
+// https://www.backblaze.com/b2/docs/string_encoding.html
 
 var encodeTest = []struct {
     fullyEncoded string
@@ -185,435 +168,3 @@ func TestParseTimeString(t *testing.T) {
     }
 }
// Return a map of the headers in the options with keys stripped of the "x-bz-info-" prefix
func OpenOptionToMetaData(options []fs.OpenOption) map[string]string {
var headers = make(map[string]string)
for _, option := range options {
k, v := option.Header()
k = strings.ToLower(k)
if strings.HasPrefix(k, headerPrefix) {
headers[k[len(headerPrefix):]] = v
}
}
return headers
}
func (f *Fs) internalTestMetadata(t *testing.T, size string, uploadCutoff string, chunkSize string) {
what := fmt.Sprintf("Size%s/UploadCutoff%s/ChunkSize%s", size, uploadCutoff, chunkSize)
t.Run(what, func(t *testing.T) {
ctx := context.Background()
ss := fs.SizeSuffix(0)
err := ss.Set(size)
require.NoError(t, err)
original := random.String(int(ss))
contents := fstest.Gz(t, original)
mimeType := "text/html"
if chunkSize != "" {
ss := fs.SizeSuffix(0)
err := ss.Set(chunkSize)
require.NoError(t, err)
_, err = f.SetUploadChunkSize(ss)
require.NoError(t, err)
}
if uploadCutoff != "" {
ss := fs.SizeSuffix(0)
err := ss.Set(uploadCutoff)
require.NoError(t, err)
_, err = f.SetUploadCutoff(ss)
require.NoError(t, err)
}
item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499Z"))
btime := time.Now()
metadata := fs.Metadata{
// Just mtime for now - limit to milliseconds since x-bz-info-src_last_modified_millis can't support any more precision
"mtime": "2009-05-06T04:05:06.499Z",
}
// Need to specify HTTP options with the header prefix since they are passed as-is
options := []fs.OpenOption{
&fs.HTTPOption{Key: "X-Bz-Info-a", Value: "1"},
&fs.HTTPOption{Key: "X-Bz-Info-b", Value: "2"},
}
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, mimeType, metadata, options...)
defer func() {
assert.NoError(t, obj.Remove(ctx))
}()
o := obj.(*Object)
gotMetadata, err := o.getMetaData(ctx)
require.NoError(t, err)
// X-Bz-Info-a & X-Bz-Info-b
optMetadata := OpenOptionToMetaData(options)
for k, v := range optMetadata {
got := gotMetadata.Info[k]
assert.Equal(t, v, got, k)
}
assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")
// Modification time from the x-bz-info-src_last_modified_millis header
var mtime api.Timestamp
err = mtime.UnmarshalJSON([]byte(gotMetadata.Info[timeKey]))
if err != nil {
fs.Debugf(o, "Bad "+timeHeader+" header: %v", err)
}
assert.Equal(t, item.ModTime, time.Time(mtime), "Modification time")
// Upload time
gotBtime := time.Time(gotMetadata.UploadTimestamp)
dt := gotBtime.Sub(btime)
assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("btime more than 1 minute out want %v got %v delta %v", btime, gotBtime, dt))
t.Run("GzipEncoding", func(t *testing.T) {
// Test that the gzipped file we uploaded can be
// downloaded
checkDownload := func(wantContents string, wantSize int64, wantHash string) {
gotContents := fstests.ReadObject(ctx, t, o, -1)
assert.Equal(t, wantContents, gotContents)
assert.Equal(t, wantSize, o.Size())
gotHash, err := o.Hash(ctx, hash.SHA1)
require.NoError(t, err)
assert.Equal(t, wantHash, gotHash)
}
t.Run("NoDecompress", func(t *testing.T) {
checkDownload(contents, int64(len(contents)), sha1Sum(t, contents))
})
})
})
}
func (f *Fs) InternalTestMetadata(t *testing.T) {
// 1 kB regular file
f.internalTestMetadata(t, "1kiB", "", "")
// 10 MiB large file
f.internalTestMetadata(t, "10MiB", "6MiB", "6MiB")
}
func sha1Sum(t *testing.T, s string) string {
hash := sha1.Sum([]byte(s))
return fmt.Sprintf("%x", hash)
}
// This is adapted from the s3 equivalent.
func (f *Fs) InternalTestVersions(t *testing.T) {
ctx := context.Background()
// Small pause to make the LastModified different since AWS
// only seems to track them to 1 second granularity
time.Sleep(2 * time.Second)
// Create an object
const dirName = "versions"
const fileName = dirName + "/" + "test-versions.txt"
contents := random.String(100)
item := fstest.NewItem(fileName, contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
defer func() {
assert.NoError(t, obj.Remove(ctx))
}()
objMetadata, err := obj.(*Object).getMetaData(ctx)
require.NoError(t, err)
// Small pause
time.Sleep(2 * time.Second)
// Remove it
assert.NoError(t, obj.Remove(ctx))
// Small pause to make the LastModified different since AWS only seems to track them to 1 second granularity
time.Sleep(2 * time.Second)
// And create it with different size and contents
newContents := random.String(101)
newItem := fstest.NewItem(fileName, newContents, fstest.Time("2002-05-06T04:05:06.499999999Z"))
newObj := fstests.PutTestContents(ctx, t, f, &newItem, newContents, true)
newObjMetadata, err := newObj.(*Object).getMetaData(ctx)
require.NoError(t, err)
t.Run("Versions", func(t *testing.T) {
// Set --b2-versions for this test
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
// Read the contents
entries, err := f.List(ctx, dirName)
require.NoError(t, err)
tests := 0
var fileNameVersion string
for _, entry := range entries {
t.Log(entry)
remote := entry.Remote()
if remote == fileName {
t.Run("ReadCurrent", func(t *testing.T) {
assert.Equal(t, newContents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1))
})
tests++
} else if versionTime, p := version.Remove(remote); !versionTime.IsZero() && p == fileName {
t.Run("ReadVersion", func(t *testing.T) {
assert.Equal(t, contents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1))
})
assert.WithinDuration(t, time.Time(objMetadata.UploadTimestamp), versionTime, time.Second, "object time must be within 1 second of version time")
fileNameVersion = remote
tests++
}
}
assert.Equal(t, 2, tests, "object missing from listing")
// Check we can read the object with a version suffix
t.Run("NewObject", func(t *testing.T) {
o, err := f.NewObject(ctx, fileNameVersion)
require.NoError(t, err)
require.NotNil(t, o)
assert.Equal(t, int64(100), o.Size(), o.Remote())
})
// Check we can make a NewFs from that object with a version suffix
t.Run("NewFs", func(t *testing.T) {
newPath := bucket.Join(fs.ConfigStringFull(f), fileNameVersion)
// Make sure --b2-versions is set in the config of the new remote
fs.Debugf(nil, "oldPath = %q", newPath)
lastColon := strings.LastIndex(newPath, ":")
require.True(t, lastColon >= 0)
newPath = newPath[:lastColon] + ",versions" + newPath[lastColon:]
fs.Debugf(nil, "newPath = %q", newPath)
fNew, err := cache.Get(ctx, newPath)
// This should return pointing to a file
require.Equal(t, fs.ErrorIsFile, err)
require.NotNil(t, fNew)
// With the directory above
assert.Equal(t, dirName, path.Base(fs.ConfigStringFull(fNew)))
})
})
t.Run("VersionAt", func(t *testing.T) {
// We set --b2-version-at for this test so make sure we reset it at the end
defer func() {
f.opt.VersionAt = fs.Time{}
}()
var (
firstObjectTime = time.Time(objMetadata.UploadTimestamp)
secondObjectTime = time.Time(newObjMetadata.UploadTimestamp)
)
for _, test := range []struct {
what string
at time.Time
want []fstest.Item
wantErr error
wantSize int64
}{
{
what: "Before",
at: firstObjectTime.Add(-time.Second),
want: fstests.InternalTestFiles,
wantErr: fs.ErrorObjectNotFound,
},
{
what: "AfterOne",
at: firstObjectTime.Add(time.Second),
want: append([]fstest.Item{item}, fstests.InternalTestFiles...),
wantSize: 100,
},
{
what: "AfterDelete",
at: secondObjectTime.Add(-time.Second),
want: fstests.InternalTestFiles,
wantErr: fs.ErrorObjectNotFound,
},
{
what: "AfterTwo",
at: secondObjectTime.Add(time.Second),
want: append([]fstest.Item{newItem}, fstests.InternalTestFiles...),
wantSize: 101,
},
} {
t.Run(test.what, func(t *testing.T) {
f.opt.VersionAt = fs.Time(test.at)
t.Run("List", func(t *testing.T) {
fstest.CheckListing(t, f, test.want)
})
t.Run("NewObject", func(t *testing.T) {
gotObj, gotErr := f.NewObject(ctx, fileName)
assert.Equal(t, test.wantErr, gotErr)
if gotErr == nil {
assert.Equal(t, test.wantSize, gotObj.Size())
}
})
})
}
})
t.Run("Cleanup", func(t *testing.T) {
t.Run("DryRun", func(t *testing.T) {
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
// Listing should be unchanged after dry run
before := listAllFiles(ctx, t, f, dirName)
ctx, ci := fs.AddConfig(ctx)
ci.DryRun = true
require.NoError(t, f.cleanUp(ctx, true, false, 0))
after := listAllFiles(ctx, t, f, dirName)
assert.Equal(t, before, after)
})
t.Run("RealThing", func(t *testing.T) {
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
// Listing should reflect current state after cleanup
require.NoError(t, f.cleanUp(ctx, true, false, 0))
items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
fstest.CheckListing(t, f, items)
})
})
// Purge gets tested later
}
func (f *Fs) InternalTestCleanupUnfinished(t *testing.T) {
ctx := context.Background()
// CleanupUnfinished tests cleaning up unfinished large file uploads
t.Run("CleanupUnfinished", func(t *testing.T) {
dirName := "unfinished"
fileCount := 5
expectedFiles := []string{}
for i := 1; i < fileCount; i++ {
fileName := fmt.Sprintf("%s/unfinished-%d", dirName, i)
expectedFiles = append(expectedFiles, fileName)
obj := &Object{
fs: f,
remote: fileName,
}
objInfo := object.NewStaticObjectInfo(fileName, fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil)
_, err := f.newLargeUpload(ctx, obj, nil, objInfo, f.opt.ChunkSize, false, nil)
require.NoError(t, err)
}
checkListing(ctx, t, f, dirName, expectedFiles)
t.Run("DryRun", func(t *testing.T) {
// Listing should not change after dry run
ctx, ci := fs.AddConfig(ctx)
ci.DryRun = true
require.NoError(t, f.cleanUp(ctx, false, true, 0))
checkListing(ctx, t, f, dirName, expectedFiles)
})
t.Run("RealThing", func(t *testing.T) {
// Listing should be empty after real cleanup
require.NoError(t, f.cleanUp(ctx, false, true, 0))
checkListing(ctx, t, f, dirName, []string{})
})
})
}
func listAllFiles(ctx context.Context, t *testing.T, f *Fs, dirName string) []string {
bucket, directory := f.split(dirName)
foundFiles := []string{}
require.NoError(t, f.list(ctx, bucket, directory, "", false, true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
if !isDirectory {
foundFiles = append(foundFiles, object.Name)
}
return nil
}))
sort.Strings(foundFiles)
return foundFiles
}
func checkListing(ctx context.Context, t *testing.T, f *Fs, dirName string, expectedFiles []string) {
foundFiles := listAllFiles(ctx, t, f, dirName)
sort.Strings(expectedFiles)
assert.Equal(t, expectedFiles, foundFiles)
}
func (f *Fs) InternalTestLifecycleRules(t *testing.T) {
ctx := context.Background()
opt := map[string]string{}
t.Run("InitState", func(t *testing.T) {
// There should be no lifecycle rules at the outset
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
})
t.Run("DryRun", func(t *testing.T) {
// There should still be no lifecycle rules after each dry run operation
ctx, ci := fs.AddConfig(ctx)
ci.DryRun = true
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
delete(opt, "daysFromHidingToDeleting")
opt["daysFromUploadingToHiding"] = "40"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
})
t.Run("RealThing", func(t *testing.T) {
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
delete(opt, "daysFromHidingToDeleting")
opt["daysFromUploadingToHiding"] = "40"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
})
}
// -run TestIntegration/FsMkdir/FsPutFiles/Internal
func (f *Fs) InternalTest(t *testing.T) {
t.Run("Metadata", f.InternalTestMetadata)
t.Run("Versions", f.InternalTestVersions)
t.Run("CleanupUnfinished", f.InternalTestCleanupUnfinished)
t.Run("LifecycleRules", f.InternalTestLifecycleRules)
}
var _ fstests.InternalTester = (*Fs)(nil)


@@ -28,12 +28,7 @@ func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
 	return f.setUploadCutoff(cs)
 }
 
-func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
-	return f.setCopyCutoff(cs)
-}
-
 var (
 	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
 	_ fstests.SetUploadCutoffer   = (*Fs)(nil)
-	_ fstests.SetCopyCutoffer     = (*Fs)(nil)
 )
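The assertions removed here use Go's standard compile-time interface check. A minimal self-contained sketch of the idiom, with stand-in names rather than rclone's:

	package main

	// Sizer stands in for an fstests interface in this sketch.
	type Sizer interface{ Size() int64 }

	type file struct{ n int64 }

	func (f *file) Size() int64 { return f.n }

	// Compiles only while *file satisfies Sizer; the nil pointer
	// conversion allocates nothing and costs nothing at runtime.
	var _ Sizer = (*file)(nil)

	func main() {}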

View File

@@ -1,10 +1,11 @@
 // Upload large files for b2
 //
-// Docs - https://www.backblaze.com/docs/cloud-storage-large-files
+// Docs - https://www.backblaze.com/b2/docs/large_files.html
 
 package b2
 
 import (
+	"bytes"
 	"context"
 	"crypto/sha1"
 	"encoding/hex"
@@ -20,7 +21,6 @@ import (
"github.com/rclone/rclone/fs/chunksize" "github.com/rclone/rclone/fs/chunksize"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit" "github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/rest" "github.com/rclone/rclone/lib/rest"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
@@ -78,31 +78,36 @@ type largeUpload struct {
 	wrap      accounting.WrapFn               // account parts being transferred
 	id        string                          // ID of the file being uploaded
 	size      int64                           // total size
-	parts     int                             // calculated number of parts, if known
-	sha1smu   sync.Mutex                      // mutex to protect sha1s
+	parts     int64                           // calculated number of parts, if known
 	sha1s     []string                        // slice of SHA1s for each part
 	uploadMu  sync.Mutex                      // lock for upload variable
 	uploads   []*api.GetUploadPartURLResponse // result of get upload URL calls
 	chunkSize int64                           // chunk size to use
 	src       *Object                         // if copying, object we are reading from
-	info      *api.FileInfo                   // final response with info about the object
 }
 
 // newLargeUpload starts an upload of object o from in with metadata in src
 //
 // If newInfo is set then metadata from that will be used instead of reading it from src
-func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File, options ...fs.OpenOption) (up *largeUpload, err error) {
+func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
 	size := src.Size()
-	parts := 0
+	parts := int64(0)
+	sha1SliceSize := int64(maxParts)
 	chunkSize := defaultChunkSize
 	if size == -1 {
 		fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
 	} else {
 		chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
-		parts = int(size / int64(chunkSize))
+		parts = size / int64(chunkSize)
 		if size%int64(chunkSize) != 0 {
 			parts++
 		}
+		sha1SliceSize = parts
 	}
+
+	opts := rest.Opts{
+		Method: "POST",
+		Path:   "/b2_start_large_file",
+	}
 
 	bucket, bucketPath := o.split()
 	bucketID, err := f.getBucketID(ctx, bucket)
@@ -113,27 +118,12 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 		BucketID: bucketID,
 		Name:     f.opt.Enc.FromStandardPath(bucketPath),
 	}
-	optionsToSend := make([]fs.OpenOption, 0, len(options))
 	if newInfo == nil {
-		modTime, err := o.getModTime(ctx, src, options)
-		if err != nil {
-			return nil, err
-		}
+		modTime := src.ModTime(ctx)
 		request.ContentType = fs.MimeType(ctx, src)
 		request.Info = map[string]string{
 			timeKey: timeString(modTime),
 		}
-		// Custom upload headers - remove header prefix since they are sent in the body
-		for _, option := range options {
-			k, v := option.Header()
-			k = strings.ToLower(k)
-			if strings.HasPrefix(k, headerPrefix) {
-				request.Info[k[len(headerPrefix):]] = v
-			} else {
-				optionsToSend = append(optionsToSend, option)
-			}
-		}
 		// Set the SHA1 if known
 		if !o.fs.opt.DisableCheckSum || doCopy {
 			if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
@@ -144,11 +134,6 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 		request.ContentType = newInfo.ContentType
 		request.Info = newInfo.Info
 	}
-	opts := rest.Opts{
-		Method:  "POST",
-		Path:    "/b2_start_large_file",
-		Options: optionsToSend,
-	}
 	var response api.StartLargeFileResponse
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
@@ -165,7 +150,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 		id:        response.ID,
 		size:      size,
 		parts:     parts,
-		sha1s:     make([]string, 0, 16),
+		sha1s:     make([]string, sha1SliceSize),
 		chunkSize: int64(chunkSize),
 	}
 	// unwrap the accounting from the input, we use wrap to put it
@@ -184,26 +169,24 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 // This should be returned with returnUploadURL when finished
 func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
 	up.uploadMu.Lock()
-	if len(up.uploads) > 0 {
+	defer up.uploadMu.Unlock()
+	if len(up.uploads) == 0 {
+		opts := rest.Opts{
+			Method: "POST",
+			Path:   "/b2_get_upload_part_url",
+		}
+		var request = api.GetUploadPartURLRequest{
+			ID: up.id,
+		}
+		err := up.f.pacer.Call(func() (bool, error) {
+			resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
+			return up.f.shouldRetry(ctx, resp, err)
+		})
+		if err != nil {
+			return nil, fmt.Errorf("failed to get upload URL: %w", err)
+		}
+	} else {
 		upload, up.uploads = up.uploads[0], up.uploads[1:]
-		up.uploadMu.Unlock()
-		return upload, nil
-	}
-	up.uploadMu.Unlock()
-	opts := rest.Opts{
-		Method: "POST",
-		Path:   "/b2_get_upload_part_url",
-	}
-	var request = api.GetUploadPartURLRequest{
-		ID: up.id,
-	}
-	err = up.f.pacer.Call(func() (bool, error) {
-		resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
-		return up.f.shouldRetry(ctx, resp, err)
-	})
-	if err != nil {
-		return nil, fmt.Errorf("failed to get upload URL: %w", err)
 	}
 	return upload, nil
 }
@@ -218,39 +201,10 @@ func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
 	up.uploadMu.Unlock()
 }
 
-// Add an sha1 to the being built up sha1s
-func (up *largeUpload) addSha1(chunkNumber int, sha1 string) {
-	up.sha1smu.Lock()
-	defer up.sha1smu.Unlock()
-	if len(up.sha1s) < chunkNumber+1 {
-		up.sha1s = append(up.sha1s, make([]string, chunkNumber+1-len(up.sha1s))...)
-	}
-	up.sha1s[chunkNumber] = sha1
-}
-
-// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
-func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (size int64, err error) {
-	// Only account after the checksum reads have been done
-	if do, ok := reader.(pool.DelayAccountinger); ok {
-		// To figure out this number, do a transfer and if the accounted size is 0 or a
-		// multiple of what it should be, increase or decrease this number.
-		do.DelayAccounting(1)
-	}
-	err = up.f.pacer.Call(func() (bool, error) {
-		// Discover the size by seeking to the end
-		size, err = reader.Seek(0, io.SeekEnd)
-		if err != nil {
-			return false, err
-		}
-		// rewind the reader on retry and after reading size
-		_, err = reader.Seek(0, io.SeekStart)
-		if err != nil {
-			return false, err
-		}
-		fs.Debugf(up.o, "Sending chunk %d length %d", chunkNumber, size)
+// Transfer a chunk
+func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
+	err := up.f.pacer.Call(func() (bool, error) {
+		fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
 
 		// Get upload URL
 		upload, err := up.getUploadURL(ctx)
@@ -258,8 +212,8 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
 			return false, err
 		}
 
-		in := newHashAppendingReader(reader, sha1.New())
-		sizeWithHash := size + int64(in.AdditionalLength())
+		in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
+		size := int64(len(body)) + int64(in.AdditionalLength())
 
 		// Authorization
 		//
@@ -289,10 +243,10 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
 			Body: up.wrap(in),
 			ExtraHeaders: map[string]string{
 				"Authorization":    upload.AuthorizationToken,
-				"X-Bz-Part-Number": fmt.Sprintf("%d", chunkNumber+1),
+				"X-Bz-Part-Number": fmt.Sprintf("%d", part),
 				sha1Header:         "hex_digits_at_end",
 			},
-			ContentLength: &sizeWithHash,
+			ContentLength: &size,
 		}
 
 		var response api.UploadPartResponse
@@ -300,7 +254,7 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
 		resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
 		retry, err := up.f.shouldRetry(ctx, resp, err)
 		if err != nil {
-			fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", chunkNumber, retry, err, err)
+			fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
 		}
 		// On retryable error clear PartUploadURL
 		if retry {
@@ -308,30 +262,30 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
 			upload = nil
 		}
 		up.returnUploadURL(upload)
-		up.addSha1(chunkNumber, in.HexSum())
+		up.sha1s[part-1] = in.HexSum()
 		return retry, err
 	})
 	if err != nil {
-		fs.Debugf(up.o, "Error sending chunk %d: %v", chunkNumber, err)
+		fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
 	} else {
-		fs.Debugf(up.o, "Done sending chunk %d", chunkNumber)
+		fs.Debugf(up.o, "Done sending chunk %d", part)
 	}
-	return size, err
+	return err
 }
 
 // Copy a chunk
-func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64) error {
+func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64) error {
 	err := up.f.pacer.Call(func() (bool, error) {
 		fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize)
 		opts := rest.Opts{
 			Method: "POST",
 			Path:   "/b2_copy_part",
 		}
-		offset := int64(part) * up.chunkSize // where we are in the source file
+		offset := (part - 1) * up.chunkSize // where we are in the source file
 		var request = api.CopyPartRequest{
 			SourceID:    up.src.id,
 			LargeFileID: up.id,
-			PartNumber:  int64(part + 1),
+			PartNumber:  part,
 			Range:       fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
 		}
 		var response api.UploadPartResponse
@@ -340,7 +294,7 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
 		if err != nil {
 			fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err)
 		}
-		up.addSha1(part, response.SHA1)
+		up.sha1s[part-1] = response.SHA1
 		return retry, err
 	})
 	if err != nil {
@@ -351,8 +305,8 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
 	return err
 }
 
-// Close closes off the large upload
-func (up *largeUpload) Close(ctx context.Context) error {
+// finish closes off the large upload
+func (up *largeUpload) finish(ctx context.Context) error {
 	fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts)
 	opts := rest.Opts{
 		Method: "POST",
@@ -370,12 +324,11 @@ func (up *largeUpload) Close(ctx context.Context) error {
 	if err != nil {
 		return err
 	}
-	up.info = &response
-	return nil
+	return up.o.decodeMetaDataFileInfo(&response)
 }
 
-// Abort aborts the large upload
-func (up *largeUpload) Abort(ctx context.Context) error {
+// cancel aborts the large upload
+func (up *largeUpload) cancel(ctx context.Context) error {
 	fs.Debugf(up.o, "Cancelling large file %s", up.what)
 	opts := rest.Opts{
 		Method: "POST",
@@ -400,102 +353,128 @@ func (up *largeUpload) Abort(ctx context.Context) error {
 // reaches EOF.
 //
 // Note that initialUploadBlock must be returned to f.putBuf()
-func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock *pool.RW) (err error) {
-	defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
+func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
+	defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
 	fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
 	var (
 		g, gCtx      = errgroup.WithContext(ctx)
 		hasMoreParts = true
 	)
-	up.size = initialUploadBlock.Size()
-	up.parts = 0
-	for part := 0; hasMoreParts; part++ {
-		// Get a block of memory from the pool and token which limits concurrency.
-		var rw *pool.RW
-		if part == 0 {
-			rw = initialUploadBlock
-		} else {
-			rw = up.f.getRW(false)
-		}
-		// Fail fast, in case an errgroup managed function returns an error
-		// gCtx is cancelled. There is no point in uploading all the other parts.
-		if gCtx.Err() != nil {
-			up.f.putRW(rw)
-			break
-		}
-		// Read the chunk
-		var n int64
-		if part == 0 {
-			n = rw.Size()
-		} else {
-			n, err = io.CopyN(rw, up.in, up.chunkSize)
-			if err == io.EOF {
-				if n == 0 {
-					fs.Debugf(up.o, "Not sending empty chunk after EOF - ending.")
-					up.f.putRW(rw)
-					break
-				} else {
-					fs.Debugf(up.o, "Read less than a full chunk %d, making this the last one.", n)
-				}
-				hasMoreParts = false
-			} else if err != nil {
-				// other kinds of errors indicate failure
-				up.f.putRW(rw)
-				return err
-			}
-		}
-		// Keep stats up to date
-		up.parts += 1
-		up.size += n
-		if part > maxParts {
-			up.f.putRW(rw)
-			return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
-		}
-		part := part // for the closure
-		g.Go(func() (err error) {
-			defer up.f.putRW(rw)
-			_, err = up.WriteChunk(gCtx, part, rw)
-			return err
-		})
-	}
+	up.size = int64(len(initialUploadBlock))
+	g.Go(func() error {
+		for part := int64(1); hasMoreParts; part++ {
+			// Get a block of memory from the pool and token which limits concurrency.
+			var buf []byte
+			if part == 1 {
+				buf = initialUploadBlock
+			} else {
+				buf = up.f.getBuf(false)
+			}
+
+			// Fail fast, in case an errgroup managed function returns an error
+			// gCtx is cancelled. There is no point in uploading all the other parts.
+			if gCtx.Err() != nil {
+				up.f.putBuf(buf, false)
+				return nil
+			}
+
+			// Read the chunk
+			var n int
+			if part == 1 {
+				n = len(buf)
+			} else {
+				n, err = io.ReadFull(up.in, buf)
+				if err == io.ErrUnexpectedEOF {
+					fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
+					buf = buf[:n]
+					hasMoreParts = false
+				} else if err == io.EOF {
+					fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
+					up.f.putBuf(buf, false)
+					return nil
+				} else if err != nil {
+					// other kinds of errors indicate failure
+					up.f.putBuf(buf, false)
+					return err
+				}
+			}
+
+			// Keep stats up to date
+			up.parts = part
+			up.size += int64(n)
+			if part > maxParts {
+				up.f.putBuf(buf, false)
+				return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
+			}
+
+			part := part // for the closure
+			g.Go(func() (err error) {
+				defer up.f.putBuf(buf, false)
+				return up.transferChunk(gCtx, part, buf)
+			})
+		}
+		return nil
+	})
 	err = g.Wait()
 	if err != nil {
 		return err
 	}
-	return up.Close(ctx)
+	up.sha1s = up.sha1s[:up.parts]
+	return up.finish(ctx)
 }
 
-// Copy the chunks from the source to the destination
-func (up *largeUpload) Copy(ctx context.Context) (err error) {
-	defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
+// Upload uploads the chunks from the input
+func (up *largeUpload) Upload(ctx context.Context) (err error) {
+	defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
 	fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
 	var (
 		g, gCtx   = errgroup.WithContext(ctx)
 		remaining = up.size
 	)
-	g.SetLimit(up.f.opt.UploadConcurrency)
-	for part := range up.parts {
-		// Fail fast, in case an errgroup managed function returns an error
-		// gCtx is cancelled. There is no point in copying all the other parts.
-		if gCtx.Err() != nil {
-			break
-		}
-		reqSize := min(remaining, up.chunkSize)
-		part := part // for the closure
-		g.Go(func() (err error) {
-			return up.copyChunk(gCtx, part, reqSize)
-		})
-		remaining -= reqSize
-	}
+	g.Go(func() error {
+		for part := int64(1); part <= up.parts; part++ {
+			// Get a block of memory from the pool and token which limits concurrency.
+			buf := up.f.getBuf(up.doCopy)
+
+			// Fail fast, in case an errgroup managed function returns an error
+			// gCtx is cancelled. There is no point in uploading all the other parts.
+			if gCtx.Err() != nil {
+				up.f.putBuf(buf, up.doCopy)
+				return nil
+			}
+
+			reqSize := remaining
+			if reqSize >= up.chunkSize {
+				reqSize = up.chunkSize
+			}
+
+			if !up.doCopy {
+				// Read the chunk
+				buf = buf[:reqSize]
+				_, err = io.ReadFull(up.in, buf)
+				if err != nil {
+					up.f.putBuf(buf, up.doCopy)
+					return err
+				}
+			}
+
+			part := part // for the closure
+			g.Go(func() (err error) {
+				defer up.f.putBuf(buf, up.doCopy)
+				if !up.doCopy {
+					err = up.transferChunk(gCtx, part, buf)
+				} else {
+					err = up.copyChunk(gCtx, part, reqSize)
+				}
+				return err
+			})
+			remaining -= reqSize
+		}
+		return nil
+	})
 	err = g.Wait()
 	if err != nil {
 		return err
 	}
-	return up.Close(ctx)
+	return up.finish(ctx)
 }
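The heart of the old Stream loop in the right-hand (+) side is the io.ReadFull error contract: a short final chunk surfaces as io.ErrUnexpectedEOF, while an already-exhausted reader yields io.EOF. A self-contained sketch of that pattern (the function and callback names are illustrative, not from rclone):

	package main

	import (
		"errors"
		"fmt"
		"io"
		"strings"
	)

	// readChunks reads fixed-size chunks and treats a short read as the
	// final part, mirroring the loop in the diff above.
	func readChunks(r io.Reader, chunkSize int, emit func([]byte)) error {
		for {
			buf := make([]byte, chunkSize)
			n, err := io.ReadFull(r, buf)
			switch {
			case err == nil:
				emit(buf) // full chunk, more may follow
			case errors.Is(err, io.ErrUnexpectedEOF):
				emit(buf[:n]) // short read: this was the last chunk
				return nil
			case errors.Is(err, io.EOF):
				return nil // nothing left: the previous chunk was the last
			default:
				return err
			}
		}
	}

	func main() {
		_ = readChunks(strings.NewReader("abcdefgh"), 3, func(b []byte) {
			fmt.Printf("%q\n", b) // "abc", "def", "gh"
		})
	}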

View File

@@ -52,7 +52,7 @@ func (e *Error) Error() string {
out += ": " + e.Message out += ": " + e.Message
} }
if e.ContextInfo != nil { if e.ContextInfo != nil {
out += fmt.Sprintf(" (%s)", string(e.ContextInfo)) out += fmt.Sprintf(" (%+v)", e.ContextInfo)
} }
return out return out
} }
@@ -63,7 +63,7 @@ var _ error = (*Error)(nil)
 // ItemFields are the fields needed for FileInfo
 var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by"
 
-// Types of things in Item/ItemMini
+// Types of things in Item
 const (
 	ItemTypeFolder = "folder"
 	ItemTypeFile   = "file"
@@ -72,31 +72,20 @@ const (
 	ItemStatusDeleted = "deleted"
 )
 
-// ItemMini is a subset of the elements in a full Item returned by some API calls
-type ItemMini struct {
-	Type       string `json:"type"`
-	ID         string `json:"id"`
-	SequenceID int64  `json:"sequence_id,string"`
-	Etag       string `json:"etag"`
-	SHA1       string `json:"sha1"`
-	Name       string `json:"name"`
-}
-
 // Item describes a folder or a file as returned by Get Folder Items and others
 type Item struct {
 	Type              string  `json:"type"`
 	ID                string  `json:"id"`
-	SequenceID        int64   `json:"sequence_id,string"`
+	SequenceID        string  `json:"sequence_id"`
 	Etag              string  `json:"etag"`
 	SHA1              string  `json:"sha1"`
 	Name              string  `json:"name"`
 	Size              float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
 	CreatedAt         Time    `json:"created_at"`
 	ModifiedAt        Time    `json:"modified_at"`
 	ContentCreatedAt  Time    `json:"content_created_at"`
 	ContentModifiedAt Time    `json:"content_modified_at"`
 	ItemStatus        string  `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
-	Parent            ItemMini `json:"parent"`
 	SharedLink        struct {
 		URL    string `json:"url,omitempty"`
 		Access string `json:"access,omitempty"`
@@ -125,21 +114,10 @@ type FolderItems struct {
 	Offset     int     `json:"offset"`
 	Limit      int     `json:"limit"`
 	NextMarker *string `json:"next_marker,omitempty"`
-	// There is some confusion about how this is actually
-	// returned. The []struct has worked for many years, but in
-	// https://github.com/rclone/rclone/issues/8776 box was
-	// returning it returned not as a list. We don't actually use
-	// this so comment it out.
-	//
-	// Order struct {
-	// 	By        string `json:"by"`
-	// 	Direction string `json:"direction"`
-	// } `json:"order"`
-	//
-	// Order []struct {
-	// 	By        string `json:"by"`
-	// 	Direction string `json:"direction"`
-	// } `json:"order"`
+	Order      []struct {
+		By        string `json:"by"`
+		Direction string `json:"direction"`
+	} `json:"order"`
 }
// Parent defined the ID of the parent directory // Parent defined the ID of the parent directory
@@ -178,7 +156,19 @@ type PreUploadCheckResponse struct {
// PreUploadCheckConflict is returned in the ContextInfo error field // PreUploadCheckConflict is returned in the ContextInfo error field
// from PreUploadCheck when the error code is "item_name_in_use" // from PreUploadCheck when the error code is "item_name_in_use"
type PreUploadCheckConflict struct { type PreUploadCheckConflict struct {
Conflicts ItemMini `json:"conflicts"` Conflicts struct {
Type string `json:"type"`
ID string `json:"id"`
FileVersion struct {
Type string `json:"type"`
ID string `json:"id"`
Sha1 string `json:"sha1"`
} `json:"file_version"`
SequenceID string `json:"sequence_id"`
Etag string `json:"etag"`
Sha1 string `json:"sha1"`
Name string `json:"name"`
} `json:"conflicts"`
} }
// UpdateFileModTime is used in Update File Info // UpdateFileModTime is used in Update File Info
@@ -282,39 +272,12 @@ type User struct {
 	ModifiedAt    time.Time `json:"modified_at"`
 	Language      string    `json:"language"`
 	Timezone      string    `json:"timezone"`
-	SpaceAmount   float64   `json:"space_amount"`
-	SpaceUsed     float64   `json:"space_used"`
-	MaxUploadSize float64   `json:"max_upload_size"`
+	SpaceAmount   int64     `json:"space_amount"`
+	SpaceUsed     int64     `json:"space_used"`
+	MaxUploadSize int64     `json:"max_upload_size"`
 	Status        string    `json:"status"`
 	JobTitle      string    `json:"job_title"`
 	Phone         string    `json:"phone"`
 	Address       string    `json:"address"`
 	AvatarURL     string    `json:"avatar_url"`
 }
-
-// FileTreeChangeEventTypes are the events that can require cache invalidation
-var FileTreeChangeEventTypes = map[string]struct{}{
-	"ITEM_COPY":                 {},
-	"ITEM_CREATE":               {},
-	"ITEM_MAKE_CURRENT_VERSION": {},
-	"ITEM_MODIFY":               {},
-	"ITEM_MOVE":                 {},
-	"ITEM_RENAME":               {},
-	"ITEM_TRASH":                {},
-	"ITEM_UNDELETE_VIA_TRASH":   {},
-	"ITEM_UPLOAD":               {},
-}
-
-// Event is an array element in the response returned from /events
-type Event struct {
-	EventType string `json:"event_type"`
-	EventID   string `json:"event_id"`
-	Source    Item   `json:"source"`
-}
-
-// Events is returned from /events
-type Events struct {
-	ChunkSize          int64   `json:"chunk_size"`
-	Entries            []Event `json:"entries"`
-	NextStreamPosition int64   `json:"next_stream_position"`
-}

View File

@@ -27,7 +27,6 @@ import (
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/golang-jwt/jwt/v4"
"github.com/rclone/rclone/backend/box/api" "github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config"
@@ -37,16 +36,16 @@ import (
"github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/dircache" "github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env" "github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/jwtutil" "github.com/rclone/rclone/lib/jwtutil"
"github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/rest" "github.com/rclone/rclone/lib/rest"
"github.com/youmark/pkcs8" "github.com/youmark/pkcs8"
"golang.org/x/oauth2"
"golang.org/x/oauth2/jws"
) )
const ( const (
@@ -65,21 +64,18 @@ const (
 // Globals
 var (
 	// Description of how to auth for this app
-	oauthConfig = &oauthutil.Config{
-		Scopes:   nil,
-		AuthURL:  "https://app.box.com/api/oauth2/authorize",
-		TokenURL: "https://app.box.com/api/oauth2/token",
+	oauthConfig = &oauth2.Config{
+		Scopes: nil,
+		Endpoint: oauth2.Endpoint{
+			AuthURL:  "https://app.box.com/api/oauth2/authorize",
+			TokenURL: "https://app.box.com/api/oauth2/token",
+		},
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectURL,
 	}
 )
 
-type boxCustomClaims struct {
-	jwt.StandardClaims
-	BoxSubType string `json:"box_sub_type,omitempty"`
-}
-
 // Register with Fs
 func init() {
 	fs.Register(&fs.RegInfo{
@@ -106,18 +102,16 @@ func init() {
 			return nil, nil
 		},
 		Options: append(oauthutil.SharedOptions, []fs.Option{{
 			Name:      "root_folder_id",
 			Help:      "Fill in for rclone to use a non root folder as its starting point.",
 			Default:   "0",
 			Advanced:  true,
-			Sensitive: true,
 		}, {
 			Name: "box_config_file",
 			Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
 		}, {
 			Name:      "access_token",
 			Help:      "Box App Primary Access Token\n\nLeave blank normally.",
-			Sensitive: true,
 		}, {
 			Name:    "box_sub_type",
 			Default: "user",
@@ -148,23 +142,6 @@ func init() {
Default: "", Default: "",
Help: "Only show items owned by the login (email address) passed in.", Help: "Only show items owned by the login (email address) passed in.",
Advanced: true, Advanced: true,
}, {
Name: "impersonate",
Default: "",
Help: `Impersonate this user ID when using a service account.
Setting this flag allows rclone, when using a JWT service account, to
act on behalf of another user by setting the as-user header.
The user ID is the Box identifier for a user. User IDs can found for
any user via the GET /users endpoint, which is only available to
admins, or by calling the GET /users/me endpoint with an authenticated
user session.
See: https://developer.box.com/guides/authentication/jwt/as-user/
`,
Advanced: true,
Sensitive: true,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
@@ -201,7 +178,7 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
 	signingHeaders := getSigningHeaders(boxConfig)
 	queryParams := getQueryParams(boxConfig)
 	client := fshttp.NewClient(ctx)
-	err = jwtutil.Config("box", name, tokenURL, *claims, signingHeaders, queryParams, privateKey, m, client)
+	err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
 	return err
 }
@@ -217,31 +194,34 @@ func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
 	return boxConfig, nil
 }
 
-func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomClaims, err error) {
+func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
 	val, err := jwtutil.RandomHex(20)
 	if err != nil {
 		return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err)
 	}
 
-	claims = &boxCustomClaims{
-		//lint:ignore SA1019 since we need to use jwt.StandardClaims even if deprecated in jwt-go v4 until a more permanent solution is ready in time before jwt-go v5 where it is removed entirely
-		//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1019
-		StandardClaims: jwt.StandardClaims{
-			Id:        val,
-			Issuer:    boxConfig.BoxAppSettings.ClientID,
-			Subject:   boxConfig.EnterpriseID,
-			Audience:  tokenURL,
-			ExpiresAt: time.Now().Add(time.Second * 45).Unix(),
+	claims = &jws.ClaimSet{
+		Iss: boxConfig.BoxAppSettings.ClientID,
+		Sub: boxConfig.EnterpriseID,
+		Aud: tokenURL,
+		Exp: time.Now().Add(time.Second * 45).Unix(),
+		PrivateClaims: map[string]interface{}{
+			"box_sub_type": boxSubType,
+			"aud":          tokenURL,
+			"jti":          val,
 		},
-		BoxSubType: boxSubType,
 	}
 
 	return claims, nil
 }
 
-func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]any {
-	signingHeaders := map[string]any{
-		"kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
+func getSigningHeaders(boxConfig *api.ConfigJSON) *jws.Header {
+	signingHeaders := &jws.Header{
+		Algorithm: "RS256",
+		Typ:       "JWT",
+		KeyID:     boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
 	}
 	return signingHeaders
 }
@@ -255,10 +235,8 @@ func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
 }
 
 func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
 
 	block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
-	if block == nil {
-		return nil, errors.New("box: failed to PEM decode private key")
-	}
 	if len(rest) > 0 {
 		return nil, fmt.Errorf("box: extra data included in private key: %w", err)
 	}
@@ -280,29 +258,19 @@ type Options struct {
 	AccessToken  string               `config:"access_token"`
 	ListChunk    int                  `config:"list_chunk"`
 	OwnedBy      string               `config:"owned_by"`
-	Impersonate  string               `config:"impersonate"`
-}
-
-// ItemMeta defines metadata we cache for each Item ID
-type ItemMeta struct {
-	SequenceID int64  // the most recent event processed for this item
-	ParentID   string // ID of the parent directory of this item
-	Name       string // leaf name of this item
 }
 
 // Fs represents a remote box
 type Fs struct {
 	name         string                // name of this remote
 	root         string                // the path we are working on
 	opt          Options               // parsed options
 	features     *fs.Features          // optional features
 	srv          *rest.Client          // the connection to the server
 	dirCache     *dircache.DirCache    // Map of directory path to directory id
 	pacer        *fs.Pacer             // pacer for API calls
 	tokenRenewer *oauthutil.Renew      // renew the token on expiry
 	uploadToken  *pacer.TokenDispenser // control concurrency
-	itemMetaCacheMu *sync.Mutex         // protects itemMetaCache
-	itemMetaCache   map[string]ItemMeta // map of Item ID to selected metadata
 }
 
 // Object describes a box object
@@ -381,7 +349,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
 // readMetaDataForPath reads the metadata from the path
 func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
-	// defer log.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
+	// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
 	leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false)
 	if err != nil {
 		if err == fs.ErrorDirNotFound {
@@ -390,30 +358,20 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
 		return nil, err
 	}
 
-	// Use preupload to find the ID
-	itemMini, err := f.preUploadCheck(ctx, leaf, directoryID, -1)
-	if err != nil {
-		return nil, err
-	}
-	if itemMini == nil {
-		return nil, fs.ErrorObjectNotFound
-	}
-
-	// Now we have the ID we can look up the object proper
-	opts := rest.Opts{
-		Method:     "GET",
-		Path:       "/files/" + itemMini.ID,
-		Parameters: fieldsValue(),
-	}
-	var item api.Item
-	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.srv.CallJSON(ctx, &opts, nil, &item)
-		return shouldRetry(ctx, resp, err)
+	found, err := f.listAll(ctx, directoryID, false, true, true, func(item *api.Item) bool {
+		if strings.EqualFold(item.Name, leaf) {
+			info = item
+			return true
+		}
+		return false
 	})
 	if err != nil {
 		return nil, err
 	}
-	return &item, nil
+	if !found {
+		return nil, fs.ErrorObjectNotFound
+	}
+	return info, nil
 }
// errorHandler parses a non 2xx error response into an error // errorHandler parses a non 2xx error response into an error
@@ -460,14 +418,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	ci := fs.GetConfig(ctx)
 
 	f := &Fs{
 		name:        name,
 		root:        root,
 		opt:         *opt,
 		srv:         rest.NewClient(client).SetRoot(rootURL),
 		pacer:       fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 		uploadToken: pacer.NewTokenDispenser(ci.Transfers),
-		itemMetaCacheMu: new(sync.Mutex),
-		itemMetaCache:   make(map[string]ItemMeta),
 	}
 	f.features = (&fs.Features{
 		CaseInsensitive: true,
@@ -480,11 +436,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken) f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken)
} }
// If using impersonate set an as-user header
if f.opt.Impersonate != "" {
f.srv.SetHeader("as-user", f.opt.Impersonate)
}
jsonFile, ok := m.Get("box_config_file") jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type") boxSubType, boxSubTypeOk := m.Get("box_sub_type")
@@ -620,7 +571,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		// fmt.Printf("...Error %v\n", err)
+		//fmt.Printf("...Error %v\n", err)
 		return "", err
 	}
 	// fmt.Printf("...Id %q\n", *info.Id)
@@ -706,27 +657,9 @@ OUTER:
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	return list.WithListP(ctx, dir, f)
-}
-
-// ListP lists the objects and directories of the Fs starting
-// from dir non recursively into out.
-//
-// dir should be "" to start from the root, and should not
-// have trailing slashes.
-//
-// This should return ErrDirNotFound if the directory isn't
-// found.
-//
-// It should call callback for each tranche of entries read.
-// These need not be returned in any particular order. If
-// callback returns an error then the listing will stop
-// immediately.
-func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
-	list := list.NewHelper(callback)
 	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
 	if err != nil {
-		return err
+		return nil, err
 	}
 	var iErr error
 	_, err = f.listAll(ctx, directoryID, false, false, true, func(info *api.Item) bool {
@@ -736,43 +669,24 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) e
 			f.dirCache.Put(remote, info.ID)
 			d := fs.NewDir(remote, info.ModTime()).SetID(info.ID)
 			// FIXME more info from dir?
-			err = list.Add(d)
-			if err != nil {
-				iErr = err
-				return true
-			}
+			entries = append(entries, d)
 		} else if info.Type == api.ItemTypeFile {
 			o, err := f.newObjectWithInfo(ctx, remote, info)
 			if err != nil {
 				iErr = err
 				return true
 			}
-			err = list.Add(o)
-			if err != nil {
-				iErr = err
-				return true
-			}
+			entries = append(entries, o)
 		}
-
-		// Cache some metadata for this Item to help us process events later
-		// on. In particular, the box event API does not provide the old path
-		// of the Item when it is renamed/deleted/moved/etc.
-		f.itemMetaCacheMu.Lock()
-		cachedItemMeta, found := f.itemMetaCache[info.ID]
-		if !found || cachedItemMeta.SequenceID < info.SequenceID {
-			f.itemMetaCache[info.ID] = ItemMeta{SequenceID: info.SequenceID, ParentID: directoryID, Name: info.Name}
-		}
-		f.itemMetaCacheMu.Unlock()
-
 		return false
 	})
 	if err != nil {
-		return err
+		return nil, err
 	}
 	if iErr != nil {
-		return iErr
+		return nil, iErr
 	}
-	return list.Flush()
+	return entries, nil
 }
 
 // Creates from the parameters passed in a half finished Object which
@@ -799,7 +713,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
 //
 // It returns "", nil if the file is good to go
 // It returns "ID", nil if the file must be updated
-func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (item *api.ItemMini, err error) {
+func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (ID string, err error) {
 	check := api.PreUploadCheck{
 		Name: f.opt.Enc.FromStandardName(leaf),
 		Parent: api.Parent{
@@ -824,16 +738,16 @@ func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size
 			var conflict api.PreUploadCheckConflict
 			err = json.Unmarshal(apiErr.ContextInfo, &conflict)
 			if err != nil {
-				return nil, fmt.Errorf("pre-upload check: JSON decode failed: %w", err)
+				return "", fmt.Errorf("pre-upload check: JSON decode failed: %w", err)
 			}
 			if conflict.Conflicts.Type != api.ItemTypeFile {
-				return nil, fs.ErrorIsDir
+				return "", fmt.Errorf("pre-upload check: can't overwrite non file with file: %w", err)
 			}
-			return &conflict.Conflicts, nil
+			return conflict.Conflicts.ID, nil
 		}
-		return nil, fmt.Errorf("pre-upload check: %w", err)
+		return "", fmt.Errorf("pre-upload check: %w", err)
 	}
-	return nil, nil
+	return "", nil
 }
 
 // Put the object
@@ -854,11 +768,11 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 	// Preflight check the upload, which returns the ID if the
 	// object already exists
-	item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
+	ID, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
 	if err != nil {
 		return nil, err
 	}
-	if item == nil {
+	if ID == "" {
 		return f.PutUnchecked(ctx, in, src, options...)
 	}
@@ -866,7 +780,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
o := &Object{ o := &Object{
fs: f, fs: f,
remote: remote, remote: remote,
id: item.ID, id: ID,
} }
return o, o.Update(ctx, in, src, options...) return o, o.Update(ctx, in, src, options...)
} }
@@ -993,26 +907,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		return nil, err
 	}
 
-	// check if dest already exists
-	item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
-	if err != nil {
-		return nil, err
-	}
-	if item != nil { // dest already exists, need to copy to temp name and then move
-		tempSuffix := "-rclone-copy-" + random.String(8)
-		fs.Debugf(remote, "dst already exists, copying to temp name %v", remote+tempSuffix)
-		tempObj, err := f.Copy(ctx, src, remote+tempSuffix)
-		if err != nil {
-			return nil, err
-		}
-		fs.Debugf(remote+tempSuffix, "moving to real name %v", remote)
-		err = f.deleteObject(ctx, item.ID)
-		if err != nil {
-			return nil, err
-		}
-		return f.Move(ctx, tempObj, remote)
-	}
-
 	// Copy the object
 	opts := rest.Opts{
 		Method: "POST",
@@ -1223,7 +1117,7 @@ func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
 // CleanUp empties the trash
 func (f *Fs) CleanUp(ctx context.Context) (err error) {
 	var (
-		deleteErrors       atomic.Uint64
+		deleteErrors       = int64(0)
 		concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers)
 		wg                 sync.WaitGroup
 	)
@@ -1239,7 +1133,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
 				err := f.deletePermanently(ctx, item.Type, item.ID)
 				if err != nil {
 					fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err)
-					deleteErrors.Add(1)
+					atomic.AddInt64(&deleteErrors, 1)
 				}
 			}()
 		} else {
@@ -1248,279 +1142,12 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
 		return false
 	})
 	wg.Wait()
-	if deleteErrors.Load() != 0 {
-		return fmt.Errorf("failed to delete %d trash items", deleteErrors.Load())
+	if deleteErrors != 0 {
+		return fmt.Errorf("failed to delete %d trash items", deleteErrors)
 	}
 	return err
 }
 
-// Shutdown shutdown the fs
-func (f *Fs) Shutdown(ctx context.Context) error {
-	f.tokenRenewer.Shutdown()
-	return nil
-}
-
-// ChangeNotify calls the passed function with a path that has had changes.
-// If the implementation uses polling, it should adhere to the given interval.
-//
-// Automatically restarts itself in case of unexpected behavior of the remote.
-//
-// Close the returned channel to stop being notified.
-func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
-	go func() {
-		// get the `stream_position` early so all changes from now on get processed
-		streamPosition, err := f.changeNotifyStreamPosition(ctx)
-		if err != nil {
-			fs.Infof(f, "Failed to get StreamPosition: %s", err)
-		}
-
-		// box can send duplicate Event IDs. Use this map to track and filter
-		// the ones we've already processed.
-		processedEventIDs := make(map[string]time.Time)
-
-		var ticker *time.Ticker
-		var tickerC <-chan time.Time
-		for {
-			select {
-			case pollInterval, ok := <-pollIntervalChan:
-				if !ok {
-					if ticker != nil {
-						ticker.Stop()
-					}
-					return
-				}
-				if ticker != nil {
-					ticker.Stop()
-					ticker, tickerC = nil, nil
-				}
-				if pollInterval != 0 {
-					ticker = time.NewTicker(pollInterval)
-					tickerC = ticker.C
-				}
-			case <-tickerC:
-				if streamPosition == "" {
-					streamPosition, err = f.changeNotifyStreamPosition(ctx)
-					if err != nil {
-						fs.Infof(f, "Failed to get StreamPosition: %s", err)
-						continue
-					}
-				}
-
-				// Garbage collect EventIDs older than 1 minute
-				for eventID, timestamp := range processedEventIDs {
-					if time.Since(timestamp) > time.Minute {
-						delete(processedEventIDs, eventID)
-					}
-				}
-
-				streamPosition, err = f.changeNotifyRunner(ctx, notifyFunc, streamPosition, processedEventIDs)
-				if err != nil {
-					fs.Infof(f, "Change notify listener failure: %s", err)
-				}
-			}
-		}
-	}()
-}
-
-func (f *Fs) changeNotifyStreamPosition(ctx context.Context) (streamPosition string, err error) {
-	opts := rest.Opts{
-		Method:     "GET",
-		Path:       "/events",
-		Parameters: fieldsValue(),
-	}
-	opts.Parameters.Set("stream_position", "now")
-	opts.Parameters.Set("stream_type", "changes")
-
-	var result api.Events
-	var resp *http.Response
-	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-		return shouldRetry(ctx, resp, err)
-	})
-	if err != nil {
-		return "", err
-	}
-	return strconv.FormatInt(result.NextStreamPosition, 10), nil
-}
-
-// Attempts to construct the full path for an object, given the ID of its
-// parent directory and the name of the object.
-//
-// Can return "" if the parentID is not currently in the directory cache.
-func (f *Fs) getFullPath(parentID string, childName string) (fullPath string) {
-	fullPath = ""
-	name := f.opt.Enc.ToStandardName(childName)
-	if parentID != "" {
-		if parentDir, ok := f.dirCache.GetInv(parentID); ok {
-			if len(parentDir) > 0 {
-				fullPath = parentDir + "/" + name
-			} else {
-				fullPath = name
-			}
-		}
-	} else {
-		// No parent, this object is at the root
-		fullPath = name
-	}
-	return fullPath
-}
-
-func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), streamPosition string, processedEventIDs map[string]time.Time) (nextStreamPosition string, err error) {
-	nextStreamPosition = streamPosition
-
-	for {
-		// box only allows a max of 500 events
-		limit := min(f.opt.ListChunk, 500)
-
-		opts := rest.Opts{
-			Method:     "GET",
-			Path:       "/events",
-			Parameters: fieldsValue(),
-		}
-		opts.Parameters.Set("stream_position", nextStreamPosition)
-		opts.Parameters.Set("stream_type", "changes")
-		opts.Parameters.Set("limit", strconv.Itoa(limit))
-
-		var result api.Events
-		var resp *http.Response
-		fs.Debugf(f, "Checking for changes on remote (next_stream_position: %q)", nextStreamPosition)
-		err = f.pacer.Call(func() (bool, error) {
-			resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-			return shouldRetry(ctx, resp, err)
-		})
-		if err != nil {
-			return "", err
-		}
-
-		if result.ChunkSize != int64(len(result.Entries)) {
-			return "", fmt.Errorf("invalid response to event request, chunk_size (%v) not equal to number of entries (%v)", result.ChunkSize, len(result.Entries))
-		}
-
-		nextStreamPosition = strconv.FormatInt(result.NextStreamPosition, 10)
-		if result.ChunkSize == 0 {
-			return nextStreamPosition, nil
-		}
-
-		type pathToClear struct {
-			path      string
-			entryType fs.EntryType
-		}
-		var pathsToClear []pathToClear
-		newEventIDs := 0
-		for _, entry := range result.Entries {
-			eventDetails := fmt.Sprintf("[%q(%d)|%s|%s|%s|%s]", entry.Source.Name, entry.Source.SequenceID,
-				entry.Source.Type, entry.EventType, entry.Source.ID, entry.EventID)
-
-			if entry.EventID == "" {
-				fs.Debugf(f, "%s ignored due to missing EventID", eventDetails)
-				continue
-			}
-			if _, ok := processedEventIDs[entry.EventID]; ok {
-				fs.Debugf(f, "%s ignored due to duplicate EventID", eventDetails)
-				continue
-			}
-			processedEventIDs[entry.EventID] = time.Now()
-			newEventIDs++
-
-			if entry.Source.ID == "" { // missing File or Folder ID
-				fs.Debugf(f, "%s ignored due to missing SourceID", eventDetails)
-				continue
-			}
-			if entry.Source.Type != api.ItemTypeFile && entry.Source.Type != api.ItemTypeFolder { // event is not for a file or folder
-				fs.Debugf(f, "%s ignored due to unsupported SourceType", eventDetails)
-				continue
-			}
-
-			// Only interested in event types that result in a file tree change
-			if _, found := api.FileTreeChangeEventTypes[entry.EventType]; !found {
-				fs.Debugf(f, "%s ignored due to unsupported EventType", eventDetails)
-				continue
-			}
-
-			f.itemMetaCacheMu.Lock()
-			itemMeta, cachedItemMetaFound := f.itemMetaCache[entry.Source.ID]
-			if cachedItemMetaFound {
-				if itemMeta.SequenceID >= entry.Source.SequenceID {
-					// Item in the cache has the same or newer SequenceID than
-					// this event. Ignore this event, it must be old.
-					f.itemMetaCacheMu.Unlock()
-					fs.Debugf(f, "%s ignored due to old SequenceID (%q)", eventDetails, itemMeta.SequenceID)
-					continue
-				}
-
-				// This event is newer. Delete its entry from the cache,
-				// we'll notify about its change below, then it's up to a
-				// future list operation to repopulate the cache.
-				delete(f.itemMetaCache, entry.Source.ID)
-			}
-			f.itemMetaCacheMu.Unlock()
-
-			entryType := fs.EntryDirectory
-			if entry.Source.Type == api.ItemTypeFile {
-				entryType = fs.EntryObject
-			}
-
-			// The box event only includes the new path for the object (e.g.
-			// the path after the object was moved). If there was an old path
-			// saved in our cache, it must be cleared.
-			if cachedItemMetaFound {
-				path := f.getFullPath(itemMeta.ParentID, itemMeta.Name)
-				if path != "" {
-					fs.Debugf(f, "%s added old path (%q) for notify", eventDetails, path)
-					pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
-				} else {
-					fs.Debugf(f, "%s old parent not cached", eventDetails)
-				}
-
-				// If this is a directory, also delete it from the dir cache.
-				// This will effectively invalidate the item metadata cache
-				// entries for all descendents of this directory, since we
-				// will no longer be able to construct a full path for them.
-				// This is exactly what we want, since we don't want to notify
-				// on the paths of these descendents if one of their ancestors
-				// has been renamed/deleted.
-				if entry.Source.Type == api.ItemTypeFolder {
-					f.dirCache.FlushDir(path)
-				}
-			}
-
-			// If the item is "active", then it is not trashed or deleted, so
-			// it potentially has a valid parent.
-			//
-			// Construct the new path of the object, based on the Parent ID
-			// and its name. If we get an empty result, it means we don't
-			// currently know about this object so notification is unnecessary.
-			if entry.Source.ItemStatus == api.ItemStatusActive {
-				path := f.getFullPath(entry.Source.Parent.ID, entry.Source.Name)
-				if path != "" {
-					fs.Debugf(f, "%s added new path (%q) for notify", eventDetails, path)
-					pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
-				} else {
-					fs.Debugf(f, "%s new parent not found", eventDetails)
-				}
-			}
-		}
-
-		// box can sometimes repeatedly return the same Event IDs within a
-		// short period of time. If it stops giving us new ones, treat it
-		// the same as if it returned us none at all.
-		if newEventIDs == 0 {
-			return nextStreamPosition, nil
-		}
-
-		notifiedPaths := make(map[string]bool)
-		for _, p := range pathsToClear {
-			if _, ok := notifiedPaths[p.path]; ok {
-				continue
-			}
-			notifiedPaths[p.path] = true
-			notifyFunc(p.path, p.entryType)
-		}
-		fs.Debugf(f, "Received %v events, resulting in %v paths and %v notifications", len(result.Entries), len(pathsToClear), len(notifiedPaths))
-	}
-}
-
 // DirCacheFlush resets the directory cache - used in testing as an
 // optional interface
 func (f *Fs) DirCacheFlush() {
@@ -1768,8 +1395,6 @@ var (
 	_ fs.DirCacheFlusher = (*Fs)(nil)
 	_ fs.PublicLinker    = (*Fs)(nil)
 	_ fs.CleanUpper      = (*Fs)(nil)
-	_ fs.ListPer         = (*Fs)(nil)
-	_ fs.Shutdowner      = (*Fs)(nil)
 	_ fs.Object          = (*Object)(nil)
 	_ fs.IDer            = (*Object)(nil)
 )
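The removed changeNotifyRunner above filters duplicate Box events with a timestamped map that is garbage collected on each poll. A sketch of that pattern in isolation, with hypothetical names rather than rclone's:

	package main

	import "time"

	// remember reports whether id is new, pruning entries older than ttl
	// first so the map cannot grow without bound.
	func remember(seen map[string]time.Time, id string, ttl time.Duration) bool {
		for k, t := range seen {
			if time.Since(t) > ttl {
				delete(seen, k)
			}
		}
		if _, dup := seen[id]; dup {
			return false // already processed, skip
		}
		seen[id] = time.Now()
		return true
	}

	func main() {
		seen := make(map[string]time.Time)
		_ = remember(seen, "event-1", time.Minute) // true: first sighting
		_ = remember(seen, "event-1", time.Minute) // false: duplicate
	}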

View File

@@ -105,7 +105,7 @@ func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api
 	const defaultDelay = 10
 	var tries int
 outer:
-	for tries = range maxTries {
+	for tries = 0; tries < maxTries; tries++ {
 		err = o.fs.pacer.Call(func() (bool, error) {
 			resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
 			if err != nil {
@@ -203,7 +203,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
 	errs := make(chan error, 1)
 	var wg sync.WaitGroup
 outer:
-	for part := range session.TotalParts {
+	for part := 0; part < session.TotalParts; part++ {
 		// Check any errors
 		select {
 		case err = <-errs:
@@ -211,7 +211,10 @@ outer:
 		default:
 		}
 
-		reqSize := min(remaining, chunkSize)
+		reqSize := remaining
+		if reqSize >= chunkSize {
+			reqSize = chunkSize
+		}
 
 		// Make a block of memory
 		buf := make([]byte, reqSize)
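The left-hand `reqSize := min(remaining, chunkSize)` relies on the min builtin added in Go 1.21 (and `for part := range session.TotalParts` above on range-over-int from Go 1.22); the three-line clamp on the right is the pre-1.21 spelling. Both compute the same value, as this sketch shows:

	// clampChunk is the pre-Go-1.21 spelling of min(remaining, chunkSize).
	func clampChunk(remaining, chunkSize int64) int64 {
		if remaining >= chunkSize {
			return chunkSize
		}
		return remaining
	}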

View File

@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js
 
 // Package cache implements a virtual provider to cache existing remotes.
 package cache
@@ -29,7 +30,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/rc" "github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fs/walk" "github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/atexit" "github.com/rclone/rclone/lib/atexit"
@@ -76,19 +76,17 @@ func init() {
Name: "plex_url", Name: "plex_url",
Help: "The URL of the Plex server.", Help: "The URL of the Plex server.",
}, { }, {
Name: "plex_username", Name: "plex_username",
Help: "The username of the Plex user.", Help: "The username of the Plex user.",
Sensitive: true,
}, { }, {
Name: "plex_password", Name: "plex_password",
Help: "The password of the Plex user.", Help: "The password of the Plex user.",
IsPassword: true, IsPassword: true,
}, { }, {
Name: "plex_token", Name: "plex_token",
Help: "The plex token for authentication - auto set normally.", Help: "The plex token for authentication - auto set normally.",
Hide: fs.OptionHideBoth, Hide: fs.OptionHideBoth,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "plex_insecure", Name: "plex_insecure",
Help: "Skip all certificate verification when connecting to the Plex server.", Help: "Skip all certificate verification when connecting to the Plex server.",
@@ -410,16 +408,18 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err) return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
} }
} else if opt.PlexPassword != "" && opt.PlexUsername != "" { } else {
decPass, err := obscure.Reveal(opt.PlexPassword) if opt.PlexPassword != "" && opt.PlexUsername != "" {
if err != nil { decPass, err := obscure.Reveal(opt.PlexPassword)
decPass = opt.PlexPassword if err != nil {
} decPass = opt.PlexPassword
f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) { }
m.Set("plex_token", token) f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
}) m.Set("plex_token", token)
if err != nil { })
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err) if err != nil {
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
}
} }
} }
} }
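The NewFs hunk only re-nests the Plex branch; the logic is unchanged: reveal the obscured password, and fall back to the stored value when it does not decode. That fallback on its own, a small sketch using rclone's real obscure package:

package sketch

import "github.com/rclone/rclone/fs/config/obscure"

// revealOrRaw prefers the de-obscured password but keeps the stored
// string when Reveal fails, exactly the fallback in the hunk above.
func revealOrRaw(stored string) string {
	if plain, err := obscure.Reveal(stored); err == nil {
		return plain
	}
	return stored
}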
@@ -684,7 +684,7 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
start, end int64
}
parseChunks := func(ranges string) (crs []chunkRange, err error) {
-for part := range strings.SplitSeq(ranges, ",") {
+for _, part := range strings.Split(ranges, ",") {
var start, end int64 = 0, math.MaxInt64
switch ints := strings.Split(part, ":"); len(ints) {
case 1:
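parseChunks accepts a comma-separated list of start:end pairs where either bound may be omitted. A self-contained sketch of that parsing, simplified relative to the hunk:

package sketch

import (
	"fmt"
	"math"
	"strconv"
	"strings"
)

type chunkRange struct{ start, end int64 }

// parseRanges parses "5", "5:10" or ":10" parts separated by commas;
// missing bounds default to 0 and MaxInt64, as in parseChunks above.
func parseRanges(ranges string) ([]chunkRange, error) {
	var crs []chunkRange
	for _, part := range strings.Split(ranges, ",") {
		var start, end int64 = 0, math.MaxInt64
		ints := strings.Split(part, ":")
		if len(ints) > 2 {
			return nil, fmt.Errorf("invalid range %q", part)
		}
		var err error
		if ints[0] != "" {
			if start, err = strconv.ParseInt(ints[0], 10, 64); err != nil {
				return nil, err
			}
		}
		if len(ints) == 2 && ints[1] != "" {
			if end, err = strconv.ParseInt(ints[1], 10, 64); err != nil {
				return nil, err
			}
		}
		crs = append(crs, chunkRange{start, end})
	}
	return crs, nil
}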
@@ -1038,7 +1038,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
fs.Debugf(dir, "list: remove entry: %v", entryRemote)
}
-entries = nil //nolint:ineffassign
+entries = nil
// and then iterate over the ones from source (temp Objects will override source ones)
var batchDirectories []*Directory
@@ -1087,13 +1087,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return cachedEntries, nil
}
-func (f *Fs) recurse(ctx context.Context, dir string, list *list.Helper) error {
+func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error {
entries, err := f.List(ctx, dir)
if err != nil {
return err
}
-for i := range entries {
+for i := 0; i < len(entries); i++ {
innerDir, ok := entries[i].(fs.Directory)
if ok {
err := f.recurse(ctx, innerDir.Remote(), list)
@@ -1139,7 +1139,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
}
// if we're here, we're gonna do a standard recursive traversal and cache everything
-list := list.NewHelper(callback)
+list := walk.NewListRHelper(callback)
err = f.recurse(ctx, dir, list)
if err != nil {
return err
@@ -1429,7 +1429,7 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i
}()
// wait until both are done
-for range 2 {
+for c := 0; c < 2; c++ {
<-done
}
}
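cacheReader starts two goroutines and blocks until both have signalled a shared done channel; only the loop syntax changes in this hunk. The pattern reduced to its core, as a runnable sketch:

package main

import "fmt"

func main() {
	done := make(chan struct{})
	for i := 0; i < 2; i++ {
		go func(n int) {
			fmt.Println("worker", n, "finished")
			done <- struct{}{}
		}(i)
	}
	// wait until both are done, as the hunk above does
	for i := 0; i < 2; i++ {
		<-done
	}
}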
@@ -1754,7 +1754,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
}
// Stats returns stats about the cache storage
-func (f *Fs) Stats() (map[string]map[string]any, error) {
+func (f *Fs) Stats() (map[string]map[string]interface{}, error) {
return f.cache.Stats()
}
@@ -1787,7 +1787,7 @@ func (f *Fs) CleanUpCache(ignoreLastTs bool) {
}
}
-// StopBackgroundRunners will signal all the runners to stop their work
+// StopBackgroundRunners will signall all the runners to stop their work
// can be triggered from a terminate signal or from testing between runs
func (f *Fs) StopBackgroundRunners() {
f.cleanupChan <- false
@@ -1934,7 +1934,7 @@ var commandHelp = []fs.CommandHelp{
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (any, error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
switch name {
case "stats":
return f.Stats()
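A large share of this compare is the mechanical swap between any and interface{} (here in Stats and Command). Since Go 1.18 these are the same type, so the swap never changes behaviour:

package sketch

// Since Go 1.18, any is an alias: type any = interface{}.
// These two signatures are therefore fully interchangeable.
func statsOld() (map[string]map[string]interface{}, error) { return nil, nil }
func statsNew() (map[string]map[string]any, error)         { return nil, nil }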


@@ -1,4 +1,5 @@
//go:build !plan9 && !js && !race
+// +build !plan9,!js,!race
package cache_test
@@ -10,6 +11,7 @@ import (
goflag "flag"
"fmt"
"io"
+"log"
"math/rand"
"os"
"path"
@@ -28,11 +30,10 @@ import (
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/object"
-"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/testy"
"github.com/rclone/rclone/lib/random"
-"github.com/rclone/rclone/vfs/vfscommon"
+"github.com/rclone/rclone/vfs/vfsflags"
"github.com/stretchr/testify/require"
)
@@ -92,7 +93,7 @@ func TestMain(m *testing.M) {
goflag.Parse()
var rc int
-fs.Logf(nil, "Running with the following params: \n remote: %v", remoteName)
+log.Printf("Running with the following params: \n remote: %v", remoteName)
runInstance = newRun()
rc = m.Run()
os.Exit(rc)
@@ -122,10 +123,10 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) {
/* TODO: is this testing something?
func TestInternalVfsCache(t *testing.T) {
-vfscommon.Opt.DirCacheTime = time.Second * 30
+vfsflags.Opt.DirCacheTime = time.Second * 30
testSize := int64(524288000)
-vfscommon.Opt.CacheMode = vfs.CacheModeWrites
+vfsflags.Opt.CacheMode = vfs.CacheModeWrites
id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
@@ -337,7 +338,7 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
func TestInternalWrappedWrittenContentMatches(t *testing.T) {
id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
-vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
+vfsflags.Opt.DirCacheTime = time.Second
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
if runInstance.rootIsCrypt {
t.Skip("test skipped with crypt remote")
@@ -360,14 +361,14 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
require.NoError(t, err)
require.Equal(t, int64(len(checkSample)), o.Size())
-for i := range checkSample {
+for i := 0; i < len(checkSample); i++ {
require.Equal(t, testData[i], checkSample[i])
}
}
func TestInternalLargeWrittenContentMatches(t *testing.T) {
id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
-vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
+vfsflags.Opt.DirCacheTime = time.Second
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
if runInstance.rootIsCrypt {
t.Skip("test skipped with crypt remote")
@@ -387,7 +388,7 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {
readData, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
require.NoError(t, err)
-for i := range readData {
+for i := 0; i < len(readData); i++ {
require.Equalf(t, testData[i], readData[i], "at byte %v", i)
}
}
@@ -407,7 +408,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
// update in the wrapped fs
originalSize, err := runInstance.size(t, rootFs, "data.bin")
require.NoError(t, err)
-fs.Logf(nil, "original size: %v", originalSize)
+log.Printf("original size: %v", originalSize)
o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err)
@@ -416,7 +417,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
if runInstance.rootIsCrypt {
data2, err = base64.StdEncoding.DecodeString(cryptedText3Base64)
require.NoError(t, err)
-expectedSize++ // FIXME newline gets in, likely test data issue
+expectedSize = expectedSize + 1 // FIXME newline gets in, likely test data issue
} else {
data2 = []byte("test content")
}
@@ -424,7 +425,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
err = o.Update(context.Background(), bytes.NewReader(data2), objInfo)
require.NoError(t, err)
require.Equal(t, int64(len(data2)), o.Size())
-fs.Logf(nil, "updated size: %v", len(data2))
+log.Printf("updated size: %v", len(data2))
// get a new instance from the cache
if runInstance.wrappedIsExternal {
@@ -484,49 +485,49 @@ func TestInternalMoveWithNotify(t *testing.T) {
err = runInstance.retryBlock(func() error {
li, err := runInstance.list(t, rootFs, "test")
if err != nil {
-fs.Logf(nil, "err: %v", err)
+log.Printf("err: %v", err)
return err
}
if len(li) != 2 {
-fs.Logf(nil, "not expected listing /test: %v", li)
+log.Printf("not expected listing /test: %v", li)
return fmt.Errorf("not expected listing /test: %v", li)
}
li, err = runInstance.list(t, rootFs, "test/one")
if err != nil {
-fs.Logf(nil, "err: %v", err)
+log.Printf("err: %v", err)
return err
}
if len(li) != 0 {
-fs.Logf(nil, "not expected listing /test/one: %v", li)
+log.Printf("not expected listing /test/one: %v", li)
return fmt.Errorf("not expected listing /test/one: %v", li)
}
li, err = runInstance.list(t, rootFs, "test/second")
if err != nil {
-fs.Logf(nil, "err: %v", err)
+log.Printf("err: %v", err)
return err
}
if len(li) != 1 {
-fs.Logf(nil, "not expected listing /test/second: %v", li)
+log.Printf("not expected listing /test/second: %v", li)
return fmt.Errorf("not expected listing /test/second: %v", li)
}
if fi, ok := li[0].(os.FileInfo); ok {
if fi.Name() != "data.bin" {
-fs.Logf(nil, "not expected name: %v", fi.Name())
+log.Printf("not expected name: %v", fi.Name())
return fmt.Errorf("not expected name: %v", fi.Name())
}
} else if di, ok := li[0].(fs.DirEntry); ok {
if di.Remote() != "test/second/data.bin" {
-fs.Logf(nil, "not expected remote: %v", di.Remote())
+log.Printf("not expected remote: %v", di.Remote())
return fmt.Errorf("not expected remote: %v", di.Remote())
}
} else {
-fs.Logf(nil, "unexpected listing: %v", li)
+log.Printf("unexpected listing: %v", li)
return fmt.Errorf("unexpected listing: %v", li)
}
-fs.Logf(nil, "complete listing: %v", li)
+log.Printf("complete listing: %v", li)
return nil
}, 12, time.Second*10)
require.NoError(t, err)
@@ -576,43 +577,43 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
err = runInstance.retryBlock(func() error {
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
if !found {
-fs.Logf(nil, "not found /test")
+log.Printf("not found /test")
return fmt.Errorf("not found /test")
}
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
if !found {
-fs.Logf(nil, "not found /test/one")
+log.Printf("not found /test/one")
return fmt.Errorf("not found /test/one")
}
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
if !found {
-fs.Logf(nil, "not found /test/one/test2")
+log.Printf("not found /test/one/test2")
return fmt.Errorf("not found /test/one/test2")
}
li, err := runInstance.list(t, rootFs, "test/one")
if err != nil {
-fs.Logf(nil, "err: %v", err)
+log.Printf("err: %v", err)
return err
}
if len(li) != 1 {
-fs.Logf(nil, "not expected listing /test/one: %v", li)
+log.Printf("not expected listing /test/one: %v", li)
return fmt.Errorf("not expected listing /test/one: %v", li)
}
if fi, ok := li[0].(os.FileInfo); ok {
if fi.Name() != "test2" {
-fs.Logf(nil, "not expected name: %v", fi.Name())
+log.Printf("not expected name: %v", fi.Name())
return fmt.Errorf("not expected name: %v", fi.Name())
}
} else if di, ok := li[0].(fs.DirEntry); ok {
if di.Remote() != "test/one/test2" {
-fs.Logf(nil, "not expected remote: %v", di.Remote())
+log.Printf("not expected remote: %v", di.Remote())
return fmt.Errorf("not expected remote: %v", di.Remote())
}
} else {
-fs.Logf(nil, "unexpected listing: %v", li)
+log.Printf("unexpected listing: %v", li)
return fmt.Errorf("unexpected listing: %v", li)
}
-fs.Logf(nil, "complete listing /test/one/test2")
+log.Printf("complete listing /test/one/test2")
return nil
}, 12, time.Second*10)
require.NoError(t, err)
@@ -688,7 +689,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
co, ok := o.(*cache.Object)
require.True(t, ok)
-for i := range 4 { // read first 4
+for i := 0; i < 4; i++ { // read first 4
_ = runInstance.readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false)
}
cfs.CleanUpCache(true)
@@ -707,7 +708,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
func TestInternalExpiredEntriesRemoved(t *testing.T) {
id := fmt.Sprintf("tieer%v", time.Now().Unix())
-vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 4) // needs to be lower than the defined
+vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -742,7 +743,7 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
}
func TestInternalBug2117(t *testing.T) {
-vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 10)
+vfsflags.Opt.DirCacheTime = time.Second * 10
id := fmt.Sprintf("tib2117%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
@@ -770,24 +771,24 @@ func TestInternalBug2117(t *testing.T) {
di, err := runInstance.list(t, rootFs, "test/dir1/dir2")
require.NoError(t, err)
-fs.Logf(nil, "len: %v", len(di))
+log.Printf("len: %v", len(di))
require.Len(t, di, 1)
time.Sleep(time.Second * 30)
di, err = runInstance.list(t, rootFs, "test/dir1/dir2")
require.NoError(t, err)
-fs.Logf(nil, "len: %v", len(di))
+log.Printf("len: %v", len(di))
require.Len(t, di, 1)
di, err = runInstance.list(t, rootFs, "test/dir1")
require.NoError(t, err)
-fs.Logf(nil, "len: %v", len(di))
+log.Printf("len: %v", len(di))
require.Len(t, di, 4)
di, err = runInstance.list(t, rootFs, "test")
require.NoError(t, err)
-fs.Logf(nil, "len: %v", len(di))
+log.Printf("len: %v", len(di))
require.Len(t, di, 4)
}
@@ -828,7 +829,7 @@ func newRun() *run {
} else {
r.tmpUploadDir = uploadDir
}
-fs.Logf(nil, "Temp Upload Dir: %v", r.tmpUploadDir)
+log.Printf("Temp Upload Dir: %v", r.tmpUploadDir)
return r
}
@@ -849,8 +850,8 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
fstest.Initialise()
remoteExists := false
-for _, s := range config.GetRemotes() {
-if s.Name == remote {
+for _, s := range config.FileSections() {
+if s == remote {
remoteExists = true
}
}
@@ -874,12 +875,12 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
cacheRemote := remote
if !remoteExists {
localRemote := remote + "-local"
-config.FileSetValue(localRemote, "type", "local")
-config.FileSetValue(localRemote, "nounc", "true")
+config.FileSet(localRemote, "type", "local")
+config.FileSet(localRemote, "nounc", "true")
m.Set("type", "cache")
m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
} else {
-remoteType := config.GetValue(remote, "type")
+remoteType := config.FileGet(remote, "type")
if remoteType == "" {
t.Skipf("skipped due to invalid remote type for %v", remote)
return nil, nil
@@ -890,14 +891,14 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
m.Set("password", cryptPassword1)
m.Set("password2", cryptPassword2)
}
-remoteRemote := config.GetValue(remote, "remote")
+remoteRemote := config.FileGet(remote, "remote")
if remoteRemote == "" {
t.Skipf("skipped due to invalid remote wrapper for %v", remote)
return nil, nil
}
remoteRemoteParts := strings.Split(remoteRemote, ":")
remoteWrapping := remoteRemoteParts[0]
-remoteType := config.GetValue(remoteWrapping, "type")
+remoteType := config.FileGet(remoteWrapping, "type")
if remoteType != "cache" {
t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
return nil, nil
@@ -934,7 +935,8 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
}
if purge {
-_ = operations.Purge(context.Background(), f, "")
+_ = f.Features().Purge(context.Background(), "")
+require.NoError(t, err)
}
err = f.Mkdir(context.Background(), "")
require.NoError(t, err)
@@ -947,7 +949,7 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
}
func (r *run) cleanupFs(t *testing.T, f fs.Fs) {
-err := operations.Purge(context.Background(), f, "")
+err := f.Features().Purge(context.Background(), "")
require.NoError(t, err)
cfs, err := r.getCacheFs(f)
require.NoError(t, err)
@@ -971,7 +973,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
f, err := os.CreateTemp("", "rclonecache-tempfile")
require.NoError(t, err)
-for range int(cnt) {
+for i := 0; i < int(cnt); i++ {
data := randStringBytes(int(chunk))
_, _ = f.Write(data)
}
@@ -1085,9 +1087,9 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
return err
}
-func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]any, error) {
+func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error) {
var err error
-var l []any
+var l []interface{}
var list fs.DirEntries
list, err = f.List(context.Background(), remote)
for _, ll := range list {
@@ -1096,6 +1098,27 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]any, error) {
return l, err
}
+func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
+in, err := os.Open(src)
+if err != nil {
+return err
+}
+defer func() {
+_ = in.Close()
+}()
+out, err := os.Create(dst)
+if err != nil {
+return err
+}
+defer func() {
+_ = out.Close()
+}()
+_, err = io.Copy(out, in)
+return err
+}
func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
var err error
@@ -1191,7 +1214,7 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
func (r *run) cleanSize(t *testing.T, size int64) int64 {
if r.rootIsCrypt {
denominator := int64(65536 + 16)
-size -= 32
+size = size - 32
quotient := size / denominator
remainder := size % denominator
return (quotient*65536 + remainder - 16)
@@ -1215,7 +1238,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
var err error
var state cache.BackgroundUploadState
-for range 2 {
+for i := 0; i < 2; i++ {
select {
case state = <-buCh:
// continue
@@ -1293,7 +1316,7 @@ func (r *run) completeAllBackgroundUploads(t *testing.T, f fs.Fs, lastRemote str
func (r *run) retryBlock(block func() error, maxRetries int, rate time.Duration) error {
var err error
-for range maxRetries {
+for i := 0; i < maxRetries; i++ {
err = block()
if err == nil {
return nil
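retryBlock, used throughout these tests, is a simple poll-until-success helper: run the block, return on first nil, otherwise try again up to maxRetries times at the given rate. A sketch consistent with how the call sites above use it (the sleep between attempts is an assumption; the hunk elides the loop's tail):

package sketch

import "time"

// retry keeps calling block until it succeeds or maxRetries attempts
// have been made, pausing rate between attempts; the last error wins.
func retry(block func() error, maxRetries int, rate time.Duration) error {
	var err error
	for i := 0; i < maxRetries; i++ {
		if err = block(); err == nil {
			return nil
		}
		time.Sleep(rate)
	}
	return err
}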


@@ -1,6 +1,7 @@
// Test Cache filesystem interface
//go:build !plan9 && !js && !race
+// +build !plan9,!js,!race
package cache_test
@@ -15,11 +16,10 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
-UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata", "ListP"},
-UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata", "SetMetadata"},
-UnimplementableDirectoryMethods: []string{"Metadata", "SetMetadata", "SetModTime"},
-SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
+UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt"},
+UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
+SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
})
}


@@ -2,6 +2,6 @@
// about "no buildable Go source files "
//go:build plan9 || js
+// +build plan9 js
-// Package cache implements a virtual provider to cache existing remotes.
package cache


@@ -1,4 +1,5 @@
//go:build !plan9 && !js && !race
+// +build !plan9,!js,!race
package cache_test
@@ -159,11 +160,11 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
minSize := 5242880
maxSize := 10485760
totalFiles := 10
-randInstance := rand.New(rand.NewSource(time.Now().Unix()))
+rand.Seed(time.Now().Unix())
lastFile := ""
-for i := range totalFiles {
-size := int64(randInstance.Intn(maxSize-minSize) + minSize)
+for i := 0; i < totalFiles; i++ {
+size := int64(rand.Intn(maxSize-minSize) + minSize)
testReader := runInstance.randomReader(t, size)
remote := "test/" + strconv.Itoa(i) + ".bin"
runInstance.writeRemoteReader(t, rootFs, remote, testReader)
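The left-hand side replaces the global rand.Seed, deprecated since Go 1.20, with a private generator. The two idioms side by side, as a sketch:

package sketch

import (
	"math/rand"
	"time"
)

// Old style: seed the package-global source (deprecated since Go 1.20).
func sizeOld(minSize, maxSize int) int64 {
	rand.Seed(time.Now().Unix())
	return int64(rand.Intn(maxSize-minSize) + minSize)
}

// New style: keep a private *rand.Rand, as the left-hand side does.
func sizeNew(minSize, maxSize int) int64 {
	r := rand.New(rand.NewSource(time.Now().Unix()))
	return int64(r.Intn(maxSize-minSize) + minSize)
}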


@@ -1,4 +1,5 @@
//go:build !plan9 && !js
+// +build !plan9,!js
package cache


@@ -1,4 +1,5 @@
//go:build !plan9 && !js
+// +build !plan9,!js
package cache
@@ -118,7 +119,7 @@ func (r *Handle) startReadWorkers() {
r.scaleWorkers(totalWorkers)
}
-// scaleWorkers will increase the worker pool count by the provided amount
+// scaleOutWorkers will increase the worker pool count by the provided amount
func (r *Handle) scaleWorkers(desired int) {
current := r.workers
if current == desired {
@@ -182,7 +183,7 @@ func (r *Handle) queueOffset(offset int64) {
}
}
-for i := range r.workers {
+for i := 0; i < r.workers; i++ {
o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
if o < 0 || o >= r.cachedObject.Size() {
continue
@@ -208,7 +209,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)
// we align the start offset of the first chunk to a likely chunk in the storage
-chunkStart -= offset
+chunkStart = chunkStart - offset
r.queueOffset(chunkStart)
found := false
@@ -222,7 +223,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
if !found {
// we're gonna give the workers a chance to pickup the chunk
// and retry a couple of times
-for i := range r.cacheFs().opt.ReadRetries * 8 {
+for i := 0; i < r.cacheFs().opt.ReadRetries*8; i++ {
data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
if err == nil {
found = true
@@ -327,7 +328,7 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
-chunkStart -= int64(r.cacheFs().opt.ChunkSize)
+chunkStart = chunkStart - int64(r.cacheFs().opt.ChunkSize)
}
r.queueOffset(chunkStart)
@@ -415,8 +416,10 @@ func (w *worker) run() {
continue
}
}
-} else if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
-continue
+} else {
+if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
+continue
+}
}
chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)
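getChunk and Seek both round the requested offset down to a chunk boundary before queueing work; the hunks only trade the compound assignment for its spelled-out form. The alignment arithmetic on its own, as a sketch:

package sketch

// alignToChunk rounds offset down to its chunk boundary and also returns
// the position within that chunk, matching the arithmetic above.
func alignToChunk(offset, chunkSize int64) (chunkStart, within int64) {
	within = offset % chunkSize
	chunkStart = offset - within
	return chunkStart, within
}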


@@ -1,4 +1,5 @@
//go:build !plan9 && !js
+// +build !plan9,!js
package cache


@@ -1,4 +1,5 @@
//go:build !plan9 && !js
+// +build !plan9,!js
package cache
@@ -209,7 +210,7 @@ func (p *plexConnector) authenticate() error {
if err != nil {
return err
}
-var data map[string]any
+var data map[string]interface{}
err = json.NewDecoder(resp.Body).Decode(&data)
if err != nil {
return fmt.Errorf("failed to obtain token: %w", err)
@@ -273,11 +274,11 @@ func (p *plexConnector) isPlaying(co *Object) bool {
}
// adapted from: https://stackoverflow.com/a/28878037 (credit)
-func get(m any, path ...any) (any, bool) {
+func get(m interface{}, path ...interface{}) (interface{}, bool) {
for _, p := range path {
switch idx := p.(type) {
case string:
-if mm, ok := m.(map[string]any); ok {
+if mm, ok := m.(map[string]interface{}); ok {
if val, found := mm[idx]; found {
m = val
continue
@@ -285,7 +286,7 @@ func get(m any, path ...any) (any, bool) {
}
return nil, false
case int:
-if mm, ok := m.([]any); ok {
+if mm, ok := m.([]interface{}); ok {
if len(mm) > idx {
m = mm[idx]
continue
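get() walks a json-decoded document with a mixed path of map keys (strings) and slice indexes (ints). A runnable equivalent, written against interface{} exactly as the right-hand side is; walk is an illustrative name:

package main

import (
	"encoding/json"
	"fmt"
)

// walk mirrors plex.go's get(): strings index maps, ints index slices;
// the boolean reports whether the whole path resolved.
func walk(m interface{}, path ...interface{}) (interface{}, bool) {
	for _, p := range path {
		switch idx := p.(type) {
		case string:
			mm, ok := m.(map[string]interface{})
			if !ok {
				return nil, false
			}
			if m, ok = mm[idx]; !ok {
				return nil, false
			}
		case int:
			mm, ok := m.([]interface{})
			if !ok || idx < 0 || idx >= len(mm) {
				return nil, false
			}
			m = mm[idx]
		default:
			return nil, false
		}
	}
	return m, true
}

func main() {
	var doc interface{}
	_ = json.Unmarshal([]byte(`{"MediaContainer":{"Video":[{"title":"x"}]}}`), &doc)
	fmt.Println(walk(doc, "MediaContainer", "Video", 0, "title"))
}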


@@ -1,4 +1,5 @@
//go:build !plan9 && !js
+// +build !plan9,!js
package cache


@@ -1,4 +1,5 @@
//go:build !plan9 && !js
+// +build !plan9,!js
package cache
@@ -18,7 +19,6 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk"
bolt "go.etcd.io/bbolt"
-"go.etcd.io/bbolt/errors"
)
// Constants
@@ -598,7 +598,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
})
if err != nil {
-if err == errors.ErrDatabaseNotOpen {
+if err == bolt.ErrDatabaseNotOpen {
// we're likely a late janitor and we need to end quietly as there's no guarantee of what exists anymore
return
}
@@ -607,16 +607,16 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
}
// Stats returns a go map with the stats key values
-func (b *Persistent) Stats() (map[string]map[string]any, error) {
-r := make(map[string]map[string]any)
-r["data"] = make(map[string]any)
+func (b *Persistent) Stats() (map[string]map[string]interface{}, error) {
+r := make(map[string]map[string]interface{})
+r["data"] = make(map[string]interface{})
r["data"]["oldest-ts"] = time.Now()
r["data"]["oldest-file"] = ""
r["data"]["newest-ts"] = time.Now()
r["data"]["newest-file"] = ""
r["data"]["total-chunks"] = 0
r["data"]["total-size"] = int64(0)
-r["files"] = make(map[string]any)
+r["files"] = make(map[string]interface{})
r["files"]["oldest-ts"] = time.Now()
r["files"]["oldest-name"] = ""
r["files"]["newest-ts"] = time.Now()


@@ -1,5 +1,3 @@
-//go:build !plan9 && !js
package cache
import bolt "go.etcd.io/bbolt"


@@ -29,7 +29,6 @@ import (
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
-"github.com/rclone/rclone/lib/encoder"
)
// Chunker's composite files have one or more chunks
@@ -102,10 +101,8 @@ var (
//
// And still chunker's primary function is to chunk large files
// rather than serve as a generic metadata container.
-const (
-maxMetadataSize = 1023
-maxMetadataSizeWritten = 255
-)
+const maxMetadataSize = 1023
+const maxMetadataSizeWritten = 255
// Current/highest supported metadata format.
const metadataVersion = 2
@@ -308,6 +305,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
root: rpath,
opt: *opt,
}
+cache.PinUntilFinalized(f.base, f)
f.dirSort = true // processEntries requires that meta Objects prerun data chunks atm.
if err := f.configure(opt.NameFormat, opt.MetaFormat, opt.HashType, opt.Transactions); err != nil {
@@ -319,45 +317,29 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
// i.e. `rpath` does not exist in the wrapped remote, but chunker
// detects a composite file because it finds the first chunk!
// (yet can't satisfy fstest.CheckListing, will ignore)
-if err == nil && !f.useMeta {
+if err == nil && !f.useMeta && strings.Contains(rpath, "/") {
firstChunkPath := f.makeChunkName(remotePath, 0, "", "")
-newBase, testErr := cache.Get(ctx, baseName+firstChunkPath)
+_, testErr := cache.Get(ctx, baseName+firstChunkPath)
if testErr == fs.ErrorIsFile {
-f.base = newBase
err = testErr
}
}
}
-cache.PinUntilFinalized(f.base, f)
-// Correct root if definitely pointing to a file
-if err == fs.ErrorIsFile {
-f.root = path.Dir(f.root)
-if f.root == "." || f.root == "/" {
-f.root = ""
-}
-}
// Note 1: the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs.
// Note 2: features.Fill() points features.PutStream to our PutStream,
// but features.Mask() will nullify it if wrappedFs does not have it.
f.features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: true,
ReadMimeType: false, // Object.MimeType not supported
WriteMimeType: true,
BucketBased: true,
CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: true,
-ReadDirMetadata: true,
-WriteDirMetadata: true,
-WriteDirSetModTime: true,
-UserDirMetadata: true,
-DirModTimeUpdatesOnWrite: true,
}).Fill(ctx, f).Mask(ctx, baseFs).WrapsFs(f, baseFs)
-f.features.ListR = nil // Recursive listing may cause chunker skip files
-f.features.ListP = nil // ListP not supported yet
+f.features.Disable("ListR") // Recursive listing may cause chunker skip files
return f, err
}
@@ -633,7 +615,7 @@ func (f *Fs) parseChunkName(filePath string) (parentPath string, chunkNo int, ct
// forbidChunk prints error message or raises error if file is chunk.
// First argument sets log prefix, use `false` to suppress message.
-func (f *Fs) forbidChunk(o any, filePath string) error {
+func (f *Fs) forbidChunk(o interface{}, filePath string) error {
if parentPath, _, _, _ := f.parseChunkName(filePath); parentPath != "" {
if f.opt.FailHard {
return fmt.Errorf("chunk overlap with %q", parentPath)
@@ -681,7 +663,7 @@ func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err
circleSec := unixSec % closestPrimeZzzzSeconds
first4chars := strconv.FormatInt(circleSec, 36)
-for range maxTransactionProbes {
+for tries := 0; tries < maxTransactionProbes; tries++ {
f.xactIDMutex.Lock()
randomness := f.xactIDRand.Int63n(maxTwoBase36Digits + 1)
f.xactIDMutex.Unlock()
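newXactID derives a short transaction ID from the time, base36-encoded, plus up to two random base36 digits; the hunk only modernises the probe loop. A sketch of the ID shape (the constants here are computed from base 36 for illustration; the real code uses a nearby prime for the time modulus):

package sketch

import (
	"math/rand"
	"strconv"
)

const (
	zzzz = 36*36*36*36 - 1 // largest 4-digit base-36 value
	zz   = 36*36 - 1       // largest 2-digit base-36 value
)

// xactID sketches the shape built above: a base36 time component
// followed by up to two random base36 digits.
func xactID(unixSec int64, rnd *rand.Rand) string {
	first := strconv.FormatInt(unixSec%zzzz, 36)
	last := strconv.FormatInt(rnd.Int63n(zz+1), 36)
	return first + last
}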
@@ -831,7 +813,8 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
}
case fs.Directory:
isSubdir[entry.Remote()] = true
-wrapDir := fs.NewDirWrapper(entry.Remote(), entry)
+wrapDir := fs.NewDirCopy(ctx, entry)
+wrapDir.SetRemote(entry.Remote())
tempEntries = append(tempEntries, wrapDir)
default:
if f.opt.FailHard {
@@ -964,11 +947,6 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
}
if caseInsensitive {
sameMain = strings.EqualFold(mainRemote, remote)
-if sameMain && f.base.Features().IsLocal {
-// on local, make sure the EqualFold still holds true when accounting for encoding.
-// sometimes paths with special characters will only normalize the same way in Standard Encoding.
-sameMain = strings.EqualFold(encoder.OS.FromStandardPath(mainRemote), encoder.OS.FromStandardPath(remote))
-}
} else {
sameMain = mainRemote == remote
}
@@ -982,13 +960,13 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
}
continue
}
-// fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
+//fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
if err := o.addChunk(entry, chunkNo); err != nil {
return nil, err
}
}
-if o.main == nil && len(o.chunks) == 0 {
+if o.main == nil && (o.chunks == nil || len(o.chunks) == 0) {
// Scanning hasn't found data chunks with conforming names.
if f.useMeta || quickScan {
// Metadata is required but absent and there are no chunks.
@@ -1144,8 +1122,8 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
// put implements Put, PutStream, PutUnchecked, Update
func (f *Fs) put(
ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options []fs.OpenOption,
-basePut putFn, action string, target fs.Object,
-) (obj fs.Object, err error) {
+basePut putFn, action string, target fs.Object) (obj fs.Object, err error) {
// Perform consistency checks
if err := f.forbidChunk(src, remote); err != nil {
return nil, fmt.Errorf("%s refused: %w", action, err)
@@ -1190,7 +1168,10 @@ func (f *Fs) put(
}
tempRemote := f.makeChunkName(baseRemote, c.chunkNo, "", xactID)
-size := min(c.sizeLeft, c.chunkSize)
+size := c.sizeLeft
+if size > c.chunkSize {
+size = c.chunkSize
+}
savedReadCount := c.readCount
// If a single chunk is expected, avoid the extra rename operation
@@ -1475,7 +1456,10 @@ func (c *chunkingReader) dummyRead(in io.Reader, size int64) error {
const bufLen = 1048576 // 1 MiB
buf := make([]byte, bufLen)
for size > 0 {
-n := min(size, bufLen)
+n := size
+if n > bufLen {
+n = bufLen
+}
if _, err := io.ReadFull(in, buf[0:n]); err != nil {
return err
}
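dummyRead drains exactly size bytes through a bounded 1 MiB buffer; the min() swap is again cosmetic. The stdlib spelling of the same effect, as a sketch:

package sketch

import "io"

// discard drops exactly size bytes from in, as dummyRead does with its
// manual buffer loop; io.CopyN plus io.Discard is the stdlib equivalent.
func discard(in io.Reader, size int64) error {
	_, err := io.CopyN(io.Discard, in, size)
	return err
}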
@@ -1579,14 +1563,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return f.base.Mkdir(ctx, dir)
}
-// MkdirMetadata makes the root directory of the Fs object
-func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
-if do := f.base.Features().MkdirMetadata; do != nil {
-return do(ctx, dir, metadata)
-}
-return nil, fs.ErrorNotImplemented
-}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
@@ -1861,8 +1837,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// baseMove chains to the wrapped Move or simulates it by Copy+Delete
func (f *Fs) baseMove(ctx context.Context, src fs.Object, remote string, delMode int) (fs.Object, error) {
-ctx, ci := fs.AddConfig(ctx)
-ci.NameTransform = nil // ensure operations.Move does not double-transform here
var (
dest fs.Object
err error
@@ -1906,14 +1880,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return do(ctx, srcFs.base, srcRemote, dstRemote)
}
-// DirSetModTime sets the directory modtime for dir
-func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
-if do := f.base.Features().DirSetModTime; do != nil {
-return do(ctx, dir, modTime)
-}
-return fs.ErrorNotImplemented
-}
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
@@ -1962,7 +1928,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
return
}
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
-// fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
+//fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
if entryType == fs.EntryObject {
mainPath, _, _, xactID := f.parseChunkName(path)
metaXactID := ""
@@ -2477,7 +2443,7 @@ func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte)
if len(data) > maxMetadataSizeWritten {
return nil, false, ErrMetaTooBig
}
-if len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
+if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
return nil, false, errors.New("invalid json")
}
var metadata metaSimpleJSON
@@ -2574,8 +2540,6 @@ var (
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
-_ fs.DirSetModTimer = (*Fs)(nil)
-_ fs.MkdirMetadataer = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
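The trailing var block is Go's compile-time interface check: assigning a typed nil to a blank identifier of the interface type makes the build fail if *Fs drifts from the contract, which is why optional features like DirSetModTimer are added and removed here in lockstep with their methods. The idiom in miniature, with illustrative types:

package sketch

import "context"

type mover interface {
	Move(ctx context.Context, src, dst string) error
}

type Fs struct{}

func (f *Fs) Move(ctx context.Context, src, dst string) error { return nil }

// Fails to compile the moment *Fs stops implementing mover.
var _ mover = (*Fs)(nil)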


@@ -40,7 +40,7 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
})
}
-type settings map[string]any
+type settings map[string]interface{}
func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, path string, opts settings) fs.Fs {
fsName := strings.Split(f.Name(), "{")[0] // strip off hash


@@ -36,17 +36,14 @@ func TestIntegration(t *testing.T) {
"GetTier",
"SetTier",
"Metadata",
-"SetMetadata",
},
UnimplementableFsMethods: []string{
"PublicLink",
"OpenWriterAt",
-"OpenChunkWriter",
"MergeDirs",
"DirCacheFlush",
"UserInfo",
"Disconnect",
-"ListP",
},
}
if *fstest.RemoteName == "" {


@@ -1,48 +0,0 @@
// Package api has type definitions for cloudinary
package api
import (
"fmt"
)
// CloudinaryEncoder extends the built-in encoder
type CloudinaryEncoder interface {
// FromStandardPath takes a / separated path in Standard encoding
// and converts it to a / separated path in this encoding.
FromStandardPath(string) string
// FromStandardName takes name in Standard encoding and converts
// it in this encoding.
FromStandardName(string) string
// ToStandardPath takes a / separated path in this encoding
// and converts it to a / separated path in Standard encoding.
ToStandardPath(string) string
// ToStandardName takes name in this encoding and converts
// it in Standard encoding.
ToStandardName(string, string) string
// Encoded root of the remote (as passed into NewFs)
FromStandardFullPath(string) string
}
// UpdateOptions was created to pass options from Update to Put
type UpdateOptions struct {
PublicID string
ResourceType string
DeliveryType string
AssetFolder string
DisplayName string
}
// Header formats the option as a string
func (o *UpdateOptions) Header() (string, string) {
return "UpdateOption", fmt.Sprintf("%s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
}
// Mandatory returns whether the option must be parsed or can be ignored
func (o *UpdateOptions) Mandatory() bool {
return false
}
// String formats the option into human-readable form
func (o *UpdateOptions) String() string {
return fmt.Sprintf("Fully qualified Public ID: %s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
}


@@ -1,754 +0,0 @@
// Package cloudinary provides an interface to the Cloudinary DAM
package cloudinary
import (
"context"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"path"
"slices"
"strconv"
"strings"
"time"
"github.com/cloudinary/cloudinary-go/v2"
SDKApi "github.com/cloudinary/cloudinary-go/v2/api"
"github.com/cloudinary/cloudinary-go/v2/api/admin"
"github.com/cloudinary/cloudinary-go/v2/api/admin/search"
"github.com/cloudinary/cloudinary-go/v2/api/uploader"
"github.com/rclone/rclone/backend/cloudinary/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"github.com/zeebo/blake3"
)
// Cloudinary shouldn't have a trailing dot if there is no path
func cldPathDir(somePath string) string {
if somePath == "" || somePath == "." {
return somePath
}
dir := path.Dir(somePath)
if dir == "." {
return ""
}
return dir
}
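cldPathDir exists because path.Dir returns "." for bare names, and Cloudinary folder paths must not carry that placeholder. A standalone copy with the edge cases spelled out (parentDir is an illustrative rename):

package main

import (
	"fmt"
	"path"
)

// parentDir behaves like cldPathDir above: parent of the path, but ""
// instead of path.Dir's "." placeholder.
func parentDir(p string) string {
	if p == "" || p == "." {
		return p
	}
	if dir := path.Dir(p); dir != "." {
		return dir
	}
	return ""
}

func main() {
	fmt.Printf("%q %q %q\n", parentDir("a/b/c"), parentDir("c"), parentDir(""))
	// "a/b" "" ""
}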
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "cloudinary",
Description: "Cloudinary",
NewFs: NewFs,
Options: []fs.Option{
{
Name: "cloud_name",
Help: "Cloudinary Environment Name",
Required: true,
Sensitive: true,
},
{
Name: "api_key",
Help: "Cloudinary API Key",
Required: true,
Sensitive: true,
},
{
Name: "api_secret",
Help: "Cloudinary API Secret",
Required: true,
Sensitive: true,
},
{
Name: "upload_prefix",
Help: "Specify the API endpoint for environments out of the US",
},
{
Name: "upload_preset",
Help: "Upload Preset to select asset manipulation on upload",
},
{
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.Base | // Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
encoder.EncodeSlash |
encoder.EncodeLtGt |
encoder.EncodeDoubleQuote |
encoder.EncodeQuestion |
encoder.EncodeAsterisk |
encoder.EncodePipe |
encoder.EncodeHash |
encoder.EncodePercent |
encoder.EncodeBackSlash |
encoder.EncodeDel |
encoder.EncodeCtl |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8 |
encoder.EncodeDot),
},
{
Name: "eventually_consistent_delay",
Default: fs.Duration(0),
Advanced: true,
Help: "Wait N seconds for eventual consistency of the databases that support the backend operation",
},
{
Name: "adjust_media_files_extensions",
Default: true,
Advanced: true,
Help: "Cloudinary handles media formats as a file attribute and strips it from the name, which is unlike most other file systems",
},
{
Name: "media_extensions",
Default: []string{
"3ds", "3g2", "3gp", "ai", "arw", "avi", "avif", "bmp", "bw",
"cr2", "cr3", "djvu", "dng", "eps3", "fbx", "flif", "flv", "gif",
"glb", "gltf", "hdp", "heic", "heif", "ico", "indd", "jp2", "jpe",
"jpeg", "jpg", "jxl", "jxr", "m2ts", "mov", "mp4", "mpeg", "mts",
"mxf", "obj", "ogv", "pdf", "ply", "png", "psd", "svg", "tga",
"tif", "tiff", "ts", "u3ma", "usdz", "wdp", "webm", "webp", "wmv"},
Advanced: true,
Help: "Cloudinary supported media extensions",
},
},
})
}
// Options defines the configuration for this backend
type Options struct {
CloudName string `config:"cloud_name"`
APIKey string `config:"api_key"`
APISecret string `config:"api_secret"`
UploadPrefix string `config:"upload_prefix"`
UploadPreset string `config:"upload_preset"`
Enc encoder.MultiEncoder `config:"encoding"`
EventuallyConsistentDelay fs.Duration `config:"eventually_consistent_delay"`
MediaExtensions []string `config:"media_extensions"`
AdjustMediaFilesExtensions bool `config:"adjust_media_files_extensions"`
}
// Fs represents a remote cloudinary server
type Fs struct {
name string
root string
opt Options
features *fs.Features
pacer *fs.Pacer
srv *rest.Client // For downloading assets via the Cloudinary CDN
cld *cloudinary.Cloudinary // API calls are going through the Cloudinary SDK
lastCRUD time.Time
}
// Object describes a cloudinary object
type Object struct {
fs *Fs
remote string
size int64
modTime time.Time
url string
md5sum string
publicID string
resourceType string
deliveryType string
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// Initialize the Cloudinary client
cld, err := cloudinary.NewFromParams(opt.CloudName, opt.APIKey, opt.APISecret)
if err != nil {
return nil, fmt.Errorf("failed to create Cloudinary client: %w", err)
}
cld.Admin.Client = *fshttp.NewClient(ctx)
cld.Upload.Client = *fshttp.NewClient(ctx)
if opt.UploadPrefix != "" {
cld.Config.API.UploadPrefix = opt.UploadPrefix
}
client := fshttp.NewClient(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
cld: cld,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(1000), pacer.MaxSleep(10000), pacer.DecayConstant(2))),
srv: rest.NewClient(client),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
if root != "" {
        // Check to see if the root is actually an existing file
remote := path.Base(root)
f.root = cldPathDir(root)
_, err := f.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) {
// File doesn't exist so return the previous root
f.root = root
return f, nil
}
return nil, err
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
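// Illustrative only: once a remote such as the hypothetical "cloudinary-media"
// stanza above is configured, this Fs is normally constructed through rclone's
// generic entry point rather than by calling NewFs directly:
//
//	f, err := fs.NewFs(ctx, "cloudinary-media:assets/2024")
//	// f wraps the Cloudinary Admin and Upload SDK clients built above.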
// ------------------------------------------------------------
// FromStandardPath implementation of the api.CloudinaryEncoder
func (f *Fs) FromStandardPath(s string) string {
return strings.ReplaceAll(f.opt.Enc.FromStandardPath(s), "&", "\uFF06")
}
// FromStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) FromStandardName(s string) string {
if f.opt.AdjustMediaFilesExtensions {
parsedURL, err := url.Parse(s)
ext := ""
if err != nil {
fs.Logf(nil, "Error parsing URL: %v", err)
} else {
ext = path.Ext(parsedURL.Path)
if slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
s = strings.TrimSuffix(parsedURL.Path, ext)
}
}
}
return strings.ReplaceAll(f.opt.Enc.FromStandardName(s), "&", "\uFF06")
}
// ToStandardPath implementation of the api.CloudinaryEncoder
func (f *Fs) ToStandardPath(s string) string {
return strings.ReplaceAll(f.opt.Enc.ToStandardPath(s), "\uFF06", "&")
}
// ToStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) ToStandardName(s string, assetURL string) string {
ext := ""
if f.opt.AdjustMediaFilesExtensions {
parsedURL, err := url.Parse(assetURL)
if err != nil {
fs.Logf(nil, "Error parsing URL: %v", err)
} else {
ext = path.Ext(parsedURL.Path)
if !slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
ext = ""
}
}
}
return strings.ReplaceAll(f.opt.Enc.ToStandardName(s), "\uFF06", "&") + ext
}
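// For example (assuming "jpg" is listed in media_extensions and
// adjust_media_files_extensions is on): an asset with DisplayName
// "holiday-photo" and a SecureURL ending in "/holiday-photo.jpg" is reported
// to rclone as "holiday-photo.jpg", so the extension Cloudinary stripped
// becomes visible again on the rclone side.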
// FromStandardFullPath encodes a full path to Cloudinary standard
func (f *Fs) FromStandardFullPath(dir string) string {
return path.Join(api.CloudinaryEncoder.FromStandardPath(f, f.root), api.CloudinaryEncoder.FromStandardPath(f, dir))
}
// ToAssetFolderAPI encodes folders as expected by the Cloudinary SDK
func (f *Fs) ToAssetFolderAPI(dir string) string {
return strings.ReplaceAll(dir, "%", "%25")
}
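// For example, "reports%2024" becomes "reports%252024", so a literal percent
// sign in a folder name survives the SDK's URL handling.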
// ToDisplayNameElastic encodes a special case of elasticsearch
func (f *Fs) ToDisplayNameElastic(dir string) string {
return strings.ReplaceAll(dir, "!", "\\!")
}
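// For example, a display name of "new!" is queried as "new\!" so the
// exclamation mark is not parsed as a search operator.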
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// WaitEventuallyConsistent waits till the FS is eventually consistent
func (f *Fs) WaitEventuallyConsistent() {
if f.opt.EventuallyConsistentDelay == fs.Duration(0) {
return
}
delay := time.Duration(f.opt.EventuallyConsistentDelay)
timeSinceLastCRUD := time.Since(f.lastCRUD)
if timeSinceLastCRUD < delay {
time.Sleep(delay - timeSinceLastCRUD)
}
}
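// For example, with eventually_consistent_delay = 5 and a write that finished
// 2 seconds ago, the next search-backed call sleeps roughly 3 more seconds
// before querying, giving the eventually consistent Admin/Search databases
// time to catch up.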
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("Cloudinary root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// List the objects and directories in dir into entries
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
remotePrefix := f.FromStandardFullPath(dir)
if remotePrefix != "" && !strings.HasSuffix(remotePrefix, "/") {
remotePrefix += "/"
}
var entries fs.DirEntries
dirs := make(map[string]struct{})
nextCursor := ""
f.WaitEventuallyConsistent()
for {
        // Use the folders API to list folders.
folderParams := admin.SubFoldersParams{
Folder: f.ToAssetFolderAPI(remotePrefix),
MaxResults: 500,
}
if nextCursor != "" {
folderParams.NextCursor = nextCursor
}
results, err := f.cld.Admin.SubFolders(ctx, folderParams)
if err != nil {
return nil, fmt.Errorf("failed to list sub-folders: %w", err)
}
if results.Error.Message != "" {
if strings.HasPrefix(results.Error.Message, "Can't find folder with path") {
return nil, fs.ErrorDirNotFound
}
return nil, fmt.Errorf("failed to list sub-folders: %s", results.Error.Message)
}
for _, folder := range results.Folders {
relativePath := api.CloudinaryEncoder.ToStandardPath(f, strings.TrimPrefix(folder.Path, remotePrefix))
parts := strings.Split(relativePath, "/")
// It's a directory
dirName := parts[len(parts)-1]
if _, found := dirs[dirName]; !found {
d := fs.NewDir(path.Join(dir, dirName), time.Time{})
entries = append(entries, d)
dirs[dirName] = struct{}{}
}
}
// Break if there are no more results
if results.NextCursor == "" {
break
}
nextCursor = results.NextCursor
}
for {
// Use the assets.AssetsByAssetFolder API to list assets
assetsParams := admin.AssetsByAssetFolderParams{
AssetFolder: remotePrefix,
MaxResults: 500,
}
if nextCursor != "" {
assetsParams.NextCursor = nextCursor
}
results, err := f.cld.Admin.AssetsByAssetFolder(ctx, assetsParams)
if err != nil {
return nil, fmt.Errorf("failed to list assets: %w", err)
}
for _, asset := range results.Assets {
remote := path.Join(dir, api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName, asset.SecureURL))
o := &Object{
fs: f,
remote: remote,
size: int64(asset.Bytes),
modTime: asset.CreatedAt,
url: asset.SecureURL,
publicID: asset.PublicID,
resourceType: asset.AssetType,
deliveryType: asset.Type,
}
entries = append(entries, o)
}
// Break if there are no more results
if results.NextCursor == "" {
break
}
nextCursor = results.NextCursor
}
return entries, nil
}
// NewObject finds the Object at remote. If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
searchParams := search.Query{
Expression: fmt.Sprintf("asset_folder:\"%s\" AND display_name:\"%s\"",
f.FromStandardFullPath(cldPathDir(remote)),
f.ToDisplayNameElastic(api.CloudinaryEncoder.FromStandardName(f, path.Base(remote)))),
SortBy: []search.SortByField{{"uploaded_at": "desc"}},
MaxResults: 2,
}
var results *admin.SearchResult
f.WaitEventuallyConsistent()
err := f.pacer.Call(func() (bool, error) {
var err1 error
results, err1 = f.cld.Admin.Search(ctx, searchParams)
if err1 == nil && results.TotalCount != len(results.Assets) {
err1 = errors.New("partial response so waiting for eventual consistency")
}
return shouldRetry(ctx, nil, err1)
})
if err != nil {
return nil, fs.ErrorObjectNotFound
}
if results.TotalCount == 0 || len(results.Assets) == 0 {
return nil, fs.ErrorObjectNotFound
}
asset := results.Assets[0]
o := &Object{
fs: f,
remote: remote,
size: int64(asset.Bytes),
modTime: asset.UploadedAt,
url: asset.SecureURL,
md5sum: asset.Etag,
publicID: asset.PublicID,
resourceType: asset.ResourceType,
deliveryType: asset.Type,
}
return o, nil
}
func (f *Fs) getSuggestedPublicID(assetFolder string, displayName string, modTime time.Time) string {
payload := []byte(path.Join(assetFolder, displayName))
hash := blake3.Sum256(payload)
return hex.EncodeToString(hash[:])
}
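// Illustrative only: the suggested public ID is the hex-encoded BLAKE3 digest
// of the joined folder and display name, so equal paths map to equal IDs
// (note the modTime parameter is not currently part of the hash):
//
//	id := f.getSuggestedPublicID("docs/2024", "report.pdf", time.Now())
//	// id == hex(blake3.Sum256([]byte("docs/2024/report.pdf")))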
// Put uploads content to Cloudinary
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if src.Size() == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
params := uploader.UploadParams{
UploadPreset: f.opt.UploadPreset,
}
updateObject := false
var modTime time.Time
for _, option := range options {
if updateOptions, ok := option.(*api.UpdateOptions); ok {
if updateOptions.PublicID != "" {
updateObject = true
params.Overwrite = SDKApi.Bool(true)
params.Invalidate = SDKApi.Bool(true)
params.PublicID = updateOptions.PublicID
params.ResourceType = updateOptions.ResourceType
params.Type = SDKApi.DeliveryType(updateOptions.DeliveryType)
params.AssetFolder = updateOptions.AssetFolder
params.DisplayName = updateOptions.DisplayName
modTime = src.ModTime(ctx)
}
}
}
if !updateObject {
params.AssetFolder = f.FromStandardFullPath(cldPathDir(src.Remote()))
params.DisplayName = api.CloudinaryEncoder.FromStandardName(f, path.Base(src.Remote()))
// We want to conform to the unique asset ID of rclone, which is (asset_folder,display_name,last_modified).
        // We also want to let customers choose their own public_id in case duplicate names are not a crucial use case.
        // Upload presets that apply randomness to the public ID would not work well with rclone's duplicate-asset support.
params.FilenameOverride = f.getSuggestedPublicID(params.AssetFolder, params.DisplayName, src.ModTime(ctx))
}
uploadResult, err := f.cld.Upload.Upload(ctx, in, params)
f.lastCRUD = time.Now()
if err != nil {
return nil, fmt.Errorf("failed to upload to Cloudinary: %w", err)
}
if !updateObject {
modTime = uploadResult.CreatedAt
}
if uploadResult.Error.Message != "" {
return nil, errors.New(uploadResult.Error.Message)
}
o := &Object{
fs: f,
remote: src.Remote(),
size: int64(uploadResult.Bytes),
modTime: modTime,
url: uploadResult.SecureURL,
md5sum: uploadResult.Etag,
publicID: uploadResult.PublicID,
resourceType: uploadResult.ResourceType,
deliveryType: uploadResult.Type,
}
return o, nil
}
// Precision of the remote
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Hashes returns the supported hash sets
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// Mkdir creates empty folders
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
params := admin.CreateFolderParams{Folder: f.ToAssetFolderAPI(f.FromStandardFullPath(dir))}
res, err := f.cld.Admin.CreateFolder(ctx, params)
f.lastCRUD = time.Now()
if err != nil {
return err
}
if res.Error.Message != "" {
return errors.New(res.Error.Message)
}
return nil
}
// Rmdir deletes empty folders
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// Additional test because Cloudinary will delete folders without
// assets, regardless of empty sub-folders
folder := f.ToAssetFolderAPI(f.FromStandardFullPath(dir))
folderParams := admin.SubFoldersParams{
Folder: folder,
MaxResults: 1,
}
results, err := f.cld.Admin.SubFolders(ctx, folderParams)
if err != nil {
return err
}
if results.TotalCount > 0 {
return fs.ErrorDirectoryNotEmpty
}
params := admin.DeleteFolderParams{Folder: folder}
res, err := f.cld.Admin.DeleteFolder(ctx, params)
f.lastCRUD = time.Now()
if err != nil {
return err
}
if res.Error.Message != "" {
if strings.HasPrefix(res.Error.Message, "Can't find folder with path") {
return fs.ErrorDirNotFound
}
return errors.New(res.Error.Message)
}
return nil
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
420, // Too Many Requests (legacy)
429, // Too Many Requests
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
if err != nil {
tryAgain := "Try again on "
if idx := strings.Index(err.Error(), tryAgain); idx != -1 {
layout := "2006-01-02 15:04:05 UTC"
dateStr := err.Error()[idx+len(tryAgain) : idx+len(tryAgain)+len(layout)]
timestamp, err2 := time.Parse(layout, dateStr)
if err2 == nil {
return true, fserrors.NewErrorRetryAfter(time.Until(timestamp))
}
}
fs.Debugf(nil, "Retrying API error %v", err)
return true, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
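// Illustrative only: a rate-limit style error whose text embeds a timestamp,
// e.g. "... Try again on 2024-01-02 15:04:05 UTC", is converted by shouldRetry
// into an fserrors.NewErrorRetryAfter hint so the pacer sleeps until that
// time; other errors fall through to the generic retry checks above.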
// ------------------------------------------------------------
// Hash returns the MD5 of an object
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
if ty != hash.MD5 {
return "", hash.ErrUnsupported
}
return o.md5sum, nil
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// Size of object in bytes
func (o *Object) Size() int64 {
return o.size
}
// Storable returns if this object is storable
func (o *Object) Storable() bool {
return true
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return fs.ErrorCantSetModTime
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
var resp *http.Response
opts := rest.Opts{
Method: "GET",
RootURL: o.url,
Options: options,
}
var offset int64
var count int64
var key string
var value string
fs.FixRangeOption(options, o.size)
for _, option := range options {
switch x := option.(type) {
case *fs.RangeOption:
offset, count = x.Decode(o.size)
if count < 0 {
count = o.size - offset
}
key, value = option.Header()
case *fs.SeekOption:
offset = x.Offset
count = o.size - offset
key, value = option.Header()
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
if key != "" && value != "" {
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders[key] = value
}
// Make sure that the asset is fully available
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
if err == nil {
cl, clErr := strconv.Atoi(resp.Header.Get("content-length"))
if clErr == nil && count == int64(cl) {
return false, nil
}
}
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed download of \"%s\": %w", o.url, err)
}
return resp.Body, err
}
// Update the object with the contents of the io.Reader
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
options = append(options, &api.UpdateOptions{
PublicID: o.publicID,
ResourceType: o.resourceType,
DeliveryType: o.deliveryType,
DisplayName: api.CloudinaryEncoder.FromStandardName(o.fs, path.Base(o.Remote())),
AssetFolder: o.fs.FromStandardFullPath(cldPathDir(o.Remote())),
})
updatedObj, err := o.fs.Put(ctx, in, src, options...)
if err != nil {
return err
}
if uo, ok := updatedObj.(*Object); ok {
o.size = uo.size
o.modTime = time.Now() // Skipping uo.modTime because the API returns the create time
o.url = uo.url
o.md5sum = uo.md5sum
o.publicID = uo.publicID
o.resourceType = uo.resourceType
o.deliveryType = uo.deliveryType
}
return nil
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
params := uploader.DestroyParams{
PublicID: o.publicID,
ResourceType: o.resourceType,
Type: o.deliveryType,
}
res, dErr := o.fs.cld.Upload.Destroy(ctx, params)
o.fs.lastCRUD = time.Now()
if dErr != nil {
return dErr
}
if res.Error.Message != "" {
return errors.New(res.Error.Message)
}
if res.Result != "ok" {
return errors.New(res.Result)
}
return nil
}


@@ -1,23 +0,0 @@
// Test Cloudinary filesystem interface
package cloudinary_test
import (
"testing"
"github.com/rclone/rclone/backend/cloudinary"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
name := "TestCloudinary"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*cloudinary.Object)(nil),
SkipInvalidUTF8: true,
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "eventually_consistent_delay", Value: "7"},
},
})
}


@@ -1,4 +1,4 @@
// Package combine implements a backend to combine multiple remotes in a directory tree
// Package combine implents a backend to combine multiple remotes in a directory tree
package combine
/*
@@ -20,7 +20,6 @@ import (
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/list"
    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/fs/walk"
    "golang.org/x/sync/errgroup"
@@ -187,6 +186,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
    g, gCtx := errgroup.WithContext(ctx)
    var mu sync.Mutex
    for _, upstream := range opt.Upstreams {
        upstream := upstream
        g.Go(func() (err error) {
            equal := strings.IndexRune(upstream, '=')
            if equal < 0 {
@@ -222,40 +222,30 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
    }
    // check features
    var features = (&fs.Features{
        CaseInsensitive:         true,
        DuplicateFiles:          false,
        ReadMimeType:            true,
        WriteMimeType:           true,
        CanHaveEmptyDirectories: true,
        BucketBased:             true,
        SetTier:                 true,
        GetTier:                 true,
        ReadMetadata:            true,
        WriteMetadata:           true,
        UserMetadata:            true,
        ReadDirMetadata:          true,
        WriteDirMetadata:         true,
        WriteDirSetModTime:       true,
        UserDirMetadata:          true,
        DirModTimeUpdatesOnWrite: true,
        PartialUploads:           true,
    }).Fill(ctx, f)
    canMove, slowHash := true, false
    canMove := true
    for _, u := range f.upstreams {
        features = features.Mask(ctx, u.f) // Mask all upstream fs
        if !operations.CanServerSideMove(u.f) {
            canMove = false
        }
        slowHash = slowHash || u.f.Features().SlowHash
    }
    // We can move if all remotes support Move or Copy
    if canMove {
        features.Move = f.Move
    }
    // If any of upstreams are SlowHash, propagate it
    features.SlowHash = slowHash
    // Enable ListR when upstreams either support ListR or is local
    // But not when all upstreams are local
    if features.ListR == nil {
@@ -269,9 +259,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
        }
    }
    // Enable ListP always
    features.ListP = f.ListP
    // Enable Purge when any upstreams support it
    if features.Purge == nil {
        for _, u := range f.upstreams {
@@ -302,16 +289,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
        }
    }
    // Enable CleanUp when any upstreams support it
    if features.CleanUp == nil {
        for _, u := range f.upstreams {
            if u.f.Features().CleanUp != nil {
                features.CleanUp = f.CleanUp
                break
            }
        }
    }
    // Enable ChangeNotify when any upstreams support it
    if features.ChangeNotify == nil {
        for _, u := range f.upstreams {
@@ -322,9 +299,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
        }
    }
    // show that we wrap other backends
    features.Overlay = true
    f.features = features
    // Get common intersection of hashes
@@ -369,6 +343,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream) error) error {
    g, gCtx := errgroup.WithContext(ctx)
    for _, u := range f.upstreams {
        u := u
        g.Go(func() (err error) {
            return fn(gCtx, u)
        })
@@ -376,7 +351,7 @@ func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream
    return g.Wait()
}
// join the elements together but unlike path.Join return empty string
// join the elements together but unline path.Join return empty string
func join(elem ...string) string {
    result := path.Join(elem...)
    if result == "." {
@@ -451,32 +426,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
    return u.f.Mkdir(ctx, uRemote)
}
// MkdirMetadata makes the root directory of the Fs object
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
    u, uRemote, err := f.findUpstream(dir)
    if err != nil {
        return nil, err
    }
    do := u.f.Features().MkdirMetadata
    if do == nil {
        return nil, fs.ErrorNotImplemented
    }
    newDir, err := do(ctx, uRemote, metadata)
    if err != nil {
        return nil, err
    }
    entries := fs.DirEntries{newDir}
    entries, err = u.wrapEntries(ctx, entries)
    if err != nil {
        return nil, err
    }
    newDir, ok := entries[0].(fs.Directory)
    if !ok {
        return nil, fmt.Errorf("internal error: expecting %T to be fs.Directory", entries[0])
    }
    return newDir, nil
}
// purge the upstream or fallback to a slow way
func (u *upstream) purge(ctx context.Context, dir string) (err error) {
    if do := u.f.Features().Purge; do != nil {
@@ -635,6 +584,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
    var uChans []chan time.Duration
    for _, u := range f.upstreams {
        u := u
        if do := u.f.Features().ChangeNotify; do != nil {
            ch := make(chan time.Duration)
            uChans = append(uChans, ch)
@@ -791,11 +741,12 @@ func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.D
        case fs.Object:
            entries[i] = u.newObject(x)
        case fs.Directory:
            newPath, err := u.pathAdjustment.do(x.Remote())
            newDir := fs.NewDirCopy(ctx, x)
            newPath, err := u.pathAdjustment.do(newDir.Remote())
            if err != nil {
                return nil, err
            }
            newDir := fs.NewDirWrapper(newPath, x)
            newDir.SetRemote(newPath)
            entries[i] = newDir
        default:
            return nil, fmt.Errorf("unknown entry type %T", entry)
@@ -814,52 +765,24 @@ func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.D
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
    // defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
    if f.root == "" && dir == "" {
        entries := make(fs.DirEntries, 0, len(f.upstreams))
        entries = make(fs.DirEntries, 0, len(f.upstreams))
        for combineDir := range f.upstreams {
            d := fs.NewLimitedDirWrapper(combineDir, fs.NewDir(combineDir, f.when))
            d := fs.NewDir(combineDir, f.when)
            entries = append(entries, d)
        }
        return callback(entries)
        return entries, nil
    }
    u, uRemote, err := f.findUpstream(dir)
    if err != nil {
        return err
        return nil, err
    }
    wrappedCallback := func(entries fs.DirEntries) error {
        entries, err := u.wrapEntries(ctx, entries)
        if err != nil {
            return err
        }
        return callback(entries)
    }
    listP := u.f.Features().ListP
    if listP == nil {
        entries, err := u.f.List(ctx, uRemote)
        if err != nil {
            return err
        }
        return wrappedCallback(entries)
    }
    return listP(ctx, uRemote, wrappedCallback)
    entries, err = u.f.List(ctx, uRemote)
    if err != nil {
        return nil, err
    }
    return u.wrapEntries(ctx, entries)
}
// ListR lists the objects and directories of the Fs starting
@@ -964,116 +887,6 @@ func (f *Fs) Shutdown(ctx context.Context) error {
    })
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
u, uRemote, err := f.findUpstream(remote)
if err != nil {
return "", err
}
do := u.f.Features().PublicLink
if do == nil {
return "", fs.ErrorNotImplemented
}
return do(ctx, uRemote, expire, unlink)
}
// PutUnchecked in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
//
// May create duplicates or return errors if src already
// exists.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
srcPath := src.Remote()
u, uRemote, err := f.findUpstream(srcPath)
if err != nil {
return nil, err
}
do := u.f.Features().PutUnchecked
if do == nil {
return nil, fs.ErrorNotImplemented
}
uSrc := fs.NewOverrideRemote(src, uRemote)
return do(ctx, in, uSrc, options...)
}
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
if len(dirs) == 0 {
return nil
}
var (
u *upstream
uDirs []fs.Directory
)
for _, dir := range dirs {
uNew, uDir, err := f.findUpstream(dir.Remote())
if err != nil {
return err
}
if u == nil {
u = uNew
} else if u != uNew {
return fmt.Errorf("can't merge directories from different upstreams")
}
uDirs = append(uDirs, fs.NewOverrideDirectory(dir, uDir))
}
do := u.f.Features().MergeDirs
if do == nil {
return fs.ErrorNotImplemented
}
return do(ctx, uDirs)
}
// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
u, uDir, err := f.findUpstream(dir)
if err != nil {
return err
}
if uDir == "" {
fs.Debugf(dir, "Can't set modtime on upstream root. skipping.")
return nil
}
if do := u.f.Features().DirSetModTime; do != nil {
return do(ctx, uDir, modTime)
}
return fs.ErrorNotImplemented
}
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
func (f *Fs) CleanUp(ctx context.Context) error {
return f.multithread(ctx, func(ctx context.Context, u *upstream) error {
if do := u.f.Features().CleanUp; do != nil {
return do(ctx)
}
return nil
})
}
// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
u, uRemote, err := f.findUpstream(remote)
if err != nil {
return nil, err
}
do := u.f.Features().OpenWriterAt
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx, uRemote, size)
}
// Object describes a wrapped Object
//
// This is a wrapped Object which knows its path prefix
@@ -1103,7 +916,7 @@ func (o *Object) String() string {
func (o *Object) Remote() string {
    newPath, err := o.u.pathAdjustment.do(o.Object.String())
    if err != nil {
        fs.Errorf(o.Object, "Bad object: %v", err)
        fs.Errorf(o, "Bad object: %v", err)
        return err.Error()
    }
    return newPath
@@ -1152,17 +965,6 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
    return do.Metadata(ctx)
}
// SetMetadata sets metadata for an Object
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
    do, ok := o.Object.(fs.SetMetadataer)
    if !ok {
        return fs.ErrorNotImplemented
    }
    return do.SetMetadata(ctx, metadata)
}
// SetTier performs changing storage tier of the Object if
// multiple storage classes supported
func (o *Object) SetTier(tier string) error {
@@ -1186,12 +988,5 @@ var (
    _ fs.Abouter    = (*Fs)(nil)
    _ fs.ListRer    = (*Fs)(nil)
    _ fs.Shutdowner = (*Fs)(nil)
    _ fs.PublicLinker    = (*Fs)(nil)
    _ fs.PutUncheckeder  = (*Fs)(nil)
    _ fs.MergeDirser     = (*Fs)(nil)
    _ fs.DirSetModTimer  = (*Fs)(nil)
    _ fs.MkdirMetadataer = (*Fs)(nil)
    _ fs.CleanUpper      = (*Fs)(nil)
    _ fs.OpenWriterAter  = (*Fs)(nil)
    _ fs.FullObject = (*Object)(nil)
)


@@ -10,11 +10,6 @@ import (
"github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/fstest/fstests"
) )
var (
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "OpenChunkWriter"}
unimplementableObjectMethods = []string{}
)
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) { func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" { if *fstest.RemoteName == "" {
@@ -22,8 +17,8 @@ func TestIntegration(t *testing.T) {
} }
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName, RemoteName: *fstest.RemoteName,
UnimplementableFsMethods: unimplementableFsMethods, UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: unimplementableObjectMethods, UnimplementableObjectMethods: []string{"MimeType"},
}) })
} }
@@ -40,9 +35,7 @@ func TestLocal(t *testing.T) {
{Name: name, Key: "type", Value: "combine"}, {Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams}, {Name: name, Key: "upstreams", Value: upstreams},
}, },
QuickTestOK: true, QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
}) })
} }
@@ -58,9 +51,7 @@ func TestMemory(t *testing.T) {
{Name: name, Key: "type", Value: "combine"}, {Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams}, {Name: name, Key: "upstreams", Value: upstreams},
}, },
QuickTestOK: true, QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
}) })
} }
@@ -77,8 +68,6 @@ func TestMixed(t *testing.T) {
{Name: name, Key: "type", Value: "combine"}, {Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams}, {Name: name, Key: "upstreams", Value: upstreams},
}, },
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
}) })
} }


@@ -14,7 +14,6 @@ import (
"fmt" "fmt"
"io" "io"
"os" "os"
"path"
"regexp" "regexp"
"strings" "strings"
"time" "time"
@@ -29,7 +28,6 @@ import (
"github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/log" "github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/operations"
@@ -39,7 +37,6 @@ import (
const ( const (
initialChunkSize = 262144 // Initial and max sizes of chunks when reading parts of the file. Currently initialChunkSize = 262144 // Initial and max sizes of chunks when reading parts of the file. Currently
maxChunkSize = 8388608 // at 256 KiB and 8 MiB. maxChunkSize = 8388608 // at 256 KiB and 8 MiB.
chunkStreams = 0 // Streams to use for reading
bufferSize = 8388608 bufferSize = 8388608
heuristicBytes = 1048576 heuristicBytes = 1048576
@@ -175,33 +172,20 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
opt: *opt, opt: *opt,
mode: compressionModeFromName(opt.CompressionMode), mode: compressionModeFromName(opt.CompressionMode),
} }
// Correct root if definitely pointing to a file
if err == fs.ErrorIsFile {
f.root = path.Dir(f.root)
if f.root == "." || f.root == "/" {
f.root = ""
}
}
// the features here are ones we could support, and they are // the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs // ANDed with the ones from wrappedFs
f.features = (&fs.Features{ f.features = (&fs.Features{
CaseInsensitive: true, CaseInsensitive: true,
DuplicateFiles: false, DuplicateFiles: false,
ReadMimeType: false, ReadMimeType: false,
WriteMimeType: false, WriteMimeType: false,
GetTier: true, GetTier: true,
SetTier: true, SetTier: true,
BucketBased: true, BucketBased: true,
CanHaveEmptyDirectories: true, CanHaveEmptyDirectories: true,
ReadMetadata: true, ReadMetadata: true,
WriteMetadata: true, WriteMetadata: true,
UserMetadata: true, UserMetadata: true,
ReadDirMetadata: true,
WriteDirMetadata: true,
WriteDirSetModTime: true,
UserDirMetadata: true,
DirModTimeUpdatesOnWrite: true,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs) }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
// We support reading MIME types no matter the wrapped fs // We support reading MIME types no matter the wrapped fs
f.features.ReadMimeType = true f.features.ReadMimeType = true
@@ -209,8 +193,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
if !operations.CanServerSideMove(wrappedFs) { if !operations.CanServerSideMove(wrappedFs) {
f.features.Disable("PutStream") f.features.Disable("PutStream")
} }
// Enable ListP always
f.features.ListP = f.ListP
return f, err return f, err
} }
@@ -274,16 +256,6 @@ func isMetadataFile(filename string) bool {
return strings.HasSuffix(filename, metaFileExt) return strings.HasSuffix(filename, metaFileExt)
} }
// Checks whether a file is a metadata file and returns the original
// file name and a flag indicating whether it was a metadata file or
// not.
func unwrapMetadataFile(filename string) (string, bool) {
if !isMetadataFile(filename) {
return "", false
}
return filename[:len(filename)-len(metaFileExt)], true
}
// makeDataName generates the file name for a data file with specified compression mode
func makeDataName(remote string, size int64, mode int) (newRemote string) {
    if mode != Uncompressed {
@@ -355,39 +327,11 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
// found.
// List entries and process them
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
    wrappedCallback := func(entries fs.DirEntries) error {
        entries, err := f.processEntries(entries)
        if err != nil {
            return err
        }
        return callback(entries)
    }
    listP := f.Fs.Features().ListP
    if listP == nil {
        entries, err := f.Fs.List(ctx, dir)
        if err != nil {
            return err
        }
        return wrappedCallback(entries)
    }
    return listP(ctx, dir, wrappedCallback)
    entries, err = f.Fs.List(ctx, dir)
    if err != nil {
        return nil, err
    }
    return f.processEntries(entries)
}
// ListR lists the objects and directories of the Fs starting
@@ -487,7 +431,7 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
        if err != nil {
            fs.Errorf(o, "Failed to remove corrupted object: %v", err)
        }
        return fmt.Errorf("corrupted on transfer: %v compressed hashes differ src(%s) %q vs dst(%s) %q", ht, f.Fs, srcHash, o.Fs(), dstHash)
        return fmt.Errorf("corrupted on transfer: %v compressed hashes differ %q vs %q", ht, srcHash, dstHash)
    }
    return nil
}
@@ -821,14 +765,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
    return f.Fs.Mkdir(ctx, dir)
}
// MkdirMetadata makes the root directory of the Fs object
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
if do := f.Fs.Features().MkdirMetadata; do != nil {
return do(ctx, dir, metadata)
}
return nil, fs.ErrorNotImplemented
}
// Rmdir removes the directory (container, bucket) if empty // Rmdir removes the directory (container, bucket) if empty
// //
// Return an error if it doesn't exist or isn't empty // Return an error if it doesn't exist or isn't empty
@@ -972,14 +908,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return do(ctx, srcFs.Fs, srcRemote, dstRemote) return do(ctx, srcFs.Fs, srcRemote, dstRemote)
} }
// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
if do := f.Fs.Features().DirSetModTime; do != nil {
return do(ctx, dir, modTime)
}
return fs.ErrorNotImplemented
}
// CleanUp the trash in the Fs // CleanUp the trash in the Fs
// //
// Implement this if you have a way of emptying the trash or // Implement this if you have a way of emptying the trash or
@@ -1050,8 +978,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
wrappedNotifyFunc := func(path string, entryType fs.EntryType) { wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
fs.Logf(f, "path %q entryType %d", path, entryType) fs.Logf(f, "path %q entryType %d", path, entryType)
var ( var (
wrappedPath string wrappedPath string
isMetadataFile bool
) )
switch entryType { switch entryType {
case fs.EntryDirectory: case fs.EntryDirectory:
@@ -1059,10 +986,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
case fs.EntryObject: case fs.EntryObject:
// Note: All we really need to do to monitor the object is to check whether the metadata changed, // Note: All we really need to do to monitor the object is to check whether the metadata changed,
// as the metadata contains the hash. This will work unless there's a hash collision and the sizes stay the same. // as the metadata contains the hash. This will work unless there's a hash collision and the sizes stay the same.
wrappedPath, isMetadataFile = unwrapMetadataFile(path) wrappedPath = makeMetadataName(path)
if !isMetadataFile {
return
}
default: default:
fs.Errorf(path, "press ChangeNotify: ignoring unknown EntryType %d", entryType) fs.Errorf(path, "press ChangeNotify: ignoring unknown EntryType %d", entryType)
return return
@@ -1318,17 +1242,6 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
return do.Metadata(ctx) return do.Metadata(ctx)
} }
// SetMetadata sets metadata for an Object
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
do, ok := o.Object.(fs.SetMetadataer)
if !ok {
return fs.ErrorNotImplemented
}
return do.SetMetadata(ctx, metadata)
}
// Hash returns the selected checksum of the file // Hash returns the selected checksum of the file
// If no checksum is available it returns "" // If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) { func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
@@ -1394,7 +1307,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
} }
} }
// Get a chunkedreader for the wrapped object // Get a chunkedreader for the wrapped object
chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize, chunkStreams) chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize)
// Get file handle // Get file handle
var file io.Reader var file io.Reader
if offset != 0 { if offset != 0 {
@@ -1561,8 +1474,6 @@ var (
_ fs.Copier = (*Fs)(nil) _ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil) _ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil) _ fs.DirMover = (*Fs)(nil)
_ fs.DirSetModTimer = (*Fs)(nil)
_ fs.MkdirMetadataer = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil) _ fs.PutStreamer = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil) _ fs.CleanUpper = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil) _ fs.UnWrapper = (*Fs)(nil)


@@ -14,26 +14,23 @@ import (
"github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/fstest/fstests"
) )
var defaultOpt = fstests.Opt{
RemoteName: "TestCompress:",
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"OpenChunkWriter",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
UnimplementableObjectMethods: []string{},
}
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) { func TestIntegration(t *testing.T) {
fstests.Run(t, &defaultOpt) opt := fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
UnimplementableObjectMethods: []string{}}
fstests.Run(t, &opt)
} }
// TestRemoteGzip tests GZIP compression // TestRemoteGzip tests GZIP compression
@@ -43,13 +40,27 @@ func TestRemoteGzip(t *testing.T) {
} }
tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip") tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip")
name := "TestCompressGzip" name := "TestCompressGzip"
opt := defaultOpt fstests.Run(t, &fstests.Opt{
opt.RemoteName = name + ":" RemoteName: name + ":",
opt.ExtraConfig = []fstests.ExtraConfigItem{ NilObject: (*Object)(nil),
{Name: name, Key: "type", Value: "compress"}, UnimplementableFsMethods: []string{
{Name: name, Key: "remote", Value: tempdir}, "OpenWriterAt",
{Name: name, Key: "compression_mode", Value: "gzip"}, "MergeDirs",
} "DirCacheFlush",
opt.QuickTestOK = true "PutUnchecked",
fstests.Run(t, &opt) "PutStream",
"UserInfo",
"Disconnect",
},
UnimplementableObjectMethods: []string{
"GetTier",
"SetTier",
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "compress"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "compression_mode", Value: "gzip"},
},
QuickTestOK: true,
})
} }


@@ -21,7 +21,6 @@ import (
"github.com/rclone/rclone/backend/crypt/pkcs7" "github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/version" "github.com/rclone/rclone/lib/version"
"github.com/rfjakob/eme" "github.com/rfjakob/eme"
"golang.org/x/crypto/nacl/secretbox" "golang.org/x/crypto/nacl/secretbox"
@@ -38,6 +37,7 @@ const (
blockHeaderSize = secretbox.Overhead blockHeaderSize = secretbox.Overhead
blockDataSize = 64 * 1024 blockDataSize = 64 * 1024
blockSize = blockHeaderSize + blockDataSize blockSize = blockHeaderSize + blockDataSize
encryptedSuffix = ".bin" // when file name encryption is off we add this suffix to make sure the cloud provider doesn't process the file
) )
// Errors returned by cipher // Errors returned by cipher
@@ -53,9 +53,8 @@ var (
ErrorEncryptedBadBlock = errors.New("failed to authenticate decrypted block - bad password?") ErrorEncryptedBadBlock = errors.New("failed to authenticate decrypted block - bad password?")
ErrorBadBase32Encoding = errors.New("bad base32 filename encoding") ErrorBadBase32Encoding = errors.New("bad base32 filename encoding")
ErrorFileClosed = errors.New("file already closed") ErrorFileClosed = errors.New("file already closed")
ErrorNotAnEncryptedFile = errors.New("not an encrypted file - does not match suffix") ErrorNotAnEncryptedFile = errors.New("not an encrypted file - no \"" + encryptedSuffix + "\" suffix")
ErrorBadSeek = errors.New("Seek beyond end of file") ErrorBadSeek = errors.New("Seek beyond end of file")
ErrorSuffixMissingDot = errors.New("suffix config setting should include a '.'")
defaultSalt = []byte{0xA8, 0x0D, 0xF4, 0x3A, 0x8F, 0xBD, 0x03, 0x08, 0xA7, 0xCA, 0xB8, 0x3E, 0x58, 0x1F, 0x86, 0xB1} defaultSalt = []byte{0xA8, 0x0D, 0xF4, 0x3A, 0x8F, 0xBD, 0x03, 0x08, 0xA7, 0xCA, 0xB8, 0x3E, 0x58, 0x1F, 0x86, 0xB1}
obfuscQuoteRune = '!' obfuscQuoteRune = '!'
) )
@@ -170,30 +169,27 @@ func NewNameEncoding(s string) (enc fileNameEncoding, err error) {
// Cipher defines an encoding and decoding cipher for the crypt backend
type Cipher struct {
    dataKey        [32]byte                  // Key for secretbox
    nameKey        [32]byte                  // 16,24 or 32 bytes
    nameTweak      [nameCipherBlockSize]byte // used to tweak the name crypto
    block          gocipher.Block
    mode           NameEncryptionMode
    fileNameEnc    fileNameEncoding
    buffers        sync.Pool // encrypt/decrypt buffers
    cryptoRand     io.Reader // read crypto random numbers from here
    dirNameEncrypt bool
    passBadBlocks   bool // if set passed bad blocks as zeroed blocks
    encryptedSuffix string
}
// newCipher initialises the cipher. If salt is "" then it uses a built in salt val
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool, enc fileNameEncoding) (*Cipher, error) {
    c := &Cipher{
        mode:           mode,
        fileNameEnc:    enc,
        cryptoRand:     rand.Reader,
        dirNameEncrypt: dirNameEncrypt,
        encryptedSuffix: ".bin",
    }
    c.buffers.New = func() any {
    c.buffers.New = func() interface{} {
        return new([blockSize]byte)
        return make([]byte, blockSize)
    }
    err := c.Key(password, salt)
    if err != nil {
@@ -202,29 +198,11 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
    return c, nil
}
// setEncryptedSuffix set suffix, or an empty string
func (c *Cipher) setEncryptedSuffix(suffix string) {
    if strings.EqualFold(suffix, "none") {
        c.encryptedSuffix = ""
        return
    }
    if !strings.HasPrefix(suffix, ".") {
        fs.Errorf(nil, "crypt: bad suffix: %v", ErrorSuffixMissingDot)
        suffix = "." + suffix
    }
    c.encryptedSuffix = suffix
}
// Call to set bad block pass through
func (c *Cipher) setPassBadBlocks(passBadBlocks bool) {
    c.passBadBlocks = passBadBlocks
}
// Key creates all the internal keys from the password passed in using
// scrypt.
//
// If salt is "" we use a fixed salt just to make attackers lives
// slightly harder than using no salt.
// slighty harder than using no salt.
//
// Note that empty password makes all 0x00 keys which is used in the
// tests.
@@ -252,12 +230,15 @@ func (c *Cipher) Key(password, salt string) (err error) {
}
// getBlock gets a block from the pool of size blockSize
func (c *Cipher) getBlock() *[blockSize]byte {
func (c *Cipher) getBlock() []byte {
    return c.buffers.Get().(*[blockSize]byte)
    return c.buffers.Get().([]byte)
}
// putBlock returns a block to the pool of size blockSize
func (c *Cipher) putBlock(buf *[blockSize]byte) {
func (c *Cipher) putBlock(buf []byte) {
    if len(buf) != blockSize {
        panic("bad blocksize returned to pool")
    }
    c.buffers.Put(buf)
}
@@ -329,14 +310,14 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
    for _, runeValue := range plaintext {
        dir += int(runeValue)
    }
    dir %= 256
    dir = dir % 256
    // We'll use this number to store in the result filename...
    var result bytes.Buffer
    _, _ = result.WriteString(strconv.Itoa(dir) + ".")
    // but we'll augment it with the nameKey for real calculation
    for i := range len(c.nameKey) {
    for i := 0; i < len(c.nameKey); i++ {
        dir += int(c.nameKey[i])
    }
@@ -418,7 +399,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
    }
    // add the nameKey to get the real rotate distance
    for i := range len(c.nameKey) {
    for i := 0; i < len(c.nameKey); i++ {
        dir += int(c.nameKey[i])
    }
@@ -450,7 +431,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
    if pos >= 26 {
        pos -= 6
    }
    pos -= thisdir
    pos = pos - thisdir
    if pos < 0 {
        pos += 52
    }
@@ -527,7 +508,7 @@ func (c *Cipher) encryptFileName(in string) string {
// EncryptFileName encrypts a file path
func (c *Cipher) EncryptFileName(in string) string {
    if c.mode == NameEncryptionOff {
        return in + c.encryptedSuffix
        return in + encryptedSuffix
    }
    return c.encryptFileName(in)
}
@@ -587,8 +568,8 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
// DecryptFileName decrypts a file path
func (c *Cipher) DecryptFileName(in string) (string, error) {
    if c.mode == NameEncryptionOff {
        remainingLength := len(in) - len(c.encryptedSuffix)
        remainingLength := len(in) - len(encryptedSuffix)
        if remainingLength == 0 || !strings.HasSuffix(in, c.encryptedSuffix) {
        if remainingLength == 0 || !strings.HasSuffix(in, encryptedSuffix) {
            return "", ErrorNotAnEncryptedFile
        }
        decrypted := in[:remainingLength]
@@ -628,7 +609,7 @@ func (n *nonce) pointer() *[fileNonceSize]byte {
// fromReader fills the nonce from an io.Reader - normally the OSes
// crypto random number generator
func (n *nonce) fromReader(in io.Reader) error {
    read, err := readers.ReadFill(in, (*n)[:])
    read, err := io.ReadFull(in, (*n)[:])
    if read != fileNonceSize {
        return fmt.Errorf("short read of nonce: %w", err)
    }
@@ -664,7 +645,7 @@ func (n *nonce) increment() {
// add a uint64 to the nonce
func (n *nonce) add(x uint64) {
    carry := uint16(0)
    for i := range 8 {
    for i := 0; i < 8; i++ {
        digit := (*n)[i]
        xDigit := byte(x)
        x >>= 8
@@ -683,8 +664,8 @@ type encrypter struct {
    in    io.Reader
    c     *Cipher
    nonce nonce
    buf     *[blockSize]byte
    buf     []byte
    readBuf *[blockSize]byte
    readBuf []byte
    bufIndex int
    bufSize  int
    err      error
@@ -709,9 +690,9 @@ func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
        }
    }
    // Copy magic into buffer
    copy((*fh.buf)[:], fileMagicBytes)
    copy(fh.buf, fileMagicBytes)
    // Copy nonce into buffer
    copy((*fh.buf)[fileMagicSize:], fh.nonce[:])
    copy(fh.buf[fileMagicSize:], fh.nonce[:])
    return fh, nil
}
@@ -726,20 +707,22 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
if fh.bufIndex >= fh.bufSize { if fh.bufIndex >= fh.bufSize {
// Read data // Read data
// FIXME should overlap the reads with a go-routine and 2 buffers? // FIXME should overlap the reads with a go-routine and 2 buffers?
readBuf := (*fh.readBuf)[:blockDataSize] readBuf := fh.readBuf[:blockDataSize]
n, err = readers.ReadFill(fh.in, readBuf) n, err = io.ReadFull(fh.in, readBuf)
if n == 0 { if n == 0 {
// err can't be nil since:
// n == len(buf) if and only if err == nil.
return fh.finish(err) return fh.finish(err)
} }
// possibly err != nil here, but we will process the // possibly err != nil here, but we will process the
// data and the next call to ReadFill will return 0, err // data and the next call to ReadFull will return 0, err
// Encrypt the block using the nonce // Encrypt the block using the nonce
secretbox.Seal((*fh.buf)[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey) secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
fh.bufIndex = 0 fh.bufIndex = 0
fh.bufSize = blockHeaderSize + n fh.bufSize = blockHeaderSize + n
fh.nonce.increment() fh.nonce.increment()
} }
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufSize]) n = copy(p, fh.buf[fh.bufIndex:fh.bufSize])
fh.bufIndex += n fh.bufIndex += n
return n, nil return n, nil
} }
@@ -780,8 +763,8 @@ type decrypter struct {
nonce nonce nonce nonce
initialNonce nonce initialNonce nonce
c *Cipher c *Cipher
buf *[blockSize]byte buf []byte
readBuf *[blockSize]byte readBuf []byte
bufIndex int bufIndex int
bufSize int bufSize int
err error err error
@@ -799,12 +782,12 @@ func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
limit: -1, limit: -1,
} }
// Read file header (magic + nonce) // Read file header (magic + nonce)
readBuf := (*fh.readBuf)[:fileHeaderSize] readBuf := fh.readBuf[:fileHeaderSize]
n, err := readers.ReadFill(fh.rc, readBuf) _, err := io.ReadFull(fh.rc, readBuf)
if n < fileHeaderSize && err == io.EOF { if err == io.EOF || err == io.ErrUnexpectedEOF {
// This read from 0..fileHeaderSize-1 bytes // This read from 0..fileHeaderSize-1 bytes
return nil, fh.finishAndClose(ErrorEncryptedFileTooShort) return nil, fh.finishAndClose(ErrorEncryptedFileTooShort)
} else if err != io.EOF && err != nil { } else if err != nil {
return nil, fh.finishAndClose(err) return nil, fh.finishAndClose(err)
} }
// check the magic // check the magic
@@ -862,8 +845,10 @@ func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offse
func (fh *decrypter) fillBuffer() (err error) { func (fh *decrypter) fillBuffer() (err error) {
// FIXME should overlap the reads with a go-routine and 2 buffers? // FIXME should overlap the reads with a go-routine and 2 buffers?
readBuf := fh.readBuf readBuf := fh.readBuf
n, err := readers.ReadFill(fh.rc, (*readBuf)[:]) n, err := io.ReadFull(fh.rc, readBuf)
if n == 0 { if n == 0 {
// err can't be nil since:
// n == len(buf) if and only if err == nil.
return err return err
} }
// possibly err != nil here, but we will process the data and // possibly err != nil here, but we will process the data and
@@ -871,25 +856,18 @@ func (fh *decrypter) fillBuffer() (err error) {
// Check header + 1 byte exists // Check header + 1 byte exists
if n <= blockHeaderSize { if n <= blockHeaderSize {
if err != nil && err != io.EOF { if err != nil {
return err // return pending error as it is likely more accurate return err // return pending error as it is likely more accurate
} }
return ErrorEncryptedFileBadHeader return ErrorEncryptedFileBadHeader
} }
// Decrypt the block using the nonce // Decrypt the block using the nonce
_, ok := secretbox.Open((*fh.buf)[:0], (*readBuf)[:n], fh.nonce.pointer(), &fh.c.dataKey) _, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
if !ok { if !ok {
if err != nil && err != io.EOF { if err != nil {
return err // return pending error as it is likely more accurate return err // return pending error as it is likely more accurate
} }
if !fh.c.passBadBlocks { return ErrorEncryptedBadBlock
return ErrorEncryptedBadBlock
}
fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
// Zero out the bad block and continue
for i := range (*fh.buf)[:n] {
fh.buf[i] = 0
}
} }
fh.bufIndex = 0 fh.bufIndex = 0
fh.bufSize = n - blockHeaderSize fh.bufSize = n - blockHeaderSize
@@ -915,7 +893,7 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
if fh.limit >= 0 && fh.limit < int64(toCopy) { if fh.limit >= 0 && fh.limit < int64(toCopy) {
toCopy = int(fh.limit) toCopy = int(fh.limit)
} }
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufIndex+toCopy]) n = copy(p, fh.buf[fh.bufIndex:fh.bufIndex+toCopy])
fh.bufIndex += n fh.bufIndex += n
if fh.limit >= 0 { if fh.limit >= 0 {
fh.limit -= int64(n) fh.limit -= int64(n)
@@ -926,8 +904,9 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
return n, nil return n, nil
} }
// calculateUnderlying converts an (offset, limit) in an encrypted file // calculateUnderlying converts an (offset, limit) in a crypted file
// into an (underlyingOffset, underlyingLimit) for the underlying file. // into an (underlyingOffset, underlyingLimit) for the underlying
// file.
// //
// It also returns number of bytes to discard after reading the first // It also returns number of bytes to discard after reading the first
// block and number of blocks this is from the start so the nonce can // block and number of blocks this is from the start so the nonce can
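
The buf and readBuf changes above swap plain []byte slices for *[blockSize]byte array pointers on the removed side. Pointers to fixed-size arrays are the usual way to pool buffers in Go without the allocation that storing a slice header in a sync.Pool incurs. A minimal, self-contained sketch of that idiom (blockSize here is an illustrative constant, not rclone's actual pool code):

package main

import (
	"fmt"
	"sync"
)

const blockSize = 64 * 1024 // illustrative only

// Pooling *[N]byte instead of []byte avoids allocating a fresh
// slice header every time a buffer is returned to the pool.
var blockPool = sync.Pool{
	New: func() any { return new([blockSize]byte) },
}

func main() {
	buf := blockPool.Get().(*[blockSize]byte)
	copy((*buf)[:], "hello")        // slice the array wherever a []byte is needed
	fmt.Println(string((*buf)[:5])) // hello
	blockPool.Put(buf)
}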


@@ -27,14 +27,14 @@ func TestNewNameEncryptionMode(t *testing.T) {
 		{"off", NameEncryptionOff, ""},
 		{"standard", NameEncryptionStandard, ""},
 		{"obfuscate", NameEncryptionObfuscated, ""},
-		{"potato", NameEncryptionOff, "unknown file name encryption mode \"potato\""},
+		{"potato", NameEncryptionOff, "Unknown file name encryption mode \"potato\""},
 	} {
 		actual, actualErr := NewNameEncryptionMode(test.in)
 		assert.Equal(t, actual, test.expected)
 		if test.expectedErr == "" {
 			assert.NoError(t, actualErr)
 		} else {
-			assert.EqualError(t, actualErr, test.expectedErr)
+			assert.Error(t, actualErr, test.expectedErr)
 		}
 	}
 }
@@ -405,13 +405,6 @@ func TestNonStandardEncryptFileName(t *testing.T) {
 	// Off mode
 	c, _ := newCipher(NameEncryptionOff, "", "", true, nil)
 	assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
-	// Off mode with custom suffix
-	c, _ = newCipher(NameEncryptionOff, "", "", true, nil)
-	c.setEncryptedSuffix(".jpg")
-	assert.Equal(t, "1/12/123.jpg", c.EncryptFileName("1/12/123"))
-	// Off mode with empty suffix
-	c.setEncryptedSuffix("none")
-	assert.Equal(t, "1/12/123", c.EncryptFileName("1/12/123"))
 	// Obfuscation mode
 	c, _ = newCipher(NameEncryptionObfuscated, "", "", true, nil)
 	assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
@@ -490,27 +483,21 @@ func TestNonStandardDecryptFileName(t *testing.T) {
 		in             string
 		expected       string
 		expectedErr    error
-		customSuffix   string
 	}{
-		{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil, ""},
-		{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile, ""},
-		{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile, ""},
-		{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil, ""},
-		{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil, ""},
-		{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil, ""},
-		{NameEncryptionOff, true, "1/12/123.jpg", "1/12/123", nil, ".jpg"},
-		{NameEncryptionOff, true, "1/12/123", "1/12/123", nil, "none"},
-		{NameEncryptionObfuscated, true, "!.hello", "hello", nil, ""},
-		{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile, ""},
-		{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil, ""},
-		{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil, ""},
-		{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil, ""},
-		{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil, ""},
+		{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
+		{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
+		{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
+		{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
+		{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
+		{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
+		{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
+		{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
+		{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
+		{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
+		{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
+		{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
 	} {
 		c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
-		if test.customSuffix != "" {
-			c.setEncryptedSuffix(test.customSuffix)
-		}
 		actual, actualErr := c.DecryptFileName(test.in)
 		what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
 		assert.Equal(t, test.expected, actual, what)
@@ -739,7 +726,7 @@ func TestNonceFromReader(t *testing.T) {
 	assert.Equal(t, nonce{'1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o'}, x)
 	buf = bytes.NewBufferString("123456789abcdefghijklmn")
 	err = x.fromReader(buf)
-	assert.EqualError(t, err, "short read of nonce: EOF")
+	assert.Error(t, err, "short read of nonce")
 }

 func TestNonceFromBuf(t *testing.T) {
@@ -1063,7 +1050,7 @@ func TestRandomSource(t *testing.T) {
 	_, _ = source.Read(buf)
 	sink = newRandomSource(1e8)
 	_, err = io.Copy(sink, source)
-	assert.EqualError(t, err, "Error in stream at 1")
+	assert.Error(t, err, "Error in stream")
 }

 type zeroes struct{}
@@ -1180,13 +1167,13 @@ func TestNewEncrypter(t *testing.T) {
 	fh, err := c.newEncrypter(z, nil)
 	assert.NoError(t, err)
 	assert.Equal(t, nonce{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.nonce)
-	assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, (*fh.buf)[:32])
+	assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.buf[:32])
 	// Test error path
 	c.cryptoRand = bytes.NewBufferString("123456789abcdefghijklmn")
 	fh, err = c.newEncrypter(z, nil)
 	assert.Nil(t, fh)
-	assert.EqualError(t, err, "short read of nonce: EOF")
+	assert.Error(t, err, "short read of nonce")
 }

 // Test the stream returning 0, io.ErrUnexpectedEOF - this used to
@@ -1237,7 +1224,7 @@ func TestNewDecrypter(t *testing.T) {
 		cd := newCloseDetector(bytes.NewBuffer(file0[:i]))
 		fh, err = c.newDecrypter(cd)
 		assert.Nil(t, fh)
-		assert.EqualError(t, err, ErrorEncryptedFileTooShort.Error())
+		assert.Error(t, err, ErrorEncryptedFileTooShort.Error())
 		assert.Equal(t, 1, cd.closed)
 	}
@@ -1245,7 +1232,7 @@ func TestNewDecrypter(t *testing.T) {
 	cd = newCloseDetector(er)
 	fh, err = c.newDecrypter(cd)
 	assert.Nil(t, fh)
-	assert.EqualError(t, err, "potato")
+	assert.Error(t, err, "potato")
 	assert.Equal(t, 1, cd.closed)

 	// bad magic
@@ -1256,7 +1243,7 @@ func TestNewDecrypter(t *testing.T) {
 		cd := newCloseDetector(bytes.NewBuffer(file0copy))
 		fh, err := c.newDecrypter(cd)
 		assert.Nil(t, fh)
-		assert.EqualError(t, err, ErrorEncryptedBadMagic.Error())
+		assert.Error(t, err, ErrorEncryptedBadMagic.Error())
 		file0copy[i] ^= 0x1
 		assert.Equal(t, 1, cd.closed)
 	}
@@ -1307,7 +1294,10 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 	open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
 		end := len(ciphertext)
 		if underlyingLimit >= 0 {
-			end = min(int(underlyingOffset+underlyingLimit), len(ciphertext))
+			end = int(underlyingOffset + underlyingLimit)
+			if end > len(ciphertext) {
+				end = len(ciphertext)
+			}
 		}
 		reader = io.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
 		return reader, nil
@@ -1487,7 +1477,7 @@ func TestDecrypterRead(t *testing.T) {
 	assert.NoError(t, err)

 	// Test truncating the file at each possible point
-	for i := range len(file16) - 1 {
+	for i := 0; i < len(file16)-1; i++ {
 		what := fmt.Sprintf("truncating to %d/%d", i, len(file16))
 		cd := newCloseDetector(bytes.NewBuffer(file16[:i]))
 		fh, err := c.newDecrypter(cd)
@@ -1505,10 +1495,8 @@ func TestDecrypterRead(t *testing.T) {
 		case i == fileHeaderSize:
 			// This would normally produce an error *except* on the first block
 			expectedErr = nil
-		case i <= fileHeaderSize+blockHeaderSize:
-			expectedErr = ErrorEncryptedFileBadHeader
 		default:
-			expectedErr = ErrorEncryptedBadBlock
+			expectedErr = io.ErrUnexpectedEOF
 		}
 		if expectedErr != nil {
 			assert.EqualError(t, err, expectedErr.Error(), what)
@@ -1526,7 +1514,7 @@ func TestDecrypterRead(t *testing.T) {
 	fh, err := c.newDecrypter(cd)
 	assert.NoError(t, err)
 	_, err = io.ReadAll(fh)
-	assert.EqualError(t, err, "potato")
+	assert.Error(t, err, "potato")
 	assert.Equal(t, 0, cd.closed)

 	// Test corrupting the input
@@ -1537,26 +1525,15 @@ func TestDecrypterRead(t *testing.T) {
 		file16copy[i] ^= 0xFF
 		fh, err := c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
 		if i < fileMagicSize {
-			assert.EqualError(t, err, ErrorEncryptedBadMagic.Error())
+			assert.Error(t, err, ErrorEncryptedBadMagic.Error())
 			assert.Nil(t, fh)
 		} else {
 			assert.NoError(t, err)
 			_, err = io.ReadAll(fh)
-			assert.EqualError(t, err, ErrorEncryptedBadBlock.Error())
+			assert.Error(t, err, ErrorEncryptedFileBadHeader.Error())
 		}
 		file16copy[i] ^= 0xFF
 	}
-
-	// Test that we can corrupt a byte and read zeroes if
-	// passBadBlocks is set
-	copy(file16copy, file16)
-	file16copy[len(file16copy)-1] ^= 0xFF
-	c.passBadBlocks = true
-	fh, err = c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
-	assert.NoError(t, err)
-	buf, err := io.ReadAll(fh)
-	assert.NoError(t, err)
-	assert.Equal(t, make([]byte, 16), buf)
 }

 func TestDecrypterClose(t *testing.T) {
@@ -1577,7 +1554,7 @@ func TestDecrypterClose(t *testing.T) {
 	// double close
 	err = fh.Close()
-	assert.EqualError(t, err, ErrorFileClosed.Error())
+	assert.Error(t, err, ErrorFileClosed.Error())
 	assert.Equal(t, 1, cd.closed)

 	// try again reading the file this time
@@ -1604,6 +1581,8 @@ func TestPutGetBlock(t *testing.T) {
 	block := c.getBlock()
 	c.putBlock(block)
 	c.putBlock(block)
+
+	assert.Panics(t, func() { c.putBlock(block[:len(block)-1]) })
 }

 func TestKey(t *testing.T) {


@@ -18,7 +18,6 @@ import (
 	"github.com/rclone/rclone/fs/config/obscure"
 	"github.com/rclone/rclone/fs/fspath"
 	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/fs/list"
 )

 // Globals
@@ -49,7 +48,7 @@ func init() {
 				Help:  "Very simple filename obfuscation.",
 			}, {
 				Value: "off",
-				Help:  "Don't encrypt the file names.\nAdds a \".bin\", or \"suffix\" extension only.",
+				Help:  "Don't encrypt the file names.\nAdds a \".bin\" extension only.",
 			},
 		},
 	}, {
@@ -80,9 +79,7 @@ NB If filename_encryption is "off" then this option will do nothing.`,
 	}, {
 		Name:    "server_side_across_configs",
 		Default: false,
-		Help: `Deprecated: use --server-side-across-configs instead.
-
-Allow server-side operations (e.g. copy) to work across different crypt configs.
+		Help: `Allow server-side operations (e.g. copy) to work across different crypt configs.

 Normally this option is not what you want, but if you have two crypts
 pointing to the same backend you can use it.
@@ -122,25 +119,6 @@ names, or for debugging purposes.`,
 				Help:  "Encrypt file data.",
 			},
 		},
-	}, {
-		Name: "pass_bad_blocks",
-		Help: `If set this will pass bad blocks through as all 0.
-
-This should not be set in normal operation, it should only be set if
-trying to recover an encrypted file with errors and it is desired to
-recover as much of the file as possible.`,
-		Default:  false,
-		Advanced: true,
-	}, {
-		Name: "strict_names",
-		Help: `If set, this will raise an error when crypt comes across a filename that can't be decrypted.
-
-(By default, rclone will just log a NOTICE and continue as normal.)
-This can happen if encrypted and unencrypted files are stored in the same
-directory (which is not recommended.) It may also indicate a more serious
-problem that should be investigated.`,
-		Default:  false,
-		Advanced: true,
 	}, {
 		Name: "filename_encoding",
 		Help: `How to encode the encrypted filename to text string.
@@ -160,18 +138,10 @@ length and if it's case sensitive.`,
 			},
 			{
 				Value: "base32768",
-				Help:  "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive, Dropbox)",
+				Help:  "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive)",
 			},
 		},
 		Advanced: true,
-	}, {
-		Name: "suffix",
-		Help: `If this is set it will override the default suffix of ".bin".
-
-Setting suffix to "none" will result in an empty suffix. This may be useful
-when the path length is critical.`,
-		Default:  ".bin",
-		Advanced: true,
 	}},
 	})
 }
@@ -204,8 +174,6 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
 	if err != nil {
 		return nil, fmt.Errorf("failed to make cipher: %w", err)
 	}
-	cipher.setEncryptedSuffix(opt.Suffix)
-	cipher.setPassBadBlocks(opt.PassBadBlocks)
 	return cipher, nil
 }
@@ -264,39 +232,23 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 		cipher: cipher,
 	}
 	cache.PinUntilFinalized(f.Fs, f)
-	// Correct root if definitely pointing to a file
-	if err == fs.ErrorIsFile {
-		f.root = path.Dir(f.root)
-		if f.root == "." || f.root == "/" {
-			f.root = ""
-		}
-	}
 	// the features here are ones we could support, and they are
 	// ANDed with the ones from wrappedFs
 	f.features = (&fs.Features{
-		CaseInsensitive:          !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff,
+		CaseInsensitive:          cipher.NameEncryptionMode() == NameEncryptionOff,
 		DuplicateFiles:           true,
 		ReadMimeType:             false, // MimeTypes not supported with crypt
 		WriteMimeType:            false,
 		BucketBased:              true,
 		CanHaveEmptyDirectories:  true,
 		SetTier:                  true,
 		GetTier:                  true,
 		ServerSideAcrossConfigs:  opt.ServerSideAcrossConfigs,
 		ReadMetadata:             true,
 		WriteMetadata:            true,
 		UserMetadata:             true,
-		ReadDirMetadata:          true,
-		WriteDirMetadata:         true,
-		WriteDirSetModTime:       true,
-		UserDirMetadata:          true,
-		DirModTimeUpdatesOnWrite: true,
-		PartialUploads:           true,
 	}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
-	// Enable ListP always
-	f.features.ListP = f.ListP

 	return f, err
 }
@@ -310,10 +262,7 @@ type Options struct {
 	Password2               string `config:"password2"`
 	ServerSideAcrossConfigs bool   `config:"server_side_across_configs"`
 	ShowMapping             bool   `config:"show_mapping"`
-	PassBadBlocks           bool   `config:"pass_bad_blocks"`
 	FilenameEncoding        string `config:"filename_encoding"`
-	Suffix                  string `config:"suffix"`
-	StrictNames             bool   `config:"strict_names"`
 }

 // Fs represents a wrapped fs.Fs
@@ -348,64 +297,45 @@ func (f *Fs) String() string {
 }

 // Encrypt an object file name to entries.
-func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) error {
+func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
 	remote := obj.Remote()
 	decryptedRemote, err := f.cipher.DecryptFileName(remote)
 	if err != nil {
-		if f.opt.StrictNames {
-			return fmt.Errorf("%s: undecryptable file name detected: %v", remote, err)
-		}
-		fs.Logf(remote, "Skipping undecryptable file name: %v", err)
-		return nil
+		fs.Debugf(remote, "Skipping undecryptable file name: %v", err)
+		return
 	}
 	if f.opt.ShowMapping {
 		fs.Logf(decryptedRemote, "Encrypts to %q", remote)
 	}
 	*entries = append(*entries, f.newObject(obj))
-	return nil
 }

 // Encrypt a directory file name to entries.
-func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) error {
+func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) {
 	remote := dir.Remote()
 	decryptedRemote, err := f.cipher.DecryptDirName(remote)
 	if err != nil {
-		if f.opt.StrictNames {
-			return fmt.Errorf("%s: undecryptable dir name detected: %v", remote, err)
-		}
-		fs.Logf(remote, "Skipping undecryptable dir name: %v", err)
-		return nil
+		fs.Debugf(remote, "Skipping undecryptable dir name: %v", err)
+		return
	}
 	if f.opt.ShowMapping {
 		fs.Logf(decryptedRemote, "Encrypts to %q", remote)
 	}
 	*entries = append(*entries, f.newDir(ctx, dir))
-	return nil
 }

 // Encrypt some directory entries. This alters entries returning it as newEntries.
 func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
 	newEntries = entries[:0] // in place filter
-	errors := 0
-	var firsterr error
 	for _, entry := range entries {
 		switch x := entry.(type) {
 		case fs.Object:
-			err = f.add(&newEntries, x)
+			f.add(&newEntries, x)
 		case fs.Directory:
-			err = f.addDir(ctx, &newEntries, x)
+			f.addDir(ctx, &newEntries, x)
 		default:
 			return nil, fmt.Errorf("unknown object type %T", entry)
 		}
-		if err != nil {
-			errors++
-			if firsterr == nil {
-				firsterr = err
-			}
-		}
-	}
-	if firsterr != nil {
-		return nil, fmt.Errorf("there were %v undecryptable name errors. first error: %v", errors, firsterr)
 	}
 	return newEntries, nil
 }
@@ -420,40 +350,11 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	return list.WithListP(ctx, dir, f)
-}
-
-// ListP lists the objects and directories of the Fs starting
-// from dir non recursively into out.
-//
-// dir should be "" to start from the root, and should not
-// have trailing slashes.
-//
-// This should return ErrDirNotFound if the directory isn't
-// found.
-//
-// It should call callback for each tranche of entries read.
-// These need not be returned in any particular order. If
-// callback returns an error then the listing will stop
-// immediately.
-func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
-	wrappedCallback := func(entries fs.DirEntries) error {
-		entries, err := f.encryptEntries(ctx, entries)
-		if err != nil {
-			return err
-		}
-		return callback(entries)
-	}
-	listP := f.Fs.Features().ListP
-	encryptedDir := f.cipher.EncryptDirName(dir)
-	if listP == nil {
-		entries, err := f.Fs.List(ctx, encryptedDir)
-		if err != nil {
-			return err
-		}
-		return wrappedCallback(entries)
-	}
-	return listP(ctx, encryptedDir, wrappedCallback)
+	entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir))
+	if err != nil {
+		return nil, err
+	}
+	return f.encryptEntries(ctx, entries)
 }

 // ListR lists the objects and directories of the Fs starting
@@ -495,8 +396,6 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..

 // put implements Put or PutStream
 func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
-	ci := fs.GetConfig(ctx)
-
 	if f.opt.NoDataEncryption {
 		o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
 		if err == nil && o != nil {
@@ -514,9 +413,6 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
 	// Find a hash the destination supports to compute a hash of
 	// the encrypted data
 	ht := f.Fs.Hashes().GetOne()
-	if ci.IgnoreChecksum {
-		ht = hash.None
-	}
 	var hasher *hash.MultiHasher
 	if ht != hash.None {
 		hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
@@ -553,7 +449,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
 			if err != nil {
 				fs.Errorf(o, "Failed to remove corrupted object: %v", err)
 			}
-			return nil, fmt.Errorf("corrupted on transfer: %v encrypted hashes differ src(%s) %q vs dst(%s) %q", ht, f.Fs, srcHash, o.Fs(), dstHash)
+			return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
 		}
 		fs.Debugf(src, "%v = %s OK", ht, srcHash)
 	}
@@ -588,37 +484,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 	return f.Fs.Mkdir(ctx, f.cipher.EncryptDirName(dir))
 }

-// MkdirMetadata makes the root directory of the Fs object
-func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
-	do := f.Fs.Features().MkdirMetadata
-	if do == nil {
-		return nil, fs.ErrorNotImplemented
-	}
-	newDir, err := do(ctx, f.cipher.EncryptDirName(dir), metadata)
-	if err != nil {
-		return nil, err
-	}
-	var entries = make(fs.DirEntries, 0, 1)
-	err = f.addDir(ctx, &entries, newDir)
-	if err != nil {
-		return nil, err
-	}
-	newDir, ok := entries[0].(fs.Directory)
-	if !ok {
-		return nil, fmt.Errorf("internal error: expecting %T to be fs.Directory", entries[0])
-	}
-	return newDir, nil
-}
-
-// DirSetModTime sets the directory modtime for dir
-func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
-	do := f.Fs.Features().DirSetModTime
-	if do == nil {
-		return fs.ErrorNotImplemented
-	}
-	return do(ctx, f.cipher.EncryptDirName(dir), modTime)
-}
-
 // Rmdir removes the directory (container, bucket) if empty
 //
 // Return an error if it doesn't exist or isn't empty
@@ -860,7 +725,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
 	}
 	out := make([]fs.Directory, len(dirs))
 	for i, dir := range dirs {
-		out[i] = fs.NewDirWrapper(f.cipher.EncryptDirName(dir.Remote()), dir)
+		out[i] = fs.NewDirCopy(ctx, dir).SetRemote(f.cipher.EncryptDirName(dir.Remote()))
 	}
 	return do(ctx, out)
 }
@@ -957,7 +822,7 @@ Usage Example:
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
 	switch name {
 	case "decode":
 		out := make([]string, 0, len(arg))
@@ -1096,14 +961,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

 // newDir returns a dir with the Name decrypted
 func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
+	newDir := fs.NewDirCopy(ctx, dir)
 	remote := dir.Remote()
 	decryptedRemote, err := f.cipher.DecryptDirName(remote)
 	if err != nil {
 		fs.Debugf(remote, "Undecryptable dir name: %v", err)
 	} else {
-		remote = decryptedRemote
+		newDir.SetRemote(decryptedRemote)
 	}
-	newDir := fs.NewDirWrapper(remote, dir)
 	return newDir
 }
@@ -1281,17 +1146,6 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
 	return do.Metadata(ctx)
 }

-// SetMetadata sets metadata for an Object
-//
-// It should return fs.ErrorNotImplemented if it can't set metadata
-func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
-	do, ok := o.Object.(fs.SetMetadataer)
-	if !ok {
-		return fs.ErrorNotImplemented
-	}
-	return do.SetMetadata(ctx, metadata)
-}
-
 // MimeType returns the content type of the Object if
 // known, or "" if not
 //
@@ -1317,8 +1171,6 @@ var (
 	_ fs.Abouter         = (*Fs)(nil)
 	_ fs.Wrapper         = (*Fs)(nil)
 	_ fs.MergeDirser     = (*Fs)(nil)
-	_ fs.DirSetModTimer  = (*Fs)(nil)
-	_ fs.MkdirMetadataer = (*Fs)(nil)
 	_ fs.DirCacheFlusher = (*Fs)(nil)
 	_ fs.ChangeNotifier  = (*Fs)(nil)
 	_ fs.PublicLinker    = (*Fs)(nil)
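
The put path in the hunks above hashes the encrypted stream while uploading and compares the result with the hash the destination reports, deleting the object on a mismatch. A rough, self-contained sketch of that verify-on-upload pattern (the names here are illustrative, not rclone's API):

package main

import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
)

// upload copies in to dst while hashing exactly the bytes written,
// then compares against the hash the destination reports.
func upload(in io.Reader, dst io.Writer, dstHash func() string) error {
	h := md5.New()
	if _, err := io.Copy(dst, io.TeeReader(in, h)); err != nil {
		return err
	}
	srcHash := hex.EncodeToString(h.Sum(nil))
	if got := dstHash(); got != srcHash {
		return fmt.Errorf("corrupted on transfer: md5 differ src %q vs dst %q", srcHash, got)
	}
	return nil
}

func main() {
	var buf bytes.Buffer
	err := upload(bytes.NewBufferString("data"), &buf, func() string {
		sum := md5.Sum(buf.Bytes())
		return hex.EncodeToString(sum[:])
	})
	fmt.Println(err) // <nil>
}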


@@ -24,7 +24,7 @@ func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName:                   *fstest.RemoteName,
 		NilObject:                    (*crypt.Object)(nil),
-		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
 	})
 }
@@ -45,7 +45,7 @@ func TestStandardBase32(t *testing.T) {
 			{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
 			{Name: name, Key: "filename_encryption", Value: "standard"},
 		},
-		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
 		QuickTestOK:                  true,
 	})
@@ -67,7 +67,7 @@ func TestStandardBase64(t *testing.T) {
 			{Name: name, Key: "filename_encryption", Value: "standard"},
 			{Name: name, Key: "filename_encoding", Value: "base64"},
 		},
-		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
 		QuickTestOK:                  true,
 	})
@@ -89,7 +89,7 @@ func TestStandardBase32768(t *testing.T) {
 			{Name: name, Key: "filename_encryption", Value: "standard"},
 			{Name: name, Key: "filename_encoding", Value: "base32768"},
 		},
-		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
 		QuickTestOK:                  true,
 	})
@@ -111,7 +111,7 @@ func TestOff(t *testing.T) {
 			{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
 			{Name: name, Key: "filename_encryption", Value: "off"},
 		},
-		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
 		QuickTestOK:                  true,
 	})
@@ -137,7 +137,7 @@ func TestObfuscate(t *testing.T) {
 			{Name: name, Key: "filename_encryption", Value: "obfuscate"},
 		},
 		SkipBadWindowsCharacters:     true,
-		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
 		QuickTestOK:                  true,
 	})
@@ -164,7 +164,7 @@ func TestNoDataObfuscate(t *testing.T) {
 			{Name: name, Key: "no_data_encryption", Value: "true"},
 		},
 		SkipBadWindowsCharacters:     true,
-		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
 		QuickTestOK:                  true,
 	})


@@ -25,7 +25,7 @@ func Pad(n int, buf []byte) []byte {
 	}
 	length := len(buf)
 	padding := n - (length % n)
-	for range padding {
+	for i := 0; i < padding; i++ {
 		buf = append(buf, byte(padding))
 	}
 	if (len(buf) % n) != 0 {
@@ -54,7 +54,7 @@ func Unpad(n int, buf []byte) ([]byte, error) {
 	if padding == 0 {
 		return nil, ErrorPaddingTooShort
 	}
-	for i := range padding {
+	for i := 0; i < padding; i++ {
 		if buf[length-1-i] != byte(padding) {
 			return nil, ErrorPaddingNotAllTheSame
 		}
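
Pad and Unpad above implement PKCS#7-style padding: the input grows to a multiple of n with bytes whose value is the number of bytes added, so the receiver can strip and verify them. A standalone sketch of the same scheme (not the rclone functions themselves, and valid only for n < 256):

package main

import (
	"bytes"
	"fmt"
)

// pkcs7Pad extends buf to a multiple of n; each added byte holds the
// padding length, so an input already on a boundary gains a full block.
func pkcs7Pad(n int, buf []byte) []byte {
	padding := n - len(buf)%n
	return append(buf, bytes.Repeat([]byte{byte(padding)}, padding)...)
}

// pkcs7Unpad reverses pkcs7Pad, checking every padding byte.
func pkcs7Unpad(n int, buf []byte) ([]byte, error) {
	if len(buf) == 0 || len(buf)%n != 0 {
		return nil, fmt.Errorf("bad padded length %d", len(buf))
	}
	padding := int(buf[len(buf)-1])
	if padding == 0 || padding > n {
		return nil, fmt.Errorf("bad padding byte %d", padding)
	}
	for _, b := range buf[len(buf)-padding:] {
		if int(b) != padding {
			return nil, fmt.Errorf("padding bytes disagree")
		}
	}
	return buf[:len(buf)-padding], nil
}

func main() {
	padded := pkcs7Pad(16, []byte("hello"))
	fmt.Printf("%d bytes padded: % x\n", len(padded), padded)
	orig, err := pkcs7Unpad(16, padded)
	fmt.Println(string(orig), err) // hello <nil>
}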


@@ -1,38 +0,0 @@
// Type definitions specific to Dataverse
package api
// DataverseDatasetResponse is returned by the Dataverse dataset API
type DataverseDatasetResponse struct {
Status string `json:"status"`
Data DataverseDataset `json:"data"`
}
// DataverseDataset is the representation of a dataset
type DataverseDataset struct {
LatestVersion DataverseDatasetVersion `json:"latestVersion"`
}
// DataverseDatasetVersion is the representation of a dataset version
type DataverseDatasetVersion struct {
LastUpdateTime string `json:"lastUpdateTime"`
Files []DataverseFile `json:"files"`
}
// DataverseFile is the representation of a file found in a dataset
type DataverseFile struct {
DirectoryLabel string `json:"directoryLabel"`
DataFile DataverseDataFile `json:"dataFile"`
}
// DataverseDataFile represents file metadata details
type DataverseDataFile struct {
ID int64 `json:"id"`
Filename string `json:"filename"`
ContentType string `json:"contentType"`
FileSize int64 `json:"filesize"`
OriginalFileFormat string `json:"originalFileFormat"`
OriginalFileSize int64 `json:"originalFileSize"`
OriginalFileName string `json:"originalFileName"`
MD5 string `json:"md5"`
}
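
For reference, a sketch of decoding a response with the types above (assuming they are in scope in package api); the payload values are invented for illustration and real Dataverse responses carry many more fields:

package api

import (
	"encoding/json"
	"fmt"
)

// Example decodes an invented response body shaped after the struct
// tags above.
func Example() {
	const sample = `{
	  "status": "OK",
	  "data": {"latestVersion": {
	    "lastUpdateTime": "2022-01-01T00:00:00Z",
	    "files": [{"directoryLabel": "data",
	      "dataFile": {"id": 42, "filename": "table.csv",
	        "contentType": "text/csv", "filesize": 1234,
	        "md5": "0123456789abcdef0123456789abcdef"}}]}}
	}`
	var r DataverseDatasetResponse
	if err := json.Unmarshal([]byte(sample), &r); err != nil {
		panic(err)
	}
	f := r.Data.LatestVersion.Files[0]
	fmt.Println(f.DirectoryLabel, f.DataFile.Filename, f.DataFile.FileSize)
	// Output: data table.csv 1234
}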


@@ -1,33 +0,0 @@
// Type definitions specific to InvenioRDM
package api
// InvenioRecordResponse is the representation of a record stored in InvenioRDM
type InvenioRecordResponse struct {
Links InvenioRecordResponseLinks `json:"links"`
}
// InvenioRecordResponseLinks represents a record's links
type InvenioRecordResponseLinks struct {
Self string `json:"self"`
}
// InvenioFilesResponse is the representation of a record's files
type InvenioFilesResponse struct {
Entries []InvenioFilesResponseEntry `json:"entries"`
}
// InvenioFilesResponseEntry is the representation of a file entry
type InvenioFilesResponseEntry struct {
Key string `json:"key"`
Checksum string `json:"checksum"`
Size int64 `json:"size"`
Updated string `json:"updated"`
MimeType string `json:"mimetype"`
Links InvenioFilesResponseEntryLinks `json:"links"`
}
// InvenioFilesResponseEntryLinks represents file links details
type InvenioFilesResponseEntryLinks struct {
Content string `json:"content"`
}


@@ -1,26 +0,0 @@
// Package api has general type definitions for doi
package api
// DoiResolverResponse is returned by the DOI resolver API
//
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
type DoiResolverResponse struct {
ResponseCode int `json:"responseCode"`
Handle string `json:"handle"`
Values []DoiResolverResponseValue `json:"values"`
}
// DoiResolverResponseValue is a single handle record value
type DoiResolverResponseValue struct {
Index int `json:"index"`
Type string `json:"type"`
Data DoiResolverResponseValueData `json:"data"`
TTL int `json:"ttl"`
Timestamp string `json:"timestamp"`
}
// DoiResolverResponseValueData is the data held in a handle value
type DoiResolverResponseValueData struct {
Format string `json:"format"`
Value any `json:"value"`
}
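
The resolution flow these types support can be exercised directly against the public API; a hedged sketch (it assumes the types above are in scope, and mirrors the URL/string filtering the backend performs):

package api

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// resolve fetches the handle record for a DOI and returns its URL value,
// e.g. resolve("10.1000/182") - the DOI Handbook's own DOI.
func resolve(doi string) (string, error) {
	res, err := http.Get("https://doi.org/api/handles/" + doi)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	var r DoiResolverResponse
	if err := json.NewDecoder(res.Body).Decode(&r); err != nil {
		return "", err
	}
	if r.ResponseCode != 1 {
		return "", fmt.Errorf("could not resolve DOI (error code %d)", r.ResponseCode)
	}
	for _, v := range r.Values {
		if v.Type == "URL" && v.Data.Format == "string" {
			if s, ok := v.Data.Value.(string); ok {
				return s, nil
			}
		}
	}
	return "", fmt.Errorf("no URL value in handle record")
}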


@@ -1,112 +0,0 @@
// Implementation for Dataverse
package doi
import (
"context"
"fmt"
"net/http"
"net/url"
"path"
"strings"
"time"
"github.com/rclone/rclone/backend/doi/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/rest"
)
// Returns true if resolvedURL is likely a DOI hosted on a Dataverse installation
func activateDataverse(resolvedURL *url.URL) (isActive bool) {
queryValues := resolvedURL.Query()
persistentID := queryValues.Get("persistentId")
return persistentID != ""
}
// Resolve the main API endpoint for a DOI hosted on a Dataverse installation
func resolveDataverseEndpoint(resolvedURL *url.URL) (provider Provider, endpoint *url.URL, err error) {
queryValues := resolvedURL.Query()
persistentID := queryValues.Get("persistentId")
query := url.Values{}
query.Add("persistentId", persistentID)
endpointURL := resolvedURL.ResolveReference(&url.URL{Path: "/api/datasets/:persistentId/", RawQuery: query.Encode()})
return Dataverse, endpointURL, nil
}
// dataverseProvider implements the doiProvider interface for Dataverse installations
type dataverseProvider struct {
f *Fs
}
// ListEntries returns the full list of entries found at the remote, regardless of root
func (dp *dataverseProvider) ListEntries(ctx context.Context) (entries []*Object, err error) {
// Use the cache if populated
cachedEntries, found := dp.f.cache.GetMaybe("files")
if found {
parsedEntries, ok := cachedEntries.([]Object)
if ok {
for _, entry := range parsedEntries {
newEntry := entry
entries = append(entries, &newEntry)
}
return entries, nil
}
}
filesURL := dp.f.endpoint
var res *http.Response
var result api.DataverseDatasetResponse
opts := rest.Opts{
Method: "GET",
Path: strings.TrimLeft(filesURL.EscapedPath(), "/"),
Parameters: filesURL.Query(),
}
err = dp.f.pacer.Call(func() (bool, error) {
res, err = dp.f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, fmt.Errorf("readDir failed: %w", err)
}
modTime, modTimeErr := time.Parse(time.RFC3339, result.Data.LatestVersion.LastUpdateTime)
if modTimeErr != nil {
fs.Logf(dp.f, "error: could not parse last update time %v", modTimeErr)
modTime = timeUnset
}
for _, file := range result.Data.LatestVersion.Files {
contentURLPath := fmt.Sprintf("/api/access/datafile/%d", file.DataFile.ID)
query := url.Values{}
query.Add("format", "original")
contentURL := dp.f.endpoint.ResolveReference(&url.URL{Path: contentURLPath, RawQuery: query.Encode()})
entry := &Object{
fs: dp.f,
remote: path.Join(file.DirectoryLabel, file.DataFile.Filename),
contentURL: contentURL.String(),
size: file.DataFile.FileSize,
modTime: modTime,
md5: file.DataFile.MD5,
contentType: file.DataFile.ContentType,
}
if file.DataFile.OriginalFileName != "" {
entry.remote = path.Join(file.DirectoryLabel, file.DataFile.OriginalFileName)
entry.size = file.DataFile.OriginalFileSize
entry.contentType = file.DataFile.OriginalFileFormat
}
entries = append(entries, entry)
}
// Populate the cache
cacheEntries := []Object{}
for _, entry := range entries {
cacheEntries = append(cacheEntries, *entry)
}
dp.f.cache.Put("files", cacheEntries)
return entries, nil
}
func newDataverseProvider(f *Fs) doiProvider {
return &dataverseProvider{
f: f,
}
}


@@ -1,649 +0,0 @@
// Package doi provides a filesystem interface for digital objects identified by DOIs.
//
// See: https://www.doi.org/the-identifier/what-is-a-doi/
package doi
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"path"
"strings"
"time"
"github.com/rclone/rclone/backend/doi/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/cache"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
const (
// the URL of the DOI resolver
//
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
doiResolverAPIURL = "https://doi.org/api"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
var (
errorReadOnly = errors.New("doi remotes are read only")
timeUnset = time.Unix(0, 0)
)
func init() {
fsi := &fs.RegInfo{
Name: "doi",
Description: "DOI datasets",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "doi",
Help: "The DOI or the doi.org URL.",
Required: true,
}, {
Name: fs.ConfigProvider,
Help: `DOI provider.
The DOI provider can be set when rclone does not automatically recognize a supported DOI provider.`,
Examples: []fs.OptionExample{
{
Value: "auto",
Help: "Auto-detect provider",
},
{
Value: string(Zenodo),
Help: "Zenodo",
}, {
Value: string(Dataverse),
Help: "Dataverse",
}, {
Value: string(Invenio),
Help: "Invenio",
}},
Required: false,
Advanced: true,
}, {
Name: "doi_resolver_api_url",
Help: `The URL of the DOI resolver API to use.
The DOI resolver can be set for testing or for cases when the canonical DOI resolver API cannot be used.
Defaults to "https://doi.org/api".`,
Required: false,
Advanced: true,
}},
}
fs.Register(fsi)
}
// Provider defines the type of provider hosting the DOI
type Provider string
const (
// Zenodo provider, see https://zenodo.org
Zenodo Provider = "zenodo"
// Dataverse provider, see https://dataverse.harvard.edu
Dataverse Provider = "dataverse"
// Invenio provider, see https://inveniordm.docs.cern.ch
Invenio Provider = "invenio"
)
// Options defines the configuration for this backend
type Options struct {
Doi string `config:"doi"` // The DOI, a digital identifier of an object, usually a dataset
Provider string `config:"provider"` // The DOI provider
DoiResolverAPIURL string `config:"doi_resolver_api_url"` // The URL of the DOI resolver API to use.
}
// Fs stores the interface to the remote HTTP files
type Fs struct {
name string // name of this remote
root string // the path we are working on
provider Provider // the DOI provider
doiProvider doiProvider // the interface used to interact with the DOI provider
features *fs.Features // optional features
opt Options // options for this backend
ci *fs.ConfigInfo // global config
endpoint *url.URL // the main API endpoint for this remote
endpointURL string // endpoint as a string
srv *rest.Client // the connection to the server
pacer *fs.Pacer // pacer for API calls
cache *cache.Cache // a cache for the remote metadata
}
// Object is a remote object that has been stat'd (so it exists, but is not necessarily open for reading)
type Object struct {
fs *Fs // what this object is part of
remote string // the remote path
contentURL string // the URL where the contents of the file can be downloaded
size int64 // size of the object
modTime time.Time // modification time of the object
contentType string // content type of the object
md5 string // MD5 hash of the object content
}
// doiProvider is the interface used to list objects in a DOI
type doiProvider interface {
// ListEntries returns the full list of entries found at the remote, regardless of root
ListEntries(ctx context.Context) (entries []*Object, err error)
}
// Parse the input string as a DOI
// Examples:
// 10.1000/182 -> 10.1000/182
// https://doi.org/10.1000/182 -> 10.1000/182
// doi:10.1000/182 -> 10.1000/182
func parseDoi(doi string) string {
doiURL, err := url.Parse(doi)
if err != nil {
return doi
}
if doiURL.Scheme == "doi" {
return strings.TrimLeft(strings.TrimPrefix(doi, "doi:"), "/")
}
if strings.HasSuffix(doiURL.Hostname(), "doi.org") {
return strings.TrimLeft(doiURL.Path, "/")
}
return doi
}
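
// A quick table-driven check of the normalisation rules above - a
// hypothetical sketch, not part of the original file (it would normally
// live in a _test.go file with "testing" imported):
func TestParseDoi(t *testing.T) {
	for _, test := range []struct{ in, want string }{
		{"10.1000/182", "10.1000/182"},
		{"https://doi.org/10.1000/182", "10.1000/182"},
		{"doi:10.1000/182", "10.1000/182"},
	} {
		if got := parseDoi(test.in); got != test.want {
			t.Errorf("parseDoi(%q) = %q, want %q", test.in, got, test.want)
		}
	}
}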
// Resolve a DOI to a URL
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
func resolveDoiURL(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (doiURL *url.URL, err error) {
resolverURL := opt.DoiResolverAPIURL
if resolverURL == "" {
resolverURL = doiResolverAPIURL
}
var result api.DoiResolverResponse
params := url.Values{}
params.Add("index", "1")
opts := rest.Opts{
Method: "GET",
RootURL: resolverURL,
Path: "/handles/" + opt.Doi,
Parameters: params,
}
err = pacer.Call(func() (bool, error) {
res, err := srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, err
}
if result.ResponseCode != 1 {
return nil, fmt.Errorf("could not resolve DOI (error code %d)", result.ResponseCode)
}
resolvedURLStr := ""
for _, value := range result.Values {
if value.Type == "URL" && value.Data.Format == "string" {
valueStr, ok := value.Data.Value.(string)
if !ok {
return nil, fmt.Errorf("could not resolve DOI (incorrect response format)")
}
resolvedURLStr = valueStr
}
}
resolvedURL, err := url.Parse(resolvedURLStr)
if err != nil {
return nil, err
}
return resolvedURL, nil
}
// Resolve the passed configuration into a provider and endpoint
func resolveEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (provider Provider, endpoint *url.URL, err error) {
resolvedURL, err := resolveDoiURL(ctx, srv, pacer, opt)
if err != nil {
return "", nil, err
}
switch opt.Provider {
case string(Dataverse):
return resolveDataverseEndpoint(resolvedURL)
case string(Invenio):
return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
case string(Zenodo):
return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
}
hostname := strings.ToLower(resolvedURL.Hostname())
if hostname == "dataverse.harvard.edu" || activateDataverse(resolvedURL) {
return resolveDataverseEndpoint(resolvedURL)
}
if hostname == "zenodo.org" || strings.HasSuffix(hostname, ".zenodo.org") {
return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
}
if activateInvenio(ctx, srv, pacer, resolvedURL) {
return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
}
return "", nil, fmt.Errorf("provider '%s' is not supported", resolvedURL.Hostname())
}
// Make the http connection from the passed options
func (f *Fs) httpConnection(ctx context.Context, opt *Options) (isFile bool, err error) {
provider, endpoint, err := resolveEndpoint(ctx, f.srv, f.pacer, opt)
if err != nil {
return false, err
}
// Update f with the new parameters
f.srv.SetRoot(endpoint.ResolveReference(&url.URL{Path: "/"}).String())
f.endpoint = endpoint
f.endpointURL = endpoint.String()
f.provider = provider
f.opt.Provider = string(provider)
switch f.provider {
case Dataverse:
f.doiProvider = newDataverseProvider(f)
case Invenio, Zenodo:
f.doiProvider = newInvenioProvider(f)
default:
return false, fmt.Errorf("provider type '%s' not supported", f.provider)
}
// Determine if the root is a file
entries, err := f.doiProvider.ListEntries(ctx)
if err != nil {
return false, err
}
for _, entry := range entries {
if entry.remote == f.root {
isFile = true
break
}
}
return isFile, nil
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this res and err
// deserve to be retried. It returns the err as a convenience.
func shouldRetry(ctx context.Context, res *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(res, retryErrorCodes), err
}
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
root = strings.Trim(root, "/")
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
opt.Doi = parseDoi(opt.Doi)
client := fshttp.NewClient(ctx)
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
ci: ci,
srv: rest.NewClient(client),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
cache: cache.New(),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
isFile, err := f.httpConnection(ctx, opt)
if err != nil {
return nil, err
}
if isFile {
// return an error with an fs which points to the parent
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
}
f.root = newRoot
return f, fs.ErrorIsFile
}
return f, nil
}
// Name returns the configured name of the file system
func (f *Fs) Name() string {
return f.name
}
// Root returns the root for the filesystem
func (f *Fs) Root() string {
return f.root
}
// String returns the URL for the filesystem
func (f *Fs) String() string {
return fmt.Sprintf("DOI %s", f.opt.Doi)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Precision is the remote file system's modtime precision, which we have no way of knowing. We estimate at 1s
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Hashes returns hash.MD5, the hash type the DOI providers report
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return errorReadOnly
}
// Remove a remote http file object
func (o *Object) Remove(ctx context.Context) error {
return errorReadOnly
}
// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return errorReadOnly
}
// NewObject creates a new remote http file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
entries, err := f.doiProvider.ListEntries(ctx)
if err != nil {
return nil, err
}
remoteFullPath := remote
if f.root != "" {
remoteFullPath = path.Join(f.root, remote)
}
for _, entry := range entries {
if entry.Remote() == remoteFullPath {
return entry, nil
}
}
return nil, fs.ErrorObjectNotFound
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
fileEntries, err := f.doiProvider.ListEntries(ctx)
if err != nil {
return nil, fmt.Errorf("error listing %q: %w", dir, err)
}
fullDir := path.Join(f.root, dir)
if fullDir != "" {
fullDir += "/"
}
dirPaths := map[string]bool{}
for _, entry := range fileEntries {
// First, filter out files not in `fullDir`
if !strings.HasPrefix(entry.remote, fullDir) {
continue
}
// Then, find entries in subfolders
remotePath := entry.remote
if fullDir != "" {
remotePath = strings.TrimLeft(strings.TrimPrefix(remotePath, fullDir), "/")
}
parts := strings.SplitN(remotePath, "/", 2)
if len(parts) == 1 {
newEntry := *entry
newEntry.remote = path.Join(dir, remotePath)
entries = append(entries, &newEntry)
} else {
dirPaths[path.Join(dir, parts[0])] = true
}
}
for dirPath := range dirPaths {
entry := fs.NewDir(dirPath, time.Time{})
entries = append(entries, entry)
}
return entries, nil
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return nil, errorReadOnly
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return nil, errorReadOnly
}
// Fs is the filesystem this remote http file object is located within
func (o *Object) Fs() fs.Info {
return o.fs
}
// String returns the remote path of the object
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote the name of the remote HTTP file, relative to the fs root
func (o *Object) Remote() string {
return o.remote
}
// Hash returns "" since HTTP (in Go or OpenSSH) doesn't support remote calculation of hashes
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
return o.md5, nil
}
// Size returns the size in bytes of the remote http file
func (o *Object) Size() int64 {
return o.size
}
// ModTime returns the modification time of the remote http file
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// SetModTime sets the modification and access time to the specified time
//
// it also updates the info field
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return errorReadOnly
}
// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
func (o *Object) Storable() bool {
return true
}
// Open a remote http file object for reading. Seek is supported
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
fs.FixRangeOption(options, o.size)
opts := rest.Opts{
Method: "GET",
RootURL: o.contentURL,
Options: options,
}
var res *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, fmt.Errorf("Open failed: %w", err)
}
// Handle non-compliant redirects
if res.Header.Get("Location") != "" {
newURL, err := res.Location()
if err == nil {
opts.RootURL = newURL.String()
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, fmt.Errorf("Open failed: %w", err)
}
}
}
return res.Body, nil
}
// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return errorReadOnly
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
return o.contentType
}
var commandHelp = []fs.CommandHelp{{
Name: "metadata",
Short: "Show metadata about the DOI.",
Long: `This command returns a JSON object with some information about the DOI.

    rclone backend metadata doi:

The result is a JSON object describing the DOI and where its metadata is served from.
`,
}, {
Name: "set",
Short: "Set command for updating the config parameters.",
Long: `This set command can be used to update the config parameters
for a running doi backend.
Usage Examples:

    rclone backend set doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
    rclone rc backend/command command=set fs=doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
    rclone rc backend/command command=set fs=doi: -o doi=NEW_DOI
The option keys are named as they are in the config file.
This rebuilds the connection to the doi backend when it is called with
the new parameters. Only new parameters need be passed as the values
will default to those currently in use.
It doesn't return anything.
`,
}}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
switch name {
case "metadata":
return f.ShowMetadata(ctx)
case "set":
newOpt := f.opt
err := configstruct.Set(configmap.Simple(opt), &newOpt)
if err != nil {
return nil, fmt.Errorf("reading config: %w", err)
}
_, err = f.httpConnection(ctx, &newOpt)
if err != nil {
return nil, fmt.Errorf("updating session: %w", err)
}
f.opt = newOpt
keys := []string{}
for k := range opt {
keys = append(keys, k)
}
fs.Logf(f, "Updated config values: %s", strings.Join(keys, ", "))
return nil, nil
default:
return nil, fs.ErrorCommandNotFound
}
}
// ShowMetadata returns some metadata about the corresponding DOI
func (f *Fs) ShowMetadata(ctx context.Context) (metadata any, err error) {
doiURL, err := url.Parse("https://doi.org/" + f.opt.Doi)
if err != nil {
return nil, err
}
info := map[string]any{}
info["DOI"] = f.opt.Doi
info["URL"] = doiURL.String()
info["metadataURL"] = f.endpointURL
info["provider"] = f.provider
return info, nil
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Commander = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
)
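As the checks above record, the backend satisfies fs.Fs, fs.PutStreamer and fs.Commander, although every write path returns errorReadOnly. For orientation, a minimal illustrative setup using the DOI from the tests below:

[mydoi]
type = doi
doi = 10.5281/zenodo.2600782

rclone lsf mydoi:
rclone backend metadata mydoi: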


@@ -1,260 +0,0 @@
package doi
import (
"context"
"crypto/md5"
"encoding/hex"
"encoding/json"
"io"
"net/http"
"net/http/httptest"
"net/url"
"sort"
"strings"
"testing"
"time"
"github.com/rclone/rclone/backend/doi/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/hash"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var remoteName = "TestDoi"
func TestParseDoi(t *testing.T) {
// 10.1000/182 -> 10.1000/182
doi := "10.1000/182"
parsed := parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
// https://doi.org/10.1000/182 -> 10.1000/182
doi = "https://doi.org/10.1000/182"
parsed = parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
// https://dx.doi.org/10.1000/182 -> 10.1000/182
doi = "https://dxdoi.org/10.1000/182"
parsed = parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
// doi:10.1000/182 -> 10.1000/182
doi = "doi:10.1000/182"
parsed = parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
// doi://10.1000/182 -> 10.1000/182
doi = "doi://10.1000/182"
parsed = parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
}
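// parseDoiSketch is an illustrative (hypothetical) helper showing the
// normalisation the cases above pin down; the backend's real parseDoi
// may differ in detail. Note "doi://" must be tried before "doi:".
func parseDoiSketch(doi string) string {
	for _, prefix := range []string{"https://doi.org/", "https://dx.doi.org/", "doi://", "doi:"} {
		if strings.HasPrefix(doi, prefix) {
			return strings.TrimPrefix(doi, prefix)
		}
	}
	return doi
}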
// prepareMockDoiResolverServer prepares a test server to resolve DOIs
func prepareMockDoiResolverServer(t *testing.T, resolvedURL string) (doiResolverAPIURL string) {
mux := http.NewServeMux()
// Handle requests for resolving DOIs
mux.HandleFunc("GET /api/handles/{handle...}", func(w http.ResponseWriter, r *http.Request) {
// Check that we are resolving a DOI
handle := strings.TrimPrefix(r.URL.Path, "/api/handles/")
assert.NotEmpty(t, handle)
index := r.URL.Query().Get("index")
assert.Equal(t, "1", index)
// Return the most basic response
result := api.DoiResolverResponse{
ResponseCode: 1,
Handle: handle,
Values: []api.DoiResolverResponseValue{
{
Index: 1,
Type: "URL",
Data: api.DoiResolverResponseValueData{
Format: "string",
Value: resolvedURL,
},
},
},
}
resultBytes, err := json.Marshal(result)
require.NoError(t, err)
w.Header().Add("Content-Type", "application/json")
_, err = w.Write(resultBytes)
require.NoError(t, err)
})
// Make the test server
ts := httptest.NewServer(mux)
// Close the server at the end of the test
t.Cleanup(ts.Close)
return ts.URL + "/api"
}
func md5Sum(text string) string {
hash := md5.Sum([]byte(text))
return hex.EncodeToString(hash[:])
}
// prepareMockZenodoServer prepares a test server that mocks Zenodo.org
func prepareMockZenodoServer(t *testing.T, files map[string]string) *httptest.Server {
mux := http.NewServeMux()
// Handle requests for a single record
mux.HandleFunc("GET /api/records/{recordID...}", func(w http.ResponseWriter, r *http.Request) {
// Check that we are returning data about a single record
recordID := strings.TrimPrefix(r.URL.Path, "/api/records/")
assert.NotEmpty(t, recordID)
// Return the most basic response
selfURL, err := url.Parse("http://" + r.Host)
require.NoError(t, err)
selfURL = selfURL.JoinPath(r.URL.String())
result := api.InvenioRecordResponse{
Links: api.InvenioRecordResponseLinks{
Self: selfURL.String(),
},
}
resultBytes, err := json.Marshal(result)
require.NoError(t, err)
w.Header().Add("Content-Type", "application/json")
_, err = w.Write(resultBytes)
require.NoError(t, err)
})
// Handle requests for listing files in a record
mux.HandleFunc("GET /api/records/{record}/files", func(w http.ResponseWriter, r *http.Request) {
// Return the most basic response
filesBaseURL, err := url.Parse("http://" + r.Host)
require.NoError(t, err)
filesBaseURL = filesBaseURL.JoinPath("/api/files/")
entries := []api.InvenioFilesResponseEntry{}
for filename, contents := range files {
entries = append(entries,
api.InvenioFilesResponseEntry{
Key: filename,
Checksum: md5Sum(contents),
Size: int64(len(contents)),
Updated: time.Now().UTC().Format(time.RFC3339),
MimeType: "text/plain; charset=utf-8",
Links: api.InvenioFilesResponseEntryLinks{
Content: filesBaseURL.JoinPath(filename).String(),
},
},
)
}
result := api.InvenioFilesResponse{
Entries: entries,
}
resultBytes, err := json.Marshal(result)
require.NoError(t, err)
w.Header().Add("Content-Type", "application/json")
_, err = w.Write(resultBytes)
require.NoError(t, err)
})
// Handle requests for file contents
mux.HandleFunc("/api/files/{file}", func(w http.ResponseWriter, r *http.Request) {
// Check that we are returning the contents of a file
filename := strings.TrimPrefix(r.URL.Path, "/api/files/")
assert.NotEmpty(t, filename)
contents, found := files[filename]
if !found {
w.WriteHeader(404)
return
}
// Return the most basic response
_, err := w.Write([]byte(contents))
require.NoError(t, err)
})
// Make the test server
ts := httptest.NewServer(mux)
// Close the server at the end of the test
t.Cleanup(ts.Close)
return ts
}
func TestZenodoRemote(t *testing.T) {
recordID := "2600782"
doi := "10.5281/zenodo.2600782"
// The files in the dataset
files := map[string]string{
"README.md": "This is a dataset.",
"data.txt": "Some data",
}
ts := prepareMockZenodoServer(t, files)
resolvedURL := ts.URL + "/record/" + recordID
doiResolverAPIURL := prepareMockDoiResolverServer(t, resolvedURL)
testConfig := configmap.Simple{
"type": "doi",
"doi": doi,
"provider": "zenodo",
"doi_resolver_api_url": doiResolverAPIURL,
}
f, err := NewFs(context.Background(), remoteName, "", testConfig)
require.NoError(t, err)
// Test listing the DOI files
entries, err := f.List(context.Background(), "")
require.NoError(t, err)
sort.Sort(entries)
require.Equal(t, len(files), len(entries))
e := entries[0]
assert.Equal(t, "README.md", e.Remote())
assert.Equal(t, int64(18), e.Size())
_, ok := e.(*Object)
assert.True(t, ok)
e = entries[1]
assert.Equal(t, "data.txt", e.Remote())
assert.Equal(t, int64(9), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
// Test reading the DOI files
o, err := f.NewObject(context.Background(), "README.md")
require.NoError(t, err)
assert.Equal(t, int64(18), o.Size())
md5Hash, err := o.Hash(context.Background(), hash.MD5)
require.NoError(t, err)
assert.Equal(t, "464352b1cab5240e44528a56fda33d9d", md5Hash)
fd, err := o.Open(context.Background())
require.NoError(t, err)
data, err := io.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, []byte(files["README.md"]), data)
do, ok := o.(fs.MimeTyper)
require.True(t, ok)
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
o, err = f.NewObject(context.Background(), "data.txt")
require.NoError(t, err)
assert.Equal(t, int64(9), o.Size())
md5Hash, err = o.Hash(context.Background(), hash.MD5)
require.NoError(t, err)
assert.Equal(t, "5b82f8bf4df2bfb0e66ccaa7306fd024", md5Hash)
fd, err = o.Open(context.Background())
require.NoError(t, err)
data, err = io.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, []byte(files["data.txt"]), data)
do, ok = o.(fs.MimeTyper)
require.True(t, ok)
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
}


@@ -1,16 +0,0 @@
// Test DOI filesystem interface
package doi
import (
"testing"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestDoi:",
NilObject: (*Object)(nil),
})
}


@@ -1,164 +0,0 @@
// Implementation for InvenioRDM
package doi
import (
"context"
"fmt"
"net/http"
"net/url"
"regexp"
"strings"
"time"
"github.com/rclone/rclone/backend/doi/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/rest"
)
var invenioRecordRegex = regexp.MustCompile(`\/records?\/(.+)`)
// activateInvenio returns true if resolvedURL is likely a DOI hosted on an InvenioRDM installation
func activateInvenio(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (isActive bool) {
_, _, err := resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
return err == nil
}
// Resolve the main API endpoint for a DOI hosted on an InvenioRDM installation
func resolveInvenioEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (provider Provider, endpoint *url.URL, err error) {
var res *http.Response
opts := rest.Opts{
Method: "GET",
RootURL: resolvedURL.String(),
}
err = pacer.Call(func() (bool, error) {
res, err = srv.Call(ctx, &opts)
return shouldRetry(ctx, res, err)
})
if err != nil {
return "", nil, err
}
// First, attempt to grab the API URL from the headers
var linksetURL *url.URL
links := parseLinkHeader(res.Header.Get("Link"))
for _, link := range links {
if link.Rel == "linkset" && link.Type == "application/linkset+json" {
parsed, err := url.Parse(link.Href)
if err == nil {
linksetURL = parsed
break
}
}
}
if linksetURL != nil {
endpoint, err = checkInvenioAPIURL(ctx, srv, pacer, linksetURL)
if err == nil {
return Invenio, endpoint, nil
}
fs.Logf(nil, "using linkset URL failed: %s", err.Error())
}
// If there is no linkset header, try to grab the record ID from the URL
recordID := ""
resURL := res.Request.URL
match := invenioRecordRegex.FindStringSubmatch(resURL.EscapedPath())
if match != nil {
recordID = match[1]
guessedURL := res.Request.URL.ResolveReference(&url.URL{
Path: "/api/records/" + recordID,
})
endpoint, err = checkInvenioAPIURL(ctx, srv, pacer, guessedURL)
if err == nil {
return Invenio, endpoint, nil
}
fs.Logf(nil, "guessing the URL failed: %s", err.Error())
}
return "", nil, fmt.Errorf("could not resolve the Invenio API endpoint for '%s'", resolvedURL.String())
}
func checkInvenioAPIURL(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (endpoint *url.URL, err error) {
var result api.InvenioRecordResponse
opts := rest.Opts{
Method: "GET",
RootURL: resolvedURL.String(),
}
err = pacer.Call(func() (bool, error) {
res, err := srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, err
}
if result.Links.Self == "" {
return nil, fmt.Errorf("could not parse API response from '%s'", resolvedURL.String())
}
return url.Parse(result.Links.Self)
}
// invenioProvider implements the doiProvider interface for InvenioRDM installations
type invenioProvider struct {
f *Fs
}
// ListEntries returns the full list of entries found at the remote, regardless of root
func (ip *invenioProvider) ListEntries(ctx context.Context) (entries []*Object, err error) {
// Use the cache if populated
cachedEntries, found := ip.f.cache.GetMaybe("files")
if found {
parsedEntries, ok := cachedEntries.([]Object)
if ok {
for _, entry := range parsedEntries {
newEntry := entry
entries = append(entries, &newEntry)
}
return entries, nil
}
}
filesURL := ip.f.endpoint.JoinPath("files")
var result api.InvenioFilesResponse
opts := rest.Opts{
Method: "GET",
Path: strings.TrimLeft(filesURL.EscapedPath(), "/"),
}
err = ip.f.pacer.Call(func() (bool, error) {
res, err := ip.f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, fmt.Errorf("readDir failed: %w", err)
}
for _, file := range result.Entries {
modTime, modTimeErr := time.Parse(time.RFC3339, file.Updated)
if modTimeErr != nil {
fs.Logf(ip.f, "error: could not parse last update time %v", modTimeErr)
modTime = timeUnset
}
entry := &Object{
fs: ip.f,
remote: file.Key,
contentURL: file.Links.Content,
size: file.Size,
modTime: modTime,
contentType: file.MimeType,
md5: strings.TrimPrefix(file.Checksum, "md5:"),
}
entries = append(entries, entry)
}
// Populate the cache
cacheEntries := []Object{}
for _, entry := range entries {
cacheEntries = append(cacheEntries, *entry)
}
ip.f.cache.Put("files", cacheEntries)
return entries, nil
}
func newInvenioProvider(f *Fs) doiProvider {
return &invenioProvider{
f: f,
}
}


@@ -1,75 +0,0 @@
package doi
import (
"regexp"
"strings"
)
var linkRegex = regexp.MustCompile(`^<(.+)>$`)
var valueRegex = regexp.MustCompile(`^"(.+)"$`)
// headerLink represents a link as presented in HTTP headers
// MDN Reference: https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Link
type headerLink struct {
Href string
Rel string
Type string
Extras map[string]string
}
func parseLinkHeader(header string) (links []headerLink) {
for link := range strings.SplitSeq(header, ",") {
link = strings.TrimSpace(link)
parsed := parseLink(link)
if parsed != nil {
links = append(links, *parsed)
}
}
return links
}
func parseLink(link string) (parsedLink *headerLink) {
var parts []string
for part := range strings.SplitSeq(link, ";") {
parts = append(parts, strings.TrimSpace(part))
}
match := linkRegex.FindStringSubmatch(parts[0])
if match == nil {
return nil
}
result := &headerLink{
Href: match[1],
Extras: map[string]string{},
}
for _, keyValue := range parts[1:] {
parsed := parseKeyValue(keyValue)
if parsed != nil {
key, value := parsed[0], parsed[1]
switch strings.ToLower(key) {
case "rel":
result.Rel = value
case "type":
result.Type = value
default:
result.Extras[key] = value
}
}
}
return result
}
func parseKeyValue(keyValue string) []string {
parts := strings.SplitN(keyValue, "=", 2)
if parts[0] == "" || len(parts) < 2 {
return nil
}
match := valueRegex.FindStringSubmatch(parts[1])
if match != nil {
parts[1] = match[1]
return parts
}
return parts
}


@@ -1,44 +0,0 @@
package doi
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestParseLinkHeader(t *testing.T) {
header := "<https://zenodo.org/api/records/15063252> ; rel=\"linkset\" ; type=\"application/linkset+json\""
links := parseLinkHeader(header)
expected := headerLink{
Href: "https://zenodo.org/api/records/15063252",
Rel: "linkset",
Type: "application/linkset+json",
Extras: map[string]string{},
}
assert.Contains(t, links, expected)
header = "<https://api.example.com/issues?page=2>; rel=\"prev\", <https://api.example.com/issues?page=4>; rel=\"next\", <https://api.example.com/issues?page=10>; rel=\"last\", <https://api.example.com/issues?page=1>; rel=\"first\""
links = parseLinkHeader(header)
expectedList := []headerLink{{
Href: "https://api.example.com/issues?page=2",
Rel: "prev",
Type: "",
Extras: map[string]string{},
}, {
Href: "https://api.example.com/issues?page=4",
Rel: "next",
Type: "",
Extras: map[string]string{},
}, {
Href: "https://api.example.com/issues?page=10",
Rel: "last",
Type: "",
Extras: map[string]string{},
}, {
Href: "https://api.example.com/issues?page=1",
Rel: "first",
Type: "",
Extras: map[string]string{},
}}
assert.Equal(t, links, expectedList)
}


@@ -1,47 +0,0 @@
// Implementation for Zenodo
package doi
import (
"context"
"fmt"
"net/url"
"regexp"
"github.com/rclone/rclone/backend/doi/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/rest"
)
var zenodoRecordRegex = regexp.MustCompile(`zenodo[.](.+)`)
// Resolve the main API endpoint for a DOI hosted on Zenodo
func resolveZenodoEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL, doi string) (provider Provider, endpoint *url.URL, err error) {
match := zenodoRecordRegex.FindStringSubmatch(doi)
if match == nil {
return "", nil, fmt.Errorf("could not derive API endpoint URL from '%s'", resolvedURL.String())
}
recordID := match[1]
endpointURL := resolvedURL.ResolveReference(&url.URL{Path: "/api/records/" + recordID})
var result api.InvenioRecordResponse
opts := rest.Opts{
Method: "GET",
RootURL: endpointURL.String(),
}
err = pacer.Call(func() (bool, error) {
res, err := srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, res, err)
})
if err != nil {
return "", nil, err
}
endpointURL, err = url.Parse(result.Links.Self)
if err != nil {
return "", nil, err
}
return Zenodo, endpointURL, nil
}
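For instance, with the Zenodo DOI used in the tests, the derivation above works out as (illustrative):

m := zenodoRecordRegex.FindStringSubmatch("10.5281/zenodo.2600782")
// m[1] == "2600782", so the guessed endpoint is <resolved host>/api/records/2600782,
// which is then replaced by the canonical Links.Self URL from the record response.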

File diff suppressed because it is too large.


@@ -95,7 +95,7 @@ func TestInternalParseExtensions(t *testing.T) {
 		wantErr error
 	}{
 		{"doc", []string{".doc"}, nil},
-		{" docx ,XLSX, pptx,svg,md", []string{".docx", ".xlsx", ".pptx", ".svg", ".md"}, nil},
+		{" docx ,XLSX, pptx,svg", []string{".docx", ".xlsx", ".pptx", ".svg"}, nil},
 		{"docx,svg,Docx", []string{".docx", ".svg"}, nil},
 		{"docx,potato,docx", []string{".docx"}, errors.New(`couldn't find MIME type for extension ".potato"`)},
 	} {
@@ -243,15 +243,6 @@ func (f *Fs) InternalTestShouldRetry(t *testing.T) {
 	quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403)
 	assert.False(t, quotaExceededRetry)
 	assert.Equal(t, quotaExceededError, expectedQuotaError)
-	sqEItem := googleapi.ErrorItem{
-		Reason: "storageQuotaExceeded",
-	}
-	generic403.Errors[0] = sqEItem
-	expectedStorageQuotaError := fserrors.FatalError(&generic403)
-	storageQuotaExceededRetry, storageQuotaExceededError := f.shouldRetry(ctx, &generic403)
-	assert.False(t, storageQuotaExceededRetry)
-	assert.Equal(t, storageQuotaExceededError, expectedStorageQuotaError)
 }

 func (f *Fs) InternalTestDocumentImport(t *testing.T) {
@@ -479,8 +470,8 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
 	require.NoError(t, f.Purge(ctx, "trashDir"))
 }

-// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyOrMoveID
-func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
+// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyID
+func (f *Fs) InternalTestCopyID(t *testing.T) {
 	ctx := context.Background()
 	obj, err := f.NewObject(ctx, existingFile)
 	require.NoError(t, err)
@@ -498,7 +489,7 @@ func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
 	}

 	t.Run("BadID", func(t *testing.T) {
-		err = f.copyOrMoveID(ctx, "moveid", "ID-NOT-FOUND", dir+"/")
+		err = f.copyID(ctx, "ID-NOT-FOUND", dir+"/")
 		require.Error(t, err)
 		assert.Contains(t, err.Error(), "couldn't find id")
 	})
@@ -506,71 +497,22 @@ func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
 	t.Run("Directory", func(t *testing.T) {
 		rootID, err := f.dirCache.RootID(ctx, false)
 		require.NoError(t, err)
-		err = f.copyOrMoveID(ctx, "moveid", rootID, dir+"/")
+		err = f.copyID(ctx, rootID, dir+"/")
 		require.Error(t, err)
-		assert.Contains(t, err.Error(), "can't moveid directory")
+		assert.Contains(t, err.Error(), "can't copy directory")
 	})
-	t.Run("MoveWithoutDestName", func(t *testing.T) {
-		err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/")
+	t.Run("WithoutDestName", func(t *testing.T) {
+		err = f.copyID(ctx, o.id, dir+"/")
 		require.NoError(t, err)
 		checkFile(path.Base(existingFile))
 	})
-	t.Run("CopyWithoutDestName", func(t *testing.T) {
-		err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/")
-		require.NoError(t, err)
-		checkFile(path.Base(existingFile))
-	})
-	t.Run("MoveWithDestName", func(t *testing.T) {
-		err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/potato.txt")
+	t.Run("WithDestName", func(t *testing.T) {
+		err = f.copyID(ctx, o.id, dir+"/potato.txt")
 		require.NoError(t, err)
 		checkFile("potato.txt")
 	})
-	t.Run("CopyWithDestName", func(t *testing.T) {
-		err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/potato.txt")
-		require.NoError(t, err)
-		checkFile("potato.txt")
-	})
-}
-
-// TestIntegration/FsMkdir/FsPutFiles/Internal/Query
-func (f *Fs) InternalTestQuery(t *testing.T) {
-	ctx := context.Background()
-	var err error
-	t.Run("BadQuery", func(t *testing.T) {
-		_, err = f.query(ctx, "this is a bad query")
-		require.Error(t, err)
-		assert.Contains(t, err.Error(), "failed to execute query")
-	})
-	t.Run("NoMatch", func(t *testing.T) {
-		results, err := f.query(ctx, fmt.Sprintf("name='%s' and name!='%s'", existingSubDir, existingSubDir))
-		require.NoError(t, err)
-		assert.Len(t, results, 0)
-	})
-	t.Run("GoodQuery", func(t *testing.T) {
-		pathSegments := strings.Split(existingFile, "/")
-		var parent string
-		for _, item := range pathSegments {
-			// the file name contains ' characters which must be escaped
-			escapedItem := f.opt.Enc.FromStandardName(item)
-			escapedItem = strings.ReplaceAll(escapedItem, `\`, `\\`)
-			escapedItem = strings.ReplaceAll(escapedItem, `'`, `\'`)
-			results, err := f.query(ctx, fmt.Sprintf("%strashed=false and name='%s'", parent, escapedItem))
-			require.NoError(t, err)
-			require.True(t, len(results) > 0)
-			for _, result := range results {
-				assert.True(t, len(result.Id) > 0)
-				assert.Equal(t, result.Name, item)
-			}
-			parent = fmt.Sprintf("'%s' in parents and ", results[0].Id)
-		}
-	})
 }

 // TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
@@ -578,7 +520,7 @@ func (f *Fs) InternalTestAgeQuery(t *testing.T) {
 	// Check set up for filtering
 	assert.True(t, f.Features().FilterAware)
-	opt := &filter.Options{}
+	opt := &filter.Opt{}
 	err := opt.MaxAge.Set("1h")
 	assert.NoError(t, err)
 	flt, err := filter.NewFilter(opt)
@@ -659,8 +601,7 @@ func (f *Fs) InternalTest(t *testing.T) {
 	})
 	t.Run("Shortcuts", f.InternalTestShortcuts)
 	t.Run("UnTrash", f.InternalTestUnTrash)
-	t.Run("CopyOrMoveID", f.InternalTestCopyOrMoveID)
-	t.Run("Query", f.InternalTestQuery)
+	t.Run("CopyID", f.InternalTestCopyID)
 	t.Run("AgeQuery", f.InternalTestAgeQuery)
 	t.Run("ShouldRetry", f.InternalTestShouldRetry)
 }


@@ -1,637 +0,0 @@
package drive
import (
"context"
"encoding/json"
"fmt"
"maps"
"strconv"
"strings"
"sync"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/errcount"
"golang.org/x/sync/errgroup"
drive "google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi"
)
// system metadata keys which this backend owns
var systemMetadataInfo = map[string]fs.MetadataHelp{
"content-type": {
Help: "The MIME type of the file.",
Type: "string",
Example: "text/plain",
},
"mtime": {
Help: "Time of last modification with mS accuracy.",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999Z07:00",
},
"btime": {
Help: "Time of file birth (creation) with mS accuracy. Note that this is only writable on fresh uploads - it can't be written for updates.",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999Z07:00",
},
"copy-requires-writer-permission": {
Help: "Whether the options to copy, print, or download this file, should be disabled for readers and commenters.",
Type: "boolean",
Example: "true",
},
"writers-can-share": {
Help: "Whether users with only writer permission can modify the file's permissions. Not populated and ignored when setting for items in shared drives.",
Type: "boolean",
Example: "false",
},
"viewed-by-me": {
Help: "Whether the file has been viewed by this user.",
Type: "boolean",
Example: "true",
ReadOnly: true,
},
"owner": {
Help: "The owner of the file. Usually an email address. Enable with --drive-metadata-owner.",
Type: "string",
Example: "user@example.com",
},
"permissions": {
Help: "Permissions in a JSON dump of Google drive format. On shared drives these will only be present if they aren't inherited. Enable with --drive-metadata-permissions.",
Type: "JSON",
Example: "{}",
},
"folder-color-rgb": {
Help: "The color for a folder or a shortcut to a folder as an RGB hex string.",
Type: "string",
Example: "881133",
},
"description": {
Help: "A short description of the file.",
Type: "string",
Example: "Contract for signing",
},
"starred": {
Help: "Whether the user has starred the file.",
Type: "boolean",
Example: "false",
},
"labels": {
Help: "Labels attached to this file in a JSON dump of Googled drive format. Enable with --drive-metadata-labels.",
Type: "JSON",
Example: "[]",
},
}
// Extra fields we need to fetch to implement the system metadata above
var metadataFields = googleapi.Field(strings.Join([]string{
"copyRequiresWriterPermission",
"description",
"folderColorRgb",
"hasAugmentedPermissions",
"owners",
"permissionIds",
"permissions",
"properties",
"starred",
"viewedByMe",
"viewedByMeTime",
"writersCanShare",
}, ","))
// Fields we need to read from permissions
var permissionsFields = googleapi.Field(strings.Join([]string{
"*",
"permissionDetails/*",
}, ","))
// getPermission returns permissions for the fileID and permissionID passed in
func (f *Fs) getPermission(ctx context.Context, fileID, permissionID string, useCache bool) (perm *drive.Permission, inherited bool, err error) {
f.permissionsMu.Lock()
defer f.permissionsMu.Unlock()
if useCache {
perm = f.permissions[permissionID]
if perm != nil {
return perm, false, nil
}
}
fs.Debugf(f, "Fetching permission %q", permissionID)
err = f.pacer.Call(func() (bool, error) {
perm, err = f.svc.Permissions.Get(fileID, permissionID).
Fields(permissionsFields).
SupportsAllDrives(true).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return nil, false, err
}
inherited = len(perm.PermissionDetails) > 0 && perm.PermissionDetails[0].Inherited
cleanPermission(perm)
// cache the permission
f.permissions[permissionID] = perm
return perm, inherited, err
}
// Set the permissions on the info
func (f *Fs) setPermissions(ctx context.Context, info *drive.File, permissions []*drive.Permission) (err error) {
errs := errcount.New()
for _, perm := range permissions {
if perm.Role == "owner" {
// ignore owner permissions - these are set with owner
continue
}
cleanPermissionForWrite(perm)
err := f.pacer.Call(func() (bool, error) {
_, err := f.svc.Permissions.Create(info.Id, perm).
SupportsAllDrives(true).
SendNotificationEmail(false).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
fs.Errorf(f, "Failed to set permission %s for %q: %v", perm.Role, perm.EmailAddress, err)
errs.Add(err)
}
}
err = errs.Err("failed to set permission")
if err != nil {
err = fserrors.NoRetryError(err)
}
return err
}
// Clean attributes from permissions which we can't write
func cleanPermissionForWrite(perm *drive.Permission) {
perm.Deleted = false
perm.DisplayName = ""
perm.Id = ""
perm.Kind = ""
perm.PermissionDetails = nil
perm.TeamDrivePermissionDetails = nil
}
// Clean and cache the permission if not already cached
func (f *Fs) cleanAndCachePermission(perm *drive.Permission) {
f.permissionsMu.Lock()
defer f.permissionsMu.Unlock()
cleanPermission(perm)
if _, found := f.permissions[perm.Id]; !found {
f.permissions[perm.Id] = perm
}
}
// Clean fields we don't need to keep from the permission
func cleanPermission(perm *drive.Permission) {
// DisplayName: Output only. The "pretty" name of the value of the
// permission. The following is a list of examples for each type of
// permission: * `user` - User's full name, as defined for their Google
// account, such as "Joe Smith." * `group` - Name of the Google Group,
// such as "The Company Administrators." * `domain` - String domain
// name, such as "thecompany.com." * `anyone` - No `displayName` is
// present.
perm.DisplayName = ""
// Kind: Output only. Identifies what kind of resource this is. Value:
// the fixed string "drive#permission".
perm.Kind = ""
// PermissionDetails: Output only. Details of whether the permissions on
// this shared drive item are inherited or directly on this item. This
// is an output-only field which is present only for shared drive items.
perm.PermissionDetails = nil
// PhotoLink: Output only. A link to the user's profile photo, if
// available.
perm.PhotoLink = ""
// TeamDrivePermissionDetails: Output only. Deprecated: Output only. Use
// `permissionDetails` instead.
perm.TeamDrivePermissionDetails = nil
}
// Fields we need to read from labels
var labelsFields = googleapi.Field(strings.Join([]string{
"*",
}, ","))
// getLabels returns labels for the fileID passed in
func (f *Fs) getLabels(ctx context.Context, fileID string) (labels []*drive.Label, err error) {
fs.Debugf(f, "Fetching labels for %q", fileID)
listLabels := f.svc.Files.ListLabels(fileID).
Fields(labelsFields).
Context(ctx)
for {
var info *drive.LabelList
err = f.pacer.Call(func() (bool, error) {
info, err = listLabels.Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}
labels = append(labels, info.Labels...)
if info.NextPageToken == "" {
break
}
listLabels.PageToken(info.NextPageToken)
}
for _, label := range labels {
cleanLabel(label)
}
return labels, nil
}
// Set the labels on the info
func (f *Fs) setLabels(ctx context.Context, info *drive.File, labels []*drive.Label) (err error) {
if len(labels) == 0 {
return nil
}
req := drive.ModifyLabelsRequest{}
for _, label := range labels {
req.LabelModifications = append(req.LabelModifications, &drive.LabelModification{
FieldModifications: labelFieldsToFieldModifications(label.Fields),
LabelId: label.Id,
})
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Files.ModifyLabels(info.Id, &req).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("failed to set labels: %w", err)
}
return nil
}
// Convert label fields into something which can set the fields
func labelFieldsToFieldModifications(fields map[string]drive.LabelField) (out []*drive.LabelFieldModification) {
for id, field := range fields {
var emails []string
for _, user := range field.User {
emails = append(emails, user.EmailAddress)
}
out = append(out, &drive.LabelFieldModification{
// FieldId: The ID of the field to be modified.
FieldId: id,
// SetDateValues: Replaces the value of a dateString Field with these
// new values. The string must be in the RFC 3339 full-date format:
// YYYY-MM-DD.
SetDateValues: field.DateString,
// SetIntegerValues: Replaces the value of an `integer` field with these
// new values.
SetIntegerValues: field.Integer,
// SetSelectionValues: Replaces a `selection` field with these new
// values.
SetSelectionValues: field.Selection,
// SetTextValues: Sets the value of a `text` field.
SetTextValues: field.Text,
// SetUserValues: Replaces a `user` field with these new values. The
// values must be valid email addresses.
SetUserValues: emails,
})
}
return out
}
// Clean fields we don't need to keep from the label
func cleanLabel(label *drive.Label) {
// Kind: This is always drive#label
label.Kind = ""
for name, field := range label.Fields {
// Kind: This is always drive#labelField.
field.Kind = ""
// Note the fields are copies so we need to write them
// back to the map
label.Fields[name] = field
}
}
// Parse the metadata from drive item
//
// It should return nil if there is no Metadata
func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err error) {
metadata := make(fs.Metadata, 16)
// Dump user metadata first as it overrides system metadata
maps.Copy(metadata, info.Properties)
// System metadata
metadata["copy-requires-writer-permission"] = fmt.Sprint(info.CopyRequiresWriterPermission)
metadata["writers-can-share"] = fmt.Sprint(info.WritersCanShare)
metadata["viewed-by-me"] = fmt.Sprint(info.ViewedByMe)
metadata["content-type"] = info.MimeType
// Owners: Output only. The owner of this file. Only certain legacy
// files may have more than one owner. This field isn't populated for
// items in shared drives.
if o.fs.opt.MetadataOwner.IsSet(rwRead) && len(info.Owners) > 0 {
user := info.Owners[0]
if len(info.Owners) > 1 {
fs.Logf(o, "Ignoring more than 1 owner")
}
if user != nil {
id := user.EmailAddress
if id == "" {
id = user.DisplayName
}
metadata["owner"] = id
}
}
if o.fs.opt.MetadataPermissions.IsSet(rwRead) {
// We only write permissions out if they are not inherited.
//
// On My Drives permissions seem to be attached to every item
// so they will always be written out.
//
// On Shared Drives only non-inherited permissions will be
// written out.
// To read the inherited permissions flag will mean we need to
// read the permissions for each object and the cache will be
// useless. However shared drives don't return permissions
// only permissionIds so will need to fetch them for each
// object. We use HasAugmentedPermissions to see if there are
// special permissions before fetching them to save transactions.
// HasAugmentedPermissions: Output only. Whether there are permissions
// directly on this file. This field is only populated for items in
// shared drives.
if o.fs.isTeamDrive && !info.HasAugmentedPermissions {
// Don't process permissions if there aren't any specifically set
fs.Debugf(o, "Ignoring %d permissions and %d permissionIds as is shared drive with hasAugmentedPermissions false", len(info.Permissions), len(info.PermissionIds))
info.Permissions = nil
info.PermissionIds = nil
}
// PermissionIds: Output only. List of permission IDs for users with
// access to this file.
//
// Only process these if we have no Permissions
if len(info.PermissionIds) > 0 && len(info.Permissions) == 0 {
info.Permissions = make([]*drive.Permission, 0, len(info.PermissionIds))
g, gCtx := errgroup.WithContext(ctx)
g.SetLimit(o.fs.ci.Checkers)
var mu sync.Mutex // protect the info.Permissions from concurrent writes
for _, permissionID := range info.PermissionIds {
g.Go(func() error {
// must fetch the team drive ones individually to check the inherited flag
perm, inherited, err := o.fs.getPermission(gCtx, actualID(info.Id), permissionID, !o.fs.isTeamDrive)
if err != nil {
return fmt.Errorf("failed to read permission: %w", err)
}
// Don't write inherited permissions out
if inherited {
return nil
}
// Don't write owner role out - these are covered by the owner metadata
if perm.Role == "owner" {
return nil
}
mu.Lock()
info.Permissions = append(info.Permissions, perm)
mu.Unlock()
return nil
})
}
err = g.Wait()
if err != nil {
return err
}
} else {
// Clean the fetched permissions
for _, perm := range info.Permissions {
o.fs.cleanAndCachePermission(perm)
}
}
// Permissions: Output only. The full list of permissions for the file.
// This is only available if the requesting user can share the file. Not
// populated for items in shared drives.
if len(info.Permissions) > 0 {
buf, err := json.Marshal(info.Permissions)
if err != nil {
return fmt.Errorf("failed to marshal permissions: %w", err)
}
metadata["permissions"] = string(buf)
}
// Permission propagation
// https://developers.google.com/drive/api/guides/manage-sharing#permission-propagation
// Leads me to believe that in non shared drives, permissions
// are added to each item when you set permissions for a
// folder whereas in shared drives they are inherited and
// placed on the item directly.
}
if info.FolderColorRgb != "" {
metadata["folder-color-rgb"] = info.FolderColorRgb
}
if info.Description != "" {
metadata["description"] = info.Description
}
metadata["starred"] = fmt.Sprint(info.Starred)
metadata["btime"] = info.CreatedTime
metadata["mtime"] = info.ModifiedTime
if o.fs.opt.MetadataLabels.IsSet(rwRead) {
// FIXME would be really nice if we knew if files had labels
// before listing but we need to know all possible label IDs
// to get it in the listing.
labels, err := o.fs.getLabels(ctx, actualID(info.Id))
if err != nil {
return fmt.Errorf("failed to fetch labels: %w", err)
}
buf, err := json.Marshal(labels)
if err != nil {
return fmt.Errorf("failed to marshal labels: %w", err)
}
metadata["labels"] = string(buf)
}
o.metadata = &metadata
return nil
}
// Set the owner on the info
func (f *Fs) setOwner(ctx context.Context, info *drive.File, owner string) (err error) {
perm := drive.Permission{
Role: "owner",
EmailAddress: owner,
// Type: The type of the grantee. Valid values are: * `user` * `group` *
// `domain` * `anyone` When creating a permission, if `type` is `user`
// or `group`, you must provide an `emailAddress` for the user or group.
// When `type` is `domain`, you must provide a `domain`. There isn't
// extra information required for an `anyone` type.
Type: "user",
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Permissions.Create(info.Id, &perm).
SupportsAllDrives(true).
TransferOwnership(true).
// SendNotificationEmail(false). - required apparently!
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("failed to set owner: %w", err)
}
return nil
}
// Call back to set metadata that can't be set on the upload/update
//
// The *drive.File passed in holds the current state of the drive.File
// and this should update it with any modifications.
type updateMetadataFn func(context.Context, *drive.File) error
// read the metadata from meta and write it into updateInfo
//
// update should be true if this is being used to create metadata for
// an update/PATCH call as the rules on what can be updated are
// slightly different there.
//
// It returns a callback which should be called to finish the updates
// after the data is uploaded.
func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update, isFolder bool) (callback updateMetadataFn, err error) {
callbackFns := []updateMetadataFn{}
callback = func(ctx context.Context, info *drive.File) error {
for _, fn := range callbackFns {
err := fn(ctx, info)
if err != nil {
return err
}
}
return nil
}
// merge metadata into request and user metadata
for k, v := range meta {
// parse a boolean from v and write into out
parseBool := func(out *bool) error {
b, err := strconv.ParseBool(v)
if err != nil {
return fmt.Errorf("can't parse metadata %q = %q: %w", k, v, err)
}
*out = b
return nil
}
switch k {
case "copy-requires-writer-permission":
if isFolder {
fs.Debugf(f, "Ignoring %s=%s as can't set on folders", k, v)
} else if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
return nil, err
}
case "writers-can-share":
if !f.isTeamDrive {
if err := parseBool(&updateInfo.WritersCanShare); err != nil {
return nil, err
}
} else {
fs.Debugf(f, "Ignoring %s=%s as can't set on shared drives", k, v)
}
case "viewed-by-me":
// Can't write this
case "content-type":
updateInfo.MimeType = v
case "owner":
if !f.opt.MetadataOwner.IsSet(rwWrite) {
continue
}
// Can't set Owner on upload so need to set afterwards
callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
err := f.setOwner(ctx, info, v)
if err != nil && f.opt.MetadataOwner.IsSet(rwFailOK) {
fs.Errorf(f, "Ignoring error as failok is set: %v", err)
return nil
}
return err
})
case "permissions":
if !f.opt.MetadataPermissions.IsSet(rwWrite) {
continue
}
var perms []*drive.Permission
err := json.Unmarshal([]byte(v), &perms)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal permissions: %w", err)
}
// Can't set Permissions on upload so need to set afterwards
callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
err := f.setPermissions(ctx, info, perms)
if err != nil && f.opt.MetadataPermissions.IsSet(rwFailOK) {
// We've already logged the permissions errors individually here
fs.Debugf(f, "Ignoring error as failok is set: %v", err)
return nil
}
return err
})
case "labels":
if !f.opt.MetadataLabels.IsSet(rwWrite) {
continue
}
var labels []*drive.Label
err := json.Unmarshal([]byte(v), &labels)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal labels: %w", err)
}
// Can't set Labels on upload so need to set afterwards
callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
err := f.setLabels(ctx, info, labels)
if err != nil && f.opt.MetadataLabels.IsSet(rwFailOK) {
fs.Errorf(f, "Ignoring error as failok is set: %v", err)
return nil
}
return err
})
case "folder-color-rgb":
updateInfo.FolderColorRgb = v
case "description":
updateInfo.Description = v
case "starred":
if err := parseBool(&updateInfo.Starred); err != nil {
return nil, err
}
case "btime":
if update {
fs.Debugf(f, "Skipping btime metadata as can't update it on an existing file: %v", v)
} else {
updateInfo.CreatedTime = v
}
case "mtime":
updateInfo.ModifiedTime = v
default:
if updateInfo.Properties == nil {
updateInfo.Properties = make(map[string]string, 1)
}
updateInfo.Properties[k] = v
}
}
return callback, nil
}
// Fetch metadata and update updateInfo if --metadata is in use
func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption, updateInfo *drive.File, update bool) (callback updateMetadataFn, err error) {
meta, err := fs.GetMetadataOptions(ctx, f, src, options)
if err != nil {
return nil, fmt.Errorf("failed to read metadata from source object: %w", err)
}
callback, err = f.updateMetadata(ctx, updateInfo, meta, update, false)
if err != nil {
return nil, fmt.Errorf("failed to update metadata from source object: %w", err)
}
return callback, nil
}
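To make the two-phase flow concrete, a minimal sketch of a caller (values illustrative, error handling elided; info stands for the *drive.File returned by the upload):

updateInfo := &drive.File{}
meta := fs.Metadata{
	"description": "Contract for signing",
	"starred":     "true",
	"owner":       "user@example.com", // applied by the callback, not the upload itself
}
callback, err := f.updateMetadata(ctx, updateInfo, meta, false, false)
// ... upload with updateInfo, obtaining info ...
err = callback(ctx, info)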


@@ -177,7 +177,10 @@ func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
 			if start >= rx.ContentLength {
 				break
 			}
-			reqSize = min(rx.ContentLength-start, int64(rx.f.opt.ChunkSize))
+			reqSize = rx.ContentLength - start
+			if reqSize >= int64(rx.f.opt.ChunkSize) {
+				reqSize = int64(rx.f.opt.ChunkSize)
+			}
 			chunk = readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
 		} else {
 			// If size unknown read into buffer


@@ -8,22 +8,130 @@ package dropbox

 import (
 	"context"
+	"errors"
 	"fmt"
+	"sync"
+	"time"

+	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/async"
 	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/fserrors"
+	"github.com/rclone/rclone/lib/atexit"
 )

+const (
+	maxBatchSize          = 1000                   // max size the batch can be
+	defaultTimeoutSync    = 500 * time.Millisecond // kick off the batch if nothing added for this long (sync)
+	defaultTimeoutAsync   = 10 * time.Second       // kick off the batch if nothing added for this long (async)
+	defaultBatchSizeAsync = 100                    // default batch size if async
+)
+
+// batcher holds info about the current items waiting for upload
+type batcher struct {
+	f        *Fs                 // Fs this batch is part of
+	mode     string              // configured batch mode
+	size     int                 // maximum size for batch
+	timeout  time.Duration       // idle timeout for batch
+	async    bool                // whether we are using async batching
+	in       chan batcherRequest // incoming items to batch
+	closed   chan struct{}       // close to indicate batcher shut down
+	atexit   atexit.FnHandle     // atexit handle
+	shutOnce sync.Once           // make sure we shutdown once only
+	wg       sync.WaitGroup      // wait for shutdown
+}
+
+// batcherRequest holds an incoming request with a place for a reply
+type batcherRequest struct {
+	commitInfo *files.UploadSessionFinishArg
+	result     chan<- batcherResponse
+}
+
+// Return true if batcherRequest is the quit request
+func (br *batcherRequest) isQuit() bool {
+	return br.commitInfo == nil
+}
+
+// Send this to get the engine to quit
+var quitRequest = batcherRequest{}
+
+// batcherResponse holds a response to be delivered to clients waiting
+// for a batch to complete.
+type batcherResponse struct {
+	err   error
+	entry *files.FileMetadata
+}
+
+// newBatcher creates a new batcher structure
+func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.Duration) (*batcher, error) {
+	// fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout)
+	if size > maxBatchSize || size < 0 {
+		return nil, fmt.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
+	}
+
+	async := false
+
+	switch mode {
+	case "sync":
+		if size <= 0 {
+			ci := fs.GetConfig(ctx)
+			size = ci.Transfers
+		}
+		if timeout <= 0 {
+			timeout = defaultTimeoutSync
+		}
+	case "async":
+		if size <= 0 {
+			size = defaultBatchSizeAsync
+		}
+		if timeout <= 0 {
+			timeout = defaultTimeoutAsync
+		}
+		async = true
+	case "off":
+		size = 0
+	default:
+		return nil, fmt.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
+	}
+
+	b := &batcher{
+		f:       f,
+		mode:    mode,
+		size:    size,
+		timeout: timeout,
+		async:   async,
+		in:      make(chan batcherRequest, size),
+		closed:  make(chan struct{}),
+	}
+	if b.Batching() {
+		b.atexit = atexit.Register(b.Shutdown)
+		b.wg.Add(1)
+		go b.commitLoop(context.Background())
+	}
+	return b, nil
+}
+
+// Batching returns true if batching is active
+func (b *batcher) Batching() bool {
+	return b.size > 0
+}
+
 // finishBatch commits the batch, returning a batch status to poll or maybe complete
-func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) {
+func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) {
 	var arg = &files.UploadSessionFinishBatchArg{
 		Entries: items,
 	}
-	err = f.pacer.Call(func() (bool, error) {
-		complete, err = f.srv.UploadSessionFinishBatchV2(arg)
-		if retry, err := shouldRetryExclude(ctx, err); !retry {
-			return retry, err
+	err = b.f.pacer.Call(func() (bool, error) {
+		complete, err = b.f.srv.UploadSessionFinishBatchV2(arg)
+		// If error is insufficient space then don't retry
+		if e, ok := err.(files.UploadSessionFinishAPIError); ok {
+			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
+				err = fserrors.NoRetryError(err)
+				return false, err
+			}
 		}
-		// after the first chunk is uploaded, we retry everything except the excluded errors
+		// after the first chunk is uploaded, we retry everything
 		return err != nil, err
 	})
 	if err != nil {
@@ -32,10 +140,66 @@ func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) {
 	return complete, nil
 }

-// Called by the batcher to commit a batch
-func (f *Fs) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []*files.FileMetadata, errors []error) (err error) {
+// finishBatchJobStatus waits for the batch to complete returning completed entries
+func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *files.UploadSessionFinishBatchLaunch) (complete *files.UploadSessionFinishBatchResult, err error) {
+	if launchBatchStatus.AsyncJobId == "" {
+		return nil, errors.New("wait for batch completion: empty job ID")
+	}
+	var batchStatus *files.UploadSessionFinishBatchJobStatus
+	sleepTime := 100 * time.Millisecond
+	const maxSleepTime = 1 * time.Second
+	startTime := time.Now()
+	try := 1
+	for {
+		remaining := time.Duration(b.f.opt.BatchCommitTimeout) - time.Since(startTime)
+		if remaining < 0 {
+			break
+		}
+		err = b.f.pacer.Call(func() (bool, error) {
+			batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
+				AsyncJobId: launchBatchStatus.AsyncJobId,
+			})
+			return shouldRetry(ctx, err)
+		})
+		if err != nil {
+			fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d remaining %v", sleepTime, err, try, remaining)
+		} else {
+			if batchStatus.Tag == "complete" {
+				fs.Debugf(b.f, "Upload batch completed in %v", time.Since(startTime))
+				return batchStatus.Complete, nil
+			}
+			fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d remaining %v", sleepTime, batchStatus.Tag, try, remaining)
+		}
+		time.Sleep(sleepTime)
+		sleepTime *= 2
+		if sleepTime > maxSleepTime {
+			sleepTime = maxSleepTime
+		}
+		try++
+	}
+	if err == nil {
+		err = errors.New("batch didn't complete")
+	}
+	return nil, fmt.Errorf("wait for batch failed after %d tries in %v: %w", try, time.Since(startTime), err)
+}
+
+// commit a batch
+func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
+	// If commit fails then signal clients if sync
+	var signalled = b.async
+	defer func() {
+		if err != nil && signalled {
+			// Signal to clients that there was an error
+			for _, result := range results {
+				result <- batcherResponse{err: err}
+			}
+		}
+	}()
+	desc := fmt.Sprintf("%s batch length %d starting with: %s", b.mode, len(items), items[0].Commit.Path)
+	fs.Debugf(b.f, "Committing %s", desc)
+
 	// finalise the batch getting either a result or a job id to poll
-	complete, err := f.finishBatch(ctx, items)
+	complete, err := b.finishBatch(ctx, items)
 	if err != nil {
 		return err
 	}
@@ -46,13 +210,19 @@ func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
 		return fmt.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
 	}

-	// Format results for return
+	// Report results to clients
+	var (
+		errorTag   = ""
+		errorCount = 0
+	)
 	for i := range results {
 		item := entries[i]
+		resp := batcherResponse{}
 		if item.Tag == "success" {
-			results[i] = item.Success
+			resp.entry = item.Success
 		} else {
-			errorTag := item.Tag
+			errorCount++
+			errorTag = item.Tag
 			if item.Failure != nil {
 				errorTag = item.Failure.Tag
 				if item.Failure.LookupFailed != nil {
@@ -65,9 +235,112 @@ func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
 					errorTag += "/" + item.Failure.PropertiesError.Tag
 				}
 			}
-			errors[i] = fmt.Errorf("upload failed: %s", errorTag)
+			resp.err = fmt.Errorf("batch upload failed: %s", errorTag)
 		}
+		if !b.async {
+			results[i] <- resp
+		}
 	}
+	// Show signalled so no need to report error to clients from now on
+	signalled = true
+
+	// Report an error if any failed in the batch
+	if errorTag != "" {
+		return fmt.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
+	}
+
+	fs.Debugf(b.f, "Committed %s", desc)
 	return nil
 }
+
+// commitLoop runs the commit engine in the background
+func (b *batcher) commitLoop(ctx context.Context) {
+	var (
+		items     []*files.UploadSessionFinishArg // current batch of uncommitted files
+		results   []chan<- batcherResponse        // current batch of clients awaiting results
+		idleTimer = time.NewTimer(b.timeout)
+		commit    = func() {
+			err := b.commitBatch(ctx, items, results)
+			if err != nil {
+				fs.Errorf(b.f, "%s batch commit: failed to commit batch length %d: %v", b.mode, len(items), err)
+			}
+			items, results = nil, nil
+		}
+	)
+	defer b.wg.Done()
+	defer idleTimer.Stop()
+	idleTimer.Stop()
+
+outer:
+	for {
+		select {
+		case req := <-b.in:
+			if req.isQuit() {
+				break outer
+			}
+			items = append(items, req.commitInfo)
+			results = append(results, req.result)
+			idleTimer.Stop()
+			if len(items) >= b.size {
+				commit()
+			} else {
+				idleTimer.Reset(b.timeout)
+			}
+		case <-idleTimer.C:
+			if len(items) > 0 {
+				fs.Debugf(b.f, "Batch idle for %v so committing", b.timeout)
+				commit()
+			}
+		}
+	}
+	// commit any remaining items
+	if len(items) > 0 {
+		commit()
+	}
+}
+
+// Shutdown finishes any pending batches then shuts everything down
+//
+// Can be called from atexit handler
+func (b *batcher) Shutdown() {
+	if !b.Batching() {
+		return
+	}
+	b.shutOnce.Do(func() {
+		atexit.Unregister(b.atexit)
+		fs.Infof(b.f, "Committing uploads - please wait...")
+		// show that batcher is shutting down
+		close(b.closed)
+		// quit the commitLoop by sending a quitRequest message
+		//
+		// Note that we don't close b.in because that will
+		// cause a write to a closed channel in Commit when we
+		// are exiting due to a signal.
+		b.in <- quitRequest
+		b.wg.Wait()
+	})
+}
+
+// Commit commits the file using a batch call, first adding it to the
+// batch and then waiting for the batch to complete in a synchronous
+// way if async is not set.
+func (b *batcher) Commit(ctx context.Context, commitInfo *files.UploadSessionFinishArg) (entry *files.FileMetadata, err error) {
+	select {
+	case <-b.closed:
+		return nil, fserrors.FatalError(errors.New("batcher is shutting down"))
+	default:
+	}
+	fs.Debugf(b.f, "Adding %q to batch", commitInfo.Commit.Path)
+	resp := make(chan batcherResponse, 1)
+	b.in <- batcherRequest{
+		commitInfo: commitInfo,
+		result:     resp,
+	}
+	// If running async then don't wait for the result
+	if b.async {
+		return nil, nil
+	}
+	result := <-resp
+	return result.entry, result.err
+}
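The commitLoop/Commit/Shutdown machinery above is an instance of a general size-or-idle batching pattern. A self-contained sketch of just that pattern, independent of rclone (all names here are illustrative):

package main

import (
	"fmt"
	"sync"
	"time"
)

type request struct {
	payload string
	result  chan error // buffered so commit never blocks
}

// batchLoop accumulates requests and commits when the batch is full or
// when no request has arrived for timeout (cf. commitLoop above).
func batchLoop(in <-chan request, size int, timeout time.Duration) {
	var batch []request
	idle := time.NewTimer(timeout)
	idle.Stop()
	commit := func() {
		fmt.Printf("committing %d item(s)\n", len(batch))
		for _, r := range batch {
			r.result <- nil // signal each waiting client
		}
		batch = nil
	}
	for {
		select {
		case req, ok := <-in:
			if !ok { // channel closed: flush and exit
				if len(batch) > 0 {
					commit()
				}
				return
			}
			batch = append(batch, req)
			idle.Stop()
			if len(batch) >= size {
				commit()
			} else {
				idle.Reset(timeout)
			}
		case <-idle.C:
			if len(batch) > 0 {
				commit()
			}
		}
	}
}

func main() {
	in := make(chan request)
	done := make(chan struct{})
	go func() {
		batchLoop(in, 3, 100*time.Millisecond)
		close(done)
	}()
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(i int) { // concurrent clients, like parallel uploads
			defer wg.Done()
			r := request{payload: fmt.Sprint(i), result: make(chan error, 1)}
			in <- r
			<-r.result // block until our batch commits ("sync" mode)
		}(i)
	}
	wg.Wait()
	close(in)
	<-done
}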


@@ -55,7 +55,10 @@ func (d *digest) Write(p []byte) (n int, err error) {
 	n = len(p)
 	for len(p) > 0 {
 		d.writtenMore = true
-		toWrite := min(bytesPerBlock-d.n, len(p))
+		toWrite := bytesPerBlock - d.n
+		if toWrite > len(p) {
+			toWrite = len(p)
+		}
 		_, err = d.blockHash.Write(p[:toWrite])
 		if err != nil {
 			panic(hashReturnedError)


@@ -11,7 +11,7 @@ import (
 func testChunk(t *testing.T, chunk int) {
 	data := make([]byte, chunk)
-	for i := range chunk {
+	for i := 0; i < chunk; i++ {
 		data[i] = 'A'
 	}
 	for _, test := range []struct {

File diff suppressed because it is too large.


@@ -1,16 +1,9 @@
 package dropbox

 import (
-	"context"
-	"io"
-	"strings"
 	"testing"

-	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox"
-	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
-	"github.com/rclone/rclone/fstest/fstests"
 	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
 )

 func TestInternalCheckPathLength(t *testing.T) {
@@ -49,54 +42,3 @@ func TestInternalCheckPathLength(t *testing.T) {
 		assert.Equal(t, test.ok, err == nil, test.in)
 	}
 }
-
-func (f *Fs) importPaperForTest(t *testing.T) {
-	content := `# test doc
-
-Lorem ipsum __dolor__ sit amet
-[link](http://google.com)
-`
-
-	arg := files.PaperCreateArg{
-		Path:         f.slashRootSlash + "export.paper",
-		ImportFormat: &files.ImportFormat{Tagged: dropbox.Tagged{Tag: files.ImportFormatMarkdown}},
-	}
-
-	var err error
-	err = f.pacer.Call(func() (bool, error) {
-		reader := strings.NewReader(content)
-		_, err = f.srv.PaperCreate(&arg, reader)
-		return shouldRetry(context.Background(), err)
-	})
-	require.NoError(t, err)
-}
-
-func (f *Fs) InternalTestPaperExport(t *testing.T) {
-	ctx := context.Background()
-	f.importPaperForTest(t)
-
-	f.exportExts = []exportExtension{"html"}
-
-	obj, err := f.NewObject(ctx, "export.html")
-	require.NoError(t, err)
-
-	rc, err := obj.Open(ctx)
-	require.NoError(t, err)
-	defer func() { require.NoError(t, rc.Close()) }()
-
-	buf, err := io.ReadAll(rc)
-	require.NoError(t, err)
-	text := string(buf)
-
-	for _, excerpt := range []string{
-		"Lorem ipsum",
-		"<b>dolor</b>",
-		`href="http://google.com"`,
-	} {
-		require.Contains(t, text, excerpt)
-	}
-}
-
-func (f *Fs) InternalTest(t *testing.T) {
-	t.Run("PaperExport", f.InternalTestPaperExport)
-}
-
-var _ fstests.InternalTester = (*Fs)(nil)

Some files were not shown because too many files have changed in this diff.