mirror of https://github.com/rclone/rclone.git synced 2025-12-12 14:23:24 +00:00

Compare commits


2 Commits

Author SHA1 Message Date
Nick Craig-Wood
bbb31d6acf lib/oauthutil: allow the browser opening function to be overridden 2024-10-24 13:12:11 +01:00
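The commit above exposes a hook for how the authorization URL gets opened. A minimal sketch of that kind of override point is shown below; the names here are illustrative assumptions, not rclone's actual lib/oauthutil API.

```go
// Package example sketches an overridable browser-opening hook.
package example

import "fmt"

// OpenBrowser is called with the authorization URL. The default just prints
// the URL for the user to open manually; a caller embedding the library (or a
// test) can replace it, e.g. to capture the URL or launch a specific browser.
var OpenBrowser = func(authURL string) error {
	_, err := fmt.Printf("Open this URL in your browser: %s\n", authURL)
	return err
}

// startAuthFlow uses whatever OpenBrowser currently points at.
func startAuthFlow(authURL string) error {
	return OpenBrowser(authURL)
}
```

An embedding application would then set `example.OpenBrowser = myOpener` (or the equivalent in the real package) before starting the auth flow.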
Nick Craig-Wood
7c705e0efa oauthutil: shut down the oauth webserver when the context closes
This patch ensures that we pass the context from the CreateBackend
call all the way to the creation of the oauth web server.

This means that when the command has finished, the webserver will
definitely be shut down, and if the user abandons it (e.g. via an rc
call timing out or being cancelled) then it will be shut down too.
2024-10-24 12:51:55 +01:00
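The shutdown behaviour described in this commit message can be sketched roughly as follows; this illustrates the general pattern of tying an HTTP server's lifetime to a context, not rclone's actual oauthutil code.

```go
// Package example shows an HTTP server that is shut down when the caller's
// context closes, so an abandoned OAuth flow cannot leak a listener.
package example

import (
	"context"
	"errors"
	"net/http"
	"time"
)

func serveAuthRedirect(ctx context.Context, addr string, handler http.Handler) error {
	srv := &http.Server{Addr: addr, Handler: handler}

	// When the context passed down from the backend creation call is
	// cancelled (command finished, rc call timed out, etc.), shut the
	// server down, allowing a short grace period for in-flight requests.
	go func() {
		<-ctx.Done()
		shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		_ = srv.Shutdown(shutdownCtx)
	}()

	// ListenAndServe returns http.ErrServerClosed after a clean Shutdown.
	if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
		return err
	}
	return nil
}
```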
993 changed files with 77120 additions and 182430 deletions

View File

@@ -23,18 +23,15 @@ jobs:
build: build:
if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
timeout-minutes: 60 timeout-minutes: 60
defaults:
run:
shell: bash
strategy: strategy:
fail-fast: false fail-fast: false
matrix: matrix:
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.24'] job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.21', 'go1.22']
include: include:
- job_name: linux - job_name: linux
os: ubuntu-latest os: ubuntu-latest
go: '>=1.25.0-rc.1' go: '>=1.23.0-rc.1'
gotags: cmount gotags: cmount
build_flags: '-include "^linux/"' build_flags: '-include "^linux/"'
check: true check: true
@@ -45,14 +42,14 @@ jobs:
- job_name: linux_386 - job_name: linux_386
os: ubuntu-latest os: ubuntu-latest
go: '>=1.25.0-rc.1' go: '>=1.23.0-rc.1'
goarch: 386 goarch: 386
gotags: cmount gotags: cmount
quicktest: true quicktest: true
- job_name: mac_amd64 - job_name: mac_amd64
os: macos-latest os: macos-latest
go: '>=1.25.0-rc.1' go: '>=1.23.0-rc.1'
gotags: 'cmount' gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo' build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true quicktest: true
@@ -61,14 +58,14 @@ jobs:
- job_name: mac_arm64 - job_name: mac_arm64
os: macos-latest os: macos-latest
go: '>=1.25.0-rc.1' go: '>=1.23.0-rc.1'
gotags: 'cmount' gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib' build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true deploy: true
- job_name: windows - job_name: windows
os: windows-latest os: windows-latest
go: '>=1.25.0-rc.1' go: '>=1.23.0-rc.1'
gotags: cmount gotags: cmount
cgo: '0' cgo: '0'
build_flags: '-include "^windows/"' build_flags: '-include "^windows/"'
@@ -78,14 +75,20 @@ jobs:
- job_name: other_os - job_name: other_os
os: ubuntu-latest os: ubuntu-latest
go: '>=1.25.0-rc.1' go: '>=1.23.0-rc.1'
build_flags: '-exclude "^(windows/|darwin/|linux/)"' build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true compile_all: true
deploy: true deploy: true
- job_name: go1.24 - job_name: go1.21
os: ubuntu-latest os: ubuntu-latest
go: '1.24' go: '1.21'
quicktest: true
racequicktest: true
- job_name: go1.22
os: ubuntu-latest
go: '1.22'
quicktest: true quicktest: true
racequicktest: true racequicktest: true
@@ -95,17 +98,18 @@ jobs:
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v6 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Install Go - name: Install Go
uses: actions/setup-go@v6 uses: actions/setup-go@v5
with: with:
go-version: ${{ matrix.go }} go-version: ${{ matrix.go }}
check-latest: true check-latest: true
- name: Set environment variables - name: Set environment variables
shell: bash
run: | run: |
echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
@@ -114,15 +118,16 @@ jobs:
if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi
- name: Install Libraries on Linux - name: Install Libraries on Linux
shell: bash
run: | run: |
sudo modprobe fuse sudo modprobe fuse
sudo chmod 666 /dev/fuse sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf sudo chown root:$USER /etc/fuse.conf
sudo apt-get update sudo apt-get install fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
sudo apt-get install -y fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
if: matrix.os == 'ubuntu-latest' if: matrix.os == 'ubuntu-latest'
- name: Install Libraries on macOS - name: Install Libraries on macOS
shell: bash
run: | run: |
# https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788 # https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788
# https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008 # https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008
@@ -151,6 +156,7 @@ jobs:
if: matrix.os == 'windows-latest' if: matrix.os == 'windows-latest'
- name: Print Go version and environment - name: Print Go version and environment
shell: bash
run: | run: |
printf "Using go at: $(which go)\n" printf "Using go at: $(which go)\n"
printf "Go version: $(go version)\n" printf "Go version: $(go version)\n"
@@ -162,24 +168,29 @@ jobs:
env env
- name: Build rclone - name: Build rclone
shell: bash
run: | run: |
make make
- name: Rclone version - name: Rclone version
shell: bash
run: | run: |
rclone version rclone version
- name: Run tests - name: Run tests
shell: bash
run: | run: |
make quicktest make quicktest
if: matrix.quicktest if: matrix.quicktest
- name: Race test - name: Race test
shell: bash
run: | run: |
make racequicktest make racequicktest
if: matrix.racequicktest if: matrix.racequicktest
- name: Run librclone tests - name: Run librclone tests
shell: bash
run: | run: |
make -C librclone/ctest test make -C librclone/ctest test
make -C librclone/ctest clean make -C librclone/ctest clean
@@ -187,12 +198,14 @@ jobs:
if: matrix.librclonetest if: matrix.librclonetest
- name: Compile all architectures test - name: Compile all architectures test
shell: bash
run: | run: |
make make
make compile_all make compile_all
if: matrix.compile_all if: matrix.compile_all
- name: Deploy built binaries - name: Deploy built binaries
shell: bash
run: | run: |
if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi
make ci_beta make ci_beta
@@ -211,25 +224,24 @@ jobs:
steps: steps:
- name: Get runner parameters - name: Get runner parameters
id: get-runner-parameters id: get-runner-parameters
shell: bash
run: | run: |
echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT
echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT
- name: Checkout - name: Checkout
uses: actions/checkout@v6 uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install Go - name: Install Go
id: setup-go id: setup-go
uses: actions/setup-go@v6 uses: actions/setup-go@v5
with: with:
go-version: '>=1.24.0-rc.1' go-version: '>=1.23.0-rc.1'
check-latest: true check-latest: true
cache: false cache: false
- name: Cache - name: Cache
uses: actions/cache@v5 uses: actions/cache@v4
with: with:
path: | path: |
~/go/pkg/mod ~/go/pkg/mod
@@ -239,13 +251,13 @@ jobs:
restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}- restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-
- name: Code quality test (Linux) - name: Code quality test (Linux)
uses: golangci/golangci-lint-action@v9 uses: golangci/golangci-lint-action@v6
with: with:
version: latest version: latest
skip-cache: true skip-cache: true
- name: Code quality test (Windows) - name: Code quality test (Windows)
uses: golangci/golangci-lint-action@v9 uses: golangci/golangci-lint-action@v6
env: env:
GOOS: "windows" GOOS: "windows"
with: with:
@@ -253,7 +265,7 @@ jobs:
skip-cache: true skip-cache: true
- name: Code quality test (macOS) - name: Code quality test (macOS)
uses: golangci/golangci-lint-action@v9 uses: golangci/golangci-lint-action@v6
env: env:
GOOS: "darwin" GOOS: "darwin"
with: with:
@@ -261,7 +273,7 @@ jobs:
skip-cache: true skip-cache: true
- name: Code quality test (FreeBSD) - name: Code quality test (FreeBSD)
uses: golangci/golangci-lint-action@v9 uses: golangci/golangci-lint-action@v6
env: env:
GOOS: "freebsd" GOOS: "freebsd"
with: with:
@@ -269,7 +281,7 @@ jobs:
skip-cache: true skip-cache: true
- name: Code quality test (OpenBSD) - name: Code quality test (OpenBSD)
uses: golangci/golangci-lint-action@v9 uses: golangci/golangci-lint-action@v6
env: env:
GOOS: "openbsd" GOOS: "openbsd"
with: with:
@@ -282,23 +294,6 @@ jobs:
- name: Scan for vulnerabilities - name: Scan for vulnerabilities
run: govulncheck ./... run: govulncheck ./...
- name: Check Markdown format
uses: DavidAnson/markdownlint-cli2-action@v20
with:
globs: |
CONTRIBUTING.md
MAINTAINERS.md
README.md
RELEASE.md
CODE_OF_CONDUCT.md
librclone\README.md
backend\s3\README.md
docs/content/{_index,authors,bugs,changelog,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md
- name: Scan edits of autogenerated files
run: bin/check_autogenerated_edits.py 'origin/${{ github.base_ref }}'
if: github.event_name == 'pull_request'
android: android:
if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
timeout-minutes: 30 timeout-minutes: 30
@@ -307,17 +302,18 @@ jobs:
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v6 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
# Upgrade together with NDK version # Upgrade together with NDK version
- name: Set up Go - name: Set up Go
uses: actions/setup-go@v6 uses: actions/setup-go@v5
with: with:
go-version: '>=1.25.0-rc.1' go-version: '>=1.23.0-rc.1'
- name: Set global environment variables - name: Set global environment variables
shell: bash
run: | run: |
echo "VERSION=$(make version)" >> $GITHUB_ENV echo "VERSION=$(make version)" >> $GITHUB_ENV
@@ -336,6 +332,7 @@ jobs:
run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
- name: arm-v7a Set environment variables - name: arm-v7a Set environment variables
shell: bash
run: | run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
@@ -349,6 +346,7 @@ jobs:
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a . run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a .
- name: arm64-v8a Set environment variables - name: arm64-v8a Set environment variables
shell: bash
run: | run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
@@ -361,6 +359,7 @@ jobs:
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a . run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .
- name: x86 Set environment variables - name: x86 Set environment variables
shell: bash
run: | run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
@@ -373,6 +372,7 @@ jobs:
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 . run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 .
- name: x64 Set environment variables - name: x64 Set environment variables
shell: bash
run: | run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV

View File

@@ -0,0 +1,77 @@
name: Docker beta build
on:
push:
branches:
- master
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Free some space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
with:
images: ghcr.io/${{ github.repository }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
# This is the user that triggered the Workflow. In this case, it will
# either be the user who created the Release or manually triggered
# the workflow_dispatch.
username: ${{ github.actor }}
# `secrets.GITHUB_TOKEN` is a secret that's automatically generated by
# GitHub Actions at the start of a workflow run to identify the job.
# This is used to authenticate against GitHub Container Registry.
# See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
# for more detailed information.
password: ${{ secrets.GITHUB_TOKEN }}
- name: Show disk usage
shell: bash
run: |
df -h .
- name: Build and publish image
uses: docker/build-push-action@v6
with:
file: Dockerfile
context: .
push: true # push the image to ghcr
tags: |
ghcr.io/rclone/rclone:beta
rclone/rclone:beta
labels: ${{ steps.meta.outputs.labels }}
platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
cache-from: type=gha, scope=${{ github.workflow }}
cache-to: type=gha, mode=max, scope=${{ github.workflow }}
provenance: false
# Eventually the cache will need to be cleared if builds become more frequent than once a week
# https://github.com/docker/build-push-action/issues/252
- name: Show disk usage
shell: bash
run: |
df -h .

View File

@@ -1,294 +0,0 @@
---
# Github Actions release for rclone
# -*- compile-command: "yamllint -f parsable build_publish_docker_image.yml" -*-
name: Build & Push Docker Images
# Trigger the workflow on push or pull request
on:
push:
branches:
- '**'
tags:
- '**'
workflow_dispatch:
inputs:
manual:
description: Manual run (bypass default conditions)
type: boolean
default: true
jobs:
build-image:
if: inputs.manual || (github.repository == 'rclone/rclone' && github.event_name != 'pull_request')
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
include:
- platform: linux/amd64
runs-on: ubuntu-24.04
- platform: linux/386
runs-on: ubuntu-24.04
- platform: linux/arm64
runs-on: ubuntu-24.04-arm
- platform: linux/arm/v7
runs-on: ubuntu-24.04-arm
- platform: linux/arm/v6
runs-on: ubuntu-24.04-arm
name: Build Docker Image for ${{ matrix.platform }}
runs-on: ${{ matrix.runs-on }}
steps:
- name: Free Space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout Repository
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Set REPO_NAME Variable
run: |
echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}
- name: Set PLATFORM Variable
run: |
platform=${{ matrix.platform }}
echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV
- name: Set CACHE_NAME Variable
shell: python
run: |
import os, re
def slugify(input_string, max_length=63):
slug = input_string.lower()
slug = re.sub(r'[^a-z0-9 -]', ' ', slug)
slug = slug.strip()
slug = re.sub(r'\s+', '-', slug)
slug = re.sub(r'-+', '-', slug)
slug = slug[:max_length]
slug = re.sub(r'[-]+$', '', slug)
return slug
ref_name_slug = "cache"
if os.environ.get("GITHUB_REF_NAME") and os.environ['GITHUB_EVENT_NAME'] == "pull_request":
ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME'])
with open(os.environ['GITHUB_ENV'], 'a') as env:
env.write(f"CACHE_NAME={ref_name_slug}\n")
- name: Get ImageOS
# There's no way around this, because "ImageOS" is only available to
# processes, but the setup-go action uses it in its key.
id: imageos
uses: actions/github-script@v8
with:
result-encoding: string
script: |
return process.env.ImageOS
- name: Extract Metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
env:
DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,manifest-descriptor # Important for digest annotation (used by Github packages)
with:
images: |
ghcr.io/${{ env.REPO_NAME }}
labels: |
org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
org.opencontainers.image.vendor=${{ github.repository_owner }}
org.opencontainers.image.authors=rclone <https://github.com/rclone>
org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
org.opencontainers.image.revision=${{ github.sha }}
tags: |
type=sha
type=ref,event=pr
type=ref,event=branch
type=semver,pattern={{version}}
type=semver,pattern={{major}}
type=semver,pattern={{major}}.{{minor}}
type=raw,value=beta,enable={{is_default_branch}}
- name: Setup QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Load Go Build Cache for Docker
id: go-cache
uses: actions/cache@v5
with:
key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
# Cache only the go builds, the module download is cached via the docker layer caching
path: |
go-build-cache
- name: Inject Go Build Cache into Docker
uses: reproducible-containers/buildkit-cache-dance@v3
with:
cache-map: |
{
"go-build-cache": "/root/.cache/go-build"
}
skip-extraction: ${{ steps.go-cache.outputs.cache-hit }}
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
# This is the user that triggered the Workflow. In this case, it will
# either be the user who created the Release or manually triggered
# the workflow_dispatch.
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and Publish Image Digest
id: build
uses: docker/build-push-action@v6
with:
file: Dockerfile
context: .
provenance: false
# don't specify 'tags' here (error "get can't push tagged ref by digest")
# tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
annotations: ${{ steps.meta.outputs.annotations }}
platforms: ${{ matrix.platform }}
outputs: |
type=image,name=ghcr.io/${{ env.REPO_NAME }},push-by-digest=true,name-canonical=true,push=true
cache-from: |
type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
cache-to: |
type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }},image-manifest=true,mode=max,compression=zstd
- name: Export Image Digest
run: |
mkdir -p /tmp/digests
digest="${{ steps.build.outputs.digest }}"
touch "/tmp/digests/${digest#sha256:}"
- name: Upload Image Digest
uses: actions/upload-artifact@v5
with:
name: digests-${{ env.PLATFORM }}
path: /tmp/digests/*
retention-days: 1
if-no-files-found: error
merge-image:
name: Merge & Push Final Docker Image
runs-on: ubuntu-24.04
needs:
- build-image
steps:
- name: Download Image Digests
uses: actions/download-artifact@v6
with:
path: /tmp/digests
pattern: digests-*
merge-multiple: true
- name: Set REPO_NAME Variable
run: |
echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}
- name: Extract Metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
env:
DOCKER_METADATA_ANNOTATIONS_LEVELS: index
with:
images: |
${{ env.REPO_NAME }}
ghcr.io/${{ env.REPO_NAME }}
labels: |
org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
org.opencontainers.image.vendor=${{ github.repository_owner }}
org.opencontainers.image.authors=rclone <https://github.com/rclone>
org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
org.opencontainers.image.revision=${{ github.sha }}
tags: |
type=sha
type=ref,event=pr
type=ref,event=branch
type=semver,pattern={{version}}
type=semver,pattern={{major}}
type=semver,pattern={{major}}.{{minor}}
type=raw,value=beta,enable={{is_default_branch}}
- name: Extract Tags
shell: python
run: |
import json, os
metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
metadata = json.loads(metadata_json)
tags = [f"--tag '{tag}'" for tag in metadata["tags"]]
tags_string = " ".join(tags)
with open(os.environ['GITHUB_ENV'], 'a') as env:
env.write(f"TAGS={tags_string}\n")
- name: Extract Annotations
shell: python
run: |
import json, os
metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
metadata = json.loads(metadata_json)
annotations = [f"--annotation '{annotation}'" for annotation in metadata["annotations"]]
annotations_string = " ".join(annotations)
with open(os.environ['GITHUB_ENV'], 'a') as env:
env.write(f"ANNOTATIONS={annotations_string}\n")
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
# This is the user that triggered the Workflow. In this case, it will
# either be the user who created the Release or manually triggered
# the workflow_dispatch.
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Create & Push Manifest List
working-directory: /tmp/digests
run: |
docker buildx imagetools create \
${{ env.TAGS }} \
${{ env.ANNOTATIONS }} \
$(printf 'ghcr.io/${{ env.REPO_NAME }}@sha256:%s ' *)
- name: Inspect and Run Multi-Platform Image
run: |
docker buildx imagetools inspect --raw ${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
docker buildx imagetools inspect --raw ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
docker run --rm ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }} version

View File

@@ -1,49 +0,0 @@
---
# Github Actions release for rclone
# -*- compile-command: "yamllint -f parsable build_publish_docker_plugin.yml" -*-
name: Release Build for Docker Plugin
on:
release:
types: [published]
workflow_dispatch:
inputs:
manual:
description: Manual run (bypass default conditions)
type: boolean
default: true
jobs:
build_docker_volume_plugin:
if: inputs.manual || github.repository == 'rclone/rclone'
name: Build docker plugin job
runs-on: ubuntu-latest
steps:
- name: Free some space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Build and publish docker plugin
shell: bash
run: |
VER=${GITHUB_REF#refs/tags/}
PLUGIN_USER=rclone
docker login --username ${{ secrets.DOCKER_HUB_USER }} \
--password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
export PLUGIN_USER PLUGIN_ARCH
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
done
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}

View File

@@ -0,0 +1,89 @@
name: Docker release build
on:
release:
types: [published]
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Free some space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Get actual patch version
id: actual_patch_version
run: echo ::set-output name=ACTUAL_PATCH_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g')
- name: Get actual minor version
id: actual_minor_version
run: echo ::set-output name=ACTUAL_MINOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1,2)
- name: Get actual major version
id: actual_major_version
run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_HUB_USER }}
password: ${{ secrets.DOCKER_HUB_PASSWORD }}
- name: Build and publish image
uses: docker/build-push-action@v6
with:
file: Dockerfile
context: .
platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
push: true
tags: |
rclone/rclone:latest
rclone/rclone:${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }}
rclone/rclone:${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }}
rclone/rclone:${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
build_docker_volume_plugin:
if: github.repository == 'rclone/rclone'
needs: build
runs-on: ubuntu-latest
name: Build docker plugin job
steps:
- name: Free some space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build and publish docker plugin
shell: bash
run: |
VER=${GITHUB_REF#refs/tags/}
PLUGIN_USER=rclone
docker login --username ${{ secrets.DOCKER_HUB_USER }} \
--password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
export PLUGIN_USER PLUGIN_ARCH
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
done
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}

View File

@@ -1,160 +1,144 @@
version: "2" # golangci-lint configuration options
linters: linters:
# Configure the linter set. To avoid unexpected results the implicit default
# set is ignored and all the ones to use are explicitly enabled.
default: none
enable: enable:
# Default
- errcheck - errcheck
- govet
- ineffassign
- staticcheck
- unused
# Additional
- gocritic
- misspell
#- prealloc # TODO
- revive
- unconvert
exclusions:
rules:
- linters:
- revive
text: 'var-naming: avoid meaningless package names'
- linters:
- revive
text: 'var-naming: avoid package names that conflict with Go standard library package names'
# Configure checks. Mostly using defaults but with some commented exceptions.
settings:
govet:
enable-all: true
disable:
- fieldalignment
- shadow
staticcheck:
# With staticcheck there is only one setting, so to extend the implicit
# default value it must be explicitly included.
checks:
# Default
- all
- -ST1000
- -ST1003
- -ST1016
- -ST1020
- -ST1021
- -ST1022
# Disable quickfix checks
- -QF*
gocritic:
# With gocritic there are different settings, but since enabled-checks
# and disabled-checks cannot both be set, for full customization the
# alternative is to disable all defaults and explicitly enable the ones
# to use.
disable-all: true
enabled-checks:
#- appendAssign # Skip default
- argOrder
- assignOp
- badCall
- badCond
#- captLocal # Skip default
- caseOrder
- codegenComment
#- commentFormatting # Skip default
- defaultCaseOrder
- deprecatedComment
- dupArg
- dupBranchBody
- dupCase
- dupSubExpr
- elseif
#- exitAfterDefer # Skip default
- flagDeref
- flagName
#- ifElseChain # Skip default
- mapKey
- newDeref
- offBy1
- regexpMust
- ruleguard # Enable additional check that are not enabled by default
#- singleCaseSwitch # Skip default
- sloppyLen
- sloppyTypeAssert
- switchTrue
- typeSwitchVar
- underef
- unlambda
- unslice
- valSwap
- wrapperFunc
settings:
ruleguard:
rules: ${base-path}/bin/rules.go
revive:
# With revive there is in reality only one setting, and when at least one
# rule is specified then only these rules will be considered, defaults
# and all others are then implicitly disabled, so must explicitly enable
# all rules to be used.
rules:
- name: blank-imports
disabled: false
- name: context-as-argument
disabled: false
- name: context-keys-type
disabled: false
- name: dot-imports
disabled: false
#- name: empty-block # Skip default
# disabled: true
- name: error-naming
disabled: false
- name: error-return
disabled: false
- name: error-strings
disabled: false
- name: errorf
disabled: false
- name: exported
disabled: false
#- name: increment-decrement # Skip default
# disabled: true
- name: indent-error-flow
disabled: false
- name: package-comments
disabled: false
- name: range
disabled: false
- name: receiver-naming
disabled: false
#- name: redefines-builtin-id # Skip default
# disabled: true
#- name: superfluous-else # Skip default
# disabled: true
- name: time-naming
disabled: false
- name: unexported-return
disabled: false
#- name: unreachable-code # Skip default
# disabled: true
#- name: unused-parameter # Skip default
# disabled: true
- name: var-declaration
disabled: false
- name: var-naming
disabled: false
formatters:
enable:
- goimports - goimports
- revive
- ineffassign
- govet
- unconvert
- staticcheck
- gosimple
- stylecheck
- unused
- misspell
- gocritic
#- prealloc
#- maligned
disable-all: true
issues: issues:
# Enable some lints excluded by default
exclude-use-default: false
# Maximum issues count per one linter. Set to 0 to disable. Default is 50. # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
max-issues-per-linter: 0 max-issues-per-linter: 0
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3. # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0 max-same-issues: 0
exclude-rules:
- linters:
- staticcheck
text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'
# don't disable the revive messages about comments on exported functions
include:
- EXC0012
- EXC0013
- EXC0014
- EXC0015
run: run:
# Timeout for total work, e.g. 30s, 5m, 5m30s. Default is 0 (disabled). # timeout for analysis, e.g. 30s, 5m, default is 1m
timeout: 10m timeout: 10m
linters-settings:
revive:
# setting rules seems to disable all the rules, so re-enable them here
rules:
- name: blank-imports
disabled: false
- name: context-as-argument
disabled: false
- name: context-keys-type
disabled: false
- name: dot-imports
disabled: false
- name: empty-block
disabled: true
- name: error-naming
disabled: false
- name: error-return
disabled: false
- name: error-strings
disabled: false
- name: errorf
disabled: false
- name: exported
disabled: false
- name: increment-decrement
disabled: true
- name: indent-error-flow
disabled: false
- name: package-comments
disabled: false
- name: range
disabled: false
- name: receiver-naming
disabled: false
- name: redefines-builtin-id
disabled: true
- name: superfluous-else
disabled: true
- name: time-naming
disabled: false
- name: unexported-return
disabled: false
- name: unreachable-code
disabled: true
- name: unused-parameter
disabled: true
- name: var-declaration
disabled: false
- name: var-naming
disabled: false
stylecheck:
# Only enable the checks performed by the staticcheck stand-alone tool,
# as documented here: https://staticcheck.io/docs/configuration/options/#checks
checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
gocritic:
# Enable all default checks with some exceptions and some additions (commented).
# Cannot use both enabled-checks and disabled-checks, so must specify all to be used.
disable-all: true
enabled-checks:
#- appendAssign # Enabled by default
- argOrder
- assignOp
- badCall
- badCond
#- captLocal # Enabled by default
- caseOrder
- codegenComment
#- commentFormatting # Enabled by default
- defaultCaseOrder
- deprecatedComment
- dupArg
- dupBranchBody
- dupCase
- dupSubExpr
- elseif
#- exitAfterDefer # Enabled by default
- flagDeref
- flagName
#- ifElseChain # Enabled by default
- mapKey
- newDeref
- offBy1
- regexpMust
- ruleguard # Not enabled by default
#- singleCaseSwitch # Enabled by default
- sloppyLen
- sloppyTypeAssert
- switchTrue
- typeSwitchVar
- underef
- unlambda
- unslice
- valSwap
- wrapperFunc
settings:
ruleguard:
rules: "${configDir}/bin/rules.go"

View File

@@ -1,72 +0,0 @@
default: true
# Use specific styles, to be consistent across all documents.
# Default is to accept any as long as it is consistent within the same document.
heading-style: # MD003
style: atx
ul-style: # MD004
style: dash
hr-style: # MD035
style: ---
code-block-style: # MD046
style: fenced
code-fence-style: # MD048
style: backtick
emphasis-style: # MD049
style: asterisk
strong-style: # MD050
style: asterisk
# Allow multiple headers with same text as long as they are not siblings.
no-duplicate-heading: # MD024
siblings_only: true
# Allow long lines in code blocks and tables.
line-length: # MD013
code_blocks: false
tables: false
# The Markdown files used to generated docs with Hugo contain a top level
# header, even though the YAML front matter has a title property (which is
# used for the HTML document title only). Suppress Markdownlint warning:
# Multiple top-level headings in the same document.
single-title: # MD025
level: 1
front_matter_title:
# The HTML docs generated by Hugo from Markdown files may have slightly
# different header anchors than GitHub rendered Markdown, e.g. Hugo trims
# leading dashes so "--config string" becomes "#config-string" while it is
# "#--config-string" in GitHub preview. When writing links to headers in the
# Markdown files we must use whatever works in the final HTML generated docs.
# Suppress Markdownlint warning: Link fragments should be valid.
link-fragments: false # MD051
# Restrict the languages and language identifiers to use for code blocks.
# We only want those supported by both Hugo and GitHub. These are documented
# here:
# https://gohugo.io/content-management/syntax-highlighting/#languages
# https://docs.github.com//get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks#syntax-highlighting
# In addition, we only want to allow identifiers (aliases) that correspond to
# the same language in Hugo and GitHub, and preferably also VSCode and other
# commonly used tools, to avoid confusion. An example of this is that "shell"
# by some are considered an identifier for shell scripts, i.e. an alias for
# "sh", while others consider it an identifier for shell sessions, i.e. an
# alias for "console". Although Hugo and GitHub in this case are consistent and
# have chosen the former, using "sh" instead, and not allowing use of "shell",
# avoids the confusion entirely.
fenced-code-language: # MD040
allowed_languages:
- text
- console
- sh
- bat
- ini
- json
- yaml
- go
- python
- c++
- c#
- java
- powershell

View File

@@ -1,80 +0,0 @@
# Rclone Code of Conduct
Like the technical community as a whole, the Rclone team and community
is made up of a mixture of professionals and volunteers from all over
the world, working on every aspect of the mission - including
mentorship, teaching, and connecting people.
Diversity is one of our huge strengths, but it can also lead to
communication issues and unhappiness. To that end, we have a few
ground rules that we ask people to adhere to. This code applies
equally to founders, mentors and those seeking help and guidance.
This isn't an exhaustive list of things that you can't do. Rather,
take it in the spirit in which it's intended - a guide to make it
easier to enrich all of us and the technical communities in which we
participate.
This code of conduct applies to all spaces managed by the Rclone
project or Rclone Services Ltd. This includes the issue tracker, the
forum, the GitHub site, the wiki, any other online services or
in-person events. In addition, violations of this code outside these
spaces may affect a person's ability to participate within them.
- **Be friendly and patient.**
- **Be welcoming.** We strive to be a community that welcomes and
supports people of all backgrounds and identities. This includes,
but is not limited to members of any race, ethnicity, culture,
national origin, colour, immigration status, social and economic
class, educational level, sex, sexual orientation, gender identity
and expression, age, size, family status, political belief,
religion, and mental and physical ability.
- **Be considerate.** Your work will be used by other people, and you
in turn will depend on the work of others. Any decision you take
will affect users and colleagues, and you should take those
consequences into account when making decisions. Remember that we're
a world-wide community, so you might not be communicating in someone
else's primary language.
- **Be respectful.** Not all of us will agree all the time, but
disagreement is no excuse for poor behavior and poor manners. We
might all experience some frustration now and then, but we cannot
allow that frustration to turn into a personal attack. It's
important to remember that a community where people feel
uncomfortable or threatened is not a productive one. Members of the
Rclone community should be respectful when dealing with other
members as well as with people outside the Rclone community.
- **Be careful in the words that you choose.** We are a community of
professionals, and we conduct ourselves professionally. Be kind to
others. Do not insult or put down other participants. Harassment and
other exclusionary behavior aren't acceptable. This includes, but is
not limited to:
- Violent threats or language directed against another person.
- Discriminatory jokes and language.
- Posting sexually explicit or violent material.
- Posting (or threatening to post) other people's personally
identifying information ("doxing").
- Personal insults, especially those using racist or sexist terms.
- Unwelcome sexual attention.
- Advocating for, or encouraging, any of the above behavior.
- Repeated harassment of others. In general, if someone asks you to
stop, then stop.
- **When we disagree, try to understand why.** Disagreements, both
social and technical, happen all the time and Rclone is no
exception. It is important that we resolve disagreements and
differing views constructively. Remember that we're different. The
strength of Rclone comes from its varied community, people from a
wide range of backgrounds. Different people have different
perspectives on issues. Being unable to understand why someone holds
a viewpoint doesn't mean that they're wrong. Don't forget that it is
human to err and blaming each other doesn't get us anywhere.
Instead, focus on helping to resolve issues and learning from
mistakes.
If you believe someone is violating the code of conduct, we ask that
you report it by emailing [info@rclone.com](mailto:info@rclone.com).
Original text courtesy of the [Speak Up! project](http://web.archive.org/web/20141109123859/http://speakup.io/coc.html).
## Questions?
If you have questions, please feel free to [contact us](mailto:info@rclone.com).

View File

@@ -15,81 +15,61 @@ with the [latest beta of rclone](https://beta.rclone.org/):
- Rclone version (e.g. output from `rclone version`) - Rclone version (e.g. output from `rclone version`)
- Which OS you are using and how many bits (e.g. Windows 10, 64 bit) - Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
- The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`) - The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
- A log of the command with the `-vv` flag (e.g. output from - A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
`rclone -vv copy /tmp remote:tmp`) - if the log contains secrets then edit the file with a text editor first to obscure them
- if the log contains secrets then edit the file with a text editor first to
obscure them
## Submitting a new feature or bug fix ## Submitting a new feature or bug fix
If you find a bug that you'd like to fix, or a new feature that you'd If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via GitHub. like to implement then please submit a pull request via GitHub.
If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues) If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues) first so it can be discussed.
first so it can be discussed.
To prepare your pull request first press the fork button on [rclone's GitHub To prepare your pull request first press the fork button on [rclone's GitHub
page](https://github.com/rclone/rclone). page](https://github.com/rclone/rclone).
Then [install Git](https://git-scm.com/downloads) and set your public contribution Then [install Git](https://git-scm.com/downloads) and set your public contribution [name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git) and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).
[name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git)
and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).
Next open your terminal, change directory to your preferred folder and initialise Next open your terminal, change directory to your preferred folder and initialise your local rclone project:
your local rclone project:
```console git clone https://github.com/rclone/rclone.git
git clone https://github.com/rclone/rclone.git cd rclone
cd rclone git remote rename origin upstream
git remote rename origin upstream # if you have SSH keys setup in your GitHub account:
# if you have SSH keys setup in your GitHub account: git remote add origin git@github.com:YOURUSER/rclone.git
git remote add origin git@github.com:YOURUSER/rclone.git # otherwise:
# otherwise: git remote add origin https://github.com/YOURUSER/rclone.git
git remote add origin https://github.com/YOURUSER/rclone.git
```
Note that most of the terminal commands in the rest of this guide must be Note that most of the terminal commands in the rest of this guide must be executed from the rclone folder created above.
executed from the rclone folder created above.
Now [install Go](https://golang.org/doc/install) and verify your installation: Now [install Go](https://golang.org/doc/install) and verify your installation:
```console go version
go version
```
Great, you can now compile and execute your own version of rclone: Great, you can now compile and execute your own version of rclone:
```console go build
go build ./rclone version
./rclone version
```
(Note that you can also replace `go build` with `make`, which will include a (Note that you can also replace `go build` with `make`, which will include a
more accurate version number in the executable as well as enable you to specify more accurate version number in the executable as well as enable you to specify
more build options.) Finally make a branch to add your new feature more build options.) Finally make a branch to add your new feature
```console git checkout -b my-new-feature
git checkout -b my-new-feature
```
And get hacking. And get hacking.
You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins) You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins) and a quick view on the rclone [code organisation](#code-organisation).
and a quick view on the rclone [code organisation](#code-organisation).
When ready - test the affected functionality and run the unit tests for the When ready - test the affected functionality and run the unit tests for the code you changed
code you changed
```console cd folder/with/changed/files
cd folder/with/changed/files go test -v
go test -v
```
Note that you may need to make a test remote, e.g. `TestSwift` for some Note that you may need to make a test remote, e.g. `TestSwift` for some
of the unit tests. of the unit tests.
This is typically enough if you made a simple bug fix, otherwise please read This is typically enough if you made a simple bug fix, otherwise please read the rclone [testing](#testing) section too.
the rclone [testing](#testing) section too.
Make sure you Make sure you
@@ -99,19 +79,14 @@ Make sure you
When you are done with that push your changes to GitHub: When you are done with that push your changes to GitHub:
```console git push -u origin my-new-feature
git push -u origin my-new-feature
```
and open the GitHub website to [create your pull and open the GitHub website to [create your pull
request](https://help.github.com/articles/creating-a-pull-request/). request](https://help.github.com/articles/creating-a-pull-request/).
Your changes will then get reviewed and you might get asked to fix some stuff. Your changes will then get reviewed and you might get asked to fix some stuff. If so, then make the changes in the same branch, commit and push your updates to GitHub.
If so, then make the changes in the same branch, commit and push your updates to
GitHub.
You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
or [squash your commits](#squashing-your-commits).
## Using Git and GitHub ## Using Git and GitHub
@@ -119,118 +94,87 @@ or [squash your commits](#squashing-your-commits).
Follow the guideline for [commit messages](#commit-messages) and then: Follow the guideline for [commit messages](#commit-messages) and then:
```console git checkout my-new-feature # To switch to your branch
git checkout my-new-feature # To switch to your branch git status # To see the new and changed files
git status # To see the new and changed files git add FILENAME # To select FILENAME for the commit
git add FILENAME # To select FILENAME for the commit git status # To verify the changes to be committed
git status # To verify the changes to be committed git commit # To do the commit
git commit # To do the commit git log # To verify the commit. Use q to quit the log
git log # To verify the commit. Use q to quit the log
```
You can modify the message or changes in the latest commit using: You can modify the message or changes in the latest commit using:
```console git commit --amend
git commit --amend
```
If you amend to commits that have been pushed to GitHub, then you will have to If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
[replace your previously pushed commits](#replacing-your-previously-pushed-commits).
### Replacing your previously pushed commits ### Replacing your previously pushed commits
Note that you are about to rewrite the GitHub history of your branch. It is good Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.
practice to involve your collaborators before modifying commits that have been
pushed to GitHub.
Your previously pushed commits are replaced by: Your previously pushed commits are replaced by:
```console git push --force origin my-new-feature
git push --force origin my-new-feature
```
### Basing your changes on the latest master ### Basing your changes on the latest master
To base your changes on the latest version of the To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
[rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
```console git checkout master
git checkout master git fetch upstream
git fetch upstream git merge --ff-only
git merge --ff-only git push origin --follow-tags # optional update of your fork in GitHub
git push origin --follow-tags # optional update of your fork in GitHub git checkout my-new-feature
git checkout my-new-feature git rebase master
git rebase master
```
If you rebase commits that have been pushed to GitHub, then you will have to If you rebase commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
[replace your previously pushed commits](#replacing-your-previously-pushed-commits).
### Squashing your commits ### Squashing your commits ###
To combine your commits into one commit: To combine your commits into one commit:
```console git log # To count the commits to squash, e.g. the last 2
git log # To count the commits to squash, e.g. the last 2 git reset --soft HEAD~2 # To undo the 2 latest commits
git reset --soft HEAD~2 # To undo the 2 latest commits git status # To check everything is as expected
git status # To check everything is as expected
```
If everything is fine, then make the new combined commit: If everything is fine, then make the new combined commit:
```console git commit # To commit the undone commits as one
git commit # To commit the undone commits as one
```
otherwise, you may roll back using: otherwise, you may roll back using:
```console git reflog # To check that HEAD{1} is your previous state
git reflog # To check that HEAD{1} is your previous state git reset --soft 'HEAD@{1}' # To roll back to your previous state
git reset --soft 'HEAD@{1}' # To roll back to your previous state
```
If you squash commits that have been pushed to GitHub, then you will have to If you squash commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
[replace your previously pushed commits](#replacing-your-previously-pushed-commits).
Tip: You may like to use `git rebase -i master` if you are experienced or have a Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.
more complex situation.
### GitHub Continuous Integration ### GitHub Continuous Integration
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
to build and test the project, which should be automatically available for your
fork too from the `Actions` tab in your repository.
## Testing ## Testing
### Code quality tests ### Code quality tests
If you install [golangci-lint](https://github.com/golangci/golangci-lint) then If you install [golangci-lint](https://github.com/golangci/golangci-lint) then you can run the same tests as get run in the CI which can be very helpful.
you can run the same tests as get run in the CI which can be very helpful.
You can run them with `make check` or with `golangci-lint run ./...`. You can run them with `make check` or with `golangci-lint run ./...`.
Using these tests ensures that the rclone codebase all uses the same coding Using these tests ensures that the rclone codebase all uses the same coding standards. These tests also check for easy mistakes to make (like forgetting to check an error return).
standards. These tests also check for easy mistakes to make (like forgetting
to check an error return).
### Quick testing ### Quick testing
rclone's tests are run from the go testing framework, so at the top rclone's tests are run from the go testing framework, so at the top
level you can run this to run all the tests. level you can run this to run all the tests.
```console go test -v ./...
go test -v ./...
```
You can also use `make`, if supported by your platform You can also use `make`, if supported by your platform
```console make quicktest
make quicktest
```
The quicktest is [automatically run by GitHub](#github-continuous-integration) The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.
when you push your branch to GitHub.
### Backend testing ### Backend testing
@@ -246,50 +190,41 @@ need to make a remote called `TestDrive`.
You can then run the unit tests in the drive directory. These tests
are skipped if `TestDrive:` isn't defined.

```console
cd backend/drive
go test -v
```

You can then run the integration tests which test all of rclone's
operations. Normally these get run against the local file system,
but they can be run against any of the remotes.

```console
cd fs/sync
go test -v -remote TestDrive:
go test -v -remote TestDrive: -fast-list
cd fs/operations
go test -v -remote TestDrive:
```

If you want to use the integration test framework to run these tests
all together with an HTML report and test retries then from the
project root:

```console
go run ./fstest/test_all -backends drive
```

### Full integration testing

If you want to run all the integration tests against all the remotes,
then change into the project root and run

```console
make check
make test
```

The commands may require some extra go packages which you can install with

```console
make build_dep
```

The full integration tests are run daily on the integration test server. You can
find the results at <https://integration.rclone.org>

## Code Organisation
@@ -297,48 +232,46 @@ Rclone code is organised into a small number of top level directories
with modules beneath.

- backend - the rclone backends for interfacing to cloud providers
  - all - import this to load all the cloud providers (see the sketch after this list)
  - ...providers
- bin - scripts for use while building or maintaining rclone
- cmd - the rclone commands
  - all - import this to load all the commands
  - ...commands
- cmdtest - end-to-end tests of commands, flags, environment variables,...
- docs - the documentation and website
  - content - adjust these docs only, except those marked autogenerated or
    portions marked autogenerated where the corresponding .go file must be
    edited instead, and everything else is autogenerated
  - commands - these are auto-generated, edit the corresponding .go file
- fs - main rclone definitions - minimal amount of code
  - accounting - bandwidth limiting and statistics
  - asyncreader - an io.Reader which reads ahead
  - config - manage the config file and flags
  - driveletter - detect if a name is a drive letter
  - filter - implements include/exclude filtering
  - fserrors - rclone specific error handling
  - fshttp - http handling for rclone
  - fspath - path handling for rclone
  - hash - defines rclone's hash types and functions
  - list - list a remote
  - log - logging facilities
  - march - iterates directories in lock step
  - object - in memory Fs objects
  - operations - primitives for sync, e.g. Copy, Move
  - sync - sync directories
  - walk - walk a directory
- fstest - provides integration test framework
  - fstests - integration tests for the backends
  - mockdir - mocks an fs.Directory
  - mockobject - mocks an fs.Object
  - test_all - Runs integration tests for everything
- graphics - the images used in the website, etc.
- lib - libraries used by the backend
  - atexit - register functions to run when rclone exits
  - dircache - directory ID to name caching
  - oauthutil - helpers for using oauth
  - pacer - retries with backoff and paces operations
  - readers - a selection of useful io.Readers
  - rest - a thin abstraction over net/http for REST
- librclone - in memory interface to rclone's API for embedding rclone
- vfs - Virtual FileSystem layer for implementing rclone mount and similar
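
To make the `all` packages above concrete, here is a small, hedged sketch (not
code from the repository) that blank-imports `backend/all` so every backend
registers itself, then walks the exported `fs.Registry` slice:

```go
package main

import (
	"fmt"

	_ "github.com/rclone/rclone/backend/all" // blank import: every backend registers itself in init()
	"github.com/rclone/rclone/fs"
)

func main() {
	// Print the name and description of every backend that registered itself.
	for _, backend := range fs.Registry {
		fmt.Printf("%-20s %s\n", backend.Name, backend.Description)
	}
}
```

The rclone binary itself works the same way: its `main` package blank-imports
`backend/all` and `cmd/all` so that every backend and command is compiled in.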
@@ -346,109 +279,47 @@ with modules beneath.
If you are adding a new feature then please update the documentation.
The documentation sources are generally in Markdown format, in conformance
with the CommonMark specification and compatible with GitHub Flavored
Markdown (GFM). The Markdown format and style are checked as part of the lint
operation that runs automatically on pull requests, to enforce standards and
consistency. This is based on the [markdownlint](https://github.com/DavidAnson/markdownlint)
tool by David Anson, which can also be integrated into editors so you can
perform the same checks while writing. It generally follows Ciro Santilli's
[Markdown Style Guide](https://cirosantilli.com/markdown-style-guide), which
is a good source if you want to know more.
HTML pages, served as the website <rclone.org>, are generated from the Markdown,
using [Hugo](https://gohugo.io). Note that when generating the HTML pages,
a different algorithm is currently used for generating header anchors
than the one GitHub uses for its Markdown rendering. For example, in the HTML docs
generated by Hugo any leading `-` characters are ignored, which means when
linking to a header with text `--config string` we therefore need to use the
link `#config-string` in our Markdown source, which will not work in GitHub's
preview where `#--config-string` would be the correct link.
Most of the documentation is written directly in text files with extension
`.md`, mainly within the folder `docs/content`. Note that several of these files
are autogenerated (e.g. the command documentation, and `docs/content/flags.md`),
or contain autogenerated portions (e.g. the backend documentation under
`docs/content/commands`). These are marked with an `autogenerated` comment.
The sources of the autogenerated text are usually Markdown formatted text
embedded as string values in the Go source code, so you need to locate these
and edit the `.go` file instead. The `MANUAL.*`, `rclone.1` and other text
files in the root of the repository are also autogenerated. The autogeneration
of files, and the website, will be done during the release process. See the
`make doc` and `make website` targets in the Makefile if you are interested in
how. You don't need to run these when adding a feature.
If you add a new general flag (not for a backend), then document it in
`docs/content/docs.md` - the flags there are supposed to be in
alphabetical order.

If you add a new backend option/flag, then it should be documented in
the source file in the `Help:` field (see the sketch after this list):

- Start with the most important information about the option,
  as a single sentence on a single line.
- This text will be used for the command-line flag help.
- It will be combined with other information, such as any default value,
  and the result will look odd if not written as a single sentence.
- It should end with a period/full stop character, which will be shown
  in docs but automatically removed when producing the flag help.
- Try to keep it below 80 characters, to reduce text wrapping in the terminal.
- More details can be added in a new paragraph, after an empty line (`"\n\n"`).
- Like with docs generated from Markdown, a single line break is ignored
  and two line breaks create a new paragraph.
- This text will be shown to the user in `rclone config`
  and in the docs (where it will be added by `make backenddocs`,
  normally run some time before next release).
- To create options of enumeration type use the `Examples:` field.
- Each example value has its own `Help:` field, but they are treated
  a bit differently than the main option help text. They will be shown
  as an unordered list, therefore a single line break is enough to
  create a new list item. Also, for enumeration texts like names of
  countries, it looks better without an ending period/full stop character.
- You can run `make backenddocs` to verify the resulting Markdown.
- This will update the autogenerated sections of the backend docs Markdown
files under `docs/content`.
- It requires you to have [Python](https://www.python.org) installed.
- The `backenddocs` make target runs the Python script `bin/make_backend_docs.py`,
and you can also run this directly, optionally with the name of a backend
as argument to only update the docs for a specific backend.
- **Do not** commit the updated Markdown files. This operation is run as part of
the release process. Since any manual changes in the autogenerated sections
of the Markdown files will then be lost, we have a pull request check that
reports error for any changes within the autogenerated sections. Should you
have done manual changes outside of the autogenerated sections they must be
committed, of course.
- You can run `make serve` to verify the resulting website.
- This will build the website and serve it locally, so you can open it in
your web browser and verify that the end result looks OK. Check specifically
any added links, also in light of the note above regarding different algorithms
for generated header anchors.
- It requires you to have the [Hugo](https://gohugo.io) tool available.
- The `serve` make target depends on the `website` target, which runs the
`hugo` command from the `docs` directory to build the website, and then
it serves the website locally with an embedded web server using a command
`hugo server --logLevel info -w --disableFastRender --ignoreCache`, so you
can run similar Hugo commands directly as well.
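
Here is a hedged sketch of what such option definitions can look like using
`fs.Option` and `fs.OptionExample`; the option names, help texts and default
below are invented for illustration and are not taken from any real backend:

```go
package example

import "github.com/rclone/rclone/fs"

// Hypothetical options illustrating the Help conventions described above.
var exampleOptions = []fs.Option{{
	Name: "upload_cutoff",
	Help: `Cutoff for switching to chunked upload.

Any further detail goes in extra paragraphs like this one, separated
from the single-sentence summary by a blank line.`,
	Default:  fs.SizeSuffix(200 * 1024 * 1024),
	Advanced: true,
}, {
	Name: "region",
	Help: "Region to connect to.",
	Examples: []fs.OptionExample{{
		Value: "eu-west-1",
		Help:  "EU (Ireland)", // enumeration help: no trailing full stop
	}, {
		Value: "us-east-1",
		Help:  "US East (N. Virginia)",
	}},
}}
```

A real backend passes options like these in the `Options` field of the
`fs.RegInfo` it registers (see the backend skeleton later in this document).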
When writing documentation for an entirely new backend,
see [backend documentation](#backend-documentation).
If you are updating documentation for a command, you must do that in the
command source code, e.g. `cmd/ls/ls.go`. Write flag help strings as a single
sentence on a single line, without a period/full stop character at the end,
as it will be combined unmodified with other information (such as any default
value).

Note that you can use
[GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
for small changes in the docs which makes it very easy. Just remember the
caveat when linking to header anchors, noted above, which means that GitHub's
Markdown preview may not be an entirely reliable verification of the results.
After your changes have been merged, you can verify them on
[tip.rclone.org](https://tip.rclone.org). This site is updated daily with the
current state of the master branch at 07:00 UTC. The changes will be on the main
[rclone.org](https://rclone.org) site once they have been included in a release.
## Making a release
@@ -479,13 +350,13 @@ change will get linked into the issue.
Here is an example of a short commit message:

```text
drive: add team drive support - fixes #885
```

And here is an example of a longer one:

```text
mount: fix hang on errored upload

In certain circumstances, if an upload failed then the mount could hang
```
@@ -508,9 +379,7 @@ To add a dependency `github.com/ncw/new_dependency` see the
instructions below. These will fetch the dependency and add it to
`go.mod` and `go.sum`.

```console
go get github.com/ncw/new_dependency
```

You can add constraints on that package when doing `go get` (see the
go docs linked above), but don't unless you really need to.
@@ -522,9 +391,7 @@ and `go.sum` in the same commit as your other changes.
If you need to update a dependency then run

```console
go get golang.org/x/crypto
```

Check in a single commit as above.
@@ -567,38 +434,25 @@ remote or an fs.
### Getting going
- Create `backend/remote/remote.go` (copy this from a similar remote; a
  skeleton sketch follows this list)
  - box is a good one to start from if you have a directory-based remote (and
    shows how to use the directory cache)
  - b2 is a good one to start from if you have a bucket-based remote
- Add your remote to the imports in `backend/all/all.go`
- HTTP based remotes are easiest to maintain if they use rclone's
  [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but
  if there is a really good Go SDK from the provider then use that instead.
- Try to implement as many optional methods as possible as it makes the remote
  more usable.
- Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to
  make sure we can encode any path name and `rclone info` to help determine the
  encodings needed
  - `rclone purge -v TestRemote:rclone-info`
  - `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
  - `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
  - open `remote.csv` in a spreadsheet and examine
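
For orientation, this is roughly the registration boilerplate a new
`backend/remote/remote.go` starts from. It is a sketch only: the backend name,
option and error message are placeholders, and a real backend copied from box
or b2 will add a type implementing `fs.Fs` plus many optional interfaces:

```go
// Package remote provides a skeleton backend (placeholder name).
package remote

import (
	"context"
	"errors"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

// init registers the backend with rclone so it shows up in `rclone config`.
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "remote",
		Description: "Example Remote Storage (placeholder)",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name: "endpoint",
			Help: "Endpoint for the service.",
		}},
	})
}

// NewFs constructs an Fs from the path - a real backend builds its API
// client here and returns a type implementing the fs.Fs interface.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	return nil, errors.New("remote: backend not implemented yet")
}
```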
### Guidelines for a speedy merge

- **Do** use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest)
  if you are implementing a REST like backend and parsing XML/JSON in the backend.
- **Do** use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp)
  if your backend is HTTP based - this adds features like `--dump bodies`,
  `--tpslimit`, `--user-agent` without you having to code anything!
- **Do** follow your example backend exactly - use the same code order, function
  names, layout, structure. **Don't** move stuff around and **Don't** delete the
  comments.
- **Do not** split your backend up into `fs.go` and `object.go` (there are a few
  backends like that - don't follow them!)
- **Do** put your API type definitions in a separate file - by preference `api/types.go`
- **Remember** we have >50 backends to maintain so keeping them as similar as
  possible to each other is a high priority!
### Unit tests
@@ -609,19 +463,19 @@ remote or an fs.
### Integration tests

- Add your backend to `fstest/test_all/config.yaml`
- Once you've done that then you can use the integration test framework from
  the project root:
  - `go run ./fstest/test_all -backends remote`

Or if you want to run the integration tests manually:

- Make sure integration tests pass with
  - `cd fs/operations`
  - `go test -v -remote TestRemote:`
  - `cd fs/sync`
  - `go test -v -remote TestRemote:`
- If your remote defines `ListR` check with this also
  - `go test -v -remote TestRemote: -fast-list`

See the [testing](#testing) section for more information on integration tests.
@@ -633,13 +487,10 @@ alphabetical order of full name of remote (e.g. `drive` is ordered as
`Google Drive`) but with the local file system last.

- `README.md` - main GitHub page
- `docs/content/remote.md` - main docs page (note the backend options are
  automatically added to this file with `make backenddocs`)
  - make sure this has the `autogenerated options` comments in (see your
    reference backend docs)
  - update them in your backend with `bin/make_backend_docs.py remote`
- `docs/content/overview.md` - overview docs - add an entry into the Features
  table and the Optional Features table.
- `docs/content/docs.md` - list of remotes in config section
- `docs/content/_index.md` - front page of rclone.org
- `docs/layouts/chrome/navbar.html` - add it to the website navigation
@@ -650,55 +501,74 @@ in the web browser and the links (internal and external) all work.
## Adding a new s3 provider

It is quite easy to add a new S3 provider to rclone.
[Please see the guide in the S3 backend directory](backend/s3/README.md).
You'll need to modify the following files:
- `backend/s3/s3.go`
  - Add the provider to `providerOption` at the top of the file
  - Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
  - Exclude your provider from generic config questions (e.g. `region` and `endpoint`).
  - Add the provider to the `setQuirks` function - see the documentation there.
- `docs/content/s3.md`
  - Add the provider at the top of the page.
  - Add a section about the provider linked from there.
  - Add a transcript of a trial `rclone config` session
    - Edit the transcript to remove things which might change in subsequent versions
  - **Do not** alter or add to the autogenerated parts of `s3.md`
  - **Do not** run `make backenddocs` or `bin/make_backend_docs.py s3`
- `README.md` - this is the home page on GitHub
  - Add the provider and a link to the section you wrote in `docs/content/s3.md`
- `docs/content/_index.md` - this is the home page of rclone.org
  - Add the provider and a link to the section you wrote in `docs/content/s3.md`
When adding the provider, endpoints, quirks, docs etc keep them in
alphabetical order by `Provider` name, but with `AWS` first and
`Other` last.
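
As a hedged illustration of the `providerOption` step above and of this
ordering rule (`AWS` first, `Other` last, the rest alphabetical), a new
provider is essentially one more `fs.OptionExample` in the provider option's
`Examples` list. The snippet below is invented for illustration and is not the
real code from `backend/s3/s3.go`:

```go
package example

import "github.com/rclone/rclone/fs"

// Hypothetical provider option showing where a new entry would slot in.
var providerOption = fs.Option{
	Name: "provider",
	Help: "Choose your S3 provider.",
	Examples: []fs.OptionExample{{
		Value: "AWS",
		Help:  "Amazon Web Services (AWS) S3",
	}, {
		Value: "NewS3Provider", // placeholder for the provider being added
		Help:  "NewS3Provider Object Storage",
	}, {
		Value: "Other",
		Help:  "Any other S3 compatible provider",
	}},
}
```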
Once you've written the docs, run `make serve` and check they look OK
in the web browser and the links (internal and external) all work.
Once you've written the code, test `rclone config` works to your
satisfaction, and check that the integration tests work with `go test -v -remote
NewS3Provider:`. You may need to adjust the quirks to get them to
pass. Some providers just can't pass the tests with control characters
in the names so if these fail and the provider doesn't support
`urlEncodeListings` in the quirks then ignore them. Note that the
`SetTier` test may also fail on non-AWS providers.
For an example of adding an s3 provider see [eb3082a1](https://github.com/rclone/rclone/commit/eb3082a1ebdb76d5625f14cedec3f5154a5e7b10).
## Writing a plugin

New features (backends, commands) can also be added "out-of-tree", through Go
plugins. Changes will be kept in a dynamically loaded file instead of being
compiled into the main binary. This is useful if you can't merge your changes
upstream or don't want to maintain a fork of rclone.
### Usage

- Naming
  - Plugin names must have the pattern `librcloneplugin_KIND_NAME.so`.
  - `KIND` should be one of `backend`, `command` or `bundle`.
  - Example: A plugin with backend support for PiFS would be called
    `librcloneplugin_backend_pifs.so`.
- Loading
  - Supported on macOS & Linux as of now. ([Go issue for Windows support](https://github.com/golang/go/issues/19282))
  - Supported on rclone v1.50 or greater.
  - All plugins in the folder specified by variable `$RCLONE_PLUGIN_PATH` are loaded.
  - If this variable doesn't exist, plugin support is disabled.
  - Plugins must be compiled against the exact version of rclone to work.
    (The rclone used during building the plugin must be the same as the source
    of rclone)
### Building

To turn your existing additions into a Go plugin, move them to an external repository
and change the top-level package name to `main`.

Check `rclone --version` and make sure that the plugin's rclone dependency and
host Go version match.

Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.
[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)

[Minimal example](https://gist.github.com/terorie/21b517ee347828e899e1913efc1d684f)

## Keeping a backend or command out of tree
Rclone was designed to be modular so it is very easy to keep a backend
or a command out of the main rclone source tree.
So, for example, if you had a backend which accessed your proprietary
systems, or a command which was specialised for your needs, you could
add them out of tree.
This may be easier than using a plugin and is supported on all
platforms, not just macOS and Linux.
This is explained further in <https://github.com/rclone/rclone_out_of_tree_example>
which has an example of an out of tree backend `ram` (which is a
renamed version of the `memory` backend).
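
As a sketch of the idea (modelled loosely on that example repository, not
copied from it), an out-of-tree build is just a small `main` package that
pulls in the stock backends and commands, plus your own package, and then
hands control to rclone:

```go
package main

import (
	_ "github.com/rclone/rclone/backend/all" // all the in-tree backends
	"github.com/rclone/rclone/cmd"
	_ "github.com/rclone/rclone/cmd/all" // all the in-tree commands
	// Add a blank import of your out-of-tree backend or command package
	// here so that its init() registration runs.
)

func main() {
	cmd.Main()
}
```

Build this with a normal `go build` and you get an rclone binary that also
contains your private backend or command, on any platform Go supports.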

Dockerfile

@@ -1,47 +1,19 @@
FROM golang:alpine AS builder

ARG CGO_ENABLED=0

WORKDIR /go/src/github.com/rclone/rclone/

RUN echo "**** Set Go Environment Variables ****" && \
    go env -w GOCACHE=/root/.cache/go-build

RUN echo "**** Install Dependencies ****" && \
    apk add --no-cache \
        make \
        bash \
        gawk \
        git

COPY go.mod .
COPY go.sum .

RUN echo "**** Download Go Dependencies ****" && \
    go mod download -x

RUN echo "**** Verify Go Dependencies ****" && \
    go mod verify

COPY . .

RUN --mount=type=cache,target=/root/.cache/go-build,sharing=locked \
    echo "**** Build Binary ****" && \
    make

RUN echo "**** Print Version Binary ****" && \
    ./rclone version

# Begin final image
FROM alpine:latest

RUN echo "**** Install Dependencies ****" && \
    apk add --no-cache \
        ca-certificates \
        fuse3 \
        tzdata && \
    echo "Enable user_allow_other in fuse" && \
    echo "user_allow_other" >> /etc/fuse.conf

COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/

MAINTAINERS.md

@@ -1,4 +1,4 @@
# Maintainers guide for rclone

Current active maintainers of rclone are:
@@ -24,108 +24,80 @@ Current active maintainers of rclone are:
| Dan McArdle | @dmcardle | gitannex |
| Sam Harrison | @childish-sambino | filescom |

## This is a work in progress draft

This is a guide for how to be an rclone maintainer. This is mostly a write-up
of what I (@ncw) attempt to do.

## Triaging Tickets

When a ticket comes in it should be triaged. This means it should be classified
by adding labels and placed into a milestone. Quite a lot of tickets need a bit
of back and forth to determine whether it is a valid ticket so tickets may
remain without labels or milestone for a while.

Rclone uses the labels like this:

- `bug` - a definitely verified bug
- `can't reproduce` - a problem which we can't reproduce
- `doc fix` - a bug in the documentation - if users need help understanding the
  docs add this label
- `duplicate` - normally close these and ask the user to subscribe to the original
- `enhancement: new remote` - a new rclone backend
- `enhancement` - a new feature
- `FUSE` - to do with `rclone mount` command
- `good first issue` - mark these if you find a small self-contained issue -
  these get shown to new visitors to the project
- `help` wanted - mark these if you find a self-contained issue - these get
  shown to new visitors to the project
- `IMPORTANT` - note to maintainers not to forget to fix this for the release
- `maintenance` - internal enhancement, code re-organisation, etc.
- `Needs Go 1.XX` - waiting for that version of Go to be released
- `question` - not a `bug` or `enhancement` - direct to the forum for next time
- `Remote: XXX` - which rclone backend this affects
- `thinking` - not decided on the course of action yet

If it turns out to be a bug or an enhancement it should be tagged as such, with
the appropriate other tags. Don't forget the "good first issue" tag to give new
contributors something easy to do to get going.

When a ticket is tagged it should be added to a milestone, either the next
release, the one after, Soon or Help Wanted. Bugs can be added to the
"Known Bugs" milestone if they aren't planned to be fixed or need to wait for
something (e.g. the next go release).

The milestones have these meanings:

- v1.XX - stuff we would like to fit into this release
- v1.XX+1 - stuff we are leaving until the next release
- Soon - stuff we think is a good idea - waiting to be scheduled for a release
- Help wanted - blue sky stuff that might get moved up, or someone could help with
- Known bugs - bugs waiting on external factors or we aren't going to fix for
  the moment

Tickets [with no milestone](https://github.com/rclone/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile)
are good candidates for ones that have slipped between the gaps and need
following up.

## Closing Tickets

Close tickets as soon as you can - make sure they are tagged with a release.
Post a link to a beta in the ticket with the fix in, asking for feedback.

## Pull requests

Try to process pull requests promptly!

Merging pull requests on GitHub itself works quite well nowadays so you can
squash and rebase or rebase pull requests. rclone doesn't use merge commits.
Use the squash and rebase option if you need to edit the commit message.

After merging the commit, in your local master branch, do `git pull` then run
`bin/update-authors.py` to update the authors file then `git push`.

Sometimes pull requests need to be left open for a while - this is especially true
of contributions of new backends which take a long time to get right.

## Merges

If you are merging a branch locally then do `git merge --ff-only branch-name` to
avoid a merge commit. You'll need to rebase the branch if it doesn't merge cleanly.

## Release cycle

Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer
if there is something big to merge that didn't stabilize properly or for personal
reasons.

High impact regressions should be fixed before the next release.

Near the start of the release cycle, the dependencies should be updated with
`make update` to give time for bugs to surface.

Towards the end of the release cycle try not to merge anything too big, to let
things settle down.

Follow the instructions in RELEASE.md for making the release. Note that the
testing part is the most time-consuming, often needing several rounds of test
and fix depending on exactly how many new features rclone has gained.

## Mailing list

There is now an invite-only mailing list for rclone developers `rclone-dev` on
Google Groups.

## TODO

I should probably make a <dev@rclone.org> to register with cloud providers.

MANUAL.html (generated): diff suppressed because it is too large

MANUAL.md (generated): diff suppressed because it is too large

MANUAL.txt (generated): diff suppressed because it is too large

View File

@@ -100,7 +100,6 @@ compiletest:
check: rclone
	@echo "-- START CODE QUALITY REPORT -------------------------------"
	@golangci-lint run $(LINTTAGS) ./...
	@bin/markdown-lint
	@echo "-- END CODE QUALITY REPORT ---------------------------------"

# Get the build dependencies
@@ -114,21 +113,21 @@ release_dep_linux:
# Update dependencies
showupdates:
	@echo "*** Direct dependencies that could be updated ***"
	@go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null

# Update direct dependencies only
updatedirect:
	go get $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
	go mod tidy

# Update direct and indirect dependencies and test dependencies
update:
	go get -u -t ./...
	go mod tidy

# Tidy the module dependencies
tidy:
	go mod tidy

doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
@@ -145,11 +144,9 @@ MANUAL.txt: MANUAL.md
	pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt

commanddocs: rclone
	go generate ./lib/transform
	-@rmdir -p '$$HOME/.config/rclone'
	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
	@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
	go run bin/make_bisync_docs.go ./docs/content/

backenddocs: rclone bin/make_backend_docs.py
	-@rmdir -p '$$HOME/.config/rclone'
@@ -246,7 +243,7 @@ fetch_binaries:
	rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/

serve: website
	cd docs && hugo server --logLevel info -w --disableFastRender --ignoreCache

tag: retag doc
	bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new

README.md

@@ -1,6 +1,22 @@
<!-- markdownlint-disable-next-line first-line-heading no-inline-html -->
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only) [<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
<!-- markdownlint-disable-next-line no-inline-html -->
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only) [<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
[Website](https://rclone.org) | [Website](https://rclone.org) |
@@ -18,112 +34,97 @@
# Rclone

Rclone *("rsync for cloud storage")* is a command-line program to sync files and
directories to and from different cloud storage providers.

## Storage providers
- 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
- Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
- Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
- Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
- ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
- Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
- Box [:page_facing_up:](https://rclone.org/box/)
- Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
- China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
- Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
- Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
- Cubbit DS3 [:page_facing_up:](https://rclone.org/s3/#Cubbit)
- DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
- Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
- Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
- Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
- Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
- Exaba [:page_facing_up:](https://rclone.org/s3/#exaba)
- Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
- FileLu [:page_facing_up:](https://rclone.org/filelu/)
- Files.com [:page_facing_up:](https://rclone.org/filescom/)
- FlashBlade [:page_facing_up:](https://rclone.org/s3/#pure-storage-flashblade)
- FTP [:page_facing_up:](https://rclone.org/ftp/)
- GoFile [:page_facing_up:](https://rclone.org/gofile/)
- Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
- Google Drive [:page_facing_up:](https://rclone.org/drive/)
- Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
- HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
- Hetzner Object Storage [:page_facing_up:](https://rclone.org/s3/#hetzner)
- Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
- HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
- HTTP [:page_facing_up:](https://rclone.org/http/)
- Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
- iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/)
- ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
- Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
- Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
- IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
- Intercolo Object Storage [:page_facing_up:](https://rclone.org/s3/#intercolo)
- IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
- Koofr [:page_facing_up:](https://rclone.org/koofr/)
- Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
- Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
- Linkbox [:page_facing_up:](https://rclone.org/linkbox)
- Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode)
- Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu)
- Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
- Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
- MEGA [:page_facing_up:](https://rclone.org/mega/)
- MEGA S4 Object Storage [:page_facing_up:](https://rclone.org/s3/#mega)
- Memory [:page_facing_up:](https://rclone.org/memory/)
- Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
- Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/)
- Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
- Minio [:page_facing_up:](https://rclone.org/s3/#minio)
- Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
- Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
- OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
- OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
- Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
- Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
- Outscale [:page_facing_up:](https://rclone.org/s3/#outscale)
- OVHcloud Object Storage (Swift) [:page_facing_up:](https://rclone.org/swift/)
- OVHcloud Object Storage (S3-compatible) [:page_facing_up:](https://rclone.org/s3/#ovhcloud)
- ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
- pCloud [:page_facing_up:](https://rclone.org/pcloud/)
- Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
- PikPak [:page_facing_up:](https://rclone.org/pikpak/)
- Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/)
- premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
- put.io [:page_facing_up:](https://rclone.org/putio/)
- Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
- QingStor [:page_facing_up:](https://rclone.org/qingstor/)
- Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
- Rabata Cloud Storage [:page_facing_up:](https://rclone.org/s3/#Rabata)
- Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
- Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
- RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
- rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
- Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
- Seafile [:page_facing_up:](https://rclone.org/seafile/)
- Seagate Lyve Cloud [:page_facing_up:](https://rclone.org/s3/#lyve)
- SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
- Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
- Servercore Object Storage [:page_facing_up:](https://rclone.org/s3/#servercore)
- SFTP [:page_facing_up:](https://rclone.org/sftp/)
- Shade [:page_facing_up:](https://rclone.org/shade/)
- SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
- Spectra Logic [:page_facing_up:](https://rclone.org/s3/#spectralogic)
- StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
- Storj [:page_facing_up:](https://rclone.org/storj/)
- SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
- Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
- Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
- Uloz.to [:page_facing_up:](https://rclone.org/ulozto/)
- Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
- WebDAV [:page_facing_up:](https://rclone.org/webdav/)
- Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
- Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
- Zata.ai [:page_facing_up:](https://rclone.org/s3/#Zata)
- The local filesystem [:page_facing_up:](https://rclone.org/local/)
Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
@@ -131,55 +132,50 @@ Please see [the full list of all storage providers and their features](https://r

These backends adapt or modify other storage providers (see the usage sketch after this list):

- Alias: rename existing remotes [:page_facing_up:](https://rclone.org/alias/)
- Archive: read archive files [:page_facing_up:](https://rclone.org/archive/)
- Cache: cache remotes (DEPRECATED) [:page_facing_up:](https://rclone.org/cache/)
- Chunker: split large files [:page_facing_up:](https://rclone.org/chunker/)
- Combine: combine multiple remotes into a directory tree [:page_facing_up:](https://rclone.org/combine/)
- Compress: compress files [:page_facing_up:](https://rclone.org/compress/)
- Crypt: encrypt files [:page_facing_up:](https://rclone.org/crypt/)
- Hasher: hash files [:page_facing_up:](https://rclone.org/hasher/)
- Union: join multiple remotes to work together [:page_facing_up:](https://rclone.org/union/)
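
A minimal usage sketch (the remote names `mydrive:` and `mycrypt:` are made-up
examples, with `mycrypt:` assumed to be a Crypt remote configured to wrap
`mydrive:`) - once configured, an overlay remote is used exactly like any other
remote:

```console
rclone lsf mydrive:    # list the underlying remote directly
rclone lsf mycrypt:    # list the same data through the crypt overlay (names decrypted)
```
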
## Features

- MD5/SHA-1 hashes checked at all times for file integrity
- Timestamps preserved on files
- Partial syncs supported on a whole file basis
- [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed
  files (see the example below)
- [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory
  identical
- [Bisync](https://rclone.org/bisync/) (two way) to keep two directories in sync
  bidirectionally
- [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash
  equality
- Can sync to and from network, e.g. two different cloud accounts
- Optional large file chunking ([Chunker](https://rclone.org/chunker/))
- Optional transparent compression ([Compress](https://rclone.org/compress/))
- Optional encryption ([Crypt](https://rclone.org/crypt/))
- Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
- Multi-threaded downloads to local disk
- Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files
  over HTTP/WebDAV/FTP/SFTP/DLNA
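
As a minimal illustration of the copy, sync and check modes listed above (the
paths and the remote name `remote:` are placeholders):

```console
rclone copy /home/user/photos remote:photos    # copy new/changed files only
rclone sync /home/user/photos remote:photos    # make remote:photos identical to the source
rclone check /home/user/photos remote:photos   # report whether file hashes match
```
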
## Installation & documentation

Please see the [rclone website](https://rclone.org/) for:

- [Installation](https://rclone.org/install/)
- [Documentation & configuration](https://rclone.org/docs/)
- [Changelog](https://rclone.org/changelog/)
- [FAQ](https://rclone.org/faq/)
- [Storage providers](https://rclone.org/overview/)
- [Forum](https://forum.rclone.org/)
- ...and more

## Downloads

- <https://rclone.org/downloads/>

## License

This is free software under the terms of the MIT license (check the
[COPYING file](/COPYING) included in this package).

View File

@@ -4,74 +4,63 @@ This file describes how to make the various kinds of releases
## Extra required software for making a release

- [gh the github cli](https://github.com/cli/cli) for uploading packages
- pandoc for making the html and man pages

## Making a release

- git checkout master # see below for stable branch
- git pull # IMPORTANT
- git status - make sure everything is checked in
- Check GitHub actions build for master is Green
- make test # see integration test server or run locally
- make tag
- edit docs/content/changelog.md # make sure to remove duplicate logs from point
  releases
- make tidy
- make doc
- git status - to check for new man pages - git add them
- git commit -a -v -m "Version v1.XX.0"
- make check
- make retag
- git push origin # without --follow-tags so it doesn't push the tag if it fails
- git push --follow-tags origin
- \# Wait for the GitHub builds to complete then...
- make fetch_binaries
- make tarball
- make vendorball
- make sign_upload
- make check_sign
- make upload
- make upload_website
- make upload_github
- make startdev # make startstable for stable branch
- \# announce with forum post, twitter post, patreon post

## Update dependencies

Early in the next release cycle update the dependencies.

- Review any pinned packages in go.mod and remove if possible
- `make updatedirect`
- `make GOTAGS=cmount`
- `make compiletest`
- Fix anything which doesn't compile at this point and commit changes here
- `git commit -a -v -m "build: update all dependencies"`

If the `make updatedirect` upgrades the version of go in the `go.mod`

```text
go 1.22.0
```

then go to manual mode. `go1.22` here is the lowest supported version
in the `go.mod`.

If `make updatedirect` added a `toolchain` directive then remove it.
We don't want to force a toolchain on our users. Linux packagers are
often using a version of Go that is a few versions out of date.

```console
go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
go get -d $(cat /tmp/potential-upgrades)
go mod tidy -go=1.22 -compat=1.22
```

If the `go mod tidy` fails use the output from it to remove the
package which can't be upgraded from `/tmp/potential-upgrades` when
done

```console
git co go.mod go.sum
```
@@ -81,12 +70,12 @@ Optionally upgrade the direct and indirect dependencies. This is very
likely to fail if the manual method was used above - in that case
ignore it as it is too time consuming to fix.

- `make update`
- `make GOTAGS=cmount`
- `make compiletest`
- roll back any updates which didn't compile
- `git commit -a -v --amend`
- **NB** watch out for this changing the default go version in `go.mod`

Note that `make update` updates all direct and indirect dependencies
and there can occasionally be forwards compatibility problems with
@@ -97,25 +86,11 @@ build.
Once it compiles locally, push it on a test branch and commit fixes
until the tests pass.
### Major versions
The above procedure will not upgrade major versions, e.g. from v2 to v3.
However this tool can show which major versions might need to be
upgraded:
```console
go run github.com/icholy/gomajor@latest list -major
```
Expect API breakage when updating major versions.
## Tidy beta

At some point after the release run

```console
bin/tidy-beta v1.55
```

where the version number is that of a couple of releases ago, to remove old beta binaries.
@@ -125,64 +100,54 @@ If rclone needs a point release due to some horrendous bug:
Set vars (see the worked example after this list):

- BASE_TAG=v1.XX # e.g. v1.52
- NEW_TAG=${BASE_TAG}.Y # e.g. v1.52.1
- echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1
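
For example, a hypothetical first point release of v1.52 would set the
variables above as:

```console
BASE_TAG=v1.52
NEW_TAG=${BASE_TAG}.1
echo $BASE_TAG $NEW_TAG   # prints: v1.52 v1.52.1
```
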
First make the release branch. If this is a second point release then
this will be done already.

- git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
- make startstable

Now

- git co ${BASE_TAG}-stable
- git cherry-pick any fixes
- make startstable
- Do the steps as above
- git co master
- `#` cherry pick the changes to the changelog - check the diff to make sure it
  is correct
- git checkout ${BASE_TAG}-stable docs/content/changelog.md
- git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
- git push
## Sponsor logos

If updating the website note that the sponsor logos have been moved out of the
main repository.

You will need to checkout `/docs/static/img/logos` from <https://github.com/rclone/third-party-logos>
which is a private repo containing artwork from sponsors.

## Update the website between releases

Create an update website branch based off the last release

```console
git co -b update-website
```

If the branch already exists, double check there are no commits that need saving.

Now reset the branch to the last release

```console
git reset --hard v1.64.0
```

Create the changes, check them in, test with `make serve` then

```console
make upload_test_website
```

Check out <https://test.rclone.org> and when happy

```console
make upload_website
```

Cherry pick any changes back to master and the stable branch if it is active.
@@ -190,14 +155,14 @@ Cherry pick any changes back to master and the stable branch if it is active.
To do a basic build of rclone's docker image to debug builds locally:

```console
docker buildx build --load -t rclone/rclone:testing --progress=plain .
docker run --rm rclone/rclone:testing version
```

To test the multiplatform build

```console
docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 .
```
@@ -205,6 +170,6 @@ To make a full build then set the tags correctly and add `--push`
Note that you can't only build one architecture - you need to build them all.

```console
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
```

View File

@@ -1 +1 @@
v1.73.0 v1.69.0

View File

@@ -4,23 +4,19 @@ package all
import (
// Active file systems
_ "github.com/rclone/rclone/backend/alias"
_ "github.com/rclone/rclone/backend/archive"
_ "github.com/rclone/rclone/backend/azureblob"
_ "github.com/rclone/rclone/backend/azurefiles"
_ "github.com/rclone/rclone/backend/b2"
_ "github.com/rclone/rclone/backend/box"
_ "github.com/rclone/rclone/backend/cache"
_ "github.com/rclone/rclone/backend/chunker"
_ "github.com/rclone/rclone/backend/cloudinary"
_ "github.com/rclone/rclone/backend/combine"
_ "github.com/rclone/rclone/backend/compress"
_ "github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/doi"
_ "github.com/rclone/rclone/backend/drive"
_ "github.com/rclone/rclone/backend/dropbox"
_ "github.com/rclone/rclone/backend/fichier"
_ "github.com/rclone/rclone/backend/filefabric"
_ "github.com/rclone/rclone/backend/filelu"
_ "github.com/rclone/rclone/backend/filescom"
_ "github.com/rclone/rclone/backend/ftp"
_ "github.com/rclone/rclone/backend/gofile"
@@ -55,7 +51,6 @@ import (
_ "github.com/rclone/rclone/backend/s3" _ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/seafile" _ "github.com/rclone/rclone/backend/seafile"
_ "github.com/rclone/rclone/backend/sftp" _ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/shade"
_ "github.com/rclone/rclone/backend/sharefile" _ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/sia" _ "github.com/rclone/rclone/backend/sia"
_ "github.com/rclone/rclone/backend/smb" _ "github.com/rclone/rclone/backend/smb"

View File

@@ -1,679 +0,0 @@
//go:build !plan9
// Package archive implements a backend to access archive files in a remote
package archive
// FIXME factor common code between backends out - eg VFS initialization
// FIXME can we generalize the VFS handle caching and use it in zip backend
// Factor more stuff out if possible
// Odd stats which are probably coming from the VFS
// * tensorflow.sqfs: 0% /3.074Gi, 204.426Ki/s, 4h22m46s
// FIXME this will perform poorly for unpacking as the VFS Reader is bad
// at multiple streams - need cache mode setting?
import (
"context"
"errors"
"fmt"
"io"
"path"
"strings"
"sync"
"time"
// Import all the required archivers here
_ "github.com/rclone/rclone/backend/archive/squashfs"
_ "github.com/rclone/rclone/backend/archive/zip"
"github.com/rclone/rclone/backend/archive/archiver"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
)
// Register with Fs
func init() {
fsi := &fs.RegInfo{
Name: "archive",
Description: "Read archives",
NewFs: NewFs,
MetadataInfo: &fs.MetadataInfo{
Help: `Any metadata supported by the underlying remote is read and written.`,
},
Options: []fs.Option{{
Name: "remote",
Help: `Remote to wrap to read archives from.
Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
"myremote:bucket" or "myremote:".
If this is left empty, then the archive backend will use the root as
the remote.
This means that you can use :archive:remote:path and it will be
equivalent to setting remote="remote:path".
`,
Required: false,
}},
}
fs.Register(fsi)
}
// Options defines the configuration for this backend
type Options struct {
Remote string `config:"remote"`
}
// Fs represents an archive of upstreams
type Fs struct {
name string // name of this remote
features *fs.Features // optional features
opt Options // options for this Fs
root string // the path we are working on
f fs.Fs // remote we are wrapping
wrapper fs.Fs // fs that wraps us
mu sync.Mutex // protects the below
archives map[string]*archive // the archives we have, by path
}
// A single open archive
type archive struct {
archiver archiver.Archiver // archiver responsible
remote string // path to the archive
prefix string // prefix to add on to listings
root string // root of the archive to remove from listings
mu sync.Mutex // protects the following variables
f fs.Fs // the archive Fs, may be nil
}
// If remote is an archive then return it otherwise return nil
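//
// An archive is recognised purely by its file name extension matching one of
// the registered archivers (e.g. ".zip" or ".sqfs").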
func findArchive(remote string) *archive {
// FIXME use something faster than linear search?
for _, archiver := range archiver.Archivers {
if strings.HasSuffix(remote, archiver.Extension) {
return &archive{
archiver: archiver,
remote: remote,
prefix: remote,
root: "",
}
}
}
return nil
}
// Find an archive buried in remote
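//
// This checks remote and then each parent directory in turn, returning the
// first path that ends in a registered archive extension, or nil if there is
// none.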
func subArchive(remote string) *archive {
archive := findArchive(remote)
if archive != nil {
return archive
}
parent := path.Dir(remote)
if parent == "/" || parent == "." {
return nil
}
return subArchive(parent)
}
// If remote is an archive then return it otherwise return nil
func (f *Fs) findArchive(remote string) (archive *archive) {
archive = findArchive(remote)
if archive != nil {
f.mu.Lock()
f.archives[remote] = archive
f.mu.Unlock()
}
return archive
}
// Instantiate archive if it hasn't been instantiated yet
//
// This is done lazily so that we can list a directory full of
// archives without opening them all.
func (a *archive) init(ctx context.Context, f fs.Fs) (fs.Fs, error) {
a.mu.Lock()
defer a.mu.Unlock()
if a.f != nil {
return a.f, nil
}
newFs, err := a.archiver.New(ctx, f, a.remote, a.prefix, a.root)
if err != nil && err != fs.ErrorIsFile {
return nil, fmt.Errorf("failed to create archive %q: %w", a.remote, err)
}
a.f = newFs
return a.f, nil
}
// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs.Fs, err error) {
// defer log.Trace(nil, "name=%q, root=%q, m=%v", name, root, m)("f=%+v, err=%v", &outFs, &err)
// Parse config into Options struct
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
remote := opt.Remote
origRoot := root
// If remote is empty, use the root instead
if remote == "" {
remote = root
root = ""
}
isDirectory := strings.HasSuffix(remote, "/")
remote = strings.TrimRight(remote, "/")
if remote == "" {
remote = "/"
}
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point archive remote at itself - check the value of the upstreams setting")
}
_ = isDirectory
foundArchive := subArchive(remote)
if foundArchive != nil {
fs.Debugf(nil, "Found archiver for %q remote %q", foundArchive.archiver.Extension, foundArchive.remote)
// Archive path
foundArchive.root = strings.Trim(remote[len(foundArchive.remote):], "/")
// Path to the archive
archiveRemote := remote[:len(foundArchive.remote)]
// Remote is archive leaf name
foundArchive.remote = path.Base(archiveRemote)
foundArchive.prefix = ""
// Point remote to archive file
remote = archiveRemote
}
// Make sure to remove trailing . referring to the current dir
if path.Base(root) == "." {
root = strings.TrimSuffix(root, ".")
}
remotePath := fspath.JoinRootPath(remote, root)
wrappedFs, err := cache.Get(ctx, remotePath)
if err != fs.ErrorIsFile && err != nil {
return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remote, err)
}
f := &Fs{
name: name,
//root: path.Join(remotePath, root),
root: origRoot,
opt: *opt,
f: wrappedFs,
archives: make(map[string]*archive),
}
cache.PinUntilFinalized(f.f, f)
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
f.features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: false,
ReadMimeType: true,
WriteMimeType: true,
CanHaveEmptyDirectories: true,
BucketBased: true,
SetTier: true,
GetTier: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
if foundArchive != nil {
fs.Debugf(f, "Root is an archive")
if err != fs.ErrorIsFile {
return nil, fmt.Errorf("expecting to find a file at %q", remote)
}
return foundArchive.init(ctx, f.f)
}
// Correct root if definitely pointing to a file
if err == fs.ErrorIsFile {
f.root = path.Dir(f.root)
if f.root == "." || f.root == "/" {
f.root = ""
}
}
return f, err
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("archive root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.f.Rmdir(ctx, dir)
}
// Hashes returns hash.HashNone to indicate remote hashing is unavailable
func (f *Fs) Hashes() hash.Set {
return f.f.Hashes()
}
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return f.f.Mkdir(ctx, dir)
}
// Purge all files in the directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
do := f.f.Features().Purge
if do == nil {
return fs.ErrorCantPurge
}
return do(ctx, dir)
}
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
do := f.f.Features().Copy
if do == nil {
return nil, fs.ErrorCantCopy
}
// FIXME
// o, ok := src.(*Object)
// if !ok {
// return nil, fs.ErrorCantCopy
// }
return do(ctx, src, remote)
}
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
do := f.f.Features().Move
if do == nil {
return nil, fs.ErrorCantMove
}
// FIXME
// o, ok := src.(*Object)
// if !ok {
// return nil, fs.ErrorCantMove
// }
return do(ctx, src, remote)
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
do := f.f.Features().DirMove
if do == nil {
return fs.ErrorCantDirMove
}
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
return do(ctx, srcFs.f, srcRemote, dstRemote)
}
// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
// At least one value will be written to the channel,
// specifying the initial value and updated values might
// follow. A 0 Duration should pause the polling.
// The ChangeNotify implementation must empty the channel
// regularly. When the channel gets closed, the implementation
// should stop polling and release resources.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), ch <-chan time.Duration) {
do := f.f.Features().ChangeNotify
if do == nil {
return
}
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
// fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
notifyFunc(path, entryType)
}
do(ctx, wrappedNotifyFunc, ch)
}
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
do := f.f.Features().DirCacheFlush
if do != nil {
do()
}
}
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) {
var o fs.Object
var err error
if stream {
o, err = f.f.Features().PutStream(ctx, in, src, options...)
} else {
o, err = f.f.Put(ctx, in, src, options...)
}
if err != nil {
return nil, err
}
return o, nil
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
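//
// If the object already exists it is updated in place, otherwise a new
// object is created on the wrapped remote.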
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return o, o.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
return f.put(ctx, in, src, false, options...)
default:
return nil, err
}
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return o, o.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
return f.put(ctx, in, src, true, options...)
default:
return nil, err
}
}
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
do := f.f.Features().About
if do == nil {
return nil, errors.New("not supported by underlying remote")
}
return do(ctx)
}
// Find the Fs for the directory
func (f *Fs) findFs(ctx context.Context, dir string) (subFs fs.Fs, err error) {
f.mu.Lock()
defer f.mu.Unlock()
subFs = f.f
// FIXME should do this with a better datastructure like a prefix tree
// FIXME want to find the longest first otherwise nesting won't work
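// Compare with trailing slashes so that we only match whole path components
// (including the archive root itself).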
dirSlash := dir + "/"
for archiverRemote, archive := range f.archives {
subRemote := archiverRemote + "/"
if strings.HasPrefix(dirSlash, subRemote) {
subFs, err = archive.init(ctx, f.f)
if err != nil {
return nil, err
}
break
}
}
return subFs, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
subFs, err := f.findFs(ctx, dir)
if err != nil {
return nil, err
}
entries, err = subFs.List(ctx, dir)
if err != nil {
return nil, err
}
for i, entry := range entries {
// Can only unarchive files
if o, ok := entry.(fs.Object); ok {
remote := o.Remote()
archive := f.findArchive(remote)
if archive != nil {
// Overwrite entry with directory
entries[i] = fs.NewDir(remote, o.ModTime(ctx))
}
}
}
return entries, nil
}
// NewObject creates a new remote archive file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
dir := path.Dir(remote)
if dir == "/" || dir == "." {
dir = ""
}
subFs, err := f.findFs(ctx, dir)
if err != nil {
return nil, err
}
o, err := subFs.NewObject(ctx, remote)
if err != nil {
return nil, err
}
return o, nil
}
// Precision is the greatest precision of all the archivers
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
if do := f.f.Features().Shutdown; do != nil {
return do(ctx)
}
return nil
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
do := f.f.Features().PublicLink
if do == nil {
return "", errors.New("PublicLink not supported")
}
return do(ctx, remote, expire, unlink)
}
// PutUnchecked in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
//
// May create duplicates or return errors if src already
// exists.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
do := f.f.Features().PutUnchecked
if do == nil {
return nil, errors.New("can't PutUnchecked")
}
o, err := do(ctx, in, src, options...)
if err != nil {
return nil, err
}
return o, nil
}
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
if len(dirs) == 0 {
return nil
}
do := f.f.Features().MergeDirs
if do == nil {
return errors.New("MergeDirs not supported")
}
return do(ctx, dirs)
}
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
func (f *Fs) CleanUp(ctx context.Context) error {
do := f.f.Features().CleanUp
if do == nil {
return errors.New("not supported by underlying remote")
}
return do(ctx)
}
// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
do := f.f.Features().OpenWriterAt
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx, remote, size)
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.f
}
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
return f.wrapper
}
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
f.wrapper = wrapper
}
// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
do := f.f.Features().OpenChunkWriter
if do == nil {
return info, nil, fs.ErrorNotImplemented
}
return do(ctx, remote, src, options...)
}
// UserInfo returns info about the connected user
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
do := f.f.Features().UserInfo
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx)
}
// Disconnect the current user
func (f *Fs) Disconnect(ctx context.Context) error {
do := f.f.Features().Disconnect
if do == nil {
return fs.ErrorNotImplemented
}
return do(ctx)
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.OpenWriterAter = (*Fs)(nil)
_ fs.OpenChunkWriter = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
// FIXME _ fs.FullObject = (*Object)(nil)
)

View File

@@ -1,221 +0,0 @@
//go:build !plan9
package archive
import (
"bytes"
"context"
"fmt"
"os"
"os/exec"
"path"
"path/filepath"
"strconv"
"strings"
"testing"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// FIXME need to test Open with seek
// run - run a shell command
func run(t *testing.T, args ...string) {
cmd := exec.Command(args[0], args[1:]...)
fs.Debugf(nil, "run args = %v", args)
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf(`
----------------------------
Failed to run %v: %v
Command output was:
%s
----------------------------
`, args, err, out)
}
}
// check the dst and src are identical
func checkTree(ctx context.Context, name string, t *testing.T, dstArchive, src string, expectedCount int) {
t.Run(name, func(t *testing.T) {
fs.Debugf(nil, "check %q vs %q", dstArchive, src)
Farchive, err := cache.Get(ctx, dstArchive)
if err != fs.ErrorIsFile {
require.NoError(t, err)
}
Fsrc, err := cache.Get(ctx, src)
if err != fs.ErrorIsFile {
require.NoError(t, err)
}
var matches bytes.Buffer
opt := operations.CheckOpt{
Fdst: Farchive,
Fsrc: Fsrc,
Match: &matches,
}
for _, action := range []string{"Check", "Download"} {
t.Run(action, func(t *testing.T) {
matches.Reset()
if action == "Download" {
assert.NoError(t, operations.CheckDownload(ctx, &opt))
} else {
assert.NoError(t, operations.Check(ctx, &opt))
}
if expectedCount > 0 {
assert.Equal(t, expectedCount, strings.Count(matches.String(), "\n"))
}
})
}
t.Run("NewObject", func(t *testing.T) {
// Check we can run NewObject on all files and read them
assert.NoError(t, operations.ListFn(ctx, Fsrc, func(srcObj fs.Object) {
if t.Failed() {
return
}
remote := srcObj.Remote()
archiveObj, err := Farchive.NewObject(ctx, remote)
require.NoError(t, err, remote)
assert.Equal(t, remote, archiveObj.Remote(), remote)
// Test that the contents are the same
archiveBuf := fstests.ReadObject(ctx, t, archiveObj, -1)
srcBuf := fstests.ReadObject(ctx, t, srcObj, -1)
assert.Equal(t, srcBuf, archiveBuf)
if len(srcBuf) < 81 {
return
}
// Tests that Open works with SeekOption
assert.Equal(t, srcBuf[50:], fstests.ReadObject(ctx, t, archiveObj, -1, &fs.SeekOption{Offset: 50}), "contents differ after seek")
// Tests that Open works with RangeOption
for _, test := range []struct {
ro fs.RangeOption
wantStart, wantEnd int
}{
{fs.RangeOption{Start: 5, End: 15}, 5, 16},
{fs.RangeOption{Start: 80, End: -1}, 80, len(srcBuf)},
{fs.RangeOption{Start: 81, End: 100000}, 81, len(srcBuf)},
{fs.RangeOption{Start: -1, End: 20}, len(srcBuf) - 20, len(srcBuf)}, // if start is omitted this means get the final bytes
// {fs.RangeOption{Start: -1, End: -1}, 0, len(srcBuf)}, - this seems to work but the RFC doesn't define it
} {
got := fstests.ReadObject(ctx, t, archiveObj, -1, &test.ro)
foundAt := strings.Index(srcBuf, got)
help := fmt.Sprintf("%#v failed want [%d:%d] got [%d:%d]", test.ro, test.wantStart, test.wantEnd, foundAt, foundAt+len(got))
assert.Equal(t, srcBuf[test.wantStart:test.wantEnd], got, help)
}
// Test that the modtimes are correct
fstest.AssertTimeEqualWithPrecision(t, remote, srcObj.ModTime(ctx), archiveObj.ModTime(ctx), Farchive.Precision())
// Test that the sizes are correct
assert.Equal(t, srcObj.Size(), archiveObj.Size())
// Test that Strings are OK
assert.Equal(t, srcObj.String(), archiveObj.String())
}))
})
// t.Logf("Fdst ------------- %v", Fdst)
// operations.List(ctx, Fdst, os.Stdout)
// t.Logf("Fsrc ------------- %v", Fsrc)
// operations.List(ctx, Fsrc, os.Stdout)
})
}
// test creating and reading back some archives
//
// Note that this uses rclone and zip as external binaries.
func testArchive(t *testing.T, archiveName string, archiveFn func(t *testing.T, output, input string)) {
ctx := context.Background()
checkFiles := 1000
// create random test input files
inputRoot := t.TempDir()
input := filepath.Join(inputRoot, archiveName)
require.NoError(t, os.Mkdir(input, 0777))
run(t, "rclone", "test", "makefiles", "--files", strconv.Itoa(checkFiles), "--ascii", input)
// Create the archive
output := t.TempDir()
zipFile := path.Join(output, archiveName)
archiveFn(t, zipFile, input)
// Check the archive itself
checkTree(ctx, "Archive", t, ":archive:"+zipFile, input, checkFiles)
// Now check a subdirectory
fis, err := os.ReadDir(input)
require.NoError(t, err)
subDir := "NOT FOUND"
aFile := "NOT FOUND"
for _, fi := range fis {
if fi.IsDir() {
subDir = fi.Name()
} else {
aFile = fi.Name()
}
}
checkTree(ctx, "SubDir", t, ":archive:"+zipFile+"/"+subDir, filepath.Join(input, subDir), 0)
// Now check a single file
fiCtx, fi := filter.AddConfig(ctx)
require.NoError(t, fi.AddRule("+ "+aFile))
require.NoError(t, fi.AddRule("- *"))
checkTree(fiCtx, "SingleFile", t, ":archive:"+zipFile+"/"+aFile, filepath.Join(input, aFile), 0)
// Now check the level above
checkTree(ctx, "Root", t, ":archive:"+output, inputRoot, checkFiles)
// run(t, "cp", "-a", inputRoot, output, "/tmp/test-"+archiveName)
}
// Make sure we have the executable named
func skipIfNoExe(t *testing.T, exeName string) {
_, err := exec.LookPath(exeName)
if err != nil {
t.Skipf("%s executable not installed", exeName)
}
}
// Test creating and reading back some archives
//
// Note that this uses rclone and zip as external binaries.
func TestArchiveZip(t *testing.T) {
fstest.Initialise()
skipIfNoExe(t, "zip")
skipIfNoExe(t, "rclone")
testArchive(t, "test.zip", func(t *testing.T, output, input string) {
oldcwd, err := os.Getwd()
require.NoError(t, err)
require.NoError(t, os.Chdir(input))
defer func() {
require.NoError(t, os.Chdir(oldcwd))
}()
run(t, "zip", "-9r", output, ".")
})
}
// Test creating and reading back some archives
//
// Note that this uses rclone and squashfs as external binaries.
func TestArchiveSquashfs(t *testing.T) {
fstest.Initialise()
skipIfNoExe(t, "mksquashfs")
skipIfNoExe(t, "rclone")
testArchive(t, "test.sqfs", func(t *testing.T, output, input string) {
run(t, "mksquashfs", input, output)
})
}

View File

@@ -1,67 +0,0 @@
//go:build !plan9
// Test Archive filesystem interface
package archive_test
import (
"testing"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/memory"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
var (
unimplementableFsMethods = []string{"ListR", "ListP", "MkdirMetadata", "DirSetModTime"}
// In these tests we receive objects from the underlying remote which don't implement these methods
unimplementableObjectMethods = []string{"GetTier", "ID", "Metadata", "MimeType", "SetTier", "UnWrap", "SetMetadata"}
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
t.Skip("Skipping as -remote not set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}
func TestLocal(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
remote := t.TempDir()
name := "TestArchiveLocal"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "archive"},
{Name: name, Key: "remote", Value: remote},
},
QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}
func TestMemory(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
remote := ":memory:"
name := "TestArchiveMemory"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "archive"},
{Name: name, Key: "remote", Value: remote},
},
QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}

View File

@@ -1,7 +0,0 @@
// Build for archive for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9
// Package archive implements a backend to access archive files in a remote
package archive

View File

@@ -1,24 +0,0 @@
// Package archiver registers all the archivers
package archiver
import (
"context"
"github.com/rclone/rclone/fs"
)
// Archiver describes an archive package
type Archiver struct {
// New constructs an Fs from the (wrappedFs, remote) with the objects
// prefix with prefix and rooted at root
New func(ctx context.Context, f fs.Fs, remote, prefix, root string) (fs.Fs, error)
Extension string
}
// Archivers is a slice of all registered archivers
var Archivers []Archiver
// Register adds the archivers provided to the list of known archivers
func Register(as ...Archiver) {
Archivers = append(Archivers, as...)
}

View File

@@ -1,233 +0,0 @@
// Package base is a base archive Fs
package base
import (
"context"
"errors"
"fmt"
"io"
"path"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/vfs"
)
// Fs represents a wrapped fs.Fs
type Fs struct {
f fs.Fs
wrapper fs.Fs
name string
features *fs.Features // optional features
vfs *vfs.VFS
node vfs.Node // archive object
remote string // remote of the archive object
prefix string // position for objects
prefixSlash string // position for objects with a slash on
root string // position to read from within the archive
}
var errNotImplemented = errors.New("internal error: method not implemented in archiver")
// New constructs an Fs from the (wrappedFs, remote) with the objects
// prefix with prefix and rooted at root
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (*Fs, error) {
// FIXME vfs cache?
// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
fs.Debugf(nil, "New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
VFS := vfs.New(wrappedFs, nil)
node, err := VFS.Stat(remote)
if err != nil {
return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
}
f := &Fs{
f: wrappedFs,
name: path.Join(fs.ConfigString(wrappedFs), remote),
vfs: VFS,
node: node,
remote: remote,
root: root,
prefix: prefix,
prefixSlash: prefix + "/",
}
// FIXME
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
//
// FIXME some of these need to be forced on - CanHaveEmptyDirectories
f.features = (&fs.Features{
CaseInsensitive: false,
DuplicateFiles: false,
ReadMimeType: false, // MimeTypes not supported with gzip
WriteMimeType: false,
BucketBased: false,
CanHaveEmptyDirectories: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
return f, nil
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// String returns a description of the FS
func (f *Fs) String() string {
return f.name
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return nil, errNotImplemented
}
// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
return nil, errNotImplemented
}
// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
return nil, vfs.EROFS
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None)
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.f
}
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
return f.wrapper
}
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
f.wrapper = wrapper
}
// Object describes an object to be read from the raw zip file
type Object struct {
f *Fs
remote string
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.f
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Remote()
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Size returns the size of the file
func (o *Object) Size() int64 {
return -1
}
// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
return time.Now()
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return vfs.EROFS
}
// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
return true
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
return nil, errNotImplemented
}
// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return vfs.EROFS
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return vfs.EROFS
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
)

View File

@@ -1,165 +0,0 @@
package squashfs
// Could just be using bare object Open with RangeRequest which
// would transfer the minimum amount of data but may be slower.
import (
"errors"
"fmt"
"io/fs"
"os"
"sync"
"github.com/diskfs/go-diskfs/backend"
"github.com/rclone/rclone/vfs"
)
// Cache file handles for accessing the file
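//
// Handles are returned to the pool tagged with the offset they would naturally
// read from next. open() prefers a handle whose tagged offset matches the
// requested one, so sequential ReadAt calls can usually reuse a handle without
// forcing a seek in the underlying VFS file.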
type cache struct {
node vfs.Node
fhsMu sync.Mutex
fhs []cacheHandle
}
// A cached file handle
type cacheHandle struct {
offset int64
fh vfs.Handle
}
// Make a new cache
func newCache(node vfs.Node) *cache {
return &cache{
node: node,
}
}
// Get a vfs.Handle from the pool or open one
//
// This tries to find an open file handle which doesn't require seeking.
func (c *cache) open(off int64) (fh vfs.Handle, err error) {
c.fhsMu.Lock()
defer c.fhsMu.Unlock()
if len(c.fhs) > 0 {
// Look for exact match first
for i, cfh := range c.fhs {
if cfh.offset == off {
// fs.Debugf(nil, "CACHE MATCH")
c.fhs = append(c.fhs[:i], c.fhs[i+1:]...)
return cfh.fh, nil
}
}
// fs.Debugf(nil, "CACHE MISS")
// Just take the first one if not found
cfh := c.fhs[0]
c.fhs = c.fhs[1:]
return cfh.fh, nil
}
fh, err = c.node.Open(os.O_RDONLY)
if err != nil {
return nil, fmt.Errorf("failed to open squashfs archive: %w", err)
}
return fh, nil
}
// Close a vfs.Handle or return it to the pool
//
// off should be the offset the file handle would read from without seeking
func (c *cache) close(fh vfs.Handle, off int64) {
c.fhsMu.Lock()
defer c.fhsMu.Unlock()
c.fhs = append(c.fhs, cacheHandle{
offset: off,
fh: fh,
})
}
// ReadAt reads len(p) bytes into p starting at offset off in the underlying
// input source. It returns the number of bytes read (0 <= n <= len(p)) and any
// error encountered.
//
// When ReadAt returns n < len(p), it returns a non-nil error explaining why
// more bytes were not returned. In this respect, ReadAt is stricter than Read.
//
// Even if ReadAt returns n < len(p), it may use all of p as scratch
// space during the call. If some data is available but not len(p) bytes,
// ReadAt blocks until either all the data is available or an error occurs.
// In this respect ReadAt is different from Read.
//
// If the n = len(p) bytes returned by ReadAt are at the end of the input
// source, ReadAt may return either err == EOF or err == nil.
//
// If ReadAt is reading from an input source with a seek offset, ReadAt should
// not affect nor be affected by the underlying seek offset.
//
// Clients of ReadAt can execute parallel ReadAt calls on the same input
// source.
//
// Implementations must not retain p.
func (c *cache) ReadAt(p []byte, off int64) (n int, err error) {
fh, err := c.open(off)
if err != nil {
return n, err
}
defer func() {
c.close(fh, off+int64(len(p)))
}()
// fs.Debugf(nil, "ReadAt(p[%d], off=%d, fh=%p)", len(p), off, fh)
return fh.ReadAt(p, off)
}
var errCacheNotImplemented = errors.New("internal error: squashfs cache doesn't implement method")
// WriteAt method dummy stub to satisfy interface
func (c *cache) WriteAt(p []byte, off int64) (n int, err error) {
return 0, errCacheNotImplemented
}
// Seek method dummy stub to satisfy interface
func (c *cache) Seek(offset int64, whence int) (int64, error) {
return 0, errCacheNotImplemented
}
// Read method dummy stub to satisfy interface
func (c *cache) Read(p []byte) (n int, err error) {
return 0, errCacheNotImplemented
}
func (c *cache) Stat() (fs.FileInfo, error) {
return nil, errCacheNotImplemented
}
// Close the file
func (c *cache) Close() (err error) {
c.fhsMu.Lock()
defer c.fhsMu.Unlock()
// Close any open file handles
for i := range c.fhs {
fh := &c.fhs[i]
newErr := fh.fh.Close()
if err == nil {
err = newErr
}
}
c.fhs = nil
return err
}
// Sys returns OS-specific file for ioctl calls via fd
func (c *cache) Sys() (*os.File, error) {
return nil, errCacheNotImplemented
}
// Writable returns file for read-write operations
func (c *cache) Writable() (backend.WritableFile, error) {
return nil, errCacheNotImplemented
}
// check interfaces
var _ backend.Storage = (*cache)(nil)

View File

@@ -1,446 +0,0 @@
// Package squashfs implements a squashfs archiver for the archive backend
package squashfs
import (
"context"
"fmt"
"io"
"path"
"strings"
"time"
"github.com/diskfs/go-diskfs/filesystem/squashfs"
"github.com/rclone/rclone/backend/archive/archiver"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
)
func init() {
archiver.Register(archiver.Archiver{
New: New,
Extension: ".sqfs",
})
}
// Fs represents a wrapped fs.Fs
type Fs struct {
f fs.Fs
wrapper fs.Fs
name string
features *fs.Features // optional features
vfs *vfs.VFS
sqfs *squashfs.FileSystem // interface to the squashfs
c *cache
node vfs.Node // squashfs file object - set if reading
remote string // remote of the squashfs file object
prefix string // position for objects
prefixSlash string // position for objects with a slash on
root string // position to read from within the archive
}
// New constructs an Fs from the (wrappedFs, remote) with the objects
// prefix with prefix and rooted at root
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) {
// FIXME vfs cache?
// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
fs.Debugf(nil, "Squashfs: New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
vfsOpt := vfscommon.Opt
vfsOpt.ReadWait = 0
VFS := vfs.New(wrappedFs, &vfsOpt)
node, err := VFS.Stat(remote)
if err != nil {
return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
}
c := newCache(node)
// FIXME blocksize
sqfs, err := squashfs.Read(c, node.Size(), 0, 1024*1024)
if err != nil {
return nil, fmt.Errorf("failed to read squashfs: %w", err)
}
f := &Fs{
f: wrappedFs,
name: path.Join(fs.ConfigString(wrappedFs), remote),
vfs: VFS,
node: node,
sqfs: sqfs,
c: c,
remote: remote,
root: strings.Trim(root, "/"),
prefix: prefix,
prefixSlash: prefix + "/",
}
if prefix == "" {
f.prefixSlash = ""
}
singleObject := false
// Find the directory the root points to
if f.root != "" && !strings.HasSuffix(root, "/") {
native, err := f.toNative("")
if err == nil {
native = strings.TrimRight(native, "/")
_, err := f.newObjectNative(native)
if err == nil {
// If it pointed to a file, find the directory above
f.root = path.Dir(f.root)
if f.root == "." || f.root == "/" {
f.root = ""
}
}
}
}
// FIXME
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
//
// FIXME some of these need to be forced on - CanHaveEmptyDirectories
f.features = (&fs.Features{
CaseInsensitive: false,
DuplicateFiles: false,
ReadMimeType: false, // MimeTypes not supported with squashfs
WriteMimeType: false,
BucketBased: false,
CanHaveEmptyDirectories: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
if singleObject {
return f, fs.ErrorIsFile
}
return f, nil
}
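// Editor's illustrative sketch (hypothetical values, not part of the original
// source): the archive backend would invoke the registered constructor roughly
// like this:
//
//	f, err := New(ctx, wrappedFs, "backup.sqfs", "backup.sqfs", "")
//	if err != nil && err != fs.ErrorIsFile {
//		return nil, err
//	}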
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("Squashfs %q", f.name)
}
// This turns a remote into a native path in the squashfs starting with a /
func (f *Fs) toNative(remote string) (string, error) {
native := strings.Trim(remote, "/")
if f.prefix == "" {
native = "/" + native
} else if native == f.prefix {
native = "/"
} else if !strings.HasPrefix(native, f.prefixSlash) {
return "", fmt.Errorf("internal error: %q doesn't start with prefix %q", native, f.prefixSlash)
} else {
native = native[len(f.prefix):]
}
if f.root != "" {
native = "/" + f.root + native
}
return native, nil
}
// Turn a (nativeDir, leaf) into a remote
func (f *Fs) fromNative(nativeDir string, leaf string) string {
// fs.Debugf(nil, "nativeDir = %q, leaf = %q, root=%q", nativeDir, leaf, f.root)
dir := nativeDir
if f.root != "" {
dir = strings.TrimPrefix(dir, "/"+f.root)
}
remote := f.prefixSlash + strings.Trim(path.Join(dir, leaf), "/")
// fs.Debugf(nil, "dir = %q, remote=%q", dir, remote)
return remote
}
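// Illustrative example (editor's note, values are hypothetical): with
// prefix = "backup.sqfs" and root = "sub"
//
//	toNative("backup.sqfs/dir/file") returns "/sub/dir/file"
//	fromNative("/sub/dir", "file")   returns "backup.sqfs/dir/file"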
// Convert a FileInfo into an Object from native dir
func (f *Fs) objectFromFileInfo(nativeDir string, item squashfs.FileStat) *Object {
return &Object{
fs: f,
remote: f.fromNative(nativeDir, item.Name()),
size: item.Size(),
modTime: item.ModTime(),
item: item,
}
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
defer log.Trace(f, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
nativeDir, err := f.toNative(dir)
if err != nil {
return nil, err
}
items, err := f.sqfs.ReadDir(nativeDir)
if err != nil {
return nil, fmt.Errorf("read squashfs: couldn't read directory: %w", err)
}
entries = make(fs.DirEntries, 0, len(items))
for _, fi := range items {
item, ok := fi.(squashfs.FileStat)
if !ok {
return nil, fmt.Errorf("internal error: unexpected type for %q: %T", fi.Name(), fi)
}
// fs.Debugf(item.Name(), "entry = %#v", item)
var entry fs.DirEntry
if item.IsDir() {
var remote = f.fromNative(nativeDir, item.Name())
entry = fs.NewDir(remote, item.ModTime())
} else {
if item.Mode().IsRegular() {
entry = f.objectFromFileInfo(nativeDir, item)
} else {
fs.Debugf(item.Name(), "FIXME Not regular file - skipping")
continue
}
}
entries = append(entries, entry)
}
// fs.Debugf(f, "dir=%q, entries=%v", dir, entries)
return entries, nil
}
// newObjectNative finds the object at the native path passed in
func (f *Fs) newObjectNative(nativePath string) (o fs.Object, err error) {
// get the path and filename
dir, leaf := path.Split(nativePath)
dir = strings.TrimRight(dir, "/")
leaf = strings.Trim(leaf, "/")
// FIXME need to detect directory not found
fis, err := f.sqfs.ReadDir(dir)
if err != nil {
return nil, fs.ErrorObjectNotFound
}
for _, fi := range fis {
if fi.Name() == leaf {
if fi.IsDir() {
return nil, fs.ErrorNotAFile
}
item, ok := fi.(squashfs.FileStat)
if !ok {
return nil, fmt.Errorf("internal error: unexpected type for %q: %T", fi.Name(), fi)
}
o = f.objectFromFileInfo(dir, item)
break
}
}
if o == nil {
return nil, fs.ErrorObjectNotFound
}
return o, nil
}
// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
defer log.Trace(f, "remote=%q", remote)("obj=%v, err=%v", &o, &err)
nativePath, err := f.toNative(remote)
if err != nil {
return nil, err
}
return f.newObjectNative(nativePath)
}
// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
return nil, vfs.EROFS
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None)
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.f
}
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
return f.wrapper
}
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
f.wrapper = wrapper
}
// Object describes an object to be read from the raw squashfs file
type Object struct {
fs *Fs
remote string
size int64
modTime time.Time
item squashfs.FileStat
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Remote()
}
// Turn a squashfs path into a full path for the parent Fs
// func (o *Object) path(remote string) string {
// return path.Join(o.fs.prefix, remote)
// }
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Size returns the size of the file
func (o *Object) Size() int64 {
return o.size
}
// ModTime returns the modification time of the object
//
// It is read from the file's metadata in the squashfs archive.
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return vfs.EROFS
}
// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
return true
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
remote, err := o.fs.toNative(o.remote)
if err != nil {
return nil, err
}
fs.Debugf(o, "Opening %q", remote)
//fh, err := o.fs.sqfs.OpenFile(remote, os.O_RDONLY)
fh, err := o.item.Open()
if err != nil {
return nil, err
}
// discard data from start as necessary
if offset > 0 {
_, err = fh.Seek(offset, io.SeekStart)
if err != nil {
return nil, err
}
}
// If limited then don't return everything
if limit >= 0 {
fs.Debugf(nil, "limit=%d, offset=%d, options=%v", limit, offset, options)
return readers.NewLimitedReadCloser(fh, limit), nil
}
return fh, nil
}
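// Editor's illustrative sketch (not part of the original source): reading the
// first 1 KiB of an archived object, assuming obj is an fs.Object from this
// backend:
//
//	rc, err := obj.Open(ctx, &fs.RangeOption{Start: 0, End: 1023})
//	if err != nil {
//		return err
//	}
//	defer func() { _ = rc.Close() }()
//	data, err := io.ReadAll(rc)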
// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return vfs.EROFS
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return vfs.EROFS
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
)


@@ -1,385 +0,0 @@
// Package zip implements a zip archiver for the archive backend
package zip
import (
"archive/zip"
"context"
"errors"
"fmt"
"io"
"os"
"path"
"strings"
"time"
"github.com/rclone/rclone/backend/archive/archiver"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/dirtree"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
)
func init() {
archiver.Register(archiver.Archiver{
New: New,
Extension: ".zip",
})
}
// Fs represents a wrapped fs.Fs
type Fs struct {
f fs.Fs
wrapper fs.Fs
name string
features *fs.Features // optional features
vfs *vfs.VFS
node vfs.Node // zip file object - set if reading
remote string // remote of the zip file object
prefix string // position for objects
prefixSlash string // position for objects with a slash on
root string // position to read from within the archive
dt dirtree.DirTree // read from zipfile
}
// New constructs an Fs from the (wrappedFs, remote) with the objects
// prefixed with prefix and rooted at root
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) {
// FIXME vfs cache?
// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
fs.Debugf(nil, "Zip: New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
vfsOpt := vfscommon.Opt
vfsOpt.ReadWait = 0
VFS := vfs.New(wrappedFs, &vfsOpt)
node, err := VFS.Stat(remote)
if err != nil {
return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
}
f := &Fs{
f: wrappedFs,
name: path.Join(fs.ConfigString(wrappedFs), remote),
vfs: VFS,
node: node,
remote: remote,
root: root,
prefix: prefix,
prefixSlash: prefix + "/",
}
// Read the contents of the zip file
singleObject, err := f.readZip()
if err != nil {
return nil, fmt.Errorf("failed to open zip file: %w", err)
}
// FIXME
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
//
// FIXME some of these need to be forced on - CanHaveEmptyDirectories
f.features = (&fs.Features{
CaseInsensitive: false,
DuplicateFiles: false,
ReadMimeType: false, // MimeTypes not supported with zip
WriteMimeType: false,
BucketBased: false,
CanHaveEmptyDirectories: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
if singleObject {
return f, fs.ErrorIsFile
}
return f, nil
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("Zip %q", f.name)
}
// readZip the zip file into f
//
// Returns singleObject=true if f.root points to a file
func (f *Fs) readZip() (singleObject bool, err error) {
if f.node == nil {
return singleObject, fs.ErrorDirNotFound
}
size := f.node.Size()
if size < 0 {
return singleObject, errors.New("can't read from zip file with unknown size")
}
r, err := f.node.Open(os.O_RDONLY)
if err != nil {
return singleObject, fmt.Errorf("failed to open zip file: %w", err)
}
zr, err := zip.NewReader(r, size)
if err != nil {
return singleObject, fmt.Errorf("failed to read zip file: %w", err)
}
dt := dirtree.New()
for _, file := range zr.File {
remote := strings.Trim(path.Clean(file.Name), "/")
if remote == "." {
remote = ""
}
remote = path.Join(f.prefix, remote)
if f.root != "" {
// Ignore all files outside the root
if !strings.HasPrefix(remote, f.root) {
continue
}
if remote == f.root {
remote = ""
} else {
remote = strings.TrimPrefix(remote, f.root+"/")
}
}
if strings.HasSuffix(file.Name, "/") {
dir := fs.NewDir(remote, file.Modified)
dt.AddDir(dir)
} else {
if remote == "" {
remote = path.Base(f.root)
singleObject = true
dt = dirtree.New()
}
o := &Object{
f: f,
remote: remote,
fh: &file.FileHeader,
file: file,
}
dt.Add(o)
if singleObject {
break
}
}
}
dt.CheckParents("")
dt.Sort()
f.dt = dt
//fs.Debugf(nil, "dt = %v", dt)
return singleObject, nil
}
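// Illustrative example (editor's note, names are hypothetical): for an archive
// at prefix "docs.zip" containing "a/b.txt", readZip records the entry under
// remote "docs.zip/a/b.txt"; with root "docs.zip/a" it becomes "b.txt", and if
// root points at "docs.zip/a/b.txt" itself only that object is recorded and
// singleObject is returned as true.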
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
defer log.Trace(f, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
// _, err = f.strip(dir)
// if err != nil {
// return nil, err
// }
entries, ok := f.dt[dir]
if !ok {
return nil, fs.ErrorDirNotFound
}
fs.Debugf(f, "dir=%q, entries=%v", dir, entries)
return entries, nil
}
// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
defer log.Trace(f, "remote=%q", remote)("obj=%v, err=%v", &o, &err)
if f.dt == nil {
return nil, fs.ErrorObjectNotFound
}
_, entry := f.dt.Find(remote)
if entry == nil {
return nil, fs.ErrorObjectNotFound
}
o, ok := entry.(*Object)
if !ok {
return nil, fs.ErrorNotAFile
}
return o, nil
}
// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
return nil, vfs.EROFS
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.CRC32)
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.f
}
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
return f.wrapper
}
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
f.wrapper = wrapper
}
// Object describes an object to be read from the raw zip file
type Object struct {
f *Fs
remote string
fh *zip.FileHeader
file *zip.File
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.f
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Remote()
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Size returns the size of the file
func (o *Object) Size() int64 {
return int64(o.fh.UncompressedSize64)
}
// ModTime returns the modification time of the object
//
// It is read from the zip file header for the entry.
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.fh.Modified
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return vfs.EROFS
}
// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
return true
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
if ht == hash.CRC32 {
// FIXME return empty CRC if writing
if o.f.dt == nil {
return "", nil
}
return fmt.Sprintf("%08x", o.fh.CRC32), nil
}
return "", hash.ErrUnsupported
}
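// For example (editor's note): a stored CRC32 of 0x1C291CA3 is returned by
// Hash as the 8 hex digit string "1c291ca3".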
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
rc, err = o.file.Open()
if err != nil {
return nil, err
}
// discard data from start as necessary
if offset > 0 {
_, err = io.CopyN(io.Discard, rc, offset)
if err != nil {
return nil, err
}
}
// If limited then don't return everything
if limit >= 0 {
return readers.NewLimitedReadCloser(rc, limit), nil
}
return rc, nil
}
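// Editor's note: unlike the squashfs backend, a zip entry reader cannot seek,
// so SeekOption/RangeOption offsets are honoured above by discarding the first
// offset bytes with io.CopyN before the reader is returned.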
// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return vfs.EROFS
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return vfs.EROFS
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
)

File diff suppressed because it is too large


@@ -3,567 +3,16 @@
package azureblob package azureblob
import ( import (
"context"
"encoding/base64"
"fmt"
"net/http"
"strings"
"testing" "testing"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
) )
func TestBlockIDCreator(t *testing.T) { func (f *Fs) InternalTest(t *testing.T) {
// Check creation and random number // Check first feature flags are set on this
bic, err := newBlockIDCreator() // remote
require.NoError(t, err)
bic2, err := newBlockIDCreator()
require.NoError(t, err)
assert.NotEqual(t, bic.random, bic2.random)
assert.NotEqual(t, bic.random, [8]byte{})
// Set random to known value for tests
bic.random = [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
chunkNumber := uint64(0xFEDCBA9876543210)
// Check creation of ID
want := base64.StdEncoding.EncodeToString([]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10, 1, 2, 3, 4, 5, 6, 7, 8})
assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", want)
got := bic.newBlockID(chunkNumber)
assert.Equal(t, want, got)
assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", got)
// Test checkID is working
assert.NoError(t, bic.checkID(chunkNumber, got))
assert.ErrorContains(t, bic.checkID(chunkNumber, "$"+got), "illegal base64")
assert.ErrorContains(t, bic.checkID(chunkNumber, "AAAA"+got), "bad block ID length")
assert.ErrorContains(t, bic.checkID(chunkNumber+1, got), "expecting decoded")
assert.ErrorContains(t, bic2.checkID(chunkNumber, got), "random bytes")
}
func (f *Fs) testFeatures(t *testing.T) {
// Check first feature flags are set on this remote
enabled := f.Features().SetTier enabled := f.Features().SetTier
assert.True(t, enabled) assert.True(t, enabled)
enabled = f.Features().GetTier enabled = f.Features().GetTier
assert.True(t, enabled) assert.True(t, enabled)
} }
type ReadSeekCloser struct {
*strings.Reader
}
func (r *ReadSeekCloser) Close() error {
return nil
}
// Stage a block at remote but don't commit it
func (f *Fs) stageBlockWithoutCommit(ctx context.Context, t *testing.T, remote string) {
var (
containerName, blobPath = f.split(remote)
containerClient = f.cntSVC(containerName)
blobClient = containerClient.NewBlockBlobClient(blobPath)
data = "uncommitted data"
blockID = "1"
blockIDBase64 = base64.StdEncoding.EncodeToString([]byte(blockID))
)
r := &ReadSeekCloser{strings.NewReader(data)}
_, err := blobClient.StageBlock(ctx, blockIDBase64, r, nil)
require.NoError(t, err)
// Verify the block is staged but not committed
blockList, err := blobClient.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
require.NoError(t, err)
found := false
for _, block := range blockList.UncommittedBlocks {
if *block.Name == blockIDBase64 {
found = true
break
}
}
require.True(t, found, "Block ID not found in uncommitted blocks")
}
// This tests uploading a blob where it has uncommitted blocks with a different ID size.
//
// https://gauravmantri.com/2013/05/18/windows-azure-blob-storage-dealing-with-the-specified-blob-or-block-content-is-invalid-error/
//
// TestIntegration/FsMkdir/FsPutFiles/Internal/WriteUncommittedBlocks
func (f *Fs) testWriteUncommittedBlocks(t *testing.T) {
var (
ctx = context.Background()
remote = "testBlob"
)
// Multipart copy the blob please
oldUseCopyBlob, oldCopyCutoff := f.opt.UseCopyBlob, f.opt.CopyCutoff
f.opt.UseCopyBlob = false
f.opt.CopyCutoff = f.opt.ChunkSize
defer func() {
f.opt.UseCopyBlob, f.opt.CopyCutoff = oldUseCopyBlob, oldCopyCutoff
}()
// Create a blob with uncommitted blocks
f.stageBlockWithoutCommit(ctx, t, remote)
// Now attempt to overwrite the block with a different sized block ID to provoke this error
// Check the object does not exist
_, err := f.NewObject(ctx, remote)
require.Equal(t, fs.ErrorObjectNotFound, err)
// Upload a multipart file over the block with uncommitted chunks of a different ID size
size := 4*int(f.opt.ChunkSize) - 1
contents := random.String(size)
item := fstest.NewItem(remote, contents, fstest.Time("2001-05-06T04:05:06.499Z"))
o := fstests.PutTestContents(ctx, t, f, &item, contents, true)
// Check size
assert.Equal(t, int64(size), o.Size())
// Create a new blob with uncommitted blocks
newRemote := "testBlob2"
f.stageBlockWithoutCommit(ctx, t, newRemote)
// Copy over that block
dst, err := f.Copy(ctx, o, newRemote)
require.NoError(t, err)
// Check basics
assert.Equal(t, int64(size), dst.Size())
assert.Equal(t, newRemote, dst.Remote())
// Check contents
gotContents := fstests.ReadObject(ctx, t, dst, -1)
assert.Equal(t, contents, gotContents)
// Remove the object
require.NoError(t, dst.Remove(ctx))
}
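// Editor's note: the point of the test above is that Azure can reject commits
// when a blob already holds uncommitted blocks whose IDs have a different
// length (see the linked article), so both the multipart upload and the
// multipart copy paths are exercised against blobs with staged-but-uncommitted
// blocks.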
func (f *Fs) InternalTest(t *testing.T) {
t.Run("Features", f.testFeatures)
t.Run("WriteUncommittedBlocks", f.testWriteUncommittedBlocks)
t.Run("Metadata", f.testMetadataPaths)
}
// helper to read blob properties for an object
func getProps(ctx context.Context, t *testing.T, o fs.Object) *blob.GetPropertiesResponse {
ao := o.(*Object)
props, err := ao.readMetaDataAlways(ctx)
require.NoError(t, err)
return props
}
// helper to assert selected headers and user metadata
func assertHeadersAndMetadata(t *testing.T, props *blob.GetPropertiesResponse, want map[string]string, wantUserMeta map[string]string) {
// Headers
get := func(p *string) string {
if p == nil {
return ""
}
return *p
}
if v, ok := want["content-type"]; ok {
assert.Equal(t, v, get(props.ContentType), "content-type")
}
if v, ok := want["cache-control"]; ok {
assert.Equal(t, v, get(props.CacheControl), "cache-control")
}
if v, ok := want["content-disposition"]; ok {
assert.Equal(t, v, get(props.ContentDisposition), "content-disposition")
}
if v, ok := want["content-encoding"]; ok {
assert.Equal(t, v, get(props.ContentEncoding), "content-encoding")
}
if v, ok := want["content-language"]; ok {
assert.Equal(t, v, get(props.ContentLanguage), "content-language")
}
// User metadata (case-insensitive keys from service)
norm := make(map[string]*string, len(props.Metadata))
for kk, vv := range props.Metadata {
norm[strings.ToLower(kk)] = vv
}
for k, v := range wantUserMeta {
pv, ok := norm[strings.ToLower(k)]
if assert.True(t, ok, fmt.Sprintf("missing user metadata key %q", k)) {
if pv == nil {
assert.Equal(t, v, "", k)
} else {
assert.Equal(t, v, *pv, k)
}
} else {
// Log available keys for diagnostics
keys := make([]string, 0, len(props.Metadata))
for kk := range props.Metadata {
keys = append(keys, kk)
}
t.Logf("available user metadata keys: %v", keys)
}
}
}
// helper to read blob tags for an object
func getTagsMap(ctx context.Context, t *testing.T, o fs.Object) map[string]string {
ao := o.(*Object)
blb := ao.getBlobSVC()
resp, err := blb.GetTags(ctx, nil)
require.NoError(t, err)
out := make(map[string]string)
for _, tag := range resp.BlobTagSet {
if tag.Key != nil {
k := *tag.Key
v := ""
if tag.Value != nil {
v = *tag.Value
}
out[k] = v
}
}
return out
}
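// Editor's note: in these tests blob tags are supplied through the "x-ms-tags"
// metadata key as comma separated key=value pairs (for example
// "env=dev,team=sync") and read back into a plain map by getTagsMap above.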
// Test metadata across different write paths
func (f *Fs) testMetadataPaths(t *testing.T) {
ctx := context.Background()
if testing.Short() {
t.Skip("skipping in short mode")
}
// Common expected metadata and headers
baseMeta := fs.Metadata{
"cache-control": "no-cache",
"content-disposition": "inline",
"content-language": "en-US",
// Note: Don't set content-encoding here to avoid download decoding differences
// We will set a custom user metadata key
"potato": "royal",
// and modtime
"mtime": fstest.Time("2009-05-06T04:05:06.499999999Z").Format(time.RFC3339Nano),
}
// Singlepart upload
t.Run("PutSinglepart", func(t *testing.T) {
// size less than chunk size
contents := random.String(int(f.opt.ChunkSize / 2))
item := fstest.NewItem("meta-single.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
// override content-type via metadata mapping
meta := fs.Metadata{}
meta.Merge(baseMeta)
meta["content-type"] = "text/plain"
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/html", meta)
defer func() { _ = obj.Remove(ctx) }()
props := getProps(ctx, t, obj)
assertHeadersAndMetadata(t, props, map[string]string{
"content-type": "text/plain",
"cache-control": "no-cache",
"content-disposition": "inline",
"content-language": "en-US",
}, map[string]string{
"potato": "royal",
})
_ = http.StatusOK // keep import for parity but don't inspect RawResponse
})
// Multipart upload
t.Run("PutMultipart", func(t *testing.T) {
// size greater than chunk size to force multipart
contents := random.String(int(f.opt.ChunkSize + 1024))
item := fstest.NewItem("meta-multipart.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
meta := fs.Metadata{}
meta.Merge(baseMeta)
meta["content-type"] = "application/json"
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/html", meta)
defer func() { _ = obj.Remove(ctx) }()
props := getProps(ctx, t, obj)
assertHeadersAndMetadata(t, props, map[string]string{
"content-type": "application/json",
"cache-control": "no-cache",
"content-disposition": "inline",
"content-language": "en-US",
}, map[string]string{
"potato": "royal",
})
// Tags: Singlepart upload
t.Run("PutSinglepartTags", func(t *testing.T) {
contents := random.String(int(f.opt.ChunkSize / 2))
item := fstest.NewItem("tags-single.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
meta := fs.Metadata{
"x-ms-tags": "env=dev,team=sync",
}
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/plain", meta)
defer func() { _ = obj.Remove(ctx) }()
tags := getTagsMap(ctx, t, obj)
assert.Equal(t, "dev", tags["env"])
assert.Equal(t, "sync", tags["team"])
})
// Tags: Multipart upload
t.Run("PutMultipartTags", func(t *testing.T) {
contents := random.String(int(f.opt.ChunkSize + 2048))
item := fstest.NewItem("tags-multipart.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
meta := fs.Metadata{
"x-ms-tags": "project=alpha,release=2025-08",
}
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "application/octet-stream", meta)
defer func() { _ = obj.Remove(ctx) }()
tags := getTagsMap(ctx, t, obj)
assert.Equal(t, "alpha", tags["project"])
assert.Equal(t, "2025-08", tags["release"])
})
})
// Singlepart copy with metadata-set mapping; omit content-type to exercise fallback
t.Run("CopySinglepart", func(t *testing.T) {
// create small source
contents := random.String(int(f.opt.ChunkSize / 2))
srcItem := fstest.NewItem("meta-copy-single-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "text/plain", nil)
defer func() { _ = srcObj.Remove(ctx) }()
// set mapping via MetadataSet
ctx2, ci := fs.AddConfig(ctx)
ci.Metadata = true
ci.MetadataSet = fs.Metadata{
"cache-control": "private, max-age=60",
"content-disposition": "attachment; filename=foo.txt",
"content-language": "fr",
// no content-type: should fallback to source
"potato": "maris",
}
// do copy
dstName := "meta-copy-single-dst.txt"
dst, err := f.Copy(ctx2, srcObj, dstName)
require.NoError(t, err)
defer func() { _ = dst.Remove(ctx2) }()
props := getProps(ctx2, t, dst)
// content-type should fallback to source (text/plain)
assertHeadersAndMetadata(t, props, map[string]string{
"content-type": "text/plain",
"cache-control": "private, max-age=60",
"content-disposition": "attachment; filename=foo.txt",
"content-language": "fr",
}, map[string]string{
"potato": "maris",
})
// mtime should be populated on copy when --metadata is used
// and should equal the source ModTime (RFC3339Nano)
// Read user metadata (case-insensitive)
m := props.Metadata
var gotMtime string
for k, v := range m {
if strings.EqualFold(k, "mtime") && v != nil {
gotMtime = *v
break
}
}
if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") {
// parse and compare times ignoring formatting differences
parsed, err := time.Parse(time.RFC3339Nano, gotMtime)
require.NoError(t, err)
assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime")
}
})
// CopySinglepart with only --metadata (no MetadataSet) must inject mtime and preserve src content-type
t.Run("CopySinglepart_MetadataOnly", func(t *testing.T) {
contents := random.String(int(f.opt.ChunkSize / 2))
srcItem := fstest.NewItem("meta-copy-single-only-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "text/plain", nil)
defer func() { _ = srcObj.Remove(ctx) }()
ctx2, ci := fs.AddConfig(ctx)
ci.Metadata = true
dstName := "meta-copy-single-only-dst.txt"
dst, err := f.Copy(ctx2, srcObj, dstName)
require.NoError(t, err)
defer func() { _ = dst.Remove(ctx2) }()
props := getProps(ctx2, t, dst)
assertHeadersAndMetadata(t, props, map[string]string{
"content-type": "text/plain",
}, map[string]string{})
// Assert mtime injected
m := props.Metadata
var gotMtime string
for k, v := range m {
if strings.EqualFold(k, "mtime") && v != nil {
gotMtime = *v
break
}
}
if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") {
parsed, err := time.Parse(time.RFC3339Nano, gotMtime)
require.NoError(t, err)
assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime")
}
})
// Multipart copy with metadata-set mapping; omit content-type to exercise fallback
t.Run("CopyMultipart", func(t *testing.T) {
// create large source to force multipart
contents := random.String(int(f.opt.CopyCutoff + 1024))
srcItem := fstest.NewItem("meta-copy-multi-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "application/octet-stream", nil)
defer func() { _ = srcObj.Remove(ctx) }()
// set mapping via MetadataSet
ctx2, ci := fs.AddConfig(ctx)
ci.Metadata = true
ci.MetadataSet = fs.Metadata{
"cache-control": "max-age=0, no-cache",
// omit content-type to trigger fallback
"content-language": "de",
"potato": "desiree",
}
dstName := "meta-copy-multi-dst.txt"
dst, err := f.Copy(ctx2, srcObj, dstName)
require.NoError(t, err)
defer func() { _ = dst.Remove(ctx2) }()
props := getProps(ctx2, t, dst)
// content-type should fallback to source (application/octet-stream)
assertHeadersAndMetadata(t, props, map[string]string{
"content-type": "application/octet-stream",
"cache-control": "max-age=0, no-cache",
"content-language": "de",
}, map[string]string{
"potato": "desiree",
})
// mtime should be populated on copy when --metadata is used
m := props.Metadata
var gotMtime string
for k, v := range m {
if strings.EqualFold(k, "mtime") && v != nil {
gotMtime = *v
break
}
}
if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") {
parsed, err := time.Parse(time.RFC3339Nano, gotMtime)
require.NoError(t, err)
assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime")
}
})
// CopyMultipart with only --metadata must inject mtime and preserve src content-type
t.Run("CopyMultipart_MetadataOnly", func(t *testing.T) {
contents := random.String(int(f.opt.CopyCutoff + 2048))
srcItem := fstest.NewItem("meta-copy-multi-only-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "application/octet-stream", nil)
defer func() { _ = srcObj.Remove(ctx) }()
ctx2, ci := fs.AddConfig(ctx)
ci.Metadata = true
dstName := "meta-copy-multi-only-dst.txt"
dst, err := f.Copy(ctx2, srcObj, dstName)
require.NoError(t, err)
defer func() { _ = dst.Remove(ctx2) }()
props := getProps(ctx2, t, dst)
assertHeadersAndMetadata(t, props, map[string]string{
"content-type": "application/octet-stream",
}, map[string]string{})
m := props.Metadata
var gotMtime string
for k, v := range m {
if strings.EqualFold(k, "mtime") && v != nil {
gotMtime = *v
break
}
}
if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") {
parsed, err := time.Parse(time.RFC3339Nano, gotMtime)
require.NoError(t, err)
assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime")
}
})
// Tags: Singlepart copy
t.Run("CopySinglepartTags", func(t *testing.T) {
// create small source
contents := random.String(int(f.opt.ChunkSize / 2))
srcItem := fstest.NewItem("tags-copy-single-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "text/plain", nil)
defer func() { _ = srcObj.Remove(ctx) }()
// set mapping via MetadataSet including tags
ctx2, ci := fs.AddConfig(ctx)
ci.Metadata = true
ci.MetadataSet = fs.Metadata{
"x-ms-tags": "copy=single,mode=test",
}
dstName := "tags-copy-single-dst.txt"
dst, err := f.Copy(ctx2, srcObj, dstName)
require.NoError(t, err)
defer func() { _ = dst.Remove(ctx2) }()
tags := getTagsMap(ctx2, t, dst)
assert.Equal(t, "single", tags["copy"])
assert.Equal(t, "test", tags["mode"])
})
// Tags: Multipart copy
t.Run("CopyMultipartTags", func(t *testing.T) {
// create large source to force multipart
contents := random.String(int(f.opt.CopyCutoff + 4096))
srcItem := fstest.NewItem("tags-copy-multi-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "application/octet-stream", nil)
defer func() { _ = srcObj.Remove(ctx) }()
ctx2, ci := fs.AddConfig(ctx)
ci.Metadata = true
ci.MetadataSet = fs.Metadata{
"x-ms-tags": "copy=multi,mode=test",
}
dstName := "tags-copy-multi-dst.txt"
dst, err := f.Copy(ctx2, srcObj, dstName)
require.NoError(t, err)
defer func() { _ = dst.Remove(ctx2) }()
tags := getTagsMap(ctx2, t, dst)
assert.Equal(t, "multi", tags["copy"])
assert.Equal(t, "test", tags["mode"])
})
// Negative: invalid x-ms-tags must error
t.Run("InvalidXMsTags", func(t *testing.T) {
contents := random.String(32)
item := fstest.NewItem("tags-invalid.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
// construct ObjectInfo with invalid x-ms-tags
buf := strings.NewReader(contents)
// Build obj info with metadata
meta := fs.Metadata{
"x-ms-tags": "badpair-without-equals",
}
// force metadata on
ctx2, ci := fs.AddConfig(ctx)
ci.Metadata = true
obji := object.NewStaticObjectInfo(item.Path, item.ModTime, int64(len(contents)), true, nil, nil)
obji = obji.WithMetadata(meta).WithMimeType("text/plain")
_, err := f.Put(ctx2, buf, obji)
require.Error(t, err)
assert.Contains(t, err.Error(), "invalid tag")
})
}


@@ -15,17 +15,13 @@ import (
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) { func TestIntegration(t *testing.T) {
name := "TestAzureBlob"
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
RemoteName: name + ":", RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil), NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool", "Cold"}, TiersToTest: []string{"Hot", "Cool", "Cold"},
ChunkedUpload: fstests.ChunkedUploadConfig{ ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize, MinChunkSize: defaultChunkSize,
}, },
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "use_copy_blob", Value: "false"},
},
}) })
} }
@@ -44,7 +40,6 @@ func TestIntegration2(t *testing.T) {
}, },
ExtraConfig: []fstests.ExtraConfigItem{ ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "directory_markers", Value: "true"}, {Name: name, Key: "directory_markers", Value: "true"},
{Name: name, Key: "use_copy_blob", Value: "false"},
}, },
}) })
} }
@@ -53,13 +48,8 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs) return f.setUploadChunkSize(cs)
} }
func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setCopyCutoff(cs)
}
var ( var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil) _ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetCopyCutoffer = (*Fs)(nil)
) )
func TestValidateAccessTier(t *testing.T) { func TestValidateAccessTier(t *testing.T) {

View File

@@ -56,7 +56,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env" "github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/lib/readers"
@@ -238,30 +237,6 @@ msi_client_id, or msi_mi_res_id parameters.`,
Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.", Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
Advanced: true, Advanced: true,
Sensitive: true, Sensitive: true,
}, {
Name: "disable_instance_discovery",
Help: `Skip requesting Microsoft Entra instance metadata
This should be set true only by applications authenticating in
disconnected clouds, or private clouds such as Azure Stack.
It determines whether rclone requests Microsoft Entra instance
metadata from ` + "`https://login.microsoft.com/`" + ` before
authenticating.
Setting this to true will skip this request, making you responsible
for ensuring the configured authority is valid and trustworthy.
`,
Default: false,
Advanced: true,
}, {
Name: "use_az",
Help: `Use Azure CLI tool az for authentication
Set to use the [Azure CLI tool az](https://learn.microsoft.com/en-us/cli/azure/)
as the sole means of authentication.
Setting this can be useful if you wish to use the az CLI on a host with
a System Managed Identity that you do not want to use.
Don't set env_auth at the same time.
`,
Default: false,
Advanced: true,
}, { }, {
Name: "endpoint", Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.", Help: "Endpoint for the service.\n\nLeave blank normally.",
@@ -344,12 +319,10 @@ type Options struct {
Username string `config:"username"` Username string `config:"username"`
Password string `config:"password"` Password string `config:"password"`
ServicePrincipalFile string `config:"service_principal_file"` ServicePrincipalFile string `config:"service_principal_file"`
DisableInstanceDiscovery bool `config:"disable_instance_discovery"`
UseMSI bool `config:"use_msi"` UseMSI bool `config:"use_msi"`
MSIObjectID string `config:"msi_object_id"` MSIObjectID string `config:"msi_object_id"`
MSIClientID string `config:"msi_client_id"` MSIClientID string `config:"msi_client_id"`
MSIResourceID string `config:"msi_mi_res_id"` MSIResourceID string `config:"msi_mi_res_id"`
UseAZ bool `config:"use_az"`
Endpoint string `config:"endpoint"` Endpoint string `config:"endpoint"`
ChunkSize fs.SizeSuffix `config:"chunk_size"` ChunkSize fs.SizeSuffix `config:"chunk_size"`
MaxStreamSize fs.SizeSuffix `config:"max_stream_size"` MaxStreamSize fs.SizeSuffix `config:"max_stream_size"`
@@ -420,10 +393,8 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
policyClientOptions := policy.ClientOptions{ policyClientOptions := policy.ClientOptions{
Transport: newTransporter(ctx), Transport: newTransporter(ctx),
} }
backup := service.ShareTokenIntentBackup
clientOpt := service.ClientOptions{ clientOpt := service.ClientOptions{
ClientOptions: policyClientOptions, ClientOptions: policyClientOptions,
FileRequestIntent: &backup,
} }
// Here we auth by setting one of cred, sharedKeyCred or f.client // Here we auth by setting one of cred, sharedKeyCred or f.client
@@ -441,8 +412,7 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
} }
// Read credentials from the environment // Read credentials from the environment
options := azidentity.DefaultAzureCredentialOptions{ options := azidentity.DefaultAzureCredentialOptions{
ClientOptions: policyClientOptions, ClientOptions: policyClientOptions,
DisableInstanceDiscovery: opt.DisableInstanceDiscovery,
} }
cred, err = azidentity.NewDefaultAzureCredential(&options) cred, err = azidentity.NewDefaultAzureCredential(&options)
if err != nil { if err != nil {
@@ -453,13 +423,6 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
if err != nil { if err != nil {
return nil, fmt.Errorf("create new shared key credential failed: %w", err) return nil, fmt.Errorf("create new shared key credential failed: %w", err)
} }
case opt.UseAZ:
options := azidentity.AzureCLICredentialOptions{}
cred, err = azidentity.NewAzureCLICredential(&options)
fmt.Println(cred)
if err != nil {
return nil, fmt.Errorf("failed to create Azure CLI credentials: %w", err)
}
case opt.SASURL != "": case opt.SASURL != "":
client, err = service.NewClientWithNoCredential(opt.SASURL, &clientOpt) client, err = service.NewClientWithNoCredential(opt.SASURL, &clientOpt)
if err != nil { if err != nil {
@@ -517,7 +480,6 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
} }
case opt.ClientID != "" && opt.Tenant != "" && opt.Username != "" && opt.Password != "": case opt.ClientID != "" && opt.Tenant != "" && opt.Username != "" && opt.Password != "":
// User with username and password // User with username and password
//nolint:staticcheck // this is deprecated due to Azure policy
options := azidentity.UsernamePasswordCredentialOptions{ options := azidentity.UsernamePasswordCredentialOptions{
ClientOptions: policyClientOptions, ClientOptions: policyClientOptions,
} }
@@ -551,7 +513,7 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
case opt.UseMSI: case opt.UseMSI:
// Specifying a user-assigned identity. Exactly one of the above IDs must be specified. // Specifying a user-assigned identity. Exactly one of the above IDs must be specified.
// Validate and ensure exactly one is set. (To do: better validation.) // Validate and ensure exactly one is set. (To do: better validation.)
b2i := map[bool]int{false: 0, true: 1} var b2i = map[bool]int{false: 0, true: 1}
set := b2i[opt.MSIClientID != ""] + b2i[opt.MSIObjectID != ""] + b2i[opt.MSIResourceID != ""] set := b2i[opt.MSIClientID != ""] + b2i[opt.MSIObjectID != ""] + b2i[opt.MSIResourceID != ""]
if set > 1 { if set > 1 {
return nil, errors.New("more than one user-assigned identity ID is set") return nil, errors.New("more than one user-assigned identity ID is set")
@@ -570,37 +532,6 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to acquire MSI token: %w", err) return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
} }
case opt.ClientID != "" && opt.Tenant != "" && opt.MSIClientID != "":
// Workload Identity based authentication
var options azidentity.ManagedIdentityCredentialOptions
options.ID = azidentity.ClientID(opt.MSIClientID)
msiCred, err := azidentity.NewManagedIdentityCredential(&options)
if err != nil {
return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
}
getClientAssertions := func(context.Context) (string, error) {
token, err := msiCred.GetToken(context.Background(), policy.TokenRequestOptions{
Scopes: []string{"api://AzureADTokenExchange"},
})
if err != nil {
return "", fmt.Errorf("failed to acquire MSI token: %w", err)
}
return token.Token, nil
}
assertOpts := &azidentity.ClientAssertionCredentialOptions{}
cred, err = azidentity.NewClientAssertionCredential(
opt.Tenant,
opt.ClientID,
getClientAssertions,
assertOpts)
if err != nil {
return nil, fmt.Errorf("failed to acquire client assertion token: %w", err)
}
default: default:
return nil, errors.New("no authentication method configured") return nil, errors.New("no authentication method configured")
} }
@@ -844,35 +775,18 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// //
// This should return ErrDirNotFound if the directory isn't found. // This should return ErrDirNotFound if the directory isn't found.
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) { func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
return list.WithListP(ctx, dir, f) var entries fs.DirEntries
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
subDirClient := f.dirClient(dir) subDirClient := f.dirClient(dir)
// Checking whether directory exists // Checking whether directory exists
_, err := subDirClient.GetProperties(ctx, nil) _, err := subDirClient.GetProperties(ctx, nil)
if fileerror.HasCode(err, fileerror.ParentNotFound, fileerror.ResourceNotFound) { if fileerror.HasCode(err, fileerror.ParentNotFound, fileerror.ResourceNotFound) {
return fs.ErrorDirNotFound return entries, fs.ErrorDirNotFound
} else if err != nil { } else if err != nil {
return err return entries, err
} }
opt := &directory.ListFilesAndDirectoriesOptions{ var opt = &directory.ListFilesAndDirectoriesOptions{
Include: directory.ListFilesInclude{ Include: directory.ListFilesInclude{
Timestamps: true, Timestamps: true,
}, },
@@ -881,7 +795,7 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) e
for pager.More() { for pager.More() {
resp, err := pager.NextPage(ctx) resp, err := pager.NextPage(ctx)
if err != nil { if err != nil {
return err return entries, err
} }
for _, directory := range resp.Segment.Directories { for _, directory := range resp.Segment.Directories {
// Name *string `xml:"Name"` // Name *string `xml:"Name"`
@@ -907,10 +821,7 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) e
if directory.Properties.ContentLength != nil { if directory.Properties.ContentLength != nil {
entry.SetSize(*directory.Properties.ContentLength) entry.SetSize(*directory.Properties.ContentLength)
} }
err = list.Add(entry) entries = append(entries, entry)
if err != nil {
return err
}
} }
for _, file := range resp.Segment.Files { for _, file := range resp.Segment.Files {
leaf := f.opt.Enc.ToStandardPath(*file.Name) leaf := f.opt.Enc.ToStandardPath(*file.Name)
@@ -924,13 +835,10 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) e
if file.Properties.LastWriteTime != nil { if file.Properties.LastWriteTime != nil {
entry.modTime = *file.Properties.LastWriteTime entry.modTime = *file.Properties.LastWriteTime
} }
err = list.Add(entry) entries = append(entries, entry)
if err != nil {
return err
}
} }
} }
return list.Flush() return entries, nil
} }
// ------------------------------------------------------------ // ------------------------------------------------------------
@@ -977,7 +885,7 @@ func (o *Object) setMetadata(resp *file.GetPropertiesResponse) {
} }
} }
// getMetadata gets the metadata if it hasn't already been fetched // readMetaData gets the metadata if it hasn't already been fetched
func (o *Object) getMetadata(ctx context.Context) error { func (o *Object) getMetadata(ctx context.Context) error {
resp, err := o.fileClient().GetProperties(ctx, nil) resp, err := o.fileClient().GetProperties(ctx, nil)
if err != nil { if err != nil {
@@ -989,7 +897,7 @@ func (o *Object) getMetadata(ctx context.Context) error {
// Hash returns the MD5 of an object returning a lowercase hex string // Hash returns the MD5 of an object returning a lowercase hex string
// //
// May make a network request because the [fs.List] method does not // May make a network request becaue the [fs.List] method does not
// return MD5 hashes for DirEntry // return MD5 hashes for DirEntry
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) { func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
if ty != hash.MD5 { if ty != hash.MD5 {
@@ -1037,10 +945,6 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
SMBProperties: &file.SMBProperties{ SMBProperties: &file.SMBProperties{
LastWriteTime: &t, LastWriteTime: &t,
}, },
HTTPHeaders: &file.HTTPHeaders{
ContentMD5: o.md5,
ContentType: &o.contentType,
},
} }
_, err := o.fileClient().SetHTTPHeaders(ctx, &opt) _, err := o.fileClient().SetHTTPHeaders(ctx, &opt)
if err != nil { if err != nil {
@@ -1337,29 +1241,10 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
} }
srcURL := srcObj.fileClient().URL() srcURL := srcObj.fileClient().URL()
fc := f.fileClient(remote) fc := f.fileClient(remote)
startCopy, err := fc.StartCopyFromURL(ctx, srcURL, &opt) _, err = fc.StartCopyFromURL(ctx, srcURL, &opt)
if err != nil { if err != nil {
return nil, fmt.Errorf("Copy failed: %w", err) return nil, fmt.Errorf("Copy failed: %w", err)
} }
// Poll for completion if necessary
//
// The for loop is never executed for same storage account copies.
copyStatus := startCopy.CopyStatus
var properties file.GetPropertiesResponse
pollTime := 100 * time.Millisecond
for copyStatus != nil && string(*copyStatus) == string(file.CopyStatusTypePending) {
time.Sleep(pollTime)
properties, err = fc.GetProperties(ctx, &file.GetPropertiesOptions{})
if err != nil {
return nil, err
}
copyStatus = properties.CopyStatus
pollTime = min(2*pollTime, time.Second)
}
dstObj, err := f.NewObject(ctx, remote) dstObj, err := f.NewObject(ctx, remote)
if err != nil { if err != nil {
return nil, fmt.Errorf("Copy: NewObject failed: %w", err) return nil, fmt.Errorf("Copy: NewObject failed: %w", err)
@@ -1474,7 +1359,6 @@ var (
_ fs.DirMover = &Fs{} _ fs.DirMover = &Fs{}
_ fs.Copier = &Fs{} _ fs.Copier = &Fs{}
_ fs.OpenWriterAter = &Fs{} _ fs.OpenWriterAter = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.Object = &Object{} _ fs.Object = &Object{}
_ fs.MimeTyper = &Object{} _ fs.MimeTyper = &Object{}
) )


@@ -61,7 +61,7 @@ const chars = "abcdefghijklmnopqrstuvwzyxABCDEFGHIJKLMNOPQRSTUVWZYX"
func randomString(charCount int) string { func randomString(charCount int) string {
strBldr := strings.Builder{} strBldr := strings.Builder{}
for range charCount { for i := 0; i < charCount; i++ {
randPos := rand.Int63n(52) randPos := rand.Int63n(52)
strBldr.WriteByte(chars[randPos]) strBldr.WriteByte(chars[randPos])
} }


@@ -42,18 +42,9 @@ type Bucket struct {
// LifecycleRule is a single lifecycle rule // LifecycleRule is a single lifecycle rule
type LifecycleRule struct { type LifecycleRule struct {
DaysFromHidingToDeleting *int `json:"daysFromHidingToDeleting"` DaysFromHidingToDeleting *int `json:"daysFromHidingToDeleting"`
DaysFromUploadingToHiding *int `json:"daysFromUploadingToHiding"` DaysFromUploadingToHiding *int `json:"daysFromUploadingToHiding"`
DaysFromStartingToCancelingUnfinishedLargeFiles *int `json:"daysFromStartingToCancelingUnfinishedLargeFiles"` FileNamePrefix string `json:"fileNamePrefix"`
FileNamePrefix string `json:"fileNamePrefix"`
}
// ServerSideEncryption is a configuration object for B2 Server-Side Encryption
type ServerSideEncryption struct {
Mode string `json:"mode"`
Algorithm string `json:"algorithm"` // Encryption algorithm to use
CustomerKey string `json:"customerKey"` // User provided Base64 encoded key that is used by the server to encrypt files
CustomerKeyMd5 string `json:"customerKeyMd5"` // An MD5 hash of the decoded key
} }
// Timestamp is a UTC time when this file was uploaded. It is a base // Timestamp is a UTC time when this file was uploaded. It is a base
@@ -133,32 +124,23 @@ type File struct {
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file. Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
} }
// StorageAPI is as returned from the b2_authorize_account call // AuthorizeAccountResponse is as returned from the b2_authorize_account call
type StorageAPI struct { type AuthorizeAccountResponse struct {
AbsoluteMinimumPartSize int `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file. AbsoluteMinimumPartSize int `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
AccountID string `json:"accountId"` // The identifier for the account.
Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it. Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
Buckets []struct { // When present, access is restricted to one or more buckets. BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
ID string `json:"id"` // ID of bucket BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
Name string `json:"name"` // When present, name of bucket - may be empty Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
} `json:"buckets"` NamePrefix interface{} `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has for every bucket.
NamePrefix any `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
} `json:"allowed"` } `json:"allowed"`
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files. APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files. DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files.
MinimumPartSize int `json:"minimumPartSize"` // DEPRECATED: This field will always have the same value as recommendedPartSize. Use recommendedPartSize instead. MinimumPartSize int `json:"minimumPartSize"` // DEPRECATED: This field will always have the same value as recommendedPartSize. Use recommendedPartSize instead.
RecommendedPartSize int `json:"recommendedPartSize"` // The recommended size for each part of a large file. We recommend using this part size for optimal upload performance. RecommendedPartSize int `json:"recommendedPartSize"` // The recommended size for each part of a large file. We recommend using this part size for optimal upload performance.
} }
// AuthorizeAccountResponse is as returned from the b2_authorize_account call
type AuthorizeAccountResponse struct {
AccountID string `json:"accountId"` // The identifier for the account.
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
APIs struct { // Supported APIs for this account / key. These are API-dependent JSON objects.
Storage StorageAPI `json:"storageApi"`
} `json:"apiInfo"`
}
// ListBucketsRequest is parameters for b2_list_buckets call // ListBucketsRequest is parameters for b2_list_buckets call
type ListBucketsRequest struct { type ListBucketsRequest struct {
AccountID string `json:"accountId"` // The identifier for the account. AccountID string `json:"accountId"` // The identifier for the account.
@@ -278,22 +260,21 @@ type GetFileInfoRequest struct {
// //
// Example: { "src_last_modified_millis" : "1452802803026", "large_file_sha1" : "a3195dc1e7b46a2ff5da4b3c179175b75671e80d", "color": "blue" } // Example: { "src_last_modified_millis" : "1452802803026", "large_file_sha1" : "a3195dc1e7b46a2ff5da4b3c179175b75671e80d", "color": "blue" }
type StartLargeFileRequest struct { type StartLargeFileRequest struct {
BucketID string `json:"bucketId"` // The ID of the bucket that the file will go in. BucketID string `json:"bucketId"` //The ID of the bucket that the file will go in.
Name string `json:"fileName"` // The name of the file. See Files for requirements on file names. Name string `json:"fileName"` // The name of the file. See Files for requirements on file names.
ContentType string `json:"contentType"` // The MIME type of the content of the file, which will be returned in the Content-Type header when downloading the file. Use the Content-Type b2/x-auto to automatically set the stored Content-Type post upload. In the case where a file extension is absent or the lookup fails, the Content-Type is set to application/octet-stream. ContentType string `json:"contentType"` // The MIME type of the content of the file, which will be returned in the Content-Type header when downloading the file. Use the Content-Type b2/x-auto to automatically set the stored Content-Type post upload. In the case where a file extension is absent or the lookup fails, the Content-Type is set to application/octet-stream.
Info map[string]string `json:"fileInfo"` // A JSON object holding the name/value pairs for the custom file info. Info map[string]string `json:"fileInfo"` // A JSON object holding the name/value pairs for the custom file info.
ServerSideEncryption *ServerSideEncryption `json:"serverSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption
} }
// StartLargeFileResponse is the response to StartLargeFileRequest // StartLargeFileResponse is the response to StartLargeFileRequest
type StartLargeFileResponse struct { type StartLargeFileResponse struct {
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version. ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name. Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
AccountID string `json:"accountId"` // The identifier for the account. AccountID string `json:"accountId"` // The identifier for the account.
BucketID string `json:"bucketId"` // The unique ID of the bucket. BucketID string `json:"bucketId"` // The unique ID of the bucket.
ContentType string `json:"contentType"` // The MIME type of the file. ContentType string `json:"contentType"` // The MIME type of the file.
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file. Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
UploadTimestamp Timestamp `json:"uploadTimestamp,omitempty"` // This is a UTC time when this file was uploaded. UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
} }
// GetUploadPartURLRequest is passed to b2_get_upload_part_url // GetUploadPartURLRequest is passed to b2_get_upload_part_url
@@ -343,25 +324,21 @@ type CancelLargeFileResponse struct {
// CopyFileRequest is as passed to b2_copy_file // CopyFileRequest is as passed to b2_copy_file
type CopyFileRequest struct { type CopyFileRequest struct {
SourceID string `json:"sourceFileId"` // The ID of the source file being copied. SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
Name string `json:"fileName"` // The name of the new file being created. Name string `json:"fileName"` // The name of the new file being created.
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied. Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
MetadataDirective string `json:"metadataDirective,omitempty"` // The strategy for how to populate metadata for the new file: COPY or REPLACE MetadataDirective string `json:"metadataDirective,omitempty"` // The strategy for how to populate metadata for the new file: COPY or REPLACE
ContentType string `json:"contentType,omitempty"` // The MIME type of the content of the file (REPLACE only) ContentType string `json:"contentType,omitempty"` // The MIME type of the content of the file (REPLACE only)
Info map[string]string `json:"fileInfo,omitempty"` // This field stores the metadata that will be stored with the file. (REPLACE only) Info map[string]string `json:"fileInfo,omitempty"` // This field stores the metadata that will be stored with the file. (REPLACE only)
DestBucketID string `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used DestBucketID string `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used
SourceServerSideEncryption *ServerSideEncryption `json:"sourceServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the source file
DestinationServerSideEncryption *ServerSideEncryption `json:"destinationServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the destination file
} }
// CopyPartRequest is the request for b2_copy_part - the response is UploadPartResponse // CopyPartRequest is the request for b2_copy_part - the response is UploadPartResponse
type CopyPartRequest struct { type CopyPartRequest struct {
SourceID string `json:"sourceFileId"` // The ID of the source file being copied. SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
LargeFileID string `json:"largeFileId"` // The ID of the large file the part will belong to, as returned by b2_start_large_file. LargeFileID string `json:"largeFileId"` // The ID of the large file the part will belong to, as returned by b2_start_large_file.
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1) PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied. Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
SourceServerSideEncryption *ServerSideEncryption `json:"sourceServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the source file
DestinationServerSideEncryption *ServerSideEncryption `json:"destinationServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the destination file
} }
// UpdateBucketRequest describes a request to modify a B2 bucket // UpdateBucketRequest describes a request to modify a B2 bucket
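
The hunks above show two shapes for the b2_authorize_account response: a flat AuthorizeAccountResponse and a variant where the storage details live in a StorageAPI struct nested under apiInfo.storageApi. Below is a minimal standalone sketch of decoding the nested shape; the struct and field names follow the JSON tags in the diff, while the sample payload and main() are purely illustrative and not rclone's own code.

package main

import (
	"encoding/json"
	"fmt"
)

// storageAPI mirrors a subset of the storageApi object from the diff above.
type storageAPI struct {
	APIURL      string `json:"apiUrl"`
	DownloadURL string `json:"downloadUrl"`
	Allowed     struct {
		Buckets []struct {
			ID   string `json:"id"`
			Name string `json:"name"`
		} `json:"buckets"`
		Capabilities []string `json:"capabilities"`
	} `json:"allowed"`
}

// authorizeAccountResponse mirrors the account-level wrapper with the nested apiInfo object.
type authorizeAccountResponse struct {
	AccountID          string `json:"accountId"`
	AuthorizationToken string `json:"authorizationToken"`
	APIs               struct {
		Storage storageAPI `json:"storageApi"`
	} `json:"apiInfo"`
}

func main() {
	// Illustrative payload only - all values are made up.
	data := []byte(`{
		"accountId": "123456",
		"authorizationToken": "token",
		"apiInfo": {
			"storageApi": {
				"apiUrl": "https://api.example.com",
				"downloadUrl": "https://download.example.com",
				"allowed": {
					"buckets": [{"id": "bucket-id", "name": "my-bucket"}],
					"capabilities": ["listBuckets", "readFiles"]
				}
			}
		}
	}`)
	var r authorizeAccountResponse
	if err := json.Unmarshal(data, &r); err != nil {
		panic(err)
	}
	fmt.Println(r.APIs.Storage.APIURL, r.APIs.Storage.Allowed.Buckets[0].Name)
}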

View File

@@ -8,9 +8,7 @@ import (
"bufio" "bufio"
"bytes" "bytes"
"context" "context"
"crypto/md5"
"crypto/sha1" "crypto/sha1"
"encoding/base64"
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
@@ -18,7 +16,6 @@ import (
"io" "io"
"net/http" "net/http"
"path" "path"
"slices"
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
@@ -33,8 +30,7 @@ import (
"github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list" "github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/bucket" "github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/multipart" "github.com/rclone/rclone/lib/multipart"
@@ -55,9 +51,6 @@ const (
nameHeader = "X-Bz-File-Name" nameHeader = "X-Bz-File-Name"
timestampHeader = "X-Bz-Upload-Timestamp" timestampHeader = "X-Bz-Upload-Timestamp"
retryAfterHeader = "Retry-After" retryAfterHeader = "Retry-After"
sseAlgorithmHeader = "X-Bz-Server-Side-Encryption-Customer-Algorithm"
sseKeyHeader = "X-Bz-Server-Side-Encryption-Customer-Key"
sseMd5Header = "X-Bz-Server-Side-Encryption-Customer-Key-Md5"
minSleep = 10 * time.Millisecond minSleep = 10 * time.Millisecond
maxSleep = 5 * time.Minute maxSleep = 5 * time.Minute
decayConstant = 1 // bigger for slower decay, exponential decayConstant = 1 // bigger for slower decay, exponential
@@ -72,7 +65,7 @@ const (
// Globals // Globals
var ( var (
errNotWithVersions = errors.New("can't modify files in --b2-versions mode") errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
errNotWithVersionAt = errors.New("can't modify or delete files in --b2-version-at mode") errNotWithVersionAt = errors.New("can't modify or delete files in --b2-version-at mode")
) )
@@ -257,51 +250,6 @@ See: [rclone backend lifecycle](#lifecycle) for setting lifecycles after bucket
Default: (encoder.Display | Default: (encoder.Display |
encoder.EncodeBackSlash | encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8), encoder.EncodeInvalidUtf8),
}, {
Name: "sse_customer_algorithm",
Help: "If using SSE-C, the server-side encryption algorithm used when storing this object in B2.",
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}, {
Value: "AES256",
Help: "Advanced Encryption Standard (256 bits key length)",
}},
}, {
Name: "sse_customer_key",
Help: `To use SSE-C, you may provide the secret encryption key encoded in a UTF-8 compatible string to encrypt/decrypt your data
Alternatively you can provide --sse-customer-key-base64.`,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}},
Sensitive: true,
}, {
Name: "sse_customer_key_base64",
Help: `To use SSE-C, you may provide the secret encryption key encoded in Base64 format to encrypt/decrypt your data
Alternatively you can provide --sse-customer-key.`,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}},
Sensitive: true,
}, {
Name: "sse_customer_key_md5",
Help: `If using SSE-C you may provide the secret encryption key MD5 checksum (optional).
If you leave it blank, this is calculated automatically from the sse_customer_key provided.
`,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}},
Sensitive: true,
}}, }},
}) })
} }
@@ -324,10 +272,6 @@ type Options struct {
DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"` DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"`
Lifecycle int `config:"lifecycle"` Lifecycle int `config:"lifecycle"`
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
SSECustomerAlgorithm string `config:"sse_customer_algorithm"`
SSECustomerKey string `config:"sse_customer_key"`
SSECustomerKeyBase64 string `config:"sse_customer_key_base64"`
SSECustomerKeyMD5 string `config:"sse_customer_key_md5"`
} }
// Fs represents a remote b2 server // Fs represents a remote b2 server
@@ -355,13 +299,14 @@ type Fs struct {
// Object describes a b2 object // Object describes a b2 object
type Object struct { type Object struct {
fs *Fs // what this object is part of fs *Fs // what this object is part of
remote string // The remote path remote string // The remote path
id string // b2 id of the file id string // b2 id of the file
modTime time.Time // The modified time of the object if known modTime time.Time // The modified time of the object if known
sha1 string // SHA-1 hash if known sha1 string // SHA-1 hash if known
size int64 // Size of the object size int64 // Size of the object
mimeType string // Content-Type of the object mimeType string // Content-Type of the object
meta map[string]string // The object metadata if known - may be nil - with lower case keys
} }
// ------------------------------------------------------------ // ------------------------------------------------------------
@@ -558,24 +503,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.Endpoint == "" { if opt.Endpoint == "" {
opt.Endpoint = defaultEndpoint opt.Endpoint = defaultEndpoint
} }
if opt.SSECustomerKey != "" && opt.SSECustomerKeyBase64 != "" {
return nil, errors.New("b2: can't use both sse_customer_key and sse_customer_key_base64 at the same time")
} else if opt.SSECustomerKeyBase64 != "" {
// Decode the Base64-encoded key and store it in the SSECustomerKey field
decoded, err := base64.StdEncoding.DecodeString(opt.SSECustomerKeyBase64)
if err != nil {
return nil, fmt.Errorf("b2: Could not decode sse_customer_key_base64: %w", err)
}
opt.SSECustomerKey = string(decoded)
} else {
// Encode the raw key as Base64
opt.SSECustomerKeyBase64 = base64.StdEncoding.EncodeToString([]byte(opt.SSECustomerKey))
}
if opt.SSECustomerKey != "" && opt.SSECustomerKeyMD5 == "" {
// Calculate CustomerKeyMd5 if not supplied
md5sumBinary := md5.Sum([]byte(opt.SSECustomerKey))
opt.SSECustomerKeyMD5 = base64.StdEncoding.EncodeToString(md5sumBinary[:])
}
ci := fs.GetConfig(ctx) ci := fs.GetConfig(ctx)
f := &Fs{ f := &Fs{
name: name, name: name,
@@ -607,29 +534,17 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to authorize account: %w", err) return nil, fmt.Errorf("failed to authorize account: %w", err)
} }
// If this is a key limited to one or more buckets, one of them must exist // If this is a key limited to a single bucket, it must exist already
// and be ours. if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
if f.rootBucket != "" && len(f.info.APIs.Storage.Allowed.Buckets) != 0 { allowedBucket := f.opt.Enc.ToStandardName(f.info.Allowed.BucketName)
buckets := f.info.APIs.Storage.Allowed.Buckets if allowedBucket == "" {
var rootFound = false return nil, errors.New("bucket that application key is restricted to no longer exists")
var rootID string
for _, b := range buckets {
allowedBucket := f.opt.Enc.ToStandardName(b.Name)
if allowedBucket == "" {
fs.Debugf(f, "bucket %q that application key is restricted to no longer exists", b.ID)
continue
}
if allowedBucket == f.rootBucket {
rootFound = true
rootID = b.ID
}
} }
if !rootFound { if allowedBucket != f.rootBucket {
return nil, fmt.Errorf("you must use bucket(s) %q with this application key", buckets) return nil, fmt.Errorf("you must use bucket %q with this application key", allowedBucket)
} }
f.cache.MarkOK(f.rootBucket) f.cache.MarkOK(f.rootBucket)
f.setBucketID(f.rootBucket, rootID) f.setBucketID(f.rootBucket, f.info.Allowed.BucketID)
} }
if f.rootBucket != "" && f.rootDirectory != "" { if f.rootBucket != "" && f.rootDirectory != "" {
// Check to see if the (bucket,directory) is actually an existing file // Check to see if the (bucket,directory) is actually an existing file
@@ -655,7 +570,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
defer f.authMu.Unlock() defer f.authMu.Unlock()
opts := rest.Opts{ opts := rest.Opts{
Method: "GET", Method: "GET",
Path: "/b2api/v4/b2_authorize_account", Path: "/b2api/v1/b2_authorize_account",
RootURL: f.opt.Endpoint, RootURL: f.opt.Endpoint,
UserName: f.opt.Account, UserName: f.opt.Account,
Password: f.opt.Key, Password: f.opt.Key,
@@ -668,13 +583,18 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
if err != nil { if err != nil {
return fmt.Errorf("failed to authenticate: %w", err) return fmt.Errorf("failed to authenticate: %w", err)
} }
f.srv.SetRoot(f.info.APIs.Storage.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken) f.srv.SetRoot(f.info.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
return nil return nil
} }
// hasPermission returns if the current AuthorizationToken has the selected permission // hasPermission returns if the current AuthorizationToken has the selected permission
func (f *Fs) hasPermission(permission string) bool { func (f *Fs) hasPermission(permission string) bool {
return slices.Contains(f.info.APIs.Storage.Allowed.Capabilities, permission) for _, capability := range f.info.Allowed.Capabilities {
if capability == permission {
return true
}
}
return false
} }
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken // getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
@@ -931,7 +851,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.File
} }
// listDir lists a single directory // listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) { func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
last := "" last := ""
err = f.list(ctx, bucket, directory, prefix, f.rootBucket == "", false, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error { err = f.list(ctx, bucket, directory, prefix, f.rootBucket == "", false, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last) entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
@@ -939,16 +859,16 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
return err return err
} }
if entry != nil { if entry != nil {
return callback(entry) entries = append(entries, entry)
} }
return nil return nil
}) })
if err != nil { if err != nil {
return err return nil, err
} }
// bucket must be present if listing succeeded // bucket must be present if listing succeeded
f.cache.MarkOK(bucket) f.cache.MarkOK(bucket)
return nil return entries, nil
} }
// listBuckets returns all the buckets to out // listBuckets returns all the buckets to out
@@ -974,46 +894,14 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
// This should return ErrDirNotFound if the directory isn't // This should return ErrDirNotFound if the directory isn't
// found. // found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
bucket, directory := f.split(dir) bucket, directory := f.split(dir)
if bucket == "" { if bucket == "" {
if directory != "" { if directory != "" {
return fs.ErrorListBucketRequired return nil, fs.ErrorListBucketRequired
}
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
if err != nil {
return err
} }
return f.listBuckets(ctx)
} }
return list.Flush() return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
} }
// ListR lists the objects and directories of the Fs starting // ListR lists the objects and directories of the Fs starting
@@ -1034,7 +922,7 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) e
// of listing recursively than doing a directory traversal. // of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir) bucket, directory := f.split(dir)
list := list.NewHelper(callback) list := walk.NewListRHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error { listR := func(bucket, directory, prefix string, addBucket bool) error {
last := "" last := ""
return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error { return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
@@ -1079,83 +967,44 @@ type listBucketFn func(*api.Bucket) error
// listBucketsToFn lists the buckets to the function supplied // listBucketsToFn lists the buckets to the function supplied
func (f *Fs) listBucketsToFn(ctx context.Context, bucketName string, fn listBucketFn) error { func (f *Fs) listBucketsToFn(ctx context.Context, bucketName string, fn listBucketFn) error {
responses := make([]api.ListBucketsResponse, len(f.info.APIs.Storage.Allowed.Buckets))[:0] var account = api.ListBucketsRequest{
AccountID: f.info.AccountID,
call := func(id string) error { BucketID: f.info.Allowed.BucketID,
var account = api.ListBucketsRequest{ }
AccountID: f.info.AccountID, if bucketName != "" && account.BucketID == "" {
BucketID: id, account.BucketName = f.opt.Enc.FromStandardName(bucketName)
}
if bucketName != "" && account.BucketID == "" {
account.BucketName = f.opt.Enc.FromStandardName(bucketName)
}
var response api.ListBucketsResponse
opts := rest.Opts{
Method: "POST",
Path: "/b2_list_buckets",
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &account, &response)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return err
}
responses = append(responses, response)
return nil
} }
for i := range f.info.APIs.Storage.Allowed.Buckets { var response api.ListBucketsResponse
b := &f.info.APIs.Storage.Allowed.Buckets[i] opts := rest.Opts{
// Empty names indicate a bucket that no longer exists; this is non-fatal Method: "POST",
// for multi-bucket API keys. Path: "/b2_list_buckets",
if b.Name == "" {
continue
}
// When requesting a specific bucket skip over non-matching names
if bucketName != "" && b.Name != bucketName {
continue
}
err := call(b.ID)
if err != nil {
return err
}
} }
err := f.pacer.Call(func() (bool, error) {
if len(f.info.APIs.Storage.Allowed.Buckets) == 0 { resp, err := f.srv.CallJSON(ctx, &opts, &account, &response)
err := call("") return f.shouldRetry(ctx, resp, err)
if err != nil { })
return err if err != nil {
} return err
} }
f.bucketIDMutex.Lock() f.bucketIDMutex.Lock()
f.bucketTypeMutex.Lock() f.bucketTypeMutex.Lock()
f._bucketID = make(map[string]string, 1) f._bucketID = make(map[string]string, 1)
f._bucketType = make(map[string]string, 1) f._bucketType = make(map[string]string, 1)
for i := range response.Buckets {
for ri := range responses { bucket := &response.Buckets[i]
response := &responses[ri] bucket.Name = f.opt.Enc.ToStandardName(bucket.Name)
for i := range response.Buckets { f.cache.MarkOK(bucket.Name)
bucket := &response.Buckets[i] f._bucketID[bucket.Name] = bucket.ID
bucket.Name = f.opt.Enc.ToStandardName(bucket.Name) f._bucketType[bucket.Name] = bucket.Type
f.cache.MarkOK(bucket.Name)
f._bucketID[bucket.Name] = bucket.ID
f._bucketType[bucket.Name] = bucket.Type
}
} }
f.bucketTypeMutex.Unlock() f.bucketTypeMutex.Unlock()
f.bucketIDMutex.Unlock() f.bucketIDMutex.Unlock()
for ri := range responses { for i := range response.Buckets {
response := &responses[ri] bucket := &response.Buckets[i]
for i := range response.Buckets { err = fn(bucket)
bucket := &response.Buckets[i] if err != nil {
err := fn(bucket) return err
if err != nil {
return err
}
} }
} }
return nil return nil
@@ -1426,7 +1275,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
toBeDeleted := make(chan *api.File, f.ci.Transfers) toBeDeleted := make(chan *api.File, f.ci.Transfers)
var wg sync.WaitGroup var wg sync.WaitGroup
wg.Add(f.ci.Transfers) wg.Add(f.ci.Transfers)
for range f.ci.Transfers { for i := 0; i < f.ci.Transfers; i++ {
go func() { go func() {
defer wg.Done() defer wg.Done()
for object := range toBeDeleted { for object := range toBeDeleted {
@@ -1469,22 +1318,16 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
// Check current version of the file // Check current version of the file
if deleteHidden && object.Action == "hide" { if deleteHidden && object.Action == "hide" {
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID) fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
if !operations.SkipDestructive(ctx, object.Name, "remove hide marker") { toBeDeleted <- object
toBeDeleted <- object
}
} else if deleteUnfinished && object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) { } else if deleteUnfinished && object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local()) fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
if !operations.SkipDestructive(ctx, object.Name, "remove pending upload") { toBeDeleted <- object
toBeDeleted <- object
}
} else { } else {
fs.Debugf(remote, "Not deleting current version (id %q) %q dated %v (%v ago)", object.ID, object.Action, time.Time(object.UploadTimestamp).Local(), time.Since(time.Time(object.UploadTimestamp))) fs.Debugf(remote, "Not deleting current version (id %q) %q dated %v (%v ago)", object.ID, object.Action, time.Time(object.UploadTimestamp).Local(), time.Since(time.Time(object.UploadTimestamp)))
} }
} else { } else {
fs.Debugf(remote, "Deleting (id %q)", object.ID) fs.Debugf(remote, "Deleting (id %q)", object.ID)
if !operations.SkipDestructive(ctx, object.Name, "delete") { toBeDeleted <- object
toBeDeleted <- object
}
} }
last = remote last = remote
tr.Done(ctx, nil) tr.Done(ctx, nil)
@@ -1558,16 +1401,6 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
Name: f.opt.Enc.FromStandardPath(dstPath), Name: f.opt.Enc.FromStandardPath(dstPath),
DestBucketID: destBucketID, DestBucketID: destBucketID,
} }
if f.opt.SSECustomerKey != "" && f.opt.SSECustomerKeyMD5 != "" {
serverSideEncryptionConfig := api.ServerSideEncryption{
Mode: "SSE-C",
Algorithm: f.opt.SSECustomerAlgorithm,
CustomerKey: f.opt.SSECustomerKeyBase64,
CustomerKeyMd5: f.opt.SSECustomerKeyMD5,
}
request.SourceServerSideEncryption = &serverSideEncryptionConfig
request.DestinationServerSideEncryption = &serverSideEncryptionConfig
}
if newInfo == nil { if newInfo == nil {
request.MetadataDirective = "COPY" request.MetadataDirective = "COPY"
} else { } else {
@@ -1657,7 +1490,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
bucket, bucketPath := f.split(remote) bucket, bucketPath := f.split(remote)
var RootURL string var RootURL string
if f.opt.DownloadURL == "" { if f.opt.DownloadURL == "" {
RootURL = f.info.APIs.Storage.DownloadURL RootURL = f.info.DownloadURL
} else { } else {
RootURL = f.opt.DownloadURL RootURL = f.opt.DownloadURL
} }
@@ -1765,6 +1598,9 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
if err != nil { if err != nil {
return err return err
} }
// For now, just set "mtime" in metadata
o.meta = make(map[string]string, 1)
o.meta["mtime"] = o.modTime.Format(time.RFC3339Nano)
return nil return nil
} }
@@ -1838,21 +1674,6 @@ func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
return o.getMetaDataListing(ctx) return o.getMetaDataListing(ctx)
} }
} }
// If using versionAt we need to list the find the correct version.
if o.fs.opt.VersionAt.IsSet() {
info, err := o.getMetaDataListing(ctx)
if err != nil {
return nil, err
}
if info.Action == "hide" {
// Return object not found error if the current version is deleted.
return nil, fs.ErrorObjectNotFound
}
return info, nil
}
_, info, err = o.getOrHead(ctx, "HEAD", nil) _, info, err = o.getOrHead(ctx, "HEAD", nil)
return info, err return info, err
} }
@@ -1999,16 +1820,15 @@ var _ io.ReadCloser = &openFile{}
func (o *Object) getOrHead(ctx context.Context, method string, options []fs.OpenOption) (resp *http.Response, info *api.File, err error) { func (o *Object) getOrHead(ctx context.Context, method string, options []fs.OpenOption) (resp *http.Response, info *api.File, err error) {
opts := rest.Opts{ opts := rest.Opts{
Method: method, Method: method,
Options: options, Options: options,
NoResponse: method == "HEAD", NoResponse: method == "HEAD",
ExtraHeaders: map[string]string{},
} }
// Use downloadUrl from backblaze if downloadUrl is not set // Use downloadUrl from backblaze if downloadUrl is not set
// otherwise use the custom downloadUrl // otherwise use the custom downloadUrl
if o.fs.opt.DownloadURL == "" { if o.fs.opt.DownloadURL == "" {
opts.RootURL = o.fs.info.APIs.Storage.DownloadURL opts.RootURL = o.fs.info.DownloadURL
} else { } else {
opts.RootURL = o.fs.opt.DownloadURL opts.RootURL = o.fs.opt.DownloadURL
} }
@@ -2020,11 +1840,6 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
bucket, bucketPath := o.split() bucket, bucketPath := o.split()
opts.Path += "/file/" + urlEncode(o.fs.opt.Enc.FromStandardName(bucket)) + "/" + urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath)) opts.Path += "/file/" + urlEncode(o.fs.opt.Enc.FromStandardName(bucket)) + "/" + urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath))
} }
if o.fs.opt.SSECustomerKey != "" && o.fs.opt.SSECustomerKeyMD5 != "" {
opts.ExtraHeaders[sseAlgorithmHeader] = o.fs.opt.SSECustomerAlgorithm
opts.ExtraHeaders[sseKeyHeader] = o.fs.opt.SSECustomerKeyBase64
opts.ExtraHeaders[sseMd5Header] = o.fs.opt.SSECustomerKeyMD5
}
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts) resp, err = o.fs.srv.Call(ctx, &opts)
return o.fs.shouldRetry(ctx, resp, err) return o.fs.shouldRetry(ctx, resp, err)
@@ -2065,18 +1880,20 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
Info: Info, Info: Info,
} }
// Embryonic metadata support - just mtime
o.meta = make(map[string]string, 1)
modTime, err := parseTimeStringHelper(info.Info[timeKey])
if err == nil {
o.meta["mtime"] = modTime.Format(time.RFC3339Nano)
}
// When reading files from B2 via cloudflare using // When reading files from B2 via cloudflare using
// --b2-download-url cloudflare strips the Content-Length // --b2-download-url cloudflare strips the Content-Length
// headers (presumably so it can inject stuff) so use the old // headers (presumably so it can inject stuff) so use the old
// length read from the listing. // length read from the listing.
// Additionally, the official examples return S3 headers
// instead of native, i.e. no file ID, use ones from listing.
if info.Size < 0 { if info.Size < 0 {
info.Size = o.size info.Size = o.size
} }
if info.ID == "" {
info.ID = o.id
}
return resp, info, nil return resp, info, nil
} }
@@ -2126,7 +1943,7 @@ func init() {
// urlEncode encodes in with % encoding // urlEncode encodes in with % encoding
func urlEncode(in string) string { func urlEncode(in string) string {
var out bytes.Buffer var out bytes.Buffer
for i := range len(in) { for i := 0; i < len(in); i++ {
c := in[i] c := in[i]
if noNeedToEncode[c] { if noNeedToEncode[c] {
_ = out.WriteByte(c) _ = out.WriteByte(c)
@@ -2289,11 +2106,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}, },
ContentLength: &size, ContentLength: &size,
} }
if o.fs.opt.SSECustomerKey != "" && o.fs.opt.SSECustomerKeyMD5 != "" {
opts.ExtraHeaders[sseAlgorithmHeader] = o.fs.opt.SSECustomerAlgorithm
opts.ExtraHeaders[sseKeyHeader] = o.fs.opt.SSECustomerKeyBase64
opts.ExtraHeaders[sseMd5Header] = o.fs.opt.SSECustomerKeyMD5
}
var response api.FileInfo var response api.FileInfo
// Don't retry, return a retry error instead // Don't retry, return a retry error instead
err = o.fs.pacer.CallNoRetry(func() (bool, error) { err = o.fs.pacer.CallNoRetry(func() (bool, error) {
@@ -2368,27 +2180,20 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
return info, nil, err return info, nil, err
} }
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
if err != nil {
return info, nil, err
}
info = fs.ChunkWriterInfo{ info = fs.ChunkWriterInfo{
ChunkSize: up.chunkSize, ChunkSize: int64(f.opt.ChunkSize),
Concurrency: o.fs.opt.UploadConcurrency, Concurrency: o.fs.opt.UploadConcurrency,
//LeavePartsOnError: o.fs.opt.LeavePartsOnError, //LeavePartsOnError: o.fs.opt.LeavePartsOnError,
} }
return info, up, nil up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
return info, up, err
} }
// Remove an object // Remove an object
func (o *Object) Remove(ctx context.Context) error { func (o *Object) Remove(ctx context.Context) error {
bucket, bucketPath := o.split() bucket, bucketPath := o.split()
if o.fs.opt.Versions { if o.fs.opt.Versions {
t, path := api.RemoveVersion(bucketPath) return errNotWithVersions
if !t.IsZero() {
return o.fs.deleteByID(ctx, o.id, path)
}
} }
if o.fs.opt.VersionAt.IsSet() { if o.fs.opt.VersionAt.IsSet() {
return errNotWithVersionAt return errNotWithVersionAt
@@ -2411,36 +2216,31 @@ func (o *Object) ID() string {
var lifecycleHelp = fs.CommandHelp{ var lifecycleHelp = fs.CommandHelp{
Name: "lifecycle", Name: "lifecycle",
Short: "Read or set the lifecycle for a bucket.", Short: "Read or set the lifecycle for a bucket",
Long: `This command can be used to read or set the lifecycle for a bucket. Long: `This command can be used to read or set the lifecycle for a bucket.
Usage Examples:
To show the current lifecycle rules: To show the current lifecycle rules:
` + "```console" + ` rclone backend lifecycle b2:bucket
rclone backend lifecycle b2:bucket
` + "```" + `
This will dump something like this showing the lifecycle rules. This will dump something like this showing the lifecycle rules.
` + "```json" + ` [
[ {
{ "daysFromHidingToDeleting": 1,
"daysFromHidingToDeleting": 1, "daysFromUploadingToHiding": null,
"daysFromUploadingToHiding": null, "fileNamePrefix": ""
"daysFromStartingToCancelingUnfinishedLargeFiles": null, }
"fileNamePrefix": "" ]
}
]
` + "```" + `
If there are no lifecycle rules (the default) then it will just return ` + "`[]`" + `. If there are no lifecycle rules (the default) then it will just return [].
To reset the current lifecycle rules: To reset the current lifecycle rules:
` + "```console" + ` rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30 rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1
rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1
` + "```" + `
This will run and then print the new lifecycle rules as above. This will run and then print the new lifecycle rules as above.
@@ -2452,21 +2252,17 @@ the daysFromHidingToDeleting to 1 day. You can enable hard_delete in
the config also which will mean deletions won't cause versions but the config also which will mean deletions won't cause versions but
overwrites will still cause versions to be made. overwrites will still cause versions to be made.
` + "```console" + ` rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1
` + "```" + `
See: <https://www.backblaze.com/docs/cloud-storage-lifecycle-rules>`, See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
`,
Opts: map[string]string{ Opts: map[string]string{
"daysFromHidingToDeleting": `After a file has been hidden for this many days "daysFromHidingToDeleting": "After a file has been hidden for this many days it is deleted. 0 is off.",
it is deleted. 0 is off.`, "daysFromUploadingToHiding": "This many days after uploading a file is hidden",
"daysFromUploadingToHiding": `This many days after uploading a file is hidden.`,
"daysFromStartingToCancelingUnfinishedLargeFiles": `Cancels any unfinished
large file versions after this many days.`,
}, },
} }
func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) { func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
var newRule api.LifecycleRule var newRule api.LifecycleRule
if daysStr := opt["daysFromHidingToDeleting"]; daysStr != "" { if daysStr := opt["daysFromHidingToDeleting"]; daysStr != "" {
days, err := strconv.Atoi(daysStr) days, err := strconv.Atoi(daysStr)
@@ -2482,23 +2278,14 @@ func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, op
} }
newRule.DaysFromUploadingToHiding = &days newRule.DaysFromUploadingToHiding = &days
} }
if daysStr := opt["daysFromStartingToCancelingUnfinishedLargeFiles"]; daysStr != "" {
days, err := strconv.Atoi(daysStr)
if err != nil {
return nil, fmt.Errorf("bad daysFromStartingToCancelingUnfinishedLargeFiles: %w", err)
}
newRule.DaysFromStartingToCancelingUnfinishedLargeFiles = &days
}
bucketName, _ := f.split("") bucketName, _ := f.split("")
if bucketName == "" { if bucketName == "" {
return nil, errors.New("bucket required") return nil, errors.New("bucket required")
} }
skip := operations.SkipDestructive(ctx, name, "update lifecycle rules")
var bucket *api.Bucket var bucket *api.Bucket
if !skip && (newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil || newRule.DaysFromStartingToCancelingUnfinishedLargeFiles != nil) { if newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil {
bucketID, err := f.getBucketID(ctx, bucketName) bucketID, err := f.getBucketID(ctx, bucketName)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -2545,18 +2332,17 @@ max-age, which defaults to 24 hours.
Note that you can use --interactive/-i or --dry-run with this command to see what Note that you can use --interactive/-i or --dry-run with this command to see what
it would do. it would do.
` + "```console" + ` rclone backend cleanup b2:bucket/path/to/object
rclone backend cleanup b2:bucket/path/to/object rclone backend cleanup -o max-age=7w b2:bucket/path/to/object
rclone backend cleanup -o max-age=7w b2:bucket/path/to/object
` + "```" + `
Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.`, Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
`,
Opts: map[string]string{ Opts: map[string]string{
"max-age": "Max age of upload to delete.", "max-age": "Max age of upload to delete",
}, },
} }
func (f *Fs) cleanupCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) { func (f *Fs) cleanupCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
maxAge := defaultMaxAge maxAge := defaultMaxAge
if opt["max-age"] != "" { if opt["max-age"] != "" {
maxAge, err = fs.ParseDuration(opt["max-age"]) maxAge, err = fs.ParseDuration(opt["max-age"])
@@ -2575,12 +2361,11 @@ var cleanupHiddenHelp = fs.CommandHelp{
Note that you can use --interactive/-i or --dry-run with this command to see what Note that you can use --interactive/-i or --dry-run with this command to see what
it would do. it would do.
` + "```console" + ` rclone backend cleanup-hidden b2:bucket/path/to/dir
rclone backend cleanup-hidden b2:bucket/path/to/dir `,
` + "```",
} }
func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) { func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
return nil, f.cleanUp(ctx, true, false, 0) return nil, f.cleanUp(ctx, true, false, 0)
} }
@@ -2599,7 +2384,7 @@ var commandHelp = []fs.CommandHelp{
// The result should be capable of being JSON encoded // The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user // If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that // otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) { func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name { switch name {
case "lifecycle": case "lifecycle":
return f.lifecycleCommand(ctx, name, arg, opt) return f.lifecycleCommand(ctx, name, arg, opt)
@@ -2620,7 +2405,6 @@ var (
_ fs.PutStreamer = &Fs{} _ fs.PutStreamer = &Fs{}
_ fs.CleanUpper = &Fs{} _ fs.CleanUpper = &Fs{}
_ fs.ListRer = &Fs{} _ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.PublicLinker = &Fs{} _ fs.PublicLinker = &Fs{}
_ fs.OpenChunkWriter = &Fs{} _ fs.OpenChunkWriter = &Fs{}
_ fs.Commander = &Fs{} _ fs.Commander = &Fs{}
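
The SSE-C handling visible in the hunks above derives a Base64 copy of the raw customer key plus an MD5 checksum of the key, and attaches the three X-Bz-Server-Side-Encryption-Customer-* headers to downloads, uploads and copies. Here is a small self-contained sketch of that derivation, assuming a raw (unencoded) key as input; it reuses the header names from the diff but is not the backend's own code path.

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

const (
	sseAlgorithmHeader = "X-Bz-Server-Side-Encryption-Customer-Algorithm"
	sseKeyHeader       = "X-Bz-Server-Side-Encryption-Customer-Key"
	sseMd5Header       = "X-Bz-Server-Side-Encryption-Customer-Key-Md5"
)

// sseHeaders returns the three SSE-C headers for a raw (unencoded) customer key:
// the key is sent Base64-encoded and the MD5 is computed over the raw key bytes.
func sseHeaders(rawKey string) map[string]string {
	keyB64 := base64.StdEncoding.EncodeToString([]byte(rawKey))
	sum := md5.Sum([]byte(rawKey))
	return map[string]string{
		sseAlgorithmHeader: "AES256",
		sseKeyHeader:       keyB64,
		sseMd5Header:       base64.StdEncoding.EncodeToString(sum[:]),
	}
}

func main() {
	for k, v := range sseHeaders("0123456789abcdef0123456789abcdef") {
		fmt.Println(k+":", v)
	}
}

If the key is supplied already Base64-encoded (sse_customer_key_base64), the code in the diff decodes it first and computes the MD5 over the decoded bytes in the same way.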

View File

@@ -5,7 +5,6 @@ import (
"crypto/sha1" "crypto/sha1"
"fmt" "fmt"
"path" "path"
"sort"
"strings" "strings"
"testing" "testing"
"time" "time"
@@ -14,7 +13,6 @@ import (
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache" "github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/bucket" "github.com/rclone/rclone/lib/bucket"
@@ -258,6 +256,12 @@ func (f *Fs) internalTestMetadata(t *testing.T, size string, uploadCutoff string
assert.Equal(t, v, got, k) assert.Equal(t, v, got, k)
} }
// mtime
for k, v := range metadata {
got := o.meta[k]
assert.Equal(t, v, got, k)
}
assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type") assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")
// Modification time from the x-bz-info-src_last_modified_millis header // Modification time from the x-bz-info-src_last_modified_millis header
@@ -446,174 +450,37 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
t.Run("List", func(t *testing.T) { t.Run("List", func(t *testing.T) {
fstest.CheckListing(t, f, test.want) fstest.CheckListing(t, f, test.want)
}) })
// b2 NewObject doesn't work with VersionAt
t.Run("NewObject", func(t *testing.T) { //t.Run("NewObject", func(t *testing.T) {
gotObj, gotErr := f.NewObject(ctx, fileName) // gotObj, gotErr := f.NewObject(ctx, fileName)
assert.Equal(t, test.wantErr, gotErr) // assert.Equal(t, test.wantErr, gotErr)
if gotErr == nil { // if gotErr == nil {
assert.Equal(t, test.wantSize, gotObj.Size()) // assert.Equal(t, test.wantSize, gotObj.Size())
} // }
}) //})
}) })
} }
}) })
t.Run("Cleanup", func(t *testing.T) { t.Run("Cleanup", func(t *testing.T) {
t.Run("DryRun", func(t *testing.T) { require.NoError(t, f.cleanUp(ctx, true, false, 0))
f.opt.Versions = true items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
defer func() { fstest.CheckListing(t, f, items)
f.opt.Versions = false // Set --b2-versions for this test
}() f.opt.Versions = true
// Listing should be unchanged after dry run defer func() {
before := listAllFiles(ctx, t, f, dirName) f.opt.Versions = false
ctx, ci := fs.AddConfig(ctx) }()
ci.DryRun = true fstest.CheckListing(t, f, items)
require.NoError(t, f.cleanUp(ctx, true, false, 0))
after := listAllFiles(ctx, t, f, dirName)
assert.Equal(t, before, after)
})
t.Run("RealThing", func(t *testing.T) {
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
// Listing should reflect current state after cleanup
require.NoError(t, f.cleanUp(ctx, true, false, 0))
items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
fstest.CheckListing(t, f, items)
})
}) })
// Purge gets tested later // Purge gets tested later
} }
func (f *Fs) InternalTestCleanupUnfinished(t *testing.T) {
ctx := context.Background()
// CleanupUnfinished tests cleaning up unfinished large file uploads
t.Run("CleanupUnfinished", func(t *testing.T) {
dirName := "unfinished"
fileCount := 5
expectedFiles := []string{}
for i := 1; i < fileCount; i++ {
fileName := fmt.Sprintf("%s/unfinished-%d", dirName, i)
expectedFiles = append(expectedFiles, fileName)
obj := &Object{
fs: f,
remote: fileName,
}
objInfo := object.NewStaticObjectInfo(fileName, fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil)
_, err := f.newLargeUpload(ctx, obj, nil, objInfo, f.opt.ChunkSize, false, nil)
require.NoError(t, err)
}
checkListing(ctx, t, f, dirName, expectedFiles)
t.Run("DryRun", func(t *testing.T) {
// Listing should not change after dry run
ctx, ci := fs.AddConfig(ctx)
ci.DryRun = true
require.NoError(t, f.cleanUp(ctx, false, true, 0))
checkListing(ctx, t, f, dirName, expectedFiles)
})
t.Run("RealThing", func(t *testing.T) {
// Listing should be empty after real cleanup
require.NoError(t, f.cleanUp(ctx, false, true, 0))
checkListing(ctx, t, f, dirName, []string{})
})
})
}
func listAllFiles(ctx context.Context, t *testing.T, f *Fs, dirName string) []string {
bucket, directory := f.split(dirName)
foundFiles := []string{}
require.NoError(t, f.list(ctx, bucket, directory, "", false, true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
if !isDirectory {
foundFiles = append(foundFiles, object.Name)
}
return nil
}))
sort.Strings(foundFiles)
return foundFiles
}
func checkListing(ctx context.Context, t *testing.T, f *Fs, dirName string, expectedFiles []string) {
foundFiles := listAllFiles(ctx, t, f, dirName)
sort.Strings(expectedFiles)
assert.Equal(t, expectedFiles, foundFiles)
}
func (f *Fs) InternalTestLifecycleRules(t *testing.T) {
ctx := context.Background()
opt := map[string]string{}
t.Run("InitState", func(t *testing.T) {
// There should be no lifecycle rules at the outset
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
})
t.Run("DryRun", func(t *testing.T) {
// There should still be no lifecycle rules after each dry run operation
ctx, ci := fs.AddConfig(ctx)
ci.DryRun = true
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
delete(opt, "daysFromHidingToDeleting")
opt["daysFromUploadingToHiding"] = "40"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
})
t.Run("RealThing", func(t *testing.T) {
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
delete(opt, "daysFromHidingToDeleting")
opt["daysFromUploadingToHiding"] = "40"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
})
}
// -run TestIntegration/FsMkdir/FsPutFiles/Internal // -run TestIntegration/FsMkdir/FsPutFiles/Internal
func (f *Fs) InternalTest(t *testing.T) { func (f *Fs) InternalTest(t *testing.T) {
t.Run("Metadata", f.InternalTestMetadata) t.Run("Metadata", f.InternalTestMetadata)
t.Run("Versions", f.InternalTestVersions) t.Run("Versions", f.InternalTestVersions)
t.Run("CleanupUnfinished", f.InternalTestCleanupUnfinished)
t.Run("LifecycleRules", f.InternalTestLifecycleRules)
} }
var _ fstests.InternalTester = (*Fs)(nil) var _ fstests.InternalTester = (*Fs)(nil)
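
The DryRun subtests shown in the hunks above follow a common rclone pattern: derive a per-test config with fs.AddConfig, set DryRun, and let operations.SkipDestructive decide whether the destructive step really runs (both calls appear in the hunks). A minimal sketch of that pattern follows, assuming the rclone module is importable; the path and action strings are illustrative only.

package main

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/operations"
)

func main() {
	// Copy the config into the context so the change is local to this caller.
	ctx, ci := fs.AddConfig(context.Background())
	ci.DryRun = true

	// With DryRun set, SkipDestructive logs what would happen and returns
	// true, so the caller skips the actual deletion.
	if !operations.SkipDestructive(ctx, "bucket/path/file.txt", "delete") {
		fmt.Println("would really delete here")
	}
}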

View File

@@ -144,14 +144,6 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
request.ContentType = newInfo.ContentType request.ContentType = newInfo.ContentType
request.Info = newInfo.Info request.Info = newInfo.Info
} }
if o.fs.opt.SSECustomerKey != "" && o.fs.opt.SSECustomerKeyMD5 != "" {
request.ServerSideEncryption = &api.ServerSideEncryption{
Mode: "SSE-C",
Algorithm: o.fs.opt.SSECustomerAlgorithm,
CustomerKey: o.fs.opt.SSECustomerKeyBase64,
CustomerKeyMd5: o.fs.opt.SSECustomerKeyMD5,
}
}
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
Path: "/b2_start_large_file", Path: "/b2_start_large_file",
@@ -303,12 +295,6 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
ContentLength: &sizeWithHash, ContentLength: &sizeWithHash,
} }
if up.o.fs.opt.SSECustomerKey != "" && up.o.fs.opt.SSECustomerKeyMD5 != "" {
opts.ExtraHeaders[sseAlgorithmHeader] = up.o.fs.opt.SSECustomerAlgorithm
opts.ExtraHeaders[sseKeyHeader] = up.o.fs.opt.SSECustomerKeyBase64
opts.ExtraHeaders[sseMd5Header] = up.o.fs.opt.SSECustomerKeyMD5
}
var response api.UploadPartResponse var response api.UploadPartResponse
resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response) resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
@@ -348,17 +334,6 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
PartNumber: int64(part + 1), PartNumber: int64(part + 1),
Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1), Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
} }
if up.o.fs.opt.SSECustomerKey != "" && up.o.fs.opt.SSECustomerKeyMD5 != "" {
serverSideEncryptionConfig := api.ServerSideEncryption{
Mode: "SSE-C",
Algorithm: up.o.fs.opt.SSECustomerAlgorithm,
CustomerKey: up.o.fs.opt.SSECustomerKeyBase64,
CustomerKeyMd5: up.o.fs.opt.SSECustomerKeyMD5,
}
request.SourceServerSideEncryption = &serverSideEncryptionConfig
request.DestinationServerSideEncryption = &serverSideEncryptionConfig
}
var response api.UploadPartResponse var response api.UploadPartResponse
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response) resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
retry, err := up.f.shouldRetry(ctx, resp, err) retry, err := up.f.shouldRetry(ctx, resp, err)
@@ -503,14 +478,17 @@ func (up *largeUpload) Copy(ctx context.Context) (err error) {
remaining = up.size remaining = up.size
) )
g.SetLimit(up.f.opt.UploadConcurrency) g.SetLimit(up.f.opt.UploadConcurrency)
for part := range up.parts { for part := 0; part < up.parts; part++ {
// Fail fast, in case an errgroup managed function returns an error // Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in copying all the other parts. // gCtx is cancelled. There is no point in copying all the other parts.
if gCtx.Err() != nil { if gCtx.Err() != nil {
break break
} }
reqSize := min(remaining, up.chunkSize) reqSize := remaining
if reqSize >= up.chunkSize {
reqSize = up.chunkSize
}
part := part // for the closure part := part // for the closure
g.Go(func() (err error) { g.Go(func() (err error) {
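
The Copy hunk above shows two ways of clamping the per-part size: a manual if statement and the Go 1.21+ built-in min, so every part is chunkSize bytes except the final remainder. A tiny illustrative sketch of that arithmetic (not the uploader itself):

package main

import "fmt"

// partSizes returns the size of each part when splitting total bytes into
// chunkSize-sized parts, with the last part taking whatever remains.
func partSizes(total, chunkSize int64) []int64 {
	var sizes []int64
	for remaining := total; remaining > 0; remaining -= min(remaining, chunkSize) {
		sizes = append(sizes, min(remaining, chunkSize))
	}
	return sizes
}

func main() {
	fmt.Println(partSizes(250, 100)) // [100 100 50]
}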

View File

@@ -125,21 +125,10 @@ type FolderItems struct {
Offset int `json:"offset"` Offset int `json:"offset"`
Limit int `json:"limit"` Limit int `json:"limit"`
NextMarker *string `json:"next_marker,omitempty"` NextMarker *string `json:"next_marker,omitempty"`
// There is some confusion about how this is actually Order []struct {
// returned. The []struct has worked for many years, but in By string `json:"by"`
// https://github.com/rclone/rclone/issues/8776 box was Direction string `json:"direction"`
// returning it not as a list. We don't actually use } `json:"order"`
// this so comment it out.
//
// Order struct {
// By string `json:"by"`
// Direction string `json:"direction"`
// } `json:"order"`
//
// Order []struct {
// By string `json:"by"`
// Direction string `json:"direction"`
// } `json:"order"`
} }
// Parent defined the ID of the parent directory // Parent defined the ID of the parent directory
@@ -282,9 +271,9 @@ type User struct {
ModifiedAt time.Time `json:"modified_at"` ModifiedAt time.Time `json:"modified_at"`
Language string `json:"language"` Language string `json:"language"`
Timezone string `json:"timezone"` Timezone string `json:"timezone"`
SpaceAmount float64 `json:"space_amount"` SpaceAmount int64 `json:"space_amount"`
SpaceUsed float64 `json:"space_used"` SpaceUsed int64 `json:"space_used"`
MaxUploadSize float64 `json:"max_upload_size"` MaxUploadSize int64 `json:"max_upload_size"`
Status string `json:"status"` Status string `json:"status"`
JobTitle string `json:"job_title"` JobTitle string `json:"job_title"`
Phone string `json:"phone"` Phone string `json:"phone"`
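
The comment added to FolderItems above notes that Box has returned order both as a list and as a single object (rclone/rclone#8776), which is why the field is commented out rather than given a fixed type. If such a field ever had to be kept, one defensive option is to capture it as json.RawMessage and decode it on demand; the sketch below is illustrative only and does not use the backend's own types.

package main

import (
	"encoding/json"
	"fmt"
)

// folderItems keeps "order" as raw JSON so either shape decodes without error.
type folderItems struct {
	TotalCount int             `json:"total_count"`
	Order      json.RawMessage `json:"order,omitempty"` // object or array, inspect later if needed
}

func main() {
	for _, data := range []string{
		`{"total_count": 1, "order": [{"by": "type", "direction": "ASC"}]}`,
		`{"total_count": 1, "order": {"by": "type", "direction": "ASC"}}`,
	} {
		var fi folderItems
		if err := json.Unmarshal([]byte(data), &fi); err != nil {
			panic(err)
		}
		fmt.Printf("total=%d rawOrder=%s\n", fi.TotalCount, fi.Order)
	}
}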

View File

@@ -37,7 +37,6 @@ import (
"github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/dircache" "github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env" "github.com/rclone/rclone/lib/env"
@@ -47,6 +46,7 @@ import (
"github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/rest" "github.com/rclone/rclone/lib/rest"
"github.com/youmark/pkcs8" "github.com/youmark/pkcs8"
"golang.org/x/oauth2"
) )
const ( const (
@@ -65,10 +65,12 @@ const (
// Globals // Globals
var ( var (
// Description of how to auth for this app // Description of how to auth for this app
oauthConfig = &oauthutil.Config{ oauthConfig = &oauth2.Config{
Scopes: nil, Scopes: nil,
AuthURL: "https://app.box.com/api/oauth2/authorize", Endpoint: oauth2.Endpoint{
TokenURL: "https://app.box.com/api/oauth2/token", AuthURL: "https://app.box.com/api/oauth2/authorize",
TokenURL: "https://app.box.com/api/oauth2/token",
},
ClientID: rcloneClientID, ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL, RedirectURL: oauthutil.RedirectURL,
@@ -87,11 +89,13 @@ func init() {
Description: "Box", Description: "Box",
NewFs: NewFs, NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) { Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
boxAccessToken, boxAccessTokenOk := m.Get("access_token") boxAccessToken, boxAccessTokenOk := m.Get("access_token")
var err error var err error
// If using box config.json, use JWT auth // If using box config.json, use JWT auth
if usesJWTAuth(m) { if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
err = refreshJWTToken(ctx, name, m) err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to configure token with jwt authentication: %w", err) return nil, fmt.Errorf("failed to configure token with jwt authentication: %w", err)
} }
@@ -112,11 +116,6 @@ func init() {
}, { }, {
Name: "box_config_file", Name: "box_config_file",
Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp, Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
}, {
Name: "config_credentials",
Help: "Box App config.json contents.\n\nLeave blank normally.",
Hide: fs.OptionHideBoth,
Sensitive: true,
}, { }, {
Name: "access_token", Name: "access_token",
Help: "Box App Primary Access Token\n\nLeave blank normally.", Help: "Box App Primary Access Token\n\nLeave blank normally.",
@@ -187,17 +186,9 @@ See: https://developer.box.com/guides/authentication/jwt/as-user/
}) })
} }
func usesJWTAuth(m configmap.Mapper) bool { func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
jsonFile, okFile := m.Get("box_config_file") jsonFile = env.ShellExpand(jsonFile)
jsonFileCredentials, okCredentials := m.Get("config_credentials") boxConfig, err := getBoxConfig(jsonFile)
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
return (okFile || okCredentials) && boxSubTypeOk && (jsonFile != "" || jsonFileCredentials != "") && boxSubType != ""
}
func refreshJWTToken(ctx context.Context, name string, m configmap.Mapper) error {
boxSubType, _ := m.Get("box_sub_type")
boxConfig, err := getBoxConfig(m)
if err != nil { if err != nil {
return fmt.Errorf("get box config: %w", err) return fmt.Errorf("get box config: %w", err)
} }
@@ -216,19 +207,12 @@ func refreshJWTToken(ctx context.Context, name string, m configmap.Mapper) error
return err return err
} }
func getBoxConfig(m configmap.Mapper) (boxConfig *api.ConfigJSON, err error) { func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
configFileCredentials, _ := m.Get("config_credentials") file, err := os.ReadFile(configFile)
configFileBytes := []byte(configFileCredentials) if err != nil {
return nil, fmt.Errorf("box: failed to read Box config: %w", err)
if configFileCredentials == "" {
configFile, _ := m.Get("box_config_file")
configFileBytes, err = os.ReadFile(configFile)
if err != nil {
return nil, fmt.Errorf("box: failed to read Box config: %w", err)
}
} }
err = json.Unmarshal(file, &boxConfig)
err = json.Unmarshal(configFileBytes, &boxConfig)
if err != nil { if err != nil {
return nil, fmt.Errorf("box: failed to parse Box config: %w", err) return nil, fmt.Errorf("box: failed to parse Box config: %w", err)
} }
@@ -256,8 +240,8 @@ func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomC
return claims, nil return claims, nil
} }
func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]any { func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]interface{} {
signingHeaders := map[string]any{ signingHeaders := map[string]interface{}{
"kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID, "kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
} }
return signingHeaders return signingHeaders
@@ -274,9 +258,6 @@ func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) { func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey)) block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
if block == nil {
return nil, errors.New("box: failed to PEM decode private key")
}
if len(rest) > 0 { if len(rest) > 0 {
return nil, fmt.Errorf("box: extra data included in private key: %w", err) return nil, fmt.Errorf("box: extra data included in private key: %w", err)
} }
@@ -503,12 +484,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.srv.SetHeader("as-user", f.opt.Impersonate) f.srv.SetHeader("as-user", f.opt.Impersonate)
} }
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
if ts != nil { if ts != nil {
// If using box config.json and JWT, renewing should just refresh the token and // If using box config.json and JWT, renewing should just refresh the token and
// should do so whether there are uploads pending or not. // should do so whether there are uploads pending or not.
if usesJWTAuth(m) { if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error { f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
err := refreshJWTToken(ctx, name, m) err := refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
return err return err
}) })
f.tokenRenewer.Start() f.tokenRenewer.Start()
@@ -721,27 +705,9 @@ OUTER:
// This should return ErrDirNotFound if the directory isn't // This should return ErrDirNotFound if the directory isn't
// found. // found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
directoryID, err := f.dirCache.FindDir(ctx, dir, false) directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil { if err != nil {
return err return nil, err
} }
var iErr error var iErr error
_, err = f.listAll(ctx, directoryID, false, false, true, func(info *api.Item) bool { _, err = f.listAll(ctx, directoryID, false, false, true, func(info *api.Item) bool {
@@ -751,22 +717,14 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) e
f.dirCache.Put(remote, info.ID) f.dirCache.Put(remote, info.ID)
d := fs.NewDir(remote, info.ModTime()).SetID(info.ID) d := fs.NewDir(remote, info.ModTime()).SetID(info.ID)
// FIXME more info from dir? // FIXME more info from dir?
err = list.Add(d) entries = append(entries, d)
if err != nil {
iErr = err
return true
}
} else if info.Type == api.ItemTypeFile { } else if info.Type == api.ItemTypeFile {
o, err := f.newObjectWithInfo(ctx, remote, info) o, err := f.newObjectWithInfo(ctx, remote, info)
if err != nil { if err != nil {
iErr = err iErr = err
return true return true
} }
err = list.Add(o) entries = append(entries, o)
if err != nil {
iErr = err
return true
}
} }
// Cache some metadata for this Item to help us process events later // Cache some metadata for this Item to help us process events later
@@ -782,12 +740,12 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) e
return false return false
}) })
if err != nil { if err != nil {
return err return nil, err
} }
if iErr != nil { if iErr != nil {
return iErr return nil, iErr
} }
return list.Flush() return entries, nil
} }
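For context on the pattern above: list.WithListP adapts a ListP implementation back into a plain List call, while callers that want streaming can invoke ListP through the Features struct and receive entries in tranches via the callback. A minimal, hypothetical caller might look like the sketch below (streamDir is an invented name, not rclone code).

// Sketch only: stream a directory listing from any fs.Fs, using ListP when available.
package main

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs"
)

func streamDir(ctx context.Context, f fs.Fs, dir string) error {
	if listP := f.Features().ListP; listP != nil {
		return listP(ctx, dir, func(entries fs.DirEntries) error {
			for _, e := range entries {
				fmt.Println(e.Remote())
			}
			return nil // returning an error here stops the listing early
		})
	}
	// Fall back to the buffering List call.
	entries, err := f.List(ctx, dir)
	if err != nil {
		return err
	}
	for _, e := range entries {
		fmt.Println(e.Remote())
	}
	return nil
}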
// Creates from the parameters passed in a half finished Object which // Creates from the parameters passed in a half finished Object which
@@ -1385,8 +1343,12 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
nextStreamPosition = streamPosition nextStreamPosition = streamPosition
for { for {
limit := f.opt.ListChunk
// box only allows a max of 500 events // box only allows a max of 500 events
limit := min(f.opt.ListChunk, 500) if limit > 500 {
limit = 500
}
opts := rest.Opts{ opts := rest.Opts{
Method: "GET", Method: "GET",
@@ -1783,7 +1745,6 @@ var (
_ fs.DirCacheFlusher = (*Fs)(nil) _ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil) _ fs.PublicLinker = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil) _ fs.CleanUpper = (*Fs)(nil)
_ fs.ListPer = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil) _ fs.Shutdowner = (*Fs)(nil)
_ fs.Object = (*Object)(nil) _ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil) _ fs.IDer = (*Object)(nil)


@@ -105,7 +105,7 @@ func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api
const defaultDelay = 10 const defaultDelay = 10
var tries int var tries int
outer: outer:
for tries = range maxTries { for tries = 0; tries < maxTries; tries++ {
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil) resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
if err != nil { if err != nil {
@@ -203,7 +203,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
errs := make(chan error, 1) errs := make(chan error, 1)
var wg sync.WaitGroup var wg sync.WaitGroup
outer: outer:
for part := range session.TotalParts { for part := 0; part < session.TotalParts; part++ {
// Check any errors // Check any errors
select { select {
case err = <-errs: case err = <-errs:
@@ -211,7 +211,10 @@ outer:
default: default:
} }
reqSize := min(remaining, chunkSize) reqSize := remaining
if reqSize >= chunkSize {
reqSize = chunkSize
}
// Make a block of memory // Make a block of memory
buf := make([]byte, reqSize) buf := make([]byte, reqSize)


@@ -29,7 +29,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/rc" "github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fs/walk" "github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/atexit" "github.com/rclone/rclone/lib/atexit"
@@ -684,7 +683,7 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
start, end int64 start, end int64
} }
parseChunks := func(ranges string) (crs []chunkRange, err error) { parseChunks := func(ranges string) (crs []chunkRange, err error) {
for part := range strings.SplitSeq(ranges, ",") { for _, part := range strings.Split(ranges, ",") {
var start, end int64 = 0, math.MaxInt64 var start, end int64 = 0, math.MaxInt64
switch ints := strings.Split(part, ":"); len(ints) { switch ints := strings.Split(part, ":"); len(ints) {
case 1: case 1:
@@ -1087,13 +1086,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return cachedEntries, nil return cachedEntries, nil
} }
func (f *Fs) recurse(ctx context.Context, dir string, list *list.Helper) error { func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error {
entries, err := f.List(ctx, dir) entries, err := f.List(ctx, dir)
if err != nil { if err != nil {
return err return err
} }
for i := range entries { for i := 0; i < len(entries); i++ {
innerDir, ok := entries[i].(fs.Directory) innerDir, ok := entries[i].(fs.Directory)
if ok { if ok {
err := f.recurse(ctx, innerDir.Remote(), list) err := f.recurse(ctx, innerDir.Remote(), list)
@@ -1139,7 +1138,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
} }
// if we're here, we're gonna do a standard recursive traversal and cache everything // if we're here, we're gonna do a standard recursive traversal and cache everything
list := list.NewHelper(callback) list := walk.NewListRHelper(callback)
err = f.recurse(ctx, dir, list) err = f.recurse(ctx, dir, list)
if err != nil { if err != nil {
return err return err
@@ -1429,7 +1428,7 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i
}() }()
// wait until both are done // wait until both are done
for range 2 { for c := 0; c < 2; c++ {
<-done <-done
} }
} }
@@ -1754,7 +1753,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
} }
// Stats returns stats about the cache storage // Stats returns stats about the cache storage
func (f *Fs) Stats() (map[string]map[string]any, error) { func (f *Fs) Stats() (map[string]map[string]interface{}, error) {
return f.cache.Stats() return f.cache.Stats()
} }
@@ -1934,7 +1933,7 @@ var commandHelp = []fs.CommandHelp{
// The result should be capable of being JSON encoded // The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user // If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that // otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (any, error) { func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
switch name { switch name {
case "stats": case "stats":
return f.Stats() return f.Stats()


@@ -360,7 +360,7 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, int64(len(checkSample)), o.Size()) require.Equal(t, int64(len(checkSample)), o.Size())
for i := range checkSample { for i := 0; i < len(checkSample); i++ {
require.Equal(t, testData[i], checkSample[i]) require.Equal(t, testData[i], checkSample[i])
} }
} }
@@ -387,7 +387,7 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {
readData, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false) readData, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
require.NoError(t, err) require.NoError(t, err)
for i := range readData { for i := 0; i < len(readData); i++ {
require.Equalf(t, testData[i], readData[i], "at byte %v", i) require.Equalf(t, testData[i], readData[i], "at byte %v", i)
} }
} }
@@ -688,7 +688,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
co, ok := o.(*cache.Object) co, ok := o.(*cache.Object)
require.True(t, ok) require.True(t, ok)
for i := range 4 { // read first 4 for i := 0; i < 4; i++ { // read first 4
_ = runInstance.readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false) _ = runInstance.readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false)
} }
cfs.CleanUpCache(true) cfs.CleanUpCache(true)
@@ -971,7 +971,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
f, err := os.CreateTemp("", "rclonecache-tempfile") f, err := os.CreateTemp("", "rclonecache-tempfile")
require.NoError(t, err) require.NoError(t, err)
for range int(cnt) { for i := 0; i < int(cnt); i++ {
data := randStringBytes(int(chunk)) data := randStringBytes(int(chunk))
_, _ = f.Write(data) _, _ = f.Write(data)
} }
@@ -1085,9 +1085,9 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
return err return err
} }
func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]any, error) { func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error) {
var err error var err error
var l []any var l []interface{}
var list fs.DirEntries var list fs.DirEntries
list, err = f.List(context.Background(), remote) list, err = f.List(context.Background(), remote)
for _, ll := range list { for _, ll := range list {
@@ -1215,7 +1215,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
var err error var err error
var state cache.BackgroundUploadState var state cache.BackgroundUploadState
for range 2 { for i := 0; i < 2; i++ {
select { select {
case state = <-buCh: case state = <-buCh:
// continue // continue
@@ -1293,7 +1293,7 @@ func (r *run) completeAllBackgroundUploads(t *testing.T, f fs.Fs, lastRemote str
func (r *run) retryBlock(block func() error, maxRetries int, rate time.Duration) error { func (r *run) retryBlock(block func() error, maxRetries int, rate time.Duration) error {
var err error var err error
for range maxRetries { for i := 0; i < maxRetries; i++ {
err = block() err = block()
if err == nil { if err == nil {
return nil return nil


@@ -17,7 +17,7 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
RemoteName: "TestCache:", RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil), NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata", "ListP"}, UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata", "SetMetadata"}, UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata", "SetMetadata"},
UnimplementableDirectoryMethods: []string{"Metadata", "SetMetadata", "SetModTime"}, UnimplementableDirectoryMethods: []string{"Metadata", "SetMetadata", "SetModTime"},
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache


@@ -162,7 +162,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
randInstance := rand.New(rand.NewSource(time.Now().Unix())) randInstance := rand.New(rand.NewSource(time.Now().Unix()))
lastFile := "" lastFile := ""
for i := range totalFiles { for i := 0; i < totalFiles; i++ {
size := int64(randInstance.Intn(maxSize-minSize) + minSize) size := int64(randInstance.Intn(maxSize-minSize) + minSize)
testReader := runInstance.randomReader(t, size) testReader := runInstance.randomReader(t, size)
remote := "test/" + strconv.Itoa(i) + ".bin" remote := "test/" + strconv.Itoa(i) + ".bin"


@@ -182,7 +182,7 @@ func (r *Handle) queueOffset(offset int64) {
} }
} }
for i := range r.workers { for i := 0; i < r.workers; i++ {
o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i) o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
if o < 0 || o >= r.cachedObject.Size() { if o < 0 || o >= r.cachedObject.Size() {
continue continue
@@ -222,7 +222,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
if !found { if !found {
// we're gonna give the workers a chance to pickup the chunk // we're gonna give the workers a chance to pickup the chunk
// and retry a couple of times // and retry a couple of times
for i := range r.cacheFs().opt.ReadRetries * 8 { for i := 0; i < r.cacheFs().opt.ReadRetries*8; i++ {
data, err = r.storage().GetChunk(r.cachedObject, chunkStart) data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
if err == nil { if err == nil {
found = true found = true


@@ -209,7 +209,7 @@ func (p *plexConnector) authenticate() error {
if err != nil { if err != nil {
return err return err
} }
var data map[string]any var data map[string]interface{}
err = json.NewDecoder(resp.Body).Decode(&data) err = json.NewDecoder(resp.Body).Decode(&data)
if err != nil { if err != nil {
return fmt.Errorf("failed to obtain token: %w", err) return fmt.Errorf("failed to obtain token: %w", err)
@@ -273,11 +273,11 @@ func (p *plexConnector) isPlaying(co *Object) bool {
} }
// adapted from: https://stackoverflow.com/a/28878037 (credit) // adapted from: https://stackoverflow.com/a/28878037 (credit)
func get(m any, path ...any) (any, bool) { func get(m interface{}, path ...interface{}) (interface{}, bool) {
for _, p := range path { for _, p := range path {
switch idx := p.(type) { switch idx := p.(type) {
case string: case string:
if mm, ok := m.(map[string]any); ok { if mm, ok := m.(map[string]interface{}); ok {
if val, found := mm[idx]; found { if val, found := mm[idx]; found {
m = val m = val
continue continue
@@ -285,7 +285,7 @@ func get(m any, path ...any) (any, bool) {
} }
return nil, false return nil, false
case int: case int:
if mm, ok := m.([]any); ok { if mm, ok := m.([]interface{}); ok {
if len(mm) > idx { if len(mm) > idx {
m = mm[idx] m = mm[idx]
continue continue


@@ -18,7 +18,6 @@ import (
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk" "github.com/rclone/rclone/fs/walk"
bolt "go.etcd.io/bbolt" bolt "go.etcd.io/bbolt"
"go.etcd.io/bbolt/errors"
) )
// Constants // Constants
@@ -598,7 +597,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
}) })
if err != nil { if err != nil {
if err == errors.ErrDatabaseNotOpen { if err == bolt.ErrDatabaseNotOpen {
// we're likely a late janitor and we need to end quietly as there's no guarantee of what exists anymore // we're likely a late janitor and we need to end quietly as there's no guarantee of what exists anymore
return return
} }
@@ -607,16 +606,16 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
} }
// Stats returns a go map with the stats key values // Stats returns a go map with the stats key values
func (b *Persistent) Stats() (map[string]map[string]any, error) { func (b *Persistent) Stats() (map[string]map[string]interface{}, error) {
r := make(map[string]map[string]any) r := make(map[string]map[string]interface{})
r["data"] = make(map[string]any) r["data"] = make(map[string]interface{})
r["data"]["oldest-ts"] = time.Now() r["data"]["oldest-ts"] = time.Now()
r["data"]["oldest-file"] = "" r["data"]["oldest-file"] = ""
r["data"]["newest-ts"] = time.Now() r["data"]["newest-ts"] = time.Now()
r["data"]["newest-file"] = "" r["data"]["newest-file"] = ""
r["data"]["total-chunks"] = 0 r["data"]["total-chunks"] = 0
r["data"]["total-size"] = int64(0) r["data"]["total-size"] = int64(0)
r["files"] = make(map[string]any) r["files"] = make(map[string]interface{})
r["files"]["oldest-ts"] = time.Now() r["files"]["oldest-ts"] = time.Now()
r["files"]["oldest-name"] = "" r["files"]["oldest-name"] = ""
r["files"]["newest-ts"] = time.Now() r["files"]["newest-ts"] = time.Now()


@@ -1,4 +1,5 @@
//go:build !plan9 && !js //go:build !plan9 && !js
// +build !plan9,!js
package cache package cache


@@ -356,8 +356,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
DirModTimeUpdatesOnWrite: true, DirModTimeUpdatesOnWrite: true,
}).Fill(ctx, f).Mask(ctx, baseFs).WrapsFs(f, baseFs) }).Fill(ctx, f).Mask(ctx, baseFs).WrapsFs(f, baseFs)
f.features.ListR = nil // Recursive listing may cause chunker to skip files f.features.Disable("ListR") // Recursive listing may cause chunker to skip files
f.features.ListP = nil // ListP not supported yet
return f, err return f, err
} }
@@ -633,7 +632,7 @@ func (f *Fs) parseChunkName(filePath string) (parentPath string, chunkNo int, ct
// forbidChunk prints error message or raises error if file is chunk. // forbidChunk prints error message or raises error if file is chunk.
// First argument sets log prefix, use `false` to suppress message. // First argument sets log prefix, use `false` to suppress message.
func (f *Fs) forbidChunk(o any, filePath string) error { func (f *Fs) forbidChunk(o interface{}, filePath string) error {
if parentPath, _, _, _ := f.parseChunkName(filePath); parentPath != "" { if parentPath, _, _, _ := f.parseChunkName(filePath); parentPath != "" {
if f.opt.FailHard { if f.opt.FailHard {
return fmt.Errorf("chunk overlap with %q", parentPath) return fmt.Errorf("chunk overlap with %q", parentPath)
@@ -681,7 +680,7 @@ func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err
circleSec := unixSec % closestPrimeZzzzSeconds circleSec := unixSec % closestPrimeZzzzSeconds
first4chars := strconv.FormatInt(circleSec, 36) first4chars := strconv.FormatInt(circleSec, 36)
for range maxTransactionProbes { for tries := 0; tries < maxTransactionProbes; tries++ {
f.xactIDMutex.Lock() f.xactIDMutex.Lock()
randomness := f.xactIDRand.Int63n(maxTwoBase36Digits + 1) randomness := f.xactIDRand.Int63n(maxTwoBase36Digits + 1)
f.xactIDMutex.Unlock() f.xactIDMutex.Unlock()
@@ -1190,7 +1189,10 @@ func (f *Fs) put(
} }
tempRemote := f.makeChunkName(baseRemote, c.chunkNo, "", xactID) tempRemote := f.makeChunkName(baseRemote, c.chunkNo, "", xactID)
size := min(c.sizeLeft, c.chunkSize) size := c.sizeLeft
if size > c.chunkSize {
size = c.chunkSize
}
savedReadCount := c.readCount savedReadCount := c.readCount
// If a single chunk is expected, avoid the extra rename operation // If a single chunk is expected, avoid the extra rename operation
@@ -1475,7 +1477,10 @@ func (c *chunkingReader) dummyRead(in io.Reader, size int64) error {
const bufLen = 1048576 // 1 MiB const bufLen = 1048576 // 1 MiB
buf := make([]byte, bufLen) buf := make([]byte, bufLen)
for size > 0 { for size > 0 {
n := min(size, bufLen) n := size
if n > bufLen {
n = bufLen
}
if _, err := io.ReadFull(in, buf[0:n]); err != nil { if _, err := io.ReadFull(in, buf[0:n]); err != nil {
return err return err
} }
@@ -1861,8 +1866,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// baseMove chains to the wrapped Move or simulates it by Copy+Delete // baseMove chains to the wrapped Move or simulates it by Copy+Delete
func (f *Fs) baseMove(ctx context.Context, src fs.Object, remote string, delMode int) (fs.Object, error) { func (f *Fs) baseMove(ctx context.Context, src fs.Object, remote string, delMode int) (fs.Object, error) {
ctx, ci := fs.AddConfig(ctx)
ci.NameTransform = nil // ensure operations.Move does not double-transform here
var ( var (
dest fs.Object dest fs.Object
err error err error
@@ -2477,7 +2480,7 @@ func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte)
if len(data) > maxMetadataSizeWritten { if len(data) > maxMetadataSizeWritten {
return nil, false, ErrMetaTooBig return nil, false, ErrMetaTooBig
} }
if len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' { if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
return nil, false, errors.New("invalid json") return nil, false, errors.New("invalid json")
} }
var metadata metaSimpleJSON var metadata metaSimpleJSON


@@ -40,7 +40,7 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
}) })
} }
type settings map[string]any type settings map[string]interface{}
func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, path string, opts settings) fs.Fs { func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, path string, opts settings) fs.Fs {
fsName := strings.Split(f.Name(), "{")[0] // strip off hash fsName := strings.Split(f.Name(), "{")[0] // strip off hash


@@ -46,7 +46,6 @@ func TestIntegration(t *testing.T) {
"DirCacheFlush", "DirCacheFlush",
"UserInfo", "UserInfo",
"Disconnect", "Disconnect",
"ListP",
}, },
} }
if *fstest.RemoteName == "" { if *fstest.RemoteName == "" {


@@ -1,48 +0,0 @@
// Package api has type definitions for cloudinary
package api
import (
"fmt"
)
// CloudinaryEncoder extends the built-in encoder
type CloudinaryEncoder interface {
// FromStandardPath takes a / separated path in Standard encoding
// and converts it to a / separated path in this encoding.
FromStandardPath(string) string
// FromStandardName takes name in Standard encoding and converts
// it in this encoding.
FromStandardName(string) string
// ToStandardPath takes a / separated path in this encoding
// and converts it to a / separated path in Standard encoding.
ToStandardPath(string) string
// ToStandardName takes name in this encoding and converts
// it in Standard encoding.
ToStandardName(string, string) string
// Encoded root of the remote (as passed into NewFs)
FromStandardFullPath(string) string
}
// UpdateOptions was created to pass options from Update to Put
type UpdateOptions struct {
PublicID string
ResourceType string
DeliveryType string
AssetFolder string
DisplayName string
}
// Header formats the option as a string
func (o *UpdateOptions) Header() (string, string) {
return "UpdateOption", fmt.Sprintf("%s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
}
// Mandatory returns whether the option must be parsed or can be ignored
func (o *UpdateOptions) Mandatory() bool {
return false
}
// String formats the option into human-readable form
func (o *UpdateOptions) String() string {
return fmt.Sprintf("Fully qualified Public ID: %s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
}


@@ -1,754 +0,0 @@
// Package cloudinary provides an interface to the Cloudinary DAM
package cloudinary
import (
"context"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"path"
"slices"
"strconv"
"strings"
"time"
"github.com/cloudinary/cloudinary-go/v2"
SDKApi "github.com/cloudinary/cloudinary-go/v2/api"
"github.com/cloudinary/cloudinary-go/v2/api/admin"
"github.com/cloudinary/cloudinary-go/v2/api/admin/search"
"github.com/cloudinary/cloudinary-go/v2/api/uploader"
"github.com/rclone/rclone/backend/cloudinary/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"github.com/zeebo/blake3"
)
// Cloudinary shouldn't have a trailing dot if there is no path
func cldPathDir(somePath string) string {
if somePath == "" || somePath == "." {
return somePath
}
dir := path.Dir(somePath)
if dir == "." {
return ""
}
return dir
}
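As a quick illustration of the helper above (behaviour read directly from the function; the paths are hypothetical):

// cldPathDir("")             -> ""
// cldPathDir(".")            -> "."
// cldPathDir("cat.jpg")      -> ""     (plain path.Dir would return ".")
// cldPathDir("pets/cat.jpg") -> "pets"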
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "cloudinary",
Description: "Cloudinary",
NewFs: NewFs,
Options: []fs.Option{
{
Name: "cloud_name",
Help: "Cloudinary Environment Name",
Required: true,
Sensitive: true,
},
{
Name: "api_key",
Help: "Cloudinary API Key",
Required: true,
Sensitive: true,
},
{
Name: "api_secret",
Help: "Cloudinary API Secret",
Required: true,
Sensitive: true,
},
{
Name: "upload_prefix",
Help: "Specify the API endpoint for environments out of the US",
},
{
Name: "upload_preset",
Help: "Upload Preset to select asset manipulation on upload",
},
{
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.Base | // Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
encoder.EncodeSlash |
encoder.EncodeLtGt |
encoder.EncodeDoubleQuote |
encoder.EncodeQuestion |
encoder.EncodeAsterisk |
encoder.EncodePipe |
encoder.EncodeHash |
encoder.EncodePercent |
encoder.EncodeBackSlash |
encoder.EncodeDel |
encoder.EncodeCtl |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8 |
encoder.EncodeDot),
},
{
Name: "eventually_consistent_delay",
Default: fs.Duration(0),
Advanced: true,
Help: "Wait N seconds for eventual consistency of the databases that support the backend operation",
},
{
Name: "adjust_media_files_extensions",
Default: true,
Advanced: true,
Help: "Cloudinary handles media formats as a file attribute and strips it from the name, which is unlike most other file systems",
},
{
Name: "media_extensions",
Default: []string{
"3ds", "3g2", "3gp", "ai", "arw", "avi", "avif", "bmp", "bw",
"cr2", "cr3", "djvu", "dng", "eps3", "fbx", "flif", "flv", "gif",
"glb", "gltf", "hdp", "heic", "heif", "ico", "indd", "jp2", "jpe",
"jpeg", "jpg", "jxl", "jxr", "m2ts", "mov", "mp4", "mpeg", "mts",
"mxf", "obj", "ogv", "pdf", "ply", "png", "psd", "svg", "tga",
"tif", "tiff", "ts", "u3ma", "usdz", "wdp", "webm", "webp", "wmv"},
Advanced: true,
Help: "Cloudinary supported media extensions",
},
},
})
}
// Options defines the configuration for this backend
type Options struct {
CloudName string `config:"cloud_name"`
APIKey string `config:"api_key"`
APISecret string `config:"api_secret"`
UploadPrefix string `config:"upload_prefix"`
UploadPreset string `config:"upload_preset"`
Enc encoder.MultiEncoder `config:"encoding"`
EventuallyConsistentDelay fs.Duration `config:"eventually_consistent_delay"`
MediaExtensions []string `config:"media_extensions"`
AdjustMediaFilesExtensions bool `config:"adjust_media_files_extensions"`
}
// Fs represents a remote cloudinary server
type Fs struct {
name string
root string
opt Options
features *fs.Features
pacer *fs.Pacer
srv *rest.Client // For downloading assets via the Cloudinary CDN
cld *cloudinary.Cloudinary // API calls are going through the Cloudinary SDK
lastCRUD time.Time
}
// Object describes a cloudinary object
type Object struct {
fs *Fs
remote string
size int64
modTime time.Time
url string
md5sum string
publicID string
resourceType string
deliveryType string
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// Initialize the Cloudinary client
cld, err := cloudinary.NewFromParams(opt.CloudName, opt.APIKey, opt.APISecret)
if err != nil {
return nil, fmt.Errorf("failed to create Cloudinary client: %w", err)
}
cld.Admin.Client = *fshttp.NewClient(ctx)
cld.Upload.Client = *fshttp.NewClient(ctx)
if opt.UploadPrefix != "" {
cld.Config.API.UploadPrefix = opt.UploadPrefix
}
client := fshttp.NewClient(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
cld: cld,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(1000), pacer.MaxSleep(10000), pacer.DecayConstant(2))),
srv: rest.NewClient(client),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
if root != "" {
// Check to see if the root is actually an existing file
remote := path.Base(root)
f.root = cldPathDir(root)
_, err := f.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) {
// File doesn't exist so return the previous root
f.root = root
return f, nil
}
return nil, err
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
// ------------------------------------------------------------
// FromStandardPath implementation of the api.CloudinaryEncoder
func (f *Fs) FromStandardPath(s string) string {
return strings.ReplaceAll(f.opt.Enc.FromStandardPath(s), "&", "\uFF06")
}
// FromStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) FromStandardName(s string) string {
if f.opt.AdjustMediaFilesExtensions {
parsedURL, err := url.Parse(s)
ext := ""
if err != nil {
fs.Logf(nil, "Error parsing URL: %v", err)
} else {
ext = path.Ext(parsedURL.Path)
if slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
s = strings.TrimSuffix(parsedURL.Path, ext)
}
}
}
return strings.ReplaceAll(f.opt.Enc.FromStandardName(s), "&", "\uFF06")
}
// ToStandardPath implementation of the api.CloudinaryEncoder
func (f *Fs) ToStandardPath(s string) string {
return strings.ReplaceAll(f.opt.Enc.ToStandardPath(s), "\uFF06", "&")
}
// ToStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) ToStandardName(s string, assetURL string) string {
ext := ""
if f.opt.AdjustMediaFilesExtensions {
parsedURL, err := url.Parse(assetURL)
if err != nil {
fs.Logf(nil, "Error parsing URL: %v", err)
} else {
ext = path.Ext(parsedURL.Path)
if !slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
ext = ""
}
}
}
return strings.ReplaceAll(f.opt.Enc.ToStandardName(s), "\uFF06", "&") + ext
}
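To make the extension handling above concrete, here is a rough round trip with adjust_media_files_extensions enabled (file names and URL invented for illustration):

// FromStandardName("cat.jpg")   -> "cat"        (".jpg" is in media_extensions, so it is stripped)
// FromStandardName("notes.txt") -> "notes.txt"  (".txt" is not a media extension, left alone)
// ToStandardName("cat", "https://res.cloudinary.com/demo/image/upload/cat.jpg")
//                               -> "cat.jpg"    (extension restored from the asset URL)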
// FromStandardFullPath encodes a full path to Cloudinary standard
func (f *Fs) FromStandardFullPath(dir string) string {
return path.Join(api.CloudinaryEncoder.FromStandardPath(f, f.root), api.CloudinaryEncoder.FromStandardPath(f, dir))
}
// ToAssetFolderAPI encodes folders as expected by the Cloudinary SDK
func (f *Fs) ToAssetFolderAPI(dir string) string {
return strings.ReplaceAll(dir, "%", "%25")
}
// ToDisplayNameElastic encodes a special case of elasticsearch
func (f *Fs) ToDisplayNameElastic(dir string) string {
return strings.ReplaceAll(dir, "!", "\\!")
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// WaitEventuallyConsistent waits till the FS is eventually consistent
func (f *Fs) WaitEventuallyConsistent() {
if f.opt.EventuallyConsistentDelay == fs.Duration(0) {
return
}
delay := time.Duration(f.opt.EventuallyConsistentDelay)
timeSinceLastCRUD := time.Since(f.lastCRUD)
if timeSinceLastCRUD < delay {
time.Sleep(delay - timeSinceLastCRUD)
}
}
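A worked example of the wait above, with assumed numbers:

// eventually_consistent_delay = 7s, last CRUD finished 3s ago:
//   timeSinceLastCRUD (3s) < delay (7s)  ->  sleep(7s - 3s) = 4s
// If 7s or more have already passed since the last CRUD, the call returns at once.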
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("Cloudinary root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// List the objects and directories in dir into entries
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
remotePrefix := f.FromStandardFullPath(dir)
if remotePrefix != "" && !strings.HasSuffix(remotePrefix, "/") {
remotePrefix += "/"
}
var entries fs.DirEntries
dirs := make(map[string]struct{})
nextCursor := ""
f.WaitEventuallyConsistent()
for {
// use the folders api to list folders.
folderParams := admin.SubFoldersParams{
Folder: f.ToAssetFolderAPI(remotePrefix),
MaxResults: 500,
}
if nextCursor != "" {
folderParams.NextCursor = nextCursor
}
results, err := f.cld.Admin.SubFolders(ctx, folderParams)
if err != nil {
return nil, fmt.Errorf("failed to list sub-folders: %w", err)
}
if results.Error.Message != "" {
if strings.HasPrefix(results.Error.Message, "Can't find folder with path") {
return nil, fs.ErrorDirNotFound
}
return nil, fmt.Errorf("failed to list sub-folders: %s", results.Error.Message)
}
for _, folder := range results.Folders {
relativePath := api.CloudinaryEncoder.ToStandardPath(f, strings.TrimPrefix(folder.Path, remotePrefix))
parts := strings.Split(relativePath, "/")
// It's a directory
dirName := parts[len(parts)-1]
if _, found := dirs[dirName]; !found {
d := fs.NewDir(path.Join(dir, dirName), time.Time{})
entries = append(entries, d)
dirs[dirName] = struct{}{}
}
}
// Break if there are no more results
if results.NextCursor == "" {
break
}
nextCursor = results.NextCursor
}
for {
// Use the assets.AssetsByAssetFolder API to list assets
assetsParams := admin.AssetsByAssetFolderParams{
AssetFolder: remotePrefix,
MaxResults: 500,
}
if nextCursor != "" {
assetsParams.NextCursor = nextCursor
}
results, err := f.cld.Admin.AssetsByAssetFolder(ctx, assetsParams)
if err != nil {
return nil, fmt.Errorf("failed to list assets: %w", err)
}
for _, asset := range results.Assets {
remote := path.Join(dir, api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName, asset.SecureURL))
o := &Object{
fs: f,
remote: remote,
size: int64(asset.Bytes),
modTime: asset.CreatedAt,
url: asset.SecureURL,
publicID: asset.PublicID,
resourceType: asset.AssetType,
deliveryType: asset.Type,
}
entries = append(entries, o)
}
// Break if there are no more results
if results.NextCursor == "" {
break
}
nextCursor = results.NextCursor
}
return entries, nil
}
// NewObject finds the Object at remote. If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
searchParams := search.Query{
Expression: fmt.Sprintf("asset_folder:\"%s\" AND display_name:\"%s\"",
f.FromStandardFullPath(cldPathDir(remote)),
f.ToDisplayNameElastic(api.CloudinaryEncoder.FromStandardName(f, path.Base(remote)))),
SortBy: []search.SortByField{{"uploaded_at": "desc"}},
MaxResults: 2,
}
var results *admin.SearchResult
f.WaitEventuallyConsistent()
err := f.pacer.Call(func() (bool, error) {
var err1 error
results, err1 = f.cld.Admin.Search(ctx, searchParams)
if err1 == nil && results.TotalCount != len(results.Assets) {
err1 = errors.New("partial response so waiting for eventual consistency")
}
return shouldRetry(ctx, nil, err1)
})
if err != nil {
return nil, fs.ErrorObjectNotFound
}
if results.TotalCount == 0 || len(results.Assets) == 0 {
return nil, fs.ErrorObjectNotFound
}
asset := results.Assets[0]
o := &Object{
fs: f,
remote: remote,
size: int64(asset.Bytes),
modTime: asset.UploadedAt,
url: asset.SecureURL,
md5sum: asset.Etag,
publicID: asset.PublicID,
resourceType: asset.ResourceType,
deliveryType: asset.Type,
}
return o, nil
}
func (f *Fs) getSuggestedPublicID(assetFolder string, displayName string, modTime time.Time) string {
payload := []byte(path.Join(assetFolder, displayName))
hash := blake3.Sum256(payload)
return hex.EncodeToString(hash[:])
}
// Put uploads content to Cloudinary
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if src.Size() == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
params := uploader.UploadParams{
UploadPreset: f.opt.UploadPreset,
}
updateObject := false
var modTime time.Time
for _, option := range options {
if updateOptions, ok := option.(*api.UpdateOptions); ok {
if updateOptions.PublicID != "" {
updateObject = true
params.Overwrite = SDKApi.Bool(true)
params.Invalidate = SDKApi.Bool(true)
params.PublicID = updateOptions.PublicID
params.ResourceType = updateOptions.ResourceType
params.Type = SDKApi.DeliveryType(updateOptions.DeliveryType)
params.AssetFolder = updateOptions.AssetFolder
params.DisplayName = updateOptions.DisplayName
modTime = src.ModTime(ctx)
}
}
}
if !updateObject {
params.AssetFolder = f.FromStandardFullPath(cldPathDir(src.Remote()))
params.DisplayName = api.CloudinaryEncoder.FromStandardName(f, path.Base(src.Remote()))
// We want to conform to the unique asset ID of rclone, which is (asset_folder,display_name,last_modified).
// We also want to enable customers to choose their own public_id, in case duplicate names are not a crucial use case.
// Upload_presets that apply randomness to the public ID would not work well with rclone duplicate assets support.
params.FilenameOverride = f.getSuggestedPublicID(params.AssetFolder, params.DisplayName, src.ModTime(ctx))
}
uploadResult, err := f.cld.Upload.Upload(ctx, in, params)
f.lastCRUD = time.Now()
if err != nil {
return nil, fmt.Errorf("failed to upload to Cloudinary: %w", err)
}
if !updateObject {
modTime = uploadResult.CreatedAt
}
if uploadResult.Error.Message != "" {
return nil, errors.New(uploadResult.Error.Message)
}
o := &Object{
fs: f,
remote: src.Remote(),
size: int64(uploadResult.Bytes),
modTime: modTime,
url: uploadResult.SecureURL,
md5sum: uploadResult.Etag,
publicID: uploadResult.PublicID,
resourceType: uploadResult.ResourceType,
deliveryType: uploadResult.Type,
}
return o, nil
}
// Precision of the remote
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Hashes returns the supported hash sets
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// Mkdir creates empty folders
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
params := admin.CreateFolderParams{Folder: f.ToAssetFolderAPI(f.FromStandardFullPath(dir))}
res, err := f.cld.Admin.CreateFolder(ctx, params)
f.lastCRUD = time.Now()
if err != nil {
return err
}
if res.Error.Message != "" {
return errors.New(res.Error.Message)
}
return nil
}
// Rmdir deletes empty folders
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// Additional test because Cloudinary will delete folders without
// assets, regardless of empty sub-folders
folder := f.ToAssetFolderAPI(f.FromStandardFullPath(dir))
folderParams := admin.SubFoldersParams{
Folder: folder,
MaxResults: 1,
}
results, err := f.cld.Admin.SubFolders(ctx, folderParams)
if err != nil {
return err
}
if results.TotalCount > 0 {
return fs.ErrorDirectoryNotEmpty
}
params := admin.DeleteFolderParams{Folder: folder}
res, err := f.cld.Admin.DeleteFolder(ctx, params)
f.lastCRUD = time.Now()
if err != nil {
return err
}
if res.Error.Message != "" {
if strings.HasPrefix(res.Error.Message, "Can't find folder with path") {
return fs.ErrorDirNotFound
}
return errors.New(res.Error.Message)
}
return nil
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
420, // Too Many Requests (legacy)
429, // Too Many Requests
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
if err != nil {
tryAgain := "Try again on "
if idx := strings.Index(err.Error(), tryAgain); idx != -1 {
layout := "2006-01-02 15:04:05 UTC"
dateStr := err.Error()[idx+len(tryAgain) : idx+len(tryAgain)+len(layout)]
timestamp, err2 := time.Parse(layout, dateStr)
if err2 == nil {
return true, fserrors.NewErrorRetryAfter(time.Until(timestamp))
}
}
fs.Debugf(nil, "Retrying API error %v", err)
return true, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
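The retry helper above special-cases Cloudinary rate-limit errors that embed a retry time in their message. A concrete, invented example of what that branch handles:

// Error text such as:
//   "Rate limit exceeded. Try again on 2025-01-02 15:04:05 UTC"
// is matched on the "Try again on " marker, the timestamp is parsed with the
// layout "2006-01-02 15:04:05 UTC", and the function returns
// fserrors.NewErrorRetryAfter(time.Until(timestamp)) so the pacer waits until
// the stated time instead of retrying immediately.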
// ------------------------------------------------------------
// Hash returns the MD5 of an object
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
if ty != hash.MD5 {
return "", hash.ErrUnsupported
}
return o.md5sum, nil
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// Size of object in bytes
func (o *Object) Size() int64 {
return o.size
}
// Storable returns if this object is storable
func (o *Object) Storable() bool {
return true
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return fs.ErrorCantSetModTime
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
var resp *http.Response
opts := rest.Opts{
Method: "GET",
RootURL: o.url,
Options: options,
}
var offset int64
var count int64
var key string
var value string
fs.FixRangeOption(options, o.size)
for _, option := range options {
switch x := option.(type) {
case *fs.RangeOption:
offset, count = x.Decode(o.size)
if count < 0 {
count = o.size - offset
}
key, value = option.Header()
case *fs.SeekOption:
offset = x.Offset
count = o.size - offset
key, value = option.Header()
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
if key != "" && value != "" {
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders[key] = value
}
// Make sure that the asset is fully available
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
if err == nil {
cl, clErr := strconv.Atoi(resp.Header.Get("content-length"))
if clErr == nil && count == int64(cl) {
return false, nil
}
}
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed download of \"%s\": %w", o.url, err)
}
return resp.Body, err
}
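To illustrate the option handling in Open above, here is how ranges decode into offset and count for a 1000 byte object (numbers chosen for illustration):

// fs.RangeOption{Start: 100, End: 499} -> offset 100, count 400 (End is inclusive)
// fs.RangeOption{Start: 100, End: -1}  -> Decode gives count < 0, so count = size - offset = 900
// fs.SeekOption{Offset: 250}           -> offset 250, count = size - offset = 750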
// Update the object with the contents of the io.Reader
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
options = append(options, &api.UpdateOptions{
PublicID: o.publicID,
ResourceType: o.resourceType,
DeliveryType: o.deliveryType,
DisplayName: api.CloudinaryEncoder.FromStandardName(o.fs, path.Base(o.Remote())),
AssetFolder: o.fs.FromStandardFullPath(cldPathDir(o.Remote())),
})
updatedObj, err := o.fs.Put(ctx, in, src, options...)
if err != nil {
return err
}
if uo, ok := updatedObj.(*Object); ok {
o.size = uo.size
o.modTime = time.Now() // Skipping uo.modTime because the API returns the create time
o.url = uo.url
o.md5sum = uo.md5sum
o.publicID = uo.publicID
o.resourceType = uo.resourceType
o.deliveryType = uo.deliveryType
}
return nil
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
params := uploader.DestroyParams{
PublicID: o.publicID,
ResourceType: o.resourceType,
Type: o.deliveryType,
}
res, dErr := o.fs.cld.Upload.Destroy(ctx, params)
o.fs.lastCRUD = time.Now()
if dErr != nil {
return dErr
}
if res.Error.Message != "" {
return errors.New(res.Error.Message)
}
if res.Result != "ok" {
return errors.New(res.Result)
}
return nil
}


@@ -1,23 +0,0 @@
// Test Cloudinary filesystem interface
package cloudinary_test
import (
"testing"
"github.com/rclone/rclone/backend/cloudinary"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
name := "TestCloudinary"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*cloudinary.Object)(nil),
SkipInvalidUTF8: true,
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "eventually_consistent_delay", Value: "7"},
},
})
}


@@ -20,7 +20,6 @@ import (
"github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk" "github.com/rclone/rclone/fs/walk"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
@@ -187,6 +186,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
g, gCtx := errgroup.WithContext(ctx) g, gCtx := errgroup.WithContext(ctx)
var mu sync.Mutex var mu sync.Mutex
for _, upstream := range opt.Upstreams { for _, upstream := range opt.Upstreams {
upstream := upstream
g.Go(func() (err error) { g.Go(func() (err error) {
equal := strings.IndexRune(upstream, '=') equal := strings.IndexRune(upstream, '=')
if equal < 0 { if equal < 0 {
@@ -240,22 +240,18 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
DirModTimeUpdatesOnWrite: true, DirModTimeUpdatesOnWrite: true,
PartialUploads: true, PartialUploads: true,
}).Fill(ctx, f) }).Fill(ctx, f)
canMove, slowHash := true, false canMove := true
for _, u := range f.upstreams { for _, u := range f.upstreams {
features = features.Mask(ctx, u.f) // Mask all upstream fs features = features.Mask(ctx, u.f) // Mask all upstream fs
if !operations.CanServerSideMove(u.f) { if !operations.CanServerSideMove(u.f) {
canMove = false canMove = false
} }
slowHash = slowHash || u.f.Features().SlowHash
} }
// We can move if all remotes support Move or Copy // We can move if all remotes support Move or Copy
if canMove { if canMove {
features.Move = f.Move features.Move = f.Move
} }
// If any of upstreams are SlowHash, propagate it
features.SlowHash = slowHash
// Enable ListR when upstreams either support ListR or is local // Enable ListR when upstreams either support ListR or is local
// But not when all upstreams are local // But not when all upstreams are local
if features.ListR == nil { if features.ListR == nil {
@@ -269,9 +265,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
} }
} }
// Enable ListP always
features.ListP = f.ListP
// Enable Purge when any upstreams support it // Enable Purge when any upstreams support it
if features.Purge == nil { if features.Purge == nil {
for _, u := range f.upstreams { for _, u := range f.upstreams {
@@ -369,6 +362,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream) error) error { func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream) error) error {
g, gCtx := errgroup.WithContext(ctx) g, gCtx := errgroup.WithContext(ctx)
for _, u := range f.upstreams { for _, u := range f.upstreams {
u := u
g.Go(func() (err error) { g.Go(func() (err error) {
return fn(gCtx, u) return fn(gCtx, u)
}) })
@@ -635,6 +629,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
var uChans []chan time.Duration var uChans []chan time.Duration
for _, u := range f.upstreams { for _, u := range f.upstreams {
u := u
if do := u.f.Features().ChangeNotify; do != nil { if do := u.f.Features().ChangeNotify; do != nil {
ch := make(chan time.Duration) ch := make(chan time.Duration)
uChans = append(uChans, ch) uChans = append(uChans, ch)
@@ -814,52 +809,24 @@ func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.D
// This should return ErrDirNotFound if the directory isn't // This should return ErrDirNotFound if the directory isn't
// found. // found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err) // defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
if f.root == "" && dir == "" { if f.root == "" && dir == "" {
entries := make(fs.DirEntries, 0, len(f.upstreams)) entries = make(fs.DirEntries, 0, len(f.upstreams))
for combineDir := range f.upstreams { for combineDir := range f.upstreams {
d := fs.NewLimitedDirWrapper(combineDir, fs.NewDir(combineDir, f.when)) d := fs.NewLimitedDirWrapper(combineDir, fs.NewDir(combineDir, f.when))
entries = append(entries, d) entries = append(entries, d)
} }
return callback(entries) return entries, nil
} }
u, uRemote, err := f.findUpstream(dir) u, uRemote, err := f.findUpstream(dir)
if err != nil { if err != nil {
return err return nil, err
} }
wrappedCallback := func(entries fs.DirEntries) error { entries, err = u.f.List(ctx, uRemote)
entries, err := u.wrapEntries(ctx, entries) if err != nil {
if err != nil { return nil, err
return err
}
return callback(entries)
} }
listP := u.f.Features().ListP return u.wrapEntries(ctx, entries)
if listP == nil {
entries, err := u.f.List(ctx, uRemote)
if err != nil {
return err
}
return wrappedCallback(entries)
}
return listP(ctx, uRemote, wrappedCallback)
} }
// ListR lists the objects and directories of the Fs starting // ListR lists the objects and directories of the Fs starting


@@ -2,8 +2,10 @@
package compress package compress
import ( import (
"bufio"
"bytes" "bytes"
"context" "context"
"crypto/md5"
"encoding/base64" "encoding/base64"
"encoding/binary" "encoding/binary"
"encoding/hex" "encoding/hex"
@@ -27,7 +29,6 @@ import (
"github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/log" "github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/operations"
@@ -44,7 +45,6 @@ const (
minCompressionRatio = 1.1 minCompressionRatio = 1.1
gzFileExt = ".gz" gzFileExt = ".gz"
zstdFileExt = ".zst"
metaFileExt = ".json" metaFileExt = ".json"
uncompressedFileExt = ".bin" uncompressedFileExt = ".bin"
) )
@@ -53,7 +53,6 @@ const (
const ( const (
Uncompressed = 0 Uncompressed = 0
Gzip = 2 Gzip = 2
Zstd = 4
) )
var nameRegexp = regexp.MustCompile(`^(.+?)\.([A-Za-z0-9-_]{11})$`) var nameRegexp = regexp.MustCompile(`^(.+?)\.([A-Za-z0-9-_]{11})$`)
@@ -66,10 +65,6 @@ func init() {
Value: "gzip", Value: "gzip",
Help: "Standard gzip compression with fastest parameters.", Help: "Standard gzip compression with fastest parameters.",
}, },
{
Value: "zstd",
Help: "Zstandard compression — fast modern algorithm offering adjustable speed-to-compression tradeoffs.",
},
} }
// Register our remote // Register our remote
@@ -91,23 +86,17 @@ func init() {
Examples: compressionModeOptions, Examples: compressionModeOptions,
}, { }, {
Name: "level", Name: "level",
Help: `GZIP (levels -2 to 9): Help: `GZIP compression level (-2 to 9).
- -2 — Huffman encoding only. Only use if you know what you're doing.
- -1 (default) — recommended; equivalent to level 5. Generally -1 (default, equivalent to 5) is recommended.
- 0 — turns off compression. Levels 1 to 9 increase compression at the cost of speed. Going past 6
- 1-9 — increase compression at the cost of speed. Going past 6 generally offers very little return. generally offers very little return.
ZSTD (levels 0 to 4): Level -2 uses Huffman encoding only. Only use if you know what you
- 0 — turns off compression entirely. are doing.
- 1 — fastest compression with the lowest ratio. Level 0 turns off compression.`,
- 2 (default) — good balance of speed and compression. Default: sgzip.DefaultCompression,
- 3 — better compression, but uses about 2-3x more CPU than the default. Advanced: true,
- 4 — best possible compression ratio (highest CPU cost).
Notes:
- Choose GZIP for wide compatibility; ZSTD for better speed/ratio tradeoffs.
- Negative gzip levels: -2 = Huffman-only, -1 = default (≈ level 5).`,
Required: true,
}, { }, {
Name: "ram_cache_limit", Name: "ram_cache_limit",
Help: `Some remotes don't allow the upload of files with unknown size. Help: `Some remotes don't allow the upload of files with unknown size.
@@ -122,47 +111,6 @@ this limit will be cached on disk.`,
}) })
} }
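
A hedged sketch of how the two level ranges described in the help text above feed the respective encoders; the helper name is hypothetical, and plain zstd.NewWriter stands in here for the seekable writer that appears later in this diff.

```go
package sketch

import (
	"fmt"
	"io"

	"github.com/buengese/sgzip"
	"github.com/klauspost/compress/zstd"
)

// newCompressor shows how a single "level" option maps onto the two
// compressors: sgzip accepts -2..9, zstd takes an EncoderLevel of 1..4
// (level 0 means "store uncompressed" and is handled before this point).
func newCompressor(w io.Writer, mode string, level int) (io.WriteCloser, error) {
	switch mode {
	case "gzip":
		return sgzip.NewWriterLevel(w, level)
	case "zstd":
		return zstd.NewWriter(w, zstd.WithEncoderLevel(zstd.EncoderLevel(level)))
	default:
		return nil, fmt.Errorf("compression mode %q does not take a level", mode)
	}
}
```
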
// compressionModeHandler defines the interface for handling different compression modes
type compressionModeHandler interface {
// processFileNameGetFileExtension returns the file extension for the given compression mode
processFileNameGetFileExtension(compressionMode int) string
// newObjectGetOriginalSize returns the original file size from the metadata
newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error)
// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
// the configured threshold
isCompressible(r io.Reader, compressionMode int) (bool, error)
// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
putCompress(
ctx context.Context,
f *Fs,
in io.Reader,
src fs.ObjectInfo,
options []fs.OpenOption,
mimeType string,
) (fs.Object, *ObjectMetadata, error)
// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
openGetReadCloser(
ctx context.Context,
o *Object,
offset int64,
limit int64,
cr chunkedreader.ChunkedReader,
closer io.Closer,
options ...fs.OpenOption,
) (rc io.ReadCloser, err error)
// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error)
// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
// Warning: This function panics if cmeta is not of the expected type.
newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata
}
// Options defines the configuration for this backend // Options defines the configuration for this backend
type Options struct { type Options struct {
Remote string `config:"remote"` Remote string `config:"remote"`
@@ -176,13 +124,12 @@ type Options struct {
// Fs represents a wrapped fs.Fs // Fs represents a wrapped fs.Fs
type Fs struct { type Fs struct {
fs.Fs fs.Fs
wrapper fs.Fs wrapper fs.Fs
name string name string
root string root string
opt Options opt Options
mode int // compression mode id mode int // compression mode id
features *fs.Features // optional features features *fs.Features // optional features
modeHandler compressionModeHandler // compression mode handler
} }
// NewFs constructs an Fs from the path, container:path // NewFs constructs an Fs from the path, container:path
@@ -219,28 +166,13 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
return nil, fmt.Errorf("failed to make remote %s:%q to wrap: %w", wName, remotePath, err) return nil, fmt.Errorf("failed to make remote %s:%q to wrap: %w", wName, remotePath, err)
} }
compressionMode := compressionModeFromName(opt.CompressionMode)
var modeHandler compressionModeHandler
switch compressionMode {
case Gzip:
modeHandler = &gzipModeHandler{}
case Zstd:
modeHandler = &zstdModeHandler{}
case Uncompressed:
modeHandler = &uncompressedModeHandler{}
default:
modeHandler = &unknownModeHandler{}
}
// Create the wrapping fs // Create the wrapping fs
f := &Fs{ f := &Fs{
Fs: wrappedFs, Fs: wrappedFs,
name: name, name: name,
root: rpath, root: rpath,
opt: *opt, opt: *opt,
mode: compressionMode, mode: compressionModeFromName(opt.CompressionMode),
modeHandler: modeHandler,
} }
// Correct root if definitely pointing to a file // Correct root if definitely pointing to a file
if err == fs.ErrorIsFile { if err == fs.ErrorIsFile {
@@ -276,19 +208,14 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
if !operations.CanServerSideMove(wrappedFs) { if !operations.CanServerSideMove(wrappedFs) {
f.features.Disable("PutStream") f.features.Disable("PutStream")
} }
// Enable ListP always
f.features.ListP = f.ListP
return f, err return f, err
} }
// compressionModeFromName converts a compression mode name to its int representation.
func compressionModeFromName(name string) int { func compressionModeFromName(name string) int {
switch name { switch name {
case "gzip": case "gzip":
return Gzip return Gzip
case "zstd":
return Zstd
default: default:
return Uncompressed return Uncompressed
} }
@@ -312,7 +239,7 @@ func base64ToInt64(str string) (int64, error) {
// Processes a file name for a compressed file. Returns the original file name, the extension, and the size of the original file. // Processes a file name for a compressed file. Returns the original file name, the extension, and the size of the original file.
// Returns -2 for the original size if the file is uncompressed. // Returns -2 for the original size if the file is uncompressed.
func processFileName(compressedFileName string, modeHandler compressionModeHandler) (origFileName string, extension string, origSize int64, err error) { func processFileName(compressedFileName string) (origFileName string, extension string, origSize int64, err error) {
// Separate the filename and size from the extension // Separate the filename and size from the extension
extensionPos := strings.LastIndex(compressedFileName, ".") extensionPos := strings.LastIndex(compressedFileName, ".")
if extensionPos == -1 { if extensionPos == -1 {
@@ -331,8 +258,7 @@ func processFileName(compressedFileName string, modeHandler compressionModeHandl
if err != nil { if err != nil {
return "", "", 0, errors.New("could not decode size") return "", "", 0, errors.New("could not decode size")
} }
ext := modeHandler.processFileNameGetFileExtension(compressionModeFromName(compressedFileName[extensionPos+1:])) return match[1], gzFileExt, size, nil
return match[1], ext, size, nil
} }
// Generates the file name for a metadata file // Generates the file name for a metadata file
@@ -357,15 +283,11 @@ func unwrapMetadataFile(filename string) (string, bool) {
// makeDataName generates the file name for a data file with specified compression mode // makeDataName generates the file name for a data file with specified compression mode
func makeDataName(remote string, size int64, mode int) (newRemote string) { func makeDataName(remote string, size int64, mode int) (newRemote string) {
switch mode { if mode != Uncompressed {
case Gzip:
newRemote = remote + "." + int64ToBase64(size) + gzFileExt newRemote = remote + "." + int64ToBase64(size) + gzFileExt
case Zstd: } else {
newRemote = remote + "." + int64ToBase64(size) + zstdFileExt
default:
newRemote = remote + uncompressedFileExt newRemote = remote + uncompressedFileExt
} }
return newRemote return newRemote
} }
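
For reference, one plausible shape of the size suffix produced by makeDataName and expected by nameRegexp: 11 base64url characters is exactly an unpadded 8-byte int64. This is a hedged guess at the encoding, not necessarily rclone's int64ToBase64, and the byte order is an assumption.

```go
package sketch

import (
	"encoding/base64"
	"encoding/binary"
)

// int64ToBase64Sketch encodes an int64 as 11 unpadded base64url characters,
// so "file.txt" would become something like "file.txt.<11 chars>.gz" in the
// gzip case. Byte order is assumed for illustration only.
func int64ToBase64Sketch(n int64) string {
	var buf [8]byte
	binary.BigEndian.PutUint64(buf[:], uint64(n))
	return base64.RawURLEncoding.EncodeToString(buf[:])
}
```
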
@@ -379,7 +301,7 @@ func (f *Fs) dataName(remote string, size int64, compressed bool) (name string)
// addData parses an object and adds it to the DirEntries // addData parses an object and adds it to the DirEntries
func (f *Fs) addData(entries *fs.DirEntries, o fs.Object) { func (f *Fs) addData(entries *fs.DirEntries, o fs.Object) {
origFileName, _, size, err := processFileName(o.Remote(), f.modeHandler) origFileName, _, size, err := processFileName(o.Remote())
if err != nil { if err != nil {
fs.Errorf(o, "Error on parsing file name: %v", err) fs.Errorf(o, "Error on parsing file name: %v", err)
return return
@@ -430,39 +352,11 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
// found. // found.
// List entries and process them // List entries and process them
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f) entries, err = f.Fs.List(ctx, dir)
} if err != nil {
return nil, err
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
wrappedCallback := func(entries fs.DirEntries) error {
entries, err := f.processEntries(entries)
if err != nil {
return err
}
return callback(entries)
} }
listP := f.Fs.Features().ListP return f.processEntries(entries)
if listP == nil {
entries, err := f.Fs.List(ctx, dir)
if err != nil {
return err
}
return wrappedCallback(entries)
}
return listP(ctx, dir, wrappedCallback)
} }
// ListR lists the objects and directories of the Fs starting // ListR lists the objects and directories of the Fs starting
@@ -502,12 +396,8 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if err != nil { if err != nil {
return nil, fmt.Errorf("error decoding metadata: %w", err) return nil, fmt.Errorf("error decoding metadata: %w", err)
} }
size, err := f.modeHandler.newObjectGetOriginalSize(meta)
if err != nil {
return nil, fmt.Errorf("error reading metadata: %w", err)
}
// Create our Object // Create our Object
o, err := f.Fs.NewObject(ctx, makeDataName(remote, size, meta.Mode)) o, err := f.Fs.NewObject(ctx, makeDataName(remote, meta.CompressionMetadata.Size, meta.Mode))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -516,7 +406,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// checkCompressAndType checks if an object is compressible and determines its mime type // checkCompressAndType checks if an object is compressible and determines its mime type
// returns a multireader with the bytes that were read to determine mime type // returns a multireader with the bytes that were read to determine mime type
func checkCompressAndType(in io.Reader, compressionMode int, modeHandler compressionModeHandler) (newReader io.Reader, compressible bool, mimeType string, err error) { func checkCompressAndType(in io.Reader) (newReader io.Reader, compressible bool, mimeType string, err error) {
in, wrap := accounting.UnWrap(in) in, wrap := accounting.UnWrap(in)
buf := make([]byte, heuristicBytes) buf := make([]byte, heuristicBytes)
n, err := in.Read(buf) n, err := in.Read(buf)
@@ -525,7 +415,7 @@ func checkCompressAndType(in io.Reader, compressionMode int, modeHandler compres
return nil, false, "", err return nil, false, "", err
} }
mime := mimetype.Detect(buf) mime := mimetype.Detect(buf)
compressible, err = modeHandler.isCompressible(bytes.NewReader(buf), compressionMode) compressible, err = isCompressible(bytes.NewReader(buf))
if err != nil { if err != nil {
return nil, false, "", err return nil, false, "", err
} }
@@ -533,6 +423,26 @@ func checkCompressAndType(in io.Reader, compressionMode int, modeHandler compres
return wrap(in), compressible, mime.String(), nil return wrap(in), compressible, mime.String(), nil
} }
// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
// the configured threshold
func isCompressible(r io.Reader) (bool, error) {
var b bytes.Buffer
w, err := sgzip.NewWriterLevel(&b, sgzip.DefaultCompression)
if err != nil {
return false, err
}
n, err := io.Copy(w, r)
if err != nil {
return false, err
}
err = w.Close()
if err != nil {
return false, err
}
ratio := float64(n) / float64(b.Len())
return ratio > minCompressionRatio, nil
}
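
A quick hedged usage sketch of the heuristic above: highly repetitive data easily clears the minCompressionRatio of 1.1, while data that is already dense or random generally will not.

```go
// Hedged usage sketch; assumes it lives in the compress package next to
// isCompressible and imports "bytes", "crypto/rand" and "fmt".
func exampleHeuristic() {
	repetitive := bytes.Repeat([]byte("compress me please "), 512)
	ok, err := isCompressible(bytes.NewReader(repetitive))
	fmt.Println(ok, err) // highly repetitive data: expect true <nil>

	random := make([]byte, 10*1024)
	_, _ = rand.Read(random)
	ok, err = isCompressible(bytes.NewReader(random))
	fmt.Println(ok, err) // incompressible random data: expect false <nil>
}
```
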
// verifyObjectHash verifies the Objects hash // verifyObjectHash verifies the Objects hash
func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.MultiHasher, ht hash.Type) error { func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.MultiHasher, ht hash.Type) error {
srcHash := hasher.Sums()[ht] srcHash := hasher.Sums()[ht]
@@ -553,9 +463,9 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
type compressionResult[T sgzip.GzipMetadata | SzstdMetadata] struct { type compressionResult struct {
err error err error
meta T meta sgzip.GzipMetadata
} }
// replicating some of operations.Rcat functionality because we want to support remotes without streaming // replicating some of operations.Rcat functionality because we want to support remotes without streaming
@@ -596,18 +506,106 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
return nil, fmt.Errorf("failed to write temporary local file: %w", err) return nil, fmt.Errorf("failed to write temporary local file: %w", err)
} }
if _, err = tempFile.Seek(0, 0); err != nil { if _, err = tempFile.Seek(0, 0); err != nil {
return nil, fmt.Errorf("failed to seek temporary local file: %w", err) return nil, err
} }
finfo, err := tempFile.Stat() finfo, err := tempFile.Stat()
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to stat temporary local file: %w", err) return nil, err
} }
return f.Fs.Put(ctx, tempFile, object.NewStaticObjectInfo(dstFileName, modTime, finfo.Size(), false, nil, f.Fs)) return f.Fs.Put(ctx, tempFile, object.NewStaticObjectInfo(dstFileName, modTime, finfo.Size(), false, nil, f.Fs))
} }
// Put a compressed version of a file. Returns a wrappable object and metadata. // Put a compressed version of a file. Returns a wrappable object and metadata.
func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, mimeType string) (fs.Object, *ObjectMetadata, error) { func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, mimeType string) (fs.Object, *ObjectMetadata, error) {
return f.modeHandler.putCompress(ctx, f, in, src, options, mimeType) // Unwrap reader accounting
in, wrap := accounting.UnWrap(in)
// Add the metadata hasher
metaHasher := md5.New()
in = io.TeeReader(in, metaHasher)
// Compress the file
pipeReader, pipeWriter := io.Pipe()
results := make(chan compressionResult)
go func() {
gz, err := sgzip.NewWriterLevel(pipeWriter, f.opt.CompressionLevel)
if err != nil {
results <- compressionResult{err: err, meta: sgzip.GzipMetadata{}}
return
}
_, err = io.Copy(gz, in)
gzErr := gz.Close()
if gzErr != nil {
fs.Errorf(nil, "Failed to close compress: %v", gzErr)
if err == nil {
err = gzErr
}
}
closeErr := pipeWriter.Close()
if closeErr != nil {
fs.Errorf(nil, "Failed to close pipe: %v", closeErr)
if err == nil {
err = closeErr
}
}
results <- compressionResult{err: err, meta: gz.MetaData()}
}()
wrappedIn := wrap(bufio.NewReaderSize(pipeReader, bufferSize)) // Probably no longer needed as sgzip has its own buffering
// Find a hash the destination supports to compute a hash of
// the compressed data.
ht := f.Fs.Hashes().GetOne()
var hasher *hash.MultiHasher
var err error
if ht != hash.None {
// unwrap the accounting again
wrappedIn, wrap = accounting.UnWrap(wrappedIn)
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
if err != nil {
return nil, nil, err
}
// add the hasher and re-wrap the accounting
wrappedIn = io.TeeReader(wrappedIn, hasher)
wrappedIn = wrap(wrappedIn)
}
// Transfer the data
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx))
if err != nil {
if o != nil {
removeErr := o.Remove(ctx)
if removeErr != nil {
fs.Errorf(o, "Failed to remove partially transferred object: %v", err)
}
}
return nil, nil, err
}
// Check whether we got an error during compression
result := <-results
err = result.err
if err != nil {
if o != nil {
removeErr := o.Remove(ctx)
if removeErr != nil {
fs.Errorf(o, "Failed to remove partially compressed object: %v", err)
}
}
return nil, nil, err
}
// Generate metadata
meta := newMetadata(result.meta.Size, f.mode, result.meta, hex.EncodeToString(metaHasher.Sum(nil)), mimeType)
// Check the hashes of the compressed data if we were comparing them
if ht != hash.None && hasher != nil {
err = f.verifyObjectHash(ctx, o, hasher, ht)
if err != nil {
return nil, nil, err
}
}
return o, meta, nil
} }
// Put an uncompressed version of a file. Returns a wrappable object and metadata. // Put an uncompressed version of a file. Returns a wrappable object and metadata.
@@ -651,8 +649,7 @@ func (f *Fs) putUncompress(ctx context.Context, in io.Reader, src fs.ObjectInfo,
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
return o, newMetadata(o.Size(), Uncompressed, sgzip.GzipMetadata{}, hex.EncodeToString(sum), mimeType), nil
return f.modeHandler.putUncompressGetNewMetadata(o, Uncompressed, hex.EncodeToString(sum), mimeType, sum)
} }
// This function will write a metadata struct to a metadata Object for an src. Returns a wrappable metadata object. // This function will write a metadata struct to a metadata Object for an src. Returns a wrappable metadata object.
@@ -723,7 +720,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
o, err := f.NewObject(ctx, src.Remote()) o, err := f.NewObject(ctx, src.Remote())
if err == fs.ErrorObjectNotFound { if err == fs.ErrorObjectNotFound {
// Get our file compressibility // Get our file compressibility
in, compressible, mimeType, err := checkCompressAndType(in, f.mode, f.modeHandler) in, compressible, mimeType, err := checkCompressAndType(in)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -743,7 +740,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
} }
found := err == nil found := err == nil
in, compressible, mimeType, err := checkCompressAndType(in, f.mode, f.modeHandler) in, compressible, mimeType, err := checkCompressAndType(in)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -1062,12 +1059,11 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, duration fs.Duration
// ObjectMetadata describes the metadata for an Object. // ObjectMetadata describes the metadata for an Object.
type ObjectMetadata struct { type ObjectMetadata struct {
Mode int // Compression mode of the file. Mode int // Compression mode of the file.
Size int64 // Size of the object. Size int64 // Size of the object.
MD5 string // MD5 hash of the file. MD5 string // MD5 hash of the file.
MimeType string // Mime type of the file MimeType string // Mime type of the file
CompressionMetadataGzip *sgzip.GzipMetadata // Metadata for Gzip compression CompressionMetadata sgzip.GzipMetadata
CompressionMetadataZstd *SzstdMetadata // Metadata for Zstd compression
} }
// Object with external metadata // Object with external metadata
@@ -1080,6 +1076,17 @@ type Object struct {
meta *ObjectMetadata // Metadata struct for this object (nil if not loaded) meta *ObjectMetadata // Metadata struct for this object (nil if not loaded)
} }
// This function generates a metadata object
func newMetadata(size int64, mode int, cmeta sgzip.GzipMetadata, md5 string, mimeType string) *ObjectMetadata {
meta := new(ObjectMetadata)
meta.Size = size
meta.Mode = mode
meta.CompressionMetadata = cmeta
meta.MD5 = md5
meta.MimeType = mimeType
return meta
}
// This function will read the metadata from a metadata object. // This function will read the metadata from a metadata object.
func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata, err error) { func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata, err error) {
// Open our metadata object // Open our metadata object
@@ -1127,7 +1134,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return o.mo, o.mo.Update(ctx, in, src, options...) return o.mo, o.mo.Update(ctx, in, src, options...)
} }
in, compressible, mimeType, err := checkCompressAndType(in, o.meta.Mode, o.f.modeHandler) in, compressible, mimeType, err := checkCompressAndType(in)
if err != nil { if err != nil {
return err return err
} }
@@ -1240,7 +1247,7 @@ func (o *Object) String() string {
// Remote returns the remote path // Remote returns the remote path
func (o *Object) Remote() string { func (o *Object) Remote() string {
origFileName, _, _, err := processFileName(o.Object.Remote(), o.f.modeHandler) origFileName, _, _, err := processFileName(o.Object.Remote())
if err != nil { if err != nil {
fs.Errorf(o.f, "Could not get remote path for: %s", o.Object.Remote()) fs.Errorf(o.f, "Could not get remote path for: %s", o.Object.Remote())
return o.Object.Remote() return o.Object.Remote()
@@ -1343,6 +1350,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
return o.Object.Open(ctx, options...) return o.Object.Open(ctx, options...)
} }
// Get offset and limit from OpenOptions, pass the rest to the underlying remote // Get offset and limit from OpenOptions, pass the rest to the underlying remote
var openOptions = []fs.OpenOption{&fs.SeekOption{Offset: 0}}
var offset, limit int64 = 0, -1 var offset, limit int64 = 0, -1
for _, option := range options { for _, option := range options {
switch x := option.(type) { switch x := option.(type) {
@@ -1350,12 +1358,31 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
offset = x.Offset offset = x.Offset
case *fs.RangeOption: case *fs.RangeOption:
offset, limit = x.Decode(o.Size()) offset, limit = x.Decode(o.Size())
default:
openOptions = append(openOptions, option)
} }
} }
// Get a chunkedreader for the wrapped object // Get a chunkedreader for the wrapped object
chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize, chunkStreams) chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize, chunkStreams)
var retCloser io.Closer = chunkedReader // Get file handle
return o.f.modeHandler.openGetReadCloser(ctx, o, offset, limit, chunkedReader, retCloser, options...) var file io.Reader
if offset != 0 {
file, err = sgzip.NewReaderAt(chunkedReader, &o.meta.CompressionMetadata, offset)
} else {
file, err = sgzip.NewReader(chunkedReader)
}
if err != nil {
return nil, err
}
var fileReader io.Reader
if limit != -1 {
fileReader = io.LimitReader(file, limit)
} else {
fileReader = file
}
// Return a ReadCloser
return ReadCloserWrapper{Reader: fileReader, Closer: chunkedReader}, nil
} }
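
From the caller's side, the Open path above is driven through fs.OpenOption; a hedged sketch (helper name hypothetical) of reading an arbitrary byte range of the decompressed content:

```go
package sketch

import (
	"context"
	"io"
	"os"

	"github.com/rclone/rclone/fs"
)

// readRange copies bytes [start, end] of the decompressed object to stdout.
// The RangeOption is decoded into offset/limit inside Open, which then seeks
// within the compressed data via the chunked reader shown above.
func readRange(ctx context.Context, o fs.Object, start, end int64) error {
	rc, err := o.Open(ctx, &fs.RangeOption{Start: start, End: end})
	if err != nil {
		return err
	}
	defer func() { _ = rc.Close() }()
	_, err = io.Copy(os.Stdout, rc)
	return err
}
```
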
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source // ObjectInfo describes a wrapped fs.ObjectInfo for being the source

View File

@@ -48,27 +48,7 @@ func TestRemoteGzip(t *testing.T) {
opt.ExtraConfig = []fstests.ExtraConfigItem{ opt.ExtraConfig = []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "compress"}, {Name: name, Key: "type", Value: "compress"},
{Name: name, Key: "remote", Value: tempdir}, {Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "mode", Value: "gzip"}, {Name: name, Key: "compression_mode", Value: "gzip"},
{Name: name, Key: "level", Value: "-1"},
}
opt.QuickTestOK = true
fstests.Run(t, &opt)
}
// TestRemoteZstd tests ZSTD compression
func TestRemoteZstd(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-zstd")
name := "TestCompressZstd"
opt := defaultOpt
opt.RemoteName = name + ":"
opt.ExtraConfig = []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "compress"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "mode", Value: "zstd"},
{Name: name, Key: "level", Value: "2"},
} }
opt.QuickTestOK = true opt.QuickTestOK = true
fstests.Run(t, &opt) fstests.Run(t, &opt)

View File

@@ -1,207 +0,0 @@
package compress
import (
"bufio"
"bytes"
"context"
"crypto/md5"
"encoding/hex"
"errors"
"io"
"github.com/buengese/sgzip"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/chunkedreader"
"github.com/rclone/rclone/fs/hash"
)
// gzipModeHandler implements compressionModeHandler for gzip
type gzipModeHandler struct{}
// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
// the configured threshold
func (g *gzipModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
var b bytes.Buffer
var n int64
w, err := sgzip.NewWriterLevel(&b, sgzip.DefaultCompression)
if err != nil {
return false, err
}
n, err = io.Copy(w, r)
if err != nil {
return false, err
}
err = w.Close()
if err != nil {
return false, err
}
ratio := float64(n) / float64(b.Len())
return ratio > minCompressionRatio, nil
}
// newObjectGetOriginalSize returns the original file size from the metadata
func (g *gzipModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
if meta.CompressionMetadataGzip == nil {
return 0, errors.New("missing gzip metadata")
}
return meta.CompressionMetadataGzip.Size, nil
}
// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
func (g *gzipModeHandler) openGetReadCloser(
ctx context.Context,
o *Object,
offset int64,
limit int64,
cr chunkedreader.ChunkedReader,
closer io.Closer,
options ...fs.OpenOption,
) (rc io.ReadCloser, err error) {
var file io.Reader
if offset != 0 {
file, err = sgzip.NewReaderAt(cr, o.meta.CompressionMetadataGzip, offset)
} else {
file, err = sgzip.NewReader(cr)
}
if err != nil {
return nil, err
}
var fileReader io.Reader
if limit != -1 {
fileReader = io.LimitReader(file, limit)
} else {
fileReader = file
}
// Return a ReadCloser
return ReadCloserWrapper{Reader: fileReader, Closer: closer}, nil
}
// processFileNameGetFileExtension returns the file extension for the given compression mode
func (g *gzipModeHandler) processFileNameGetFileExtension(compressionMode int) string {
if compressionMode == Gzip {
return gzFileExt
}
return ""
}
// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
func (g *gzipModeHandler) putCompress(
ctx context.Context,
f *Fs,
in io.Reader,
src fs.ObjectInfo,
options []fs.OpenOption,
mimeType string,
) (fs.Object, *ObjectMetadata, error) {
// Unwrap reader accounting
in, wrap := accounting.UnWrap(in)
// Add the metadata hasher
metaHasher := md5.New()
in = io.TeeReader(in, metaHasher)
// Compress the file
pipeReader, pipeWriter := io.Pipe()
resultsGzip := make(chan compressionResult[sgzip.GzipMetadata])
go func() {
gz, err := sgzip.NewWriterLevel(pipeWriter, f.opt.CompressionLevel)
if err != nil {
resultsGzip <- compressionResult[sgzip.GzipMetadata]{err: err, meta: sgzip.GzipMetadata{}}
close(resultsGzip)
return
}
_, err = io.Copy(gz, in)
gzErr := gz.Close()
if gzErr != nil && err == nil {
err = gzErr
}
closeErr := pipeWriter.Close()
if closeErr != nil && err == nil {
err = closeErr
}
resultsGzip <- compressionResult[sgzip.GzipMetadata]{err: err, meta: gz.MetaData()}
close(resultsGzip)
}()
wrappedIn := wrap(bufio.NewReaderSize(pipeReader, bufferSize)) // Probably no longer needed as sgzip has its own buffering
// Find a hash the destination supports to compute a hash of
// the compressed data.
ht := f.Fs.Hashes().GetOne()
var hasher *hash.MultiHasher
var err error
if ht != hash.None {
// unwrap the accounting again
wrappedIn, wrap = accounting.UnWrap(wrappedIn)
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
if err != nil {
return nil, nil, err
}
// add the hasher and re-wrap the accounting
wrappedIn = io.TeeReader(wrappedIn, hasher)
wrappedIn = wrap(wrappedIn)
}
// Transfer the data
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
if err != nil {
if o != nil {
if removeErr := o.Remove(ctx); removeErr != nil {
fs.Errorf(o, "Failed to remove partially transferred object: %v", removeErr)
}
}
return nil, nil, err
}
// Check whether we got an error during compression
result := <-resultsGzip
if result.err != nil {
if o != nil {
if removeErr := o.Remove(ctx); removeErr != nil {
fs.Errorf(o, "Failed to remove partially compressed object: %v", removeErr)
}
}
return nil, nil, result.err
}
// Generate metadata
meta := g.newMetadata(result.meta.Size, f.mode, result.meta, hex.EncodeToString(metaHasher.Sum(nil)), mimeType)
// Check the hashes of the compressed data if we were comparing them
if ht != hash.None && hasher != nil {
err = f.verifyObjectHash(ctx, o, hasher, ht)
if err != nil {
return nil, nil, err
}
}
return o, meta, nil
}
// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
func (g *gzipModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
return o, g.newMetadata(o.Size(), mode, sgzip.GzipMetadata{}, hex.EncodeToString(sum), mimeType), nil
}
// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
// Warning: This function panics if cmeta is not of the expected type.
func (g *gzipModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
meta, ok := cmeta.(sgzip.GzipMetadata)
if !ok {
panic("invalid cmeta type: expected sgzip.GzipMetadata")
}
objMeta := new(ObjectMetadata)
objMeta.Size = size
objMeta.Mode = mode
objMeta.CompressionMetadataGzip = &meta
objMeta.CompressionMetadataZstd = nil
objMeta.MD5 = md5
objMeta.MimeType = mimeType
return objMeta
}

View File

@@ -1,327 +0,0 @@
package compress
import (
"context"
"errors"
"io"
"runtime"
"sync"
szstd "github.com/a1ex3/zstd-seekable-format-go/pkg"
"github.com/klauspost/compress/zstd"
)
const szstdChunkSize int = 1 << 20 // 1 MiB chunk size
// SzstdMetadata holds metadata for szstd compressed files.
type SzstdMetadata struct {
BlockSize int // BlockSize is the size of the blocks in the zstd file
Size int64 // Size is the uncompressed size of the file
BlockData []uint32 // BlockData is the block data for the zstd file, used for seeking
}
// SzstdWriter is a writer that compresses data in szstd format.
type SzstdWriter struct {
enc *zstd.Encoder
w szstd.ConcurrentWriter
metadata SzstdMetadata
mu sync.Mutex
}
// NewWriterSzstd creates a new szstd writer with the specified options.
// It initializes the szstd writer with a zstd encoder and returns a pointer to the SzstdWriter.
// The writer can be used to write data in chunks, and it will automatically handle block sizes and metadata.
func NewWriterSzstd(w io.Writer, opts ...zstd.EOption) (*SzstdWriter, error) {
encoder, err := zstd.NewWriter(nil, opts...)
if err != nil {
return nil, err
}
sw, err := szstd.NewWriter(w, encoder)
if err != nil {
if err := encoder.Close(); err != nil {
return nil, err
}
return nil, err
}
return &SzstdWriter{
enc: encoder,
w: sw,
metadata: SzstdMetadata{
BlockSize: szstdChunkSize,
Size: 0,
},
}, nil
}
// Write writes data to the szstd writer in chunks of szstdChunkSize.
// It handles the block size and metadata updates automatically.
func (w *SzstdWriter) Write(p []byte) (int, error) {
if len(p) == 0 {
return 0, nil
}
if w.metadata.BlockData == nil {
numBlocks := (len(p) + w.metadata.BlockSize - 1) / w.metadata.BlockSize
w.metadata.BlockData = make([]uint32, 1, numBlocks+1)
w.metadata.BlockData[0] = 0
}
start := 0
total := len(p)
var writerFunc szstd.FrameSource = func() ([]byte, error) {
if start >= total {
return nil, nil
}
end := min(start+w.metadata.BlockSize, total)
chunk := p[start:end]
size := end - start
w.mu.Lock()
w.metadata.Size += int64(size)
w.mu.Unlock()
start = end
return chunk, nil
}
// write sizes of compressed blocks in the callback
err := w.w.WriteMany(context.Background(), writerFunc,
szstd.WithWriteCallback(func(size uint32) {
w.mu.Lock()
lastOffset := w.metadata.BlockData[len(w.metadata.BlockData)-1]
w.metadata.BlockData = append(w.metadata.BlockData, lastOffset+size)
w.mu.Unlock()
}),
)
if err != nil {
return 0, err
}
return total, nil
}
// Close closes the SzstdWriter and its underlying encoder.
func (w *SzstdWriter) Close() error {
if err := w.w.Close(); err != nil {
return err
}
if err := w.enc.Close(); err != nil {
return err
}
return nil
}
// GetMetadata returns the metadata of the szstd writer.
func (w *SzstdWriter) GetMetadata() SzstdMetadata {
return w.metadata
}
// SzstdReaderAt is a reader that allows random access in szstd compressed data.
type SzstdReaderAt struct {
r szstd.Reader
decoder *zstd.Decoder
metadata *SzstdMetadata
pos int64
mu sync.Mutex
}
// NewReaderAtSzstd creates a new SzstdReaderAt reading from the specified io.ReadSeeker, positioned at the given offset.
func NewReaderAtSzstd(rs io.ReadSeeker, meta *SzstdMetadata, offset int64, opts ...zstd.DOption) (*SzstdReaderAt, error) {
decoder, err := zstd.NewReader(nil, opts...)
if err != nil {
return nil, err
}
r, err := szstd.NewReader(rs, decoder)
if err != nil {
decoder.Close()
return nil, err
}
sr := &SzstdReaderAt{
r: r,
decoder: decoder,
metadata: meta,
pos: 0,
}
// Set initial position to the provided offset
if _, err := sr.Seek(offset, io.SeekStart); err != nil {
if err := sr.Close(); err != nil {
return nil, err
}
return nil, err
}
return sr, nil
}
// Seek sets the offset for the next Read.
func (s *SzstdReaderAt) Seek(offset int64, whence int) (int64, error) {
s.mu.Lock()
defer s.mu.Unlock()
pos, err := s.r.Seek(offset, whence)
if err == nil {
s.pos = pos
}
return pos, err
}
func (s *SzstdReaderAt) Read(p []byte) (int, error) {
s.mu.Lock()
defer s.mu.Unlock()
n, err := s.r.Read(p)
if err == nil {
s.pos += int64(n)
}
return n, err
}
// ReadAt reads data at the specified offset.
func (s *SzstdReaderAt) ReadAt(p []byte, off int64) (int, error) {
if off < 0 {
return 0, errors.New("invalid offset")
}
if off >= s.metadata.Size {
return 0, io.EOF
}
endOff := min(off+int64(len(p)), s.metadata.Size)
// Find all blocks covered by the range
type blockInfo struct {
index int // Block index
offsetInBlock int64 // Offset within the block for starting reading
bytesToRead int64 // How many bytes to read from this block
}
var blocks []blockInfo
uncompressedOffset := int64(0)
currentOff := off
for i := 0; i < len(s.metadata.BlockData)-1; i++ {
blockUncompressedEnd := min(uncompressedOffset+int64(s.metadata.BlockSize), s.metadata.Size)
if currentOff < blockUncompressedEnd && endOff > uncompressedOffset {
offsetInBlock := max(0, currentOff-uncompressedOffset)
bytesToRead := min(blockUncompressedEnd-uncompressedOffset-offsetInBlock, endOff-currentOff)
blocks = append(blocks, blockInfo{
index: i,
offsetInBlock: offsetInBlock,
bytesToRead: bytesToRead,
})
currentOff += bytesToRead
if currentOff >= endOff {
break
}
}
uncompressedOffset = blockUncompressedEnd
}
if len(blocks) == 0 {
return 0, io.EOF
}
// Parallel block decoding
type decodeResult struct {
index int
data []byte
err error
}
resultCh := make(chan decodeResult, len(blocks))
var wg sync.WaitGroup
sem := make(chan struct{}, runtime.NumCPU())
for _, block := range blocks {
wg.Add(1)
go func(block blockInfo) {
defer wg.Done()
sem <- struct{}{}
defer func() { <-sem }()
startOffset := int64(s.metadata.BlockData[block.index])
endOffset := int64(s.metadata.BlockData[block.index+1])
compressedSize := endOffset - startOffset
compressed := make([]byte, compressedSize)
_, err := s.r.ReadAt(compressed, startOffset)
if err != nil && err != io.EOF {
resultCh <- decodeResult{index: block.index, err: err}
return
}
decoded, err := s.decoder.DecodeAll(compressed, nil)
if err != nil {
resultCh <- decodeResult{index: block.index, err: err}
return
}
resultCh <- decodeResult{index: block.index, data: decoded, err: nil}
}(block)
}
go func() {
wg.Wait()
close(resultCh)
}()
// Collect results in block index order
totalRead := 0
results := make(map[int]decodeResult)
expected := len(blocks)
minIndex := blocks[0].index
for res := range resultCh {
results[res.index] = res
for {
if result, ok := results[minIndex]; ok {
if result.err != nil {
return 0, result.err
}
// find the corresponding blockInfo
var blk blockInfo
for _, b := range blocks {
if b.index == result.index {
blk = b
break
}
}
start := blk.offsetInBlock
end := start + blk.bytesToRead
copy(p[totalRead:totalRead+int(blk.bytesToRead)], result.data[start:end])
totalRead += int(blk.bytesToRead)
minIndex++
if minIndex-blocks[0].index >= len(blocks) {
break
}
} else {
break
}
}
if len(results) == expected && minIndex-blocks[0].index >= len(blocks) {
break
}
}
return totalRead, nil
}
// Close closes the SzstdReaderAt and underlying decoder.
func (s *SzstdReaderAt) Close() error {
if err := s.r.Close(); err != nil {
return err
}
s.decoder.Close()
return nil
}
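
A hedged round-trip sketch of the seekable zstd helpers above, assuming it sits in the same package: compress into a buffer, keep the returned SzstdMetadata, then read from an arbitrary uncompressed offset without decoding everything before it.

```go
// Assumes the compress package and imports "bytes", "io" and
// "github.com/klauspost/compress/zstd".
func exampleSeekableZstd() error {
	var buf bytes.Buffer
	w, err := NewWriterSzstd(&buf, zstd.WithEncoderLevel(zstd.SpeedDefault))
	if err != nil {
		return err
	}
	// ~2.5 MiB of data, chunked into 1 MiB blocks internally by Write.
	if _, err := w.Write(bytes.Repeat([]byte("0123456789"), 1<<18)); err != nil {
		return err
	}
	if err := w.Close(); err != nil {
		return err
	}
	meta := w.GetMetadata()

	// Start reading at the 1 MiB mark of the *uncompressed* stream.
	r, err := NewReaderAtSzstd(bytes.NewReader(buf.Bytes()), &meta, 1<<20)
	if err != nil {
		return err
	}
	defer func() { _ = r.Close() }()
	head := make([]byte, 16)
	_, err = io.ReadFull(r, head)
	return err
}
```
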

View File

@@ -1,65 +0,0 @@
package compress
import (
"context"
"fmt"
"io"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/chunkedreader"
)
// uncompressedModeHandler implements compressionModeHandler for uncompressed files
type uncompressedModeHandler struct{}
// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
// the configured threshold
func (u *uncompressedModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
return false, nil
}
// newObjectGetOriginalSize returns the original file size from the metadata
func (u *uncompressedModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
return 0, nil
}
// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
func (u *uncompressedModeHandler) openGetReadCloser(
ctx context.Context,
o *Object,
offset int64,
limit int64,
cr chunkedreader.ChunkedReader,
closer io.Closer,
options ...fs.OpenOption,
) (rc io.ReadCloser, err error) {
return o.Object.Open(ctx, options...)
}
// processFileNameGetFileExtension returns the file extension for the given compression mode
func (u *uncompressedModeHandler) processFileNameGetFileExtension(compressionMode int) string {
return ""
}
// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
func (u *uncompressedModeHandler) putCompress(
ctx context.Context,
f *Fs,
in io.Reader,
src fs.ObjectInfo,
options []fs.OpenOption,
mimeType string,
) (fs.Object, *ObjectMetadata, error) {
return nil, nil, fmt.Errorf("unsupported compression mode %d", f.mode)
}
// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
func (u *uncompressedModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
return nil, nil, fmt.Errorf("unsupported compression mode %d", Uncompressed)
}
// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
// Warning: This function panics if cmeta is not of the expected type.
func (u *uncompressedModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
return nil
}

View File

@@ -1,65 +0,0 @@
package compress
import (
"context"
"fmt"
"io"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/chunkedreader"
)
// unknownModeHandler implements compressionModeHandler for unknown compression types
type unknownModeHandler struct{}
// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
// the configured threshold
func (unk *unknownModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
return false, fmt.Errorf("unknown compression mode %d", compressionMode)
}
// newObjectGetOriginalSize returns the original file size from the metadata
func (unk *unknownModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
return 0, nil
}
// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
func (unk *unknownModeHandler) openGetReadCloser(
ctx context.Context,
o *Object,
offset int64,
limit int64,
cr chunkedreader.ChunkedReader,
closer io.Closer,
options ...fs.OpenOption,
) (rc io.ReadCloser, err error) {
return nil, fmt.Errorf("unknown compression mode %d", o.meta.Mode)
}
// processFileNameGetFileExtension returns the file extension for the given compression mode
func (unk *unknownModeHandler) processFileNameGetFileExtension(compressionMode int) string {
return ""
}
// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
func (unk *unknownModeHandler) putCompress(
ctx context.Context,
f *Fs,
in io.Reader,
src fs.ObjectInfo,
options []fs.OpenOption,
mimeType string,
) (fs.Object, *ObjectMetadata, error) {
return nil, nil, fmt.Errorf("unknown compression mode %d", f.mode)
}
// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
func (unk *unknownModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
return nil, nil, fmt.Errorf("unknown compression mode")
}
// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
// Warning: This function panics if cmeta is not of the expected type.
func (unk *unknownModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
return nil
}

View File

@@ -1,192 +0,0 @@
package compress
import (
"bufio"
"bytes"
"context"
"crypto/md5"
"encoding/hex"
"errors"
"io"
"github.com/klauspost/compress/zstd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/chunkedreader"
"github.com/rclone/rclone/fs/hash"
)
// zstdModeHandler implements compressionModeHandler for zstd
type zstdModeHandler struct{}
// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
// the configured threshold
func (z *zstdModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
var b bytes.Buffer
var n int64
w, err := NewWriterSzstd(&b, zstd.WithEncoderLevel(zstd.SpeedDefault))
if err != nil {
return false, err
}
n, err = io.Copy(w, r)
if err != nil {
return false, err
}
err = w.Close()
if err != nil {
return false, err
}
ratio := float64(n) / float64(b.Len())
return ratio > minCompressionRatio, nil
}
// newObjectGetOriginalSize returns the original file size from the metadata
func (z *zstdModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
if meta.CompressionMetadataZstd == nil {
return 0, errors.New("missing zstd metadata")
}
return meta.CompressionMetadataZstd.Size, nil
}
// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
func (z *zstdModeHandler) openGetReadCloser(
ctx context.Context,
o *Object,
offset int64,
limit int64,
cr chunkedreader.ChunkedReader,
closer io.Closer,
options ...fs.OpenOption,
) (rc io.ReadCloser, err error) {
var file io.Reader
if offset != 0 {
file, err = NewReaderAtSzstd(cr, o.meta.CompressionMetadataZstd, offset)
} else {
file, err = zstd.NewReader(cr)
}
if err != nil {
return nil, err
}
var fileReader io.Reader
if limit != -1 {
fileReader = io.LimitReader(file, limit)
} else {
fileReader = file
}
// Return a ReadCloser
return ReadCloserWrapper{Reader: fileReader, Closer: closer}, nil
}
// processFileNameGetFileExtension returns the file extension for the given compression mode
func (z *zstdModeHandler) processFileNameGetFileExtension(compressionMode int) string {
if compressionMode == Zstd {
return zstdFileExt
}
return ""
}
// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
func (z *zstdModeHandler) putCompress(
ctx context.Context,
f *Fs,
in io.Reader,
src fs.ObjectInfo,
options []fs.OpenOption,
mimeType string,
) (fs.Object, *ObjectMetadata, error) {
// Unwrap reader accounting
in, wrap := accounting.UnWrap(in)
// Add the metadata hasher
metaHasher := md5.New()
in = io.TeeReader(in, metaHasher)
// Compress the file
pipeReader, pipeWriter := io.Pipe()
resultsZstd := make(chan compressionResult[SzstdMetadata])
go func() {
writer, err := NewWriterSzstd(pipeWriter, zstd.WithEncoderLevel(zstd.EncoderLevel(f.opt.CompressionLevel)))
if err != nil {
resultsZstd <- compressionResult[SzstdMetadata]{err: err}
close(resultsZstd)
return
}
_, err = io.Copy(writer, in)
if wErr := writer.Close(); wErr != nil && err == nil {
err = wErr
}
if cErr := pipeWriter.Close(); cErr != nil && err == nil {
err = cErr
}
resultsZstd <- compressionResult[SzstdMetadata]{err: err, meta: writer.GetMetadata()}
close(resultsZstd)
}()
wrappedIn := wrap(bufio.NewReaderSize(pipeReader, bufferSize))
ht := f.Fs.Hashes().GetOne()
var hasher *hash.MultiHasher
var err error
if ht != hash.None {
wrappedIn, wrap = accounting.UnWrap(wrappedIn)
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
if err != nil {
return nil, nil, err
}
wrappedIn = io.TeeReader(wrappedIn, hasher)
wrappedIn = wrap(wrappedIn)
}
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
if err != nil {
return nil, nil, err
}
result := <-resultsZstd
if result.err != nil {
if o != nil {
_ = o.Remove(ctx)
}
return nil, nil, result.err
}
// Build metadata using uncompressed size for filename
meta := z.newMetadata(result.meta.Size, f.mode, result.meta, hex.EncodeToString(metaHasher.Sum(nil)), mimeType)
if ht != hash.None && hasher != nil {
err = f.verifyObjectHash(ctx, o, hasher, ht)
if err != nil {
return nil, nil, err
}
}
return o, meta, nil
}
// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
func (z *zstdModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
return o, z.newMetadata(o.Size(), mode, SzstdMetadata{}, hex.EncodeToString(sum), mimeType), nil
}
// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
// Warning: This function panics if cmeta is not of the expected type.
func (z *zstdModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
meta, ok := cmeta.(SzstdMetadata)
if !ok {
panic("invalid cmeta type: expected SzstdMetadata")
}
objMeta := new(ObjectMetadata)
objMeta.Size = size
objMeta.Mode = mode
objMeta.CompressionMetadataGzip = nil
objMeta.CompressionMetadataZstd = &meta
objMeta.MD5 = md5
objMeta.MimeType = mimeType
return objMeta
}

View File

@@ -192,7 +192,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
dirNameEncrypt: dirNameEncrypt, dirNameEncrypt: dirNameEncrypt,
encryptedSuffix: ".bin", encryptedSuffix: ".bin",
} }
c.buffers.New = func() any { c.buffers.New = func() interface{} {
return new([blockSize]byte) return new([blockSize]byte)
} }
err := c.Key(password, salt) err := c.Key(password, salt)
@@ -336,7 +336,7 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
_, _ = result.WriteString(strconv.Itoa(dir) + ".") _, _ = result.WriteString(strconv.Itoa(dir) + ".")
// but we'll augment it with the nameKey for real calculation // but we'll augment it with the nameKey for real calculation
for i := range len(c.nameKey) { for i := 0; i < len(c.nameKey); i++ {
dir += int(c.nameKey[i]) dir += int(c.nameKey[i])
} }
@@ -403,14 +403,14 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
if ciphertext == "" { if ciphertext == "" {
return "", nil return "", nil
} }
before, after, ok := strings.Cut(ciphertext, ".") pos := strings.Index(ciphertext, ".")
if !ok { if pos == -1 {
return "", ErrorNotAnEncryptedFile return "", ErrorNotAnEncryptedFile
} // No . } // No .
num := before num := ciphertext[:pos]
if num == "!" { if num == "!" {
// No rotation; probably original was not valid unicode // No rotation; probably original was not valid unicode
return after, nil return ciphertext[pos+1:], nil
} }
dir, err := strconv.Atoi(num) dir, err := strconv.Atoi(num)
if err != nil { if err != nil {
@@ -418,14 +418,14 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
} }
// add the nameKey to get the real rotate distance // add the nameKey to get the real rotate distance
for i := range len(c.nameKey) { for i := 0; i < len(c.nameKey); i++ {
dir += int(c.nameKey[i]) dir += int(c.nameKey[i])
} }
var result bytes.Buffer var result bytes.Buffer
inQuote := false inQuote := false
for _, runeValue := range after { for _, runeValue := range ciphertext[pos+1:] {
switch { switch {
case inQuote: case inQuote:
_, _ = result.WriteRune(runeValue) _, _ = result.WriteRune(runeValue)
@@ -664,7 +664,7 @@ func (n *nonce) increment() {
// add a uint64 to the nonce // add a uint64 to the nonce
func (n *nonce) add(x uint64) { func (n *nonce) add(x uint64) {
carry := uint16(0) carry := uint16(0)
for i := range 8 { for i := 0; i < 8; i++ {
digit := (*n)[i] digit := (*n)[i]
xDigit := byte(x) xDigit := byte(x)
x >>= 8 x >>= 8
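
These hunks are mostly Go-version modernisation rather than behaviour changes; a tiny sketch of the two equivalences involved (not rclone code): strings.Cut versus strings.Index, and the range-over-int loop versus a classic counted loop.

```go
package sketch

import "strings"

// splitAtDot uses the strings.Cut form that appears in the diff.
func splitAtDot(s string) (num, rest string, ok bool) {
	return strings.Cut(s, ".")
}

// splitAtDotOld is the equivalent pre-Cut pattern using strings.Index.
func splitAtDotOld(s string) (num, rest string, ok bool) {
	pos := strings.Index(s, ".")
	if pos == -1 {
		return "", "", false
	}
	return s[:pos], s[pos+1:], true
}

// sumFirst8 shows the range-over-int loop (Go 1.22+) next to the classic form.
func sumFirst8(key []byte) (dir int) {
	for i := range 8 {
		dir += int(key[i])
	}
	// equivalent classic form:
	// for i := 0; i < 8; i++ { dir += int(key[i]) }
	return dir
}
```
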

View File

@@ -1307,7 +1307,10 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) { open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
end := len(ciphertext) end := len(ciphertext)
if underlyingLimit >= 0 { if underlyingLimit >= 0 {
end = min(int(underlyingOffset+underlyingLimit), len(ciphertext)) end = int(underlyingOffset + underlyingLimit)
if end > len(ciphertext) {
end = len(ciphertext)
}
} }
reader = io.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end])) reader = io.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
return reader, nil return reader, nil
@@ -1487,7 +1490,7 @@ func TestDecrypterRead(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
// Test truncating the file at each possible point // Test truncating the file at each possible point
for i := range len(file16) - 1 { for i := 0; i < len(file16)-1; i++ {
what := fmt.Sprintf("truncating to %d/%d", i, len(file16)) what := fmt.Sprintf("truncating to %d/%d", i, len(file16))
cd := newCloseDetector(bytes.NewBuffer(file16[:i])) cd := newCloseDetector(bytes.NewBuffer(file16[:i]))
fh, err := c.newDecrypter(cd) fh, err := c.newDecrypter(cd)

View File

@@ -18,7 +18,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
) )
// Globals // Globals
@@ -294,9 +293,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
PartialUploads: true, PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs) }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
// Enable ListP always
f.features.ListP = f.ListP
return f, err return f, err
} }
@@ -420,40 +416,11 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
// This should return ErrDirNotFound if the directory isn't // This should return ErrDirNotFound if the directory isn't
// found. // found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f) entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir))
} if err != nil {
return nil, err
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
wrappedCallback := func(entries fs.DirEntries) error {
entries, err := f.encryptEntries(ctx, entries)
if err != nil {
return err
}
return callback(entries)
} }
listP := f.Fs.Features().ListP return f.encryptEntries(ctx, entries)
encryptedDir := f.cipher.EncryptDirName(dir)
if listP == nil {
entries, err := f.Fs.List(ctx, encryptedDir)
if err != nil {
return err
}
return wrappedCallback(entries)
}
return listP(ctx, encryptedDir, wrappedCallback)
} }
// ListR lists the objects and directories of the Fs starting // ListR lists the objects and directories of the Fs starting
@@ -923,30 +890,28 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
var commandHelp = []fs.CommandHelp{ var commandHelp = []fs.CommandHelp{
{ {
Name: "encode", Name: "encode",
Short: "Encode the given filename(s).", Short: "Encode the given filename(s)",
Long: `This encodes the filenames given as arguments returning a list of Long: `This encodes the filenames given as arguments returning a list of
strings of the encoded results. strings of the encoded results.
Usage examples: Usage Example:
` + "```console" + ` rclone backend encode crypt: file1 [file2...]
rclone backend encode crypt: file1 [file2...] rclone rc backend/command command=encode fs=crypt: file1 [file2...]
rclone rc backend/command command=encode fs=crypt: file1 [file2...] `,
` + "```",
}, },
{ {
Name: "decode", Name: "decode",
Short: "Decode the given filename(s).", Short: "Decode the given filename(s)",
Long: `This decodes the filenames given as arguments returning a list of Long: `This decodes the filenames given as arguments returning a list of
strings of the decoded results. It will return an error if any of the strings of the decoded results. It will return an error if any of the
inputs are invalid. inputs are invalid.
Usage examples: Usage Example:
` + "```console" + ` rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
rclone backend decode crypt: encryptedfile1 [encryptedfile2...] rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...] `,
` + "```",
}, },
} }
@@ -959,7 +924,7 @@ rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile
// The result should be capable of being JSON encoded // The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user // If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that // otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) { func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name { switch name {
case "decode": case "decode":
out := make([]string, 0, len(arg)) out := make([]string, 0, len(arg))

View File

@@ -25,7 +25,7 @@ func Pad(n int, buf []byte) []byte {
} }
length := len(buf) length := len(buf)
padding := n - (length % n) padding := n - (length % n)
for range padding { for i := 0; i < padding; i++ {
buf = append(buf, byte(padding)) buf = append(buf, byte(padding))
} }
if (len(buf) % n) != 0 { if (len(buf) % n) != 0 {
@@ -54,7 +54,7 @@ func Unpad(n int, buf []byte) ([]byte, error) {
if padding == 0 { if padding == 0 {
return nil, ErrorPaddingTooShort return nil, ErrorPaddingTooShort
} }
for i := range padding { for i := 0; i < padding; i++ {
if buf[length-1-i] != byte(padding) { if buf[length-1-i] != byte(padding) {
return nil, ErrorPaddingNotAllTheSame return nil, ErrorPaddingNotAllTheSame
} }
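The two hunks above modernise the padding helpers to range-over-int loops. As a point of reference only, here is a minimal, self-contained sketch of the same PKCS#7-style scheme (not the repository's actual package; the names and error messages are invented) showing how the pad and unpad halves round-trip:

package main

import (
	"errors"
	"fmt"
)

// pad appends between 1 and n bytes, each holding the count of padding bytes added
func pad(n int, buf []byte) []byte {
	padding := n - (len(buf) % n)
	for range padding {
		buf = append(buf, byte(padding))
	}
	return buf
}

// unpad reads the last byte to find the padding count, verifies every
// padding byte matches it, and strips them off
func unpad(n int, buf []byte) ([]byte, error) {
	length := len(buf)
	if length == 0 || length%n != 0 {
		return nil, errors.New("buffer is not a multiple of the block size")
	}
	padding := int(buf[length-1])
	if padding == 0 || padding > n {
		return nil, errors.New("bad padding byte")
	}
	for i := range padding {
		if buf[length-1-i] != byte(padding) {
			return nil, errors.New("padding bytes differ")
		}
	}
	return buf[:length-padding], nil
}

func main() {
	padded := pad(16, []byte("hello"))
	fmt.Printf("padded to %d bytes\n", len(padded)) // 16
	plain, err := unpad(16, padded)
	fmt.Println(string(plain), err) // hello <nil>
}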


@@ -1,38 +0,0 @@
// Type definitions specific to Dataverse
package api
// DataverseDatasetResponse is returned by the Dataverse dataset API
type DataverseDatasetResponse struct {
Status string `json:"status"`
Data DataverseDataset `json:"data"`
}
// DataverseDataset is the representation of a dataset
type DataverseDataset struct {
LatestVersion DataverseDatasetVersion `json:"latestVersion"`
}
// DataverseDatasetVersion is the representation of a dataset version
type DataverseDatasetVersion struct {
LastUpdateTime string `json:"lastUpdateTime"`
Files []DataverseFile `json:"files"`
}
// DataverseFile is the representation of a file found in a dataset
type DataverseFile struct {
DirectoryLabel string `json:"directoryLabel"`
DataFile DataverseDataFile `json:"dataFile"`
}
// DataverseDataFile represents file metadata details
type DataverseDataFile struct {
ID int64 `json:"id"`
Filename string `json:"filename"`
ContentType string `json:"contentType"`
FileSize int64 `json:"filesize"`
OriginalFileFormat string `json:"originalFileFormat"`
OriginalFileSize int64 `json:"originalFileSize"`
OriginalFileName string `json:"originalFileName"`
MD5 string `json:"md5"`
}

View File

@@ -1,33 +0,0 @@
// Type definitions specific to InvenioRDM
package api
// InvenioRecordResponse is the representation of a record stored in InvenioRDM
type InvenioRecordResponse struct {
Links InvenioRecordResponseLinks `json:"links"`
}
// InvenioRecordResponseLinks represents a record's links
type InvenioRecordResponseLinks struct {
Self string `json:"self"`
}
// InvenioFilesResponse is the representation of a record's files
type InvenioFilesResponse struct {
Entries []InvenioFilesResponseEntry `json:"entries"`
}
// InvenioFilesResponseEntry is the representation of a file entry
type InvenioFilesResponseEntry struct {
Key string `json:"key"`
Checksum string `json:"checksum"`
Size int64 `json:"size"`
Updated string `json:"updated"`
MimeType string `json:"mimetype"`
Links InvenioFilesResponseEntryLinks `json:"links"`
}
// InvenioFilesResponseEntryLinks represents file links details
type InvenioFilesResponseEntryLinks struct {
Content string `json:"content"`
}


@@ -1,26 +0,0 @@
// Package api has general type definitions for doi
package api
// DoiResolverResponse is returned by the DOI resolver API
//
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
type DoiResolverResponse struct {
ResponseCode int `json:"responseCode"`
Handle string `json:"handle"`
Values []DoiResolverResponseValue `json:"values"`
}
// DoiResolverResponseValue is a single handle record value
type DoiResolverResponseValue struct {
Index int `json:"index"`
Type string `json:"type"`
Data DoiResolverResponseValueData `json:"data"`
TTL int `json:"ttl"`
Timestamp string `json:"timestamp"`
}
// DoiResolverResponseValueData is the data held in a handle value
type DoiResolverResponseValueData struct {
Format string `json:"format"`
Value any `json:"value"`
}
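For reference, a small sketch of how these handle-record types are typically consumed: scan Values for an entry of type "URL" whose data is a plain string, as resolveDoiURL does later in this backend. The JSON payload here is invented (the DOI and URL are borrowed from the test file); only the types come from above.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/rclone/rclone/backend/doi/api"
)

const sample = `{
  "responseCode": 1,
  "handle": "10.5281/zenodo.2600782",
  "values": [{
    "index": 1,
    "type": "URL",
    "data": {"format": "string", "value": "https://zenodo.org/record/2600782"},
    "ttl": 86400,
    "timestamp": "2024-01-02T03:04:05Z"
  }]
}`

func main() {
	var resp api.DoiResolverResponse
	if err := json.Unmarshal([]byte(sample), &resp); err != nil {
		panic(err)
	}
	for _, v := range resp.Values {
		// pick the first URL value held as a plain string
		if v.Type == "URL" && v.Data.Format == "string" {
			if s, ok := v.Data.Value.(string); ok {
				fmt.Println("resolved to", s)
			}
		}
	}
}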


@@ -1,112 +0,0 @@
// Implementation for Dataverse
package doi
import (
"context"
"fmt"
"net/http"
"net/url"
"path"
"strings"
"time"
"github.com/rclone/rclone/backend/doi/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/rest"
)
// Returns true if resolvedURL is likely a DOI hosted on a Dataverse installation
func activateDataverse(resolvedURL *url.URL) (isActive bool) {
queryValues := resolvedURL.Query()
persistentID := queryValues.Get("persistentId")
return persistentID != ""
}
// Resolve the main API endpoint for a DOI hosted on a Dataverse installation
func resolveDataverseEndpoint(resolvedURL *url.URL) (provider Provider, endpoint *url.URL, err error) {
queryValues := resolvedURL.Query()
persistentID := queryValues.Get("persistentId")
query := url.Values{}
query.Add("persistentId", persistentID)
endpointURL := resolvedURL.ResolveReference(&url.URL{Path: "/api/datasets/:persistentId/", RawQuery: query.Encode()})
return Dataverse, endpointURL, nil
}
// dataverseProvider implements the doiProvider interface for Dataverse installations
type dataverseProvider struct {
f *Fs
}
// ListEntries returns the full list of entries found at the remote, regardless of root
func (dp *dataverseProvider) ListEntries(ctx context.Context) (entries []*Object, err error) {
// Use the cache if populated
cachedEntries, found := dp.f.cache.GetMaybe("files")
if found {
parsedEntries, ok := cachedEntries.([]Object)
if ok {
for _, entry := range parsedEntries {
newEntry := entry
entries = append(entries, &newEntry)
}
return entries, nil
}
}
filesURL := dp.f.endpoint
var res *http.Response
var result api.DataverseDatasetResponse
opts := rest.Opts{
Method: "GET",
Path: strings.TrimLeft(filesURL.EscapedPath(), "/"),
Parameters: filesURL.Query(),
}
err = dp.f.pacer.Call(func() (bool, error) {
res, err = dp.f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, fmt.Errorf("readDir failed: %w", err)
}
modTime, modTimeErr := time.Parse(time.RFC3339, result.Data.LatestVersion.LastUpdateTime)
if modTimeErr != nil {
fs.Logf(dp.f, "error: could not parse last update time %v", modTimeErr)
modTime = timeUnset
}
for _, file := range result.Data.LatestVersion.Files {
contentURLPath := fmt.Sprintf("/api/access/datafile/%d", file.DataFile.ID)
query := url.Values{}
query.Add("format", "original")
contentURL := dp.f.endpoint.ResolveReference(&url.URL{Path: contentURLPath, RawQuery: query.Encode()})
entry := &Object{
fs: dp.f,
remote: path.Join(file.DirectoryLabel, file.DataFile.Filename),
contentURL: contentURL.String(),
size: file.DataFile.FileSize,
modTime: modTime,
md5: file.DataFile.MD5,
contentType: file.DataFile.ContentType,
}
if file.DataFile.OriginalFileName != "" {
entry.remote = path.Join(file.DirectoryLabel, file.DataFile.OriginalFileName)
entry.size = file.DataFile.OriginalFileSize
entry.contentType = file.DataFile.OriginalFileFormat
}
entries = append(entries, entry)
}
// Populate the cache
cacheEntries := []Object{}
for _, entry := range entries {
cacheEntries = append(cacheEntries, *entry)
}
dp.f.cache.Put("files", cacheEntries)
return entries, nil
}
func newDataverseProvider(f *Fs) doiProvider {
return &dataverseProvider{
f: f,
}
}


@@ -1,653 +0,0 @@
// Package doi provides a filesystem interface for digital objects identified by DOIs.
//
// See: https://www.doi.org/the-identifier/what-is-a-doi/
package doi
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"path"
"strings"
"time"
"github.com/rclone/rclone/backend/doi/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/cache"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
const (
// the URL of the DOI resolver
//
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
doiResolverAPIURL = "https://doi.org/api"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
var (
errorReadOnly = errors.New("doi remotes are read only")
timeUnset = time.Unix(0, 0)
)
func init() {
fsi := &fs.RegInfo{
Name: "doi",
Description: "DOI datasets",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "doi",
Help: "The DOI or the doi.org URL.",
Required: true,
}, {
Name: fs.ConfigProvider,
Help: `DOI provider.
The DOI provider can be set when rclone does not automatically recognize a supported DOI provider.`,
Examples: []fs.OptionExample{
{
Value: "auto",
Help: "Auto-detect provider",
},
{
Value: string(Zenodo),
Help: "Zenodo",
}, {
Value: string(Dataverse),
Help: "Dataverse",
}, {
Value: string(Invenio),
Help: "Invenio",
}},
Required: false,
Advanced: true,
}, {
Name: "doi_resolver_api_url",
Help: `The URL of the DOI resolver API to use.
The DOI resolver can be set for testing or for cases when the canonical DOI resolver API cannot be used.
Defaults to "https://doi.org/api".`,
Required: false,
Advanced: true,
}},
}
fs.Register(fsi)
}
// Provider defines the type of provider hosting the DOI
type Provider string
const (
// Zenodo provider, see https://zenodo.org
Zenodo Provider = "zenodo"
// Dataverse provider, see https://dataverse.harvard.edu
Dataverse Provider = "dataverse"
// Invenio provider, see https://inveniordm.docs.cern.ch
Invenio Provider = "invenio"
)
// Options defines the configuration for this backend
type Options struct {
Doi string `config:"doi"` // The DOI, a digital identifier of an object, usually a dataset
Provider string `config:"provider"` // The DOI provider
DoiResolverAPIURL string `config:"doi_resolver_api_url"` // The URL of the DOI resolver API to use.
}
// Fs stores the interface to the remote HTTP files
type Fs struct {
name string // name of this remote
root string // the path we are working on
provider Provider // the DOI provider
doiProvider doiProvider // the interface used to interact with the DOI provider
features *fs.Features // optional features
opt Options // options for this backend
ci *fs.ConfigInfo // global config
endpoint *url.URL // the main API endpoint for this remote
endpointURL string // endpoint as a string
srv *rest.Client // the connection to the server
pacer *fs.Pacer // pacer for API calls
cache *cache.Cache // a cache for the remote metadata
}
// Object is a remote object that has been stat'd (so it exists, but is not necessarily open for reading)
type Object struct {
fs *Fs // what this object is part of
remote string // the remote path
contentURL string // the URL where the contents of the file can be downloaded
size int64 // size of the object
modTime time.Time // modification time of the object
contentType string // content type of the object
md5 string // MD5 hash of the object content
}
// doiProvider is the interface used to list objects in a DOI
type doiProvider interface {
// ListEntries returns the full list of entries found at the remote, regardless of root
ListEntries(ctx context.Context) (entries []*Object, err error)
}
// Parse the input string as a DOI
// Examples:
// 10.1000/182 -> 10.1000/182
// https://doi.org/10.1000/182 -> 10.1000/182
// doi:10.1000/182 -> 10.1000/182
func parseDoi(doi string) string {
doiURL, err := url.Parse(doi)
if err != nil {
return doi
}
if doiURL.Scheme == "doi" {
return strings.TrimLeft(strings.TrimPrefix(doi, "doi:"), "/")
}
if strings.HasSuffix(doiURL.Hostname(), "doi.org") {
return strings.TrimLeft(doiURL.Path, "/")
}
return doi
}
// Resolve a DOI to a URL
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
func resolveDoiURL(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (doiURL *url.URL, err error) {
resolverURL := opt.DoiResolverAPIURL
if resolverURL == "" {
resolverURL = doiResolverAPIURL
}
var result api.DoiResolverResponse
params := url.Values{}
params.Add("index", "1")
opts := rest.Opts{
Method: "GET",
RootURL: resolverURL,
Path: "/handles/" + opt.Doi,
Parameters: params,
}
err = pacer.Call(func() (bool, error) {
res, err := srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, err
}
if result.ResponseCode != 1 {
return nil, fmt.Errorf("could not resolve DOI (error code %d)", result.ResponseCode)
}
resolvedURLStr := ""
for _, value := range result.Values {
if value.Type == "URL" && value.Data.Format == "string" {
valueStr, ok := value.Data.Value.(string)
if !ok {
return nil, fmt.Errorf("could not resolve DOI (incorrect response format)")
}
resolvedURLStr = valueStr
}
}
resolvedURL, err := url.Parse(resolvedURLStr)
if err != nil {
return nil, err
}
return resolvedURL, nil
}
// Resolve the passed configuration into a provider and endpoint
func resolveEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (provider Provider, endpoint *url.URL, err error) {
resolvedURL, err := resolveDoiURL(ctx, srv, pacer, opt)
if err != nil {
return "", nil, err
}
switch opt.Provider {
case string(Dataverse):
return resolveDataverseEndpoint(resolvedURL)
case string(Invenio):
return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
case string(Zenodo):
return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
}
hostname := strings.ToLower(resolvedURL.Hostname())
if hostname == "dataverse.harvard.edu" || activateDataverse(resolvedURL) {
return resolveDataverseEndpoint(resolvedURL)
}
if hostname == "zenodo.org" || strings.HasSuffix(hostname, ".zenodo.org") {
return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
}
if activateInvenio(ctx, srv, pacer, resolvedURL) {
return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
}
return "", nil, fmt.Errorf("provider '%s' is not supported", resolvedURL.Hostname())
}
// Make the http connection from the passed options
func (f *Fs) httpConnection(ctx context.Context, opt *Options) (isFile bool, err error) {
provider, endpoint, err := resolveEndpoint(ctx, f.srv, f.pacer, opt)
if err != nil {
return false, err
}
// Update f with the new parameters
f.srv.SetRoot(endpoint.ResolveReference(&url.URL{Path: "/"}).String())
f.endpoint = endpoint
f.endpointURL = endpoint.String()
f.provider = provider
f.opt.Provider = string(provider)
switch f.provider {
case Dataverse:
f.doiProvider = newDataverseProvider(f)
case Invenio, Zenodo:
f.doiProvider = newInvenioProvider(f)
default:
return false, fmt.Errorf("provider type '%s' not supported", f.provider)
}
// Determine if the root is a file
entries, err := f.doiProvider.ListEntries(ctx)
if err != nil {
return false, err
}
for _, entry := range entries {
if entry.remote == f.root {
isFile = true
break
}
}
return isFile, nil
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this res and err
// deserve to be retried. It returns the err as a convenience.
func shouldRetry(ctx context.Context, res *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(res, retryErrorCodes), err
}
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
root = strings.Trim(root, "/")
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
opt.Doi = parseDoi(opt.Doi)
client := fshttp.NewClient(ctx)
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
ci: ci,
srv: rest.NewClient(client),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
cache: cache.New(),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
isFile, err := f.httpConnection(ctx, opt)
if err != nil {
return nil, err
}
if isFile {
// return an error with an fs which points to the parent
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
}
f.root = newRoot
return f, fs.ErrorIsFile
}
return f, nil
}
// Name returns the configured name of the file system
func (f *Fs) Name() string {
return f.name
}
// Root returns the root for the filesystem
func (f *Fs) Root() string {
return f.root
}
// String returns the URL for the filesystem
func (f *Fs) String() string {
return fmt.Sprintf("DOI %s", f.opt.Doi)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Precision is the remote http file system's modtime precision, which we have no way of knowing. We estimate at 1s
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Hashes returns the hash types supported by the remote (MD5)
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return errorReadOnly
}
// Remove a remote http file object
func (o *Object) Remove(ctx context.Context) error {
return errorReadOnly
}
// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return errorReadOnly
}
// NewObject finds the remote http file object with the given path, returning fs.ErrorObjectNotFound if it does not exist
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
entries, err := f.doiProvider.ListEntries(ctx)
if err != nil {
return nil, err
}
remoteFullPath := remote
if f.root != "" {
remoteFullPath = path.Join(f.root, remote)
}
for _, entry := range entries {
if entry.Remote() == remoteFullPath {
return entry, nil
}
}
return nil, fs.ErrorObjectNotFound
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
fileEntries, err := f.doiProvider.ListEntries(ctx)
if err != nil {
return nil, fmt.Errorf("error listing %q: %w", dir, err)
}
fullDir := path.Join(f.root, dir)
if fullDir != "" {
fullDir += "/"
}
dirPaths := map[string]bool{}
for _, entry := range fileEntries {
// First, filter out files not in `fullDir`
if !strings.HasPrefix(entry.remote, fullDir) {
continue
}
// Then, find entries in subfolders
remotePath := entry.remote
if fullDir != "" {
remotePath = strings.TrimLeft(strings.TrimPrefix(remotePath, fullDir), "/")
}
parts := strings.SplitN(remotePath, "/", 2)
if len(parts) == 1 {
newEntry := *entry
newEntry.remote = path.Join(dir, remotePath)
entries = append(entries, &newEntry)
} else {
dirPaths[path.Join(dir, parts[0])] = true
}
}
for dirPath := range dirPaths {
entry := fs.NewDir(dirPath, time.Time{})
entries = append(entries, entry)
}
return entries, nil
}
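A standalone sketch of the grouping rule List applies: after stripping the root/dir prefix, a remainder with no further "/" becomes a file entry, while deeper paths contribute only their first segment, reported once as a synthesized directory. The paths below are invented for illustration.

package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	root, dir := "data", ""
	remotes := []string{"data/readme.txt", "data/raw/a.csv", "data/raw/b.csv", "other/x.txt"}

	fullDir := path.Join(root, dir)
	if fullDir != "" {
		fullDir += "/"
	}
	dirs := map[string]bool{}
	for _, remote := range remotes {
		if !strings.HasPrefix(remote, fullDir) {
			continue // outside the listed directory
		}
		rest := strings.TrimPrefix(remote, fullDir)
		parts := strings.SplitN(rest, "/", 2)
		if len(parts) == 1 {
			fmt.Println("file:", path.Join(dir, rest))
		} else {
			dirs[path.Join(dir, parts[0])] = true
		}
	}
	for d := range dirs {
		fmt.Println("dir: ", d)
	}
}
// Output (dir order may vary):
// file: readme.txt
// dir:  raw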
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return nil, errorReadOnly
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return nil, errorReadOnly
}
// Fs is the filesystem this remote http file object is located within
func (o *Object) Fs() fs.Info {
return o.fs
}
// String returns a description of the Object (its remote path)
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the name of the remote HTTP file, relative to the fs root
func (o *Object) Remote() string {
return o.remote
}
// Hash returns the MD5 checksum of the remote http file. It returns hash.ErrUnsupported for any other hash type
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
return o.md5, nil
}
// Size returns the size in bytes of the remote http file
func (o *Object) Size() int64 {
return o.size
}
// ModTime returns the modification time of the remote http file
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// SetModTime sets the modification and access time to the specified time
//
// it also updates the info field
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return errorReadOnly
}
// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
func (o *Object) Storable() bool {
return true
}
// Open a remote http file object for reading. Seek is supported
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
fs.FixRangeOption(options, o.size)
opts := rest.Opts{
Method: "GET",
RootURL: o.contentURL,
Options: options,
}
var res *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, fmt.Errorf("Open failed: %w", err)
}
// Handle non-compliant redirects
if res.Header.Get("Location") != "" {
newURL, err := res.Location()
if err == nil {
opts.RootURL = newURL.String()
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, fmt.Errorf("Open failed: %w", err)
}
}
}
return res.Body, nil
}
// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return errorReadOnly
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
return o.contentType
}
var commandHelp = []fs.CommandHelp{{
Name: "metadata",
Short: "Show metadata about the DOI.",
Long: `This command returns a JSON object with some information about the DOI.
Usage example:
` + "```console" + `
rclone backend metadata doi:
` + "```" + `
It returns a JSON object representing metadata about the DOI.`,
}, {
Name: "set",
Short: "Set command for updating the config parameters.",
Long: `This set command can be used to update the config parameters
for a running doi backend.
Usage examples:
` + "```console" + `
rclone backend set doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=doi: -o doi=NEW_DOI
` + "```" + `
The option keys are named as they are in the config file.
This rebuilds the connection to the doi backend when it is called with
the new parameters. Only new parameters need be passed as the values
will default to those currently in use.
It doesn't return anything.`,
}}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
switch name {
case "metadata":
return f.ShowMetadata(ctx)
case "set":
newOpt := f.opt
err := configstruct.Set(configmap.Simple(opt), &newOpt)
if err != nil {
return nil, fmt.Errorf("reading config: %w", err)
}
_, err = f.httpConnection(ctx, &newOpt)
if err != nil {
return nil, fmt.Errorf("updating session: %w", err)
}
f.opt = newOpt
keys := []string{}
for k := range opt {
keys = append(keys, k)
}
fs.Logf(f, "Updated config values: %s", strings.Join(keys, ", "))
return nil, nil
default:
return nil, fs.ErrorCommandNotFound
}
}
// ShowMetadata returns some metadata about the corresponding DOI
func (f *Fs) ShowMetadata(ctx context.Context) (metadata any, err error) {
doiURL, err := url.Parse("https://doi.org/" + f.opt.Doi)
if err != nil {
return nil, err
}
info := map[string]any{}
info["DOI"] = f.opt.Doi
info["URL"] = doiURL.String()
info["metadataURL"] = f.endpointURL
info["provider"] = f.provider
return info, nil
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Commander = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
)


@@ -1,260 +0,0 @@
package doi
import (
"context"
"crypto/md5"
"encoding/hex"
"encoding/json"
"io"
"net/http"
"net/http/httptest"
"net/url"
"sort"
"strings"
"testing"
"time"
"github.com/rclone/rclone/backend/doi/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/hash"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var remoteName = "TestDoi"
func TestParseDoi(t *testing.T) {
// 10.1000/182 -> 10.1000/182
doi := "10.1000/182"
parsed := parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
// https://doi.org/10.1000/182 -> 10.1000/182
doi = "https://doi.org/10.1000/182"
parsed = parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
// https://dx.doi.org/10.1000/182 -> 10.1000/182
doi = "https://dxdoi.org/10.1000/182"
parsed = parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
// doi:10.1000/182 -> 10.1000/182
doi = "doi:10.1000/182"
parsed = parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
// doi://10.1000/182 -> 10.1000/182
doi = "doi://10.1000/182"
parsed = parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
}
// prepareMockDoiResolverServer prepares a test server to resolve DOIs
func prepareMockDoiResolverServer(t *testing.T, resolvedURL string) (doiResolverAPIURL string) {
mux := http.NewServeMux()
// Handle requests for resolving DOIs
mux.HandleFunc("GET /api/handles/{handle...}", func(w http.ResponseWriter, r *http.Request) {
// Check that we are resolving a DOI
handle := strings.TrimPrefix(r.URL.Path, "/api/handles/")
assert.NotEmpty(t, handle)
index := r.URL.Query().Get("index")
assert.Equal(t, "1", index)
// Return the most basic response
result := api.DoiResolverResponse{
ResponseCode: 1,
Handle: handle,
Values: []api.DoiResolverResponseValue{
{
Index: 1,
Type: "URL",
Data: api.DoiResolverResponseValueData{
Format: "string",
Value: resolvedURL,
},
},
},
}
resultBytes, err := json.Marshal(result)
require.NoError(t, err)
w.Header().Add("Content-Type", "application/json")
_, err = w.Write(resultBytes)
require.NoError(t, err)
})
// Make the test server
ts := httptest.NewServer(mux)
// Close the server at the end of the test
t.Cleanup(ts.Close)
return ts.URL + "/api"
}
func md5Sum(text string) string {
hash := md5.Sum([]byte(text))
return hex.EncodeToString(hash[:])
}
// prepareMockZenodoServer prepares a test server that mocks Zenodo.org
func prepareMockZenodoServer(t *testing.T, files map[string]string) *httptest.Server {
mux := http.NewServeMux()
// Handle requests for a single record
mux.HandleFunc("GET /api/records/{recordID...}", func(w http.ResponseWriter, r *http.Request) {
// Check that we are returning data about a single record
recordID := strings.TrimPrefix(r.URL.Path, "/api/records/")
assert.NotEmpty(t, recordID)
// Return the most basic response
selfURL, err := url.Parse("http://" + r.Host)
require.NoError(t, err)
selfURL = selfURL.JoinPath(r.URL.String())
result := api.InvenioRecordResponse{
Links: api.InvenioRecordResponseLinks{
Self: selfURL.String(),
},
}
resultBytes, err := json.Marshal(result)
require.NoError(t, err)
w.Header().Add("Content-Type", "application/json")
_, err = w.Write(resultBytes)
require.NoError(t, err)
})
// Handle requests for listing files in a record
mux.HandleFunc("GET /api/records/{record}/files", func(w http.ResponseWriter, r *http.Request) {
// Return the most basic response
filesBaseURL, err := url.Parse("http://" + r.Host)
require.NoError(t, err)
filesBaseURL = filesBaseURL.JoinPath("/api/files/")
entries := []api.InvenioFilesResponseEntry{}
for filename, contents := range files {
entries = append(entries,
api.InvenioFilesResponseEntry{
Key: filename,
Checksum: md5Sum(contents),
Size: int64(len(contents)),
Updated: time.Now().UTC().Format(time.RFC3339),
MimeType: "text/plain; charset=utf-8",
Links: api.InvenioFilesResponseEntryLinks{
Content: filesBaseURL.JoinPath(filename).String(),
},
},
)
}
result := api.InvenioFilesResponse{
Entries: entries,
}
resultBytes, err := json.Marshal(result)
require.NoError(t, err)
w.Header().Add("Content-Type", "application/json")
_, err = w.Write(resultBytes)
require.NoError(t, err)
})
// Handle requests for file contents
mux.HandleFunc("/api/files/{file}", func(w http.ResponseWriter, r *http.Request) {
// Check that we are returning the contents of a file
filename := strings.TrimPrefix(r.URL.Path, "/api/files/")
assert.NotEmpty(t, filename)
contents, found := files[filename]
if !found {
w.WriteHeader(404)
return
}
// Return the most basic response
_, err := w.Write([]byte(contents))
require.NoError(t, err)
})
// Make the test server
ts := httptest.NewServer(mux)
// Close the server at the end of the test
t.Cleanup(ts.Close)
return ts
}
func TestZenodoRemote(t *testing.T) {
recordID := "2600782"
doi := "10.5281/zenodo.2600782"
// The files in the dataset
files := map[string]string{
"README.md": "This is a dataset.",
"data.txt": "Some data",
}
ts := prepareMockZenodoServer(t, files)
resolvedURL := ts.URL + "/record/" + recordID
doiResolverAPIURL := prepareMockDoiResolverServer(t, resolvedURL)
testConfig := configmap.Simple{
"type": "doi",
"doi": doi,
"provider": "zenodo",
"doi_resolver_api_url": doiResolverAPIURL,
}
f, err := NewFs(context.Background(), remoteName, "", testConfig)
require.NoError(t, err)
// Test listing the DOI files
entries, err := f.List(context.Background(), "")
require.NoError(t, err)
sort.Sort(entries)
require.Equal(t, len(files), len(entries))
e := entries[0]
assert.Equal(t, "README.md", e.Remote())
assert.Equal(t, int64(18), e.Size())
_, ok := e.(*Object)
assert.True(t, ok)
e = entries[1]
assert.Equal(t, "data.txt", e.Remote())
assert.Equal(t, int64(9), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
// Test reading the DOI files
o, err := f.NewObject(context.Background(), "README.md")
require.NoError(t, err)
assert.Equal(t, int64(18), o.Size())
md5Hash, err := o.Hash(context.Background(), hash.MD5)
require.NoError(t, err)
assert.Equal(t, "464352b1cab5240e44528a56fda33d9d", md5Hash)
fd, err := o.Open(context.Background())
require.NoError(t, err)
data, err := io.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, []byte(files["README.md"]), data)
do, ok := o.(fs.MimeTyper)
require.True(t, ok)
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
o, err = f.NewObject(context.Background(), "data.txt")
require.NoError(t, err)
assert.Equal(t, int64(9), o.Size())
md5Hash, err = o.Hash(context.Background(), hash.MD5)
require.NoError(t, err)
assert.Equal(t, "5b82f8bf4df2bfb0e66ccaa7306fd024", md5Hash)
fd, err = o.Open(context.Background())
require.NoError(t, err)
data, err = io.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, []byte(files["data.txt"]), data)
do, ok = o.(fs.MimeTyper)
require.True(t, ok)
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
}


@@ -1,16 +0,0 @@
// Test DOI filesystem interface
package doi
import (
"testing"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestDoi:",
NilObject: (*Object)(nil),
})
}


@@ -1,164 +0,0 @@
// Implementation for InvenioRDM
package doi
import (
"context"
"fmt"
"net/http"
"net/url"
"regexp"
"strings"
"time"
"github.com/rclone/rclone/backend/doi/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/rest"
)
var invenioRecordRegex = regexp.MustCompile(`\/records?\/(.+)`)
// Returns true if resolvedURL is likely a DOI hosted on an InvenioRDM intallation
func activateInvenio(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (isActive bool) {
_, _, err := resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
return err == nil
}
// Resolve the main API endpoint for a DOI hosted on an InvenioRDM installation
func resolveInvenioEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (provider Provider, endpoint *url.URL, err error) {
var res *http.Response
opts := rest.Opts{
Method: "GET",
RootURL: resolvedURL.String(),
}
err = pacer.Call(func() (bool, error) {
res, err = srv.Call(ctx, &opts)
return shouldRetry(ctx, res, err)
})
if err != nil {
return "", nil, err
}
// First, attempt to grab the API URL from the headers
var linksetURL *url.URL
links := parseLinkHeader(res.Header.Get("Link"))
for _, link := range links {
if link.Rel == "linkset" && link.Type == "application/linkset+json" {
parsed, err := url.Parse(link.Href)
if err == nil {
linksetURL = parsed
break
}
}
}
if linksetURL != nil {
endpoint, err = checkInvenioAPIURL(ctx, srv, pacer, linksetURL)
if err == nil {
return Invenio, endpoint, nil
}
fs.Logf(nil, "using linkset URL failed: %s", err.Error())
}
// If there is no linkset header, try to grab the record ID from the URL
recordID := ""
resURL := res.Request.URL
match := invenioRecordRegex.FindStringSubmatch(resURL.EscapedPath())
if match != nil {
recordID = match[1]
guessedURL := res.Request.URL.ResolveReference(&url.URL{
Path: "/api/records/" + recordID,
})
endpoint, err = checkInvenioAPIURL(ctx, srv, pacer, guessedURL)
if err == nil {
return Invenio, endpoint, nil
}
fs.Logf(nil, "guessing the URL failed: %s", err.Error())
}
return "", nil, fmt.Errorf("could not resolve the Invenio API endpoint for '%s'", resolvedURL.String())
}
func checkInvenioAPIURL(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (endpoint *url.URL, err error) {
var result api.InvenioRecordResponse
opts := rest.Opts{
Method: "GET",
RootURL: resolvedURL.String(),
}
err = pacer.Call(func() (bool, error) {
res, err := srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, err
}
if result.Links.Self == "" {
return nil, fmt.Errorf("could not parse API response from '%s'", resolvedURL.String())
}
return url.Parse(result.Links.Self)
}
// invenioProvider implements the doiProvider interface for InvenioRDM installations
type invenioProvider struct {
f *Fs
}
// ListEntries returns the full list of entries found at the remote, regardless of root
func (ip *invenioProvider) ListEntries(ctx context.Context) (entries []*Object, err error) {
// Use the cache if populated
cachedEntries, found := ip.f.cache.GetMaybe("files")
if found {
parsedEntries, ok := cachedEntries.([]Object)
if ok {
for _, entry := range parsedEntries {
newEntry := entry
entries = append(entries, &newEntry)
}
return entries, nil
}
}
filesURL := ip.f.endpoint.JoinPath("files")
var result api.InvenioFilesResponse
opts := rest.Opts{
Method: "GET",
Path: strings.TrimLeft(filesURL.EscapedPath(), "/"),
}
err = ip.f.pacer.Call(func() (bool, error) {
res, err := ip.f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, fmt.Errorf("readDir failed: %w", err)
}
for _, file := range result.Entries {
modTime, modTimeErr := time.Parse(time.RFC3339, file.Updated)
if modTimeErr != nil {
fs.Logf(ip.f, "error: could not parse last update time %v", modTimeErr)
modTime = timeUnset
}
entry := &Object{
fs: ip.f,
remote: file.Key,
contentURL: file.Links.Content,
size: file.Size,
modTime: modTime,
contentType: file.MimeType,
md5: strings.TrimPrefix(file.Checksum, "md5:"),
}
entries = append(entries, entry)
}
// Populate the cache
cacheEntries := []Object{}
for _, entry := range entries {
cacheEntries = append(cacheEntries, *entry)
}
ip.f.cache.Put("files", cacheEntries)
return entries, nil
}
func newInvenioProvider(f *Fs) doiProvider {
return &invenioProvider{
f: f,
}
}


@@ -1,75 +0,0 @@
package doi
import (
"regexp"
"strings"
)
var linkRegex = regexp.MustCompile(`^<(.+)>$`)
var valueRegex = regexp.MustCompile(`^"(.+)"$`)
// headerLink represents a link as presented in HTTP headers
// MDN Reference: https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Link
type headerLink struct {
Href string
Rel string
Type string
Extras map[string]string
}
func parseLinkHeader(header string) (links []headerLink) {
for link := range strings.SplitSeq(header, ",") {
link = strings.TrimSpace(link)
parsed := parseLink(link)
if parsed != nil {
links = append(links, *parsed)
}
}
return links
}
func parseLink(link string) (parsedLink *headerLink) {
var parts []string
for part := range strings.SplitSeq(link, ";") {
parts = append(parts, strings.TrimSpace(part))
}
match := linkRegex.FindStringSubmatch(parts[0])
if match == nil {
return nil
}
result := &headerLink{
Href: match[1],
Extras: map[string]string{},
}
for _, keyValue := range parts[1:] {
parsed := parseKeyValue(keyValue)
if parsed != nil {
key, value := parsed[0], parsed[1]
switch strings.ToLower(key) {
case "rel":
result.Rel = value
case "type":
result.Type = value
default:
result.Extras[key] = value
}
}
}
return result
}
func parseKeyValue(keyValue string) []string {
parts := strings.SplitN(keyValue, "=", 2)
if parts[0] == "" || len(parts) < 2 {
return nil
}
match := valueRegex.FindStringSubmatch(parts[1])
if match != nil {
parts[1] = match[1]
return parts
}
return parts
}


@@ -1,44 +0,0 @@
package doi
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestParseLinkHeader(t *testing.T) {
header := "<https://zenodo.org/api/records/15063252> ; rel=\"linkset\" ; type=\"application/linkset+json\""
links := parseLinkHeader(header)
expected := headerLink{
Href: "https://zenodo.org/api/records/15063252",
Rel: "linkset",
Type: "application/linkset+json",
Extras: map[string]string{},
}
assert.Contains(t, links, expected)
header = "<https://api.example.com/issues?page=2>; rel=\"prev\", <https://api.example.com/issues?page=4>; rel=\"next\", <https://api.example.com/issues?page=10>; rel=\"last\", <https://api.example.com/issues?page=1>; rel=\"first\""
links = parseLinkHeader(header)
expectedList := []headerLink{{
Href: "https://api.example.com/issues?page=2",
Rel: "prev",
Type: "",
Extras: map[string]string{},
}, {
Href: "https://api.example.com/issues?page=4",
Rel: "next",
Type: "",
Extras: map[string]string{},
}, {
Href: "https://api.example.com/issues?page=10",
Rel: "last",
Type: "",
Extras: map[string]string{},
}, {
Href: "https://api.example.com/issues?page=1",
Rel: "first",
Type: "",
Extras: map[string]string{},
}}
assert.Equal(t, links, expectedList)
}


@@ -1,47 +0,0 @@
// Implementation for Zenodo
package doi
import (
"context"
"fmt"
"net/url"
"regexp"
"github.com/rclone/rclone/backend/doi/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/rest"
)
var zenodoRecordRegex = regexp.MustCompile(`zenodo[.](.+)`)
// Resolve the main API endpoint for a DOI hosted on Zenodo
func resolveZenodoEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL, doi string) (provider Provider, endpoint *url.URL, err error) {
match := zenodoRecordRegex.FindStringSubmatch(doi)
if match == nil {
return "", nil, fmt.Errorf("could not derive API endpoint URL from '%s'", resolvedURL.String())
}
recordID := match[1]
endpointURL := resolvedURL.ResolveReference(&url.URL{Path: "/api/records/" + recordID})
var result api.InvenioRecordResponse
opts := rest.Opts{
Method: "GET",
RootURL: endpointURL.String(),
}
err = pacer.Call(func() (bool, error) {
res, err := srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, res, err)
})
if err != nil {
return "", nil, err
}
endpointURL, err = url.Parse(result.Links.Self)
if err != nil {
return "", nil, err
}
return Zenodo, endpointURL, nil
}
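For illustration, the record ID extraction and endpoint construction performed above boil down to the following (the DOI and record URL are taken from the test file; the rest is a sketch, not the backend's code path, which additionally validates the endpoint with an API call):

package main

import (
	"fmt"
	"net/url"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`zenodo[.](.+)`)
	doi := "10.5281/zenodo.2600782"
	m := re.FindStringSubmatch(doi)
	if m == nil {
		fmt.Println("not a Zenodo DOI")
		return
	}
	// resolved landing page for the DOI
	base, _ := url.Parse("https://zenodo.org/record/2600782")
	endpoint := base.ResolveReference(&url.URL{Path: "/api/records/" + m[1]})
	fmt.Println(endpoint) // https://zenodo.org/api/records/2600782
}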


@@ -18,7 +18,6 @@ import (
"net/http" "net/http"
"os" "os"
"path" "path"
"slices"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
@@ -38,8 +37,8 @@ import (
"github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/dircache" "github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env" "github.com/rclone/rclone/lib/env"
@@ -81,10 +80,9 @@ const (
// Globals // Globals
var ( var (
// Description of how to auth for this app // Description of how to auth for this app
driveConfig = &oauthutil.Config{ driveConfig = &oauth2.Config{
Scopes: []string{scopePrefix + "drive"}, Scopes: []string{scopePrefix + "drive"},
AuthURL: google.Endpoint.AuthURL, Endpoint: google.Endpoint,
TokenURL: google.Endpoint.TokenURL,
ClientID: rcloneClientID, ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL, RedirectURL: oauthutil.RedirectURL,
@@ -191,7 +189,7 @@ func driveScopes(scopesString string) (scopes []string) {
if scopesString == "" { if scopesString == "" {
scopesString = defaultScope scopesString = defaultScope
} }
for scope := range strings.SplitSeq(scopesString, ",") { for _, scope := range strings.Split(scopesString, ",") {
scope = strings.TrimSpace(scope) scope = strings.TrimSpace(scope)
scopes = append(scopes, scopePrefix+scope) scopes = append(scopes, scopePrefix+scope)
} }
@@ -200,7 +198,13 @@ func driveScopes(scopesString string) (scopes []string) {
// Returns true if one of the scopes was "drive.appfolder" // Returns true if one of the scopes was "drive.appfolder"
func driveScopesContainsAppFolder(scopes []string) bool { func driveScopesContainsAppFolder(scopes []string) bool {
return slices.Contains(scopes, scopePrefix+"drive.appfolder") for _, scope := range scopes {
if scope == scopePrefix+"drive.appfolder" {
return true
}
}
return false
} }
func driveOAuthOptions() []fs.Option { func driveOAuthOptions() []fs.Option {
@@ -954,7 +958,12 @@ func parseDrivePath(path string) (root string, err error) {
type listFn func(*drive.File) bool type listFn func(*drive.File) bool
func containsString(slice []string, s string) bool { func containsString(slice []string, s string) bool {
return slices.Contains(slice, s) for _, e := range slice {
if e == s {
return true
}
}
return false
} }
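The drive.go hunks above replace hand-rolled membership loops with the standard library's slices.Contains, available since Go 1.21. A tiny sketch of the equivalence, using an invented slice:

package main

import (
	"fmt"
	"slices"
)

func main() {
	spaces := []string{"drive", "photos"}

	// hand-rolled loop, as on the right-hand (old) side of the diff
	found := false
	for _, s := range spaces {
		if s == "photos" {
			found = true
			break
		}
	}

	// equivalent call, as on the left-hand (new) side of the diff
	fmt.Println(found, slices.Contains(spaces, "photos")) // true true
}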
// getFile returns drive.File for the ID passed and fields passed in // getFile returns drive.File for the ID passed and fields passed in
@@ -1143,7 +1152,13 @@ OUTER:
// Check the case of items is correct since // Check the case of items is correct since
// the `=` operator is case insensitive. // the `=` operator is case insensitive.
if title != "" && title != item.Name { if title != "" && title != item.Name {
found := slices.Contains(stems, item.Name) found := false
for _, stem := range stems {
if stem == item.Name {
found = true
break
}
}
if !found { if !found {
continue continue
} }
@@ -1196,7 +1211,6 @@ func fixMimeType(mimeTypeIn string) string {
} }
return mimeTypeOut return mimeTypeOut
} }
func fixMimeTypeMap(in map[string][]string) (out map[string][]string) { func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
out = make(map[string][]string, len(in)) out = make(map[string][]string, len(in))
for k, v := range in { for k, v := range in {
@@ -1207,11 +1221,9 @@ func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
} }
return out return out
} }
func isInternalMimeType(mimeType string) bool { func isInternalMimeType(mimeType string) bool {
return strings.HasPrefix(mimeType, "application/vnd.google-apps.") return strings.HasPrefix(mimeType, "application/vnd.google-apps.")
} }
func isLinkMimeType(mimeType string) bool { func isLinkMimeType(mimeType string) bool {
return strings.HasPrefix(mimeType, "application/x-link-") return strings.HasPrefix(mimeType, "application/x-link-")
} }
@@ -1220,7 +1232,7 @@ func isLinkMimeType(mimeType string) bool {
// into a list of unique extensions with leading "." and a list of associated MIME types // into a list of unique extensions with leading "." and a list of associated MIME types
func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, err error) { func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, err error) {
for _, extensionText := range extensionsIn { for _, extensionText := range extensionsIn {
for extension := range strings.SplitSeq(extensionText, ",") { for _, extension := range strings.Split(extensionText, ",") {
extension = strings.ToLower(strings.TrimSpace(extension)) extension = strings.ToLower(strings.TrimSpace(extension))
if extension == "" { if extension == "" {
continue continue
@@ -1546,10 +1558,13 @@ func (f *Fs) getFileFields(ctx context.Context) (fields googleapi.Field) {
func (f *Fs) newRegularObject(ctx context.Context, remote string, info *drive.File) (obj fs.Object, err error) { func (f *Fs) newRegularObject(ctx context.Context, remote string, info *drive.File) (obj fs.Object, err error) {
// wipe checksum if SkipChecksumGphotos and file is type Photo or Video // wipe checksum if SkipChecksumGphotos and file is type Photo or Video
if f.opt.SkipChecksumGphotos { if f.opt.SkipChecksumGphotos {
if slices.Contains(info.Spaces, "photos") { for _, space := range info.Spaces {
info.Md5Checksum = "" if space == "photos" {
info.Sha1Checksum = "" info.Md5Checksum = ""
info.Sha256Checksum = "" info.Sha1Checksum = ""
info.Sha256Checksum = ""
break
}
} }
} }
o := &Object{ o := &Object{
@@ -1641,8 +1656,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.F
// When the drive.File cannot be represented as an fs.Object it will return (nil, nil). // When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithExportInfo( func (f *Fs) newObjectWithExportInfo(
ctx context.Context, remote string, info *drive.File, ctx context.Context, remote string, info *drive.File,
extension, exportName, exportMimeType string, isDocument bool, extension, exportName, exportMimeType string, isDocument bool) (o fs.Object, err error) {
) (o fs.Object, err error) {
// Note that resolveShortcut will have been called already if // Note that resolveShortcut will have been called already if
// we are being called from a listing. However the drive.Item // we are being called from a listing. However the drive.Item
// will have been resolved so this will do nothing. // will have been resolved so this will do nothing.
@@ -1745,7 +1759,7 @@ func (f *Fs) createDir(ctx context.Context, pathID, leaf string, metadata fs.Met
} }
var updateMetadata updateMetadataFn var updateMetadata updateMetadataFn
if len(metadata) > 0 { if len(metadata) > 0 {
updateMetadata, err = f.updateMetadata(ctx, createInfo, metadata, true, true) updateMetadata, err = f.updateMetadata(ctx, createInfo, metadata, true)
if err != nil { if err != nil {
return nil, fmt.Errorf("create dir: failed to update metadata: %w", err) return nil, fmt.Errorf("create dir: failed to update metadata: %w", err)
} }
@@ -1776,7 +1790,7 @@ func (f *Fs) updateDir(ctx context.Context, dirID string, metadata fs.Metadata)
} }
dirID = actualID(dirID) dirID = actualID(dirID)
updateInfo := &drive.File{} updateInfo := &drive.File{}
updateMetadata, err := f.updateMetadata(ctx, updateInfo, metadata, true, true) updateMetadata, err := f.updateMetadata(ctx, updateInfo, metadata, true)
if err != nil { if err != nil {
return nil, fmt.Errorf("update dir: failed to update metadata from source object: %w", err) return nil, fmt.Errorf("update dir: failed to update metadata from source object: %w", err)
} }
@@ -1833,7 +1847,6 @@ func linkTemplate(mt string) *template.Template {
}) })
return _linkTemplates[mt] return _linkTemplates[mt]
} }
func (f *Fs) fetchFormats(ctx context.Context) { func (f *Fs) fetchFormats(ctx context.Context) {
fetchFormatsOnce.Do(func() { fetchFormatsOnce.Do(func() {
var about *drive.About var about *drive.About
@@ -1879,8 +1892,7 @@ func (f *Fs) importFormats(ctx context.Context) map[string][]string {
// Look through the exportExtensions and find the first format that can be // Look through the exportExtensions and find the first format that can be
// converted. If none found then return ("", "", false) // converted. If none found then return ("", "", false)
func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string) ( func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string) (
extension, mimeType string, isDocument bool, extension, mimeType string, isDocument bool) {
) {
exportMimeTypes, isDocument := f.exportFormats(ctx)[itemMimeType] exportMimeTypes, isDocument := f.exportFormats(ctx)[itemMimeType]
if isDocument { if isDocument {
for _, _extension := range f.exportExtensions { for _, _extension := range f.exportExtensions {
@@ -1965,28 +1977,9 @@ func (f *Fs) findImportFormat(ctx context.Context, mimeType string) string {
// This should return ErrDirNotFound if the directory isn't // This should return ErrDirNotFound if the directory isn't
// found. // found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
entriesAdded := 0
directoryID, err := f.dirCache.FindDir(ctx, dir, false) directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil { if err != nil {
return err return nil, err
} }
directoryID = actualID(directoryID) directoryID = actualID(directoryID)
@@ -1998,30 +1991,25 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) e
return true return true
} }
if entry != nil { if entry != nil {
err = list.Add(entry) entries = append(entries, entry)
if err != nil {
iErr = err
return true
}
entriesAdded++
} }
return false return false
}) })
if err != nil { if err != nil {
return err return nil, err
} }
if iErr != nil { if iErr != nil {
return iErr return nil, iErr
} }
// If listing the root of a teamdrive and got no entries, // If listing the root of a teamdrive and got no entries,
// double check we have access // double check we have access
if f.isTeamDrive && entriesAdded == 0 && f.root == "" && dir == "" { if f.isTeamDrive && len(entries) == 0 && f.root == "" && dir == "" {
err = f.teamDriveOK(ctx) err = f.teamDriveOK(ctx)
if err != nil { if err != nil {
return err return nil, err
} }
} }
return list.Flush() return entries, nil
} }
// listREntry is a task to be executed by a listRRunner // listREntry is a task to be executed by a listRRunner
@@ -2213,7 +2201,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
wg := sync.WaitGroup{} wg := sync.WaitGroup{}
in := make(chan listREntry, listRInputBuffer) in := make(chan listREntry, listRInputBuffer)
out := make(chan error, f.ci.Checkers) out := make(chan error, f.ci.Checkers)
list := list.NewHelper(callback) list := walk.NewListRHelper(callback)
overflow := []listREntry{} overflow := []listREntry{}
listed := 0 listed := 0
@@ -2251,7 +2239,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
wg.Add(1) wg.Add(1)
in <- listREntry{directoryID, dir} in <- listREntry{directoryID, dir}
for range f.ci.Checkers { for i := 0; i < f.ci.Checkers; i++ {
go f.listRRunner(ctx, &wg, in, out, cb, sendJob) go f.listRRunner(ctx, &wg, in, out, cb, sendJob)
} }
go func() { go func() {
@@ -2260,8 +2248,11 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
// if the input channel overflowed add the collected entries to the channel now // if the input channel overflowed add the collected entries to the channel now
for len(overflow) > 0 { for len(overflow) > 0 {
mu.Lock() mu.Lock()
l := len(overflow)
// only fill half of the channel to prevent entries being put into overflow again // only fill half of the channel to prevent entries being put into overflow again
l := min(len(overflow), listRInputBuffer/2) if l > listRInputBuffer/2 {
l = listRInputBuffer / 2
}
wg.Add(l) wg.Add(l)
for _, d := range overflow[:l] { for _, d := range overflow[:l] {
in <- d in <- d
@@ -2281,7 +2272,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
mu.Unlock() mu.Unlock()
}() }()
// wait until all the workers have finished // wait until all the workers have finished
for range f.ci.Checkers { for i := 0; i < f.ci.Checkers; i++ {
e := <-out e := <-out
mu.Lock() mu.Lock()
// if one worker returns an error early, close the input so all other workers exit // if one worker returns an error early, close the input so all other workers exit
@@ -2697,7 +2688,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
if shortcutID != "" { if shortcutID != "" {
return f.delete(ctx, shortcutID, f.opt.UseTrash) return f.delete(ctx, shortcutID, f.opt.UseTrash)
} }
trashedFiles := false var trashedFiles = false
if check { if check {
found, err := f.list(ctx, []string{directoryID}, "", false, false, f.opt.TrashedOnly, true, func(item *drive.File) bool { found, err := f.list(ctx, []string{directoryID}, "", false, false, f.opt.TrashedOnly, true, func(item *drive.File) bool {
if !item.Trashed { if !item.Trashed {
@@ -2934,6 +2925,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
err := f.svc.Files.EmptyTrash().Context(ctx).Do() err := f.svc.Files.EmptyTrash().Context(ctx).Do()
return f.shouldRetry(ctx, err) return f.shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
return err return err
} }
@@ -3194,7 +3186,6 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
} }
}() }()
} }
func (f *Fs) changeNotifyStartPageToken(ctx context.Context) (pageToken string, err error) { func (f *Fs) changeNotifyStartPageToken(ctx context.Context) (pageToken string, err error) {
var startPageToken *drive.StartPageToken var startPageToken *drive.StartPageToken
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
@@ -3533,14 +3524,14 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
return f.unTrash(ctx, dir, directoryID, true) return f.unTrash(ctx, dir, directoryID, true)
} }
// copy or move file with id to dest // copy file with id to dest
func (f *Fs) copyOrMoveID(ctx context.Context, operation string, id, dest string) (err error) { func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
info, err := f.getFile(ctx, id, f.getFileFields(ctx)) info, err := f.getFile(ctx, id, f.getFileFields(ctx))
if err != nil { if err != nil {
return fmt.Errorf("couldn't find id: %w", err) return fmt.Errorf("couldn't find id: %w", err)
} }
if info.MimeType == driveFolderType { if info.MimeType == driveFolderType {
return fmt.Errorf("can't %s directory use: rclone %s --drive-root-folder-id %s %s %s", operation, operation, id, fs.ConfigString(f), dest) return fmt.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
} }
info.Name = f.opt.Enc.ToStandardName(info.Name) info.Name = f.opt.Enc.ToStandardName(info.Name)
o, err := f.newObjectWithInfo(ctx, info.Name, info) o, err := f.newObjectWithInfo(ctx, info.Name, info)
@@ -3561,15 +3552,9 @@ func (f *Fs) copyOrMoveID(ctx context.Context, operation string, id, dest string
if err != nil { if err != nil {
return err return err
} }
_, err = operations.Copy(ctx, dstFs, nil, destLeaf, o)
var opErr error if err != nil {
if operation == "moveid" { return fmt.Errorf("copy failed: %w", err)
_, opErr = operations.Move(ctx, dstFs, nil, destLeaf, o)
} else {
_, opErr = operations.Copy(ctx, dstFs, nil, destLeaf, o)
}
if opErr != nil {
return fmt.Errorf("%s failed: %w", operation, opErr)
} }
return nil return nil
} }
@@ -3664,47 +3649,41 @@ func (f *Fs) rescue(ctx context.Context, dirID string, delete bool) (err error)
var commandHelp = []fs.CommandHelp{{ var commandHelp = []fs.CommandHelp{{
Name: "get", Name: "get",
Short: "Get command for fetching the drive config parameters.", Short: "Get command for fetching the drive config parameters",
Long: `This is a get command which will be used to fetch the various drive config Long: `This is a get command which will be used to fetch the various drive config parameters
parameters.
Usage examples: Usage Examples:
` + "```console" + ` rclone backend get drive: [-o service_account_file] [-o chunk_size]
rclone backend get drive: [-o service_account_file] [-o chunk_size] rclone rc backend/command command=get fs=drive: [-o service_account_file] [-o chunk_size]
rclone rc backend/command command=get fs=drive: [-o service_account_file] [-o chunk_size] `,
` + "```",
Opts: map[string]string{ Opts: map[string]string{
"chunk_size": "Show the current upload chunk size.", "chunk_size": "show the current upload chunk size",
"service_account_file": "Show the current service account file.", "service_account_file": "show the current service account file",
}, },
}, { }, {
Name: "set", Name: "set",
Short: "Set command for updating the drive config parameters.", Short: "Set command for updating the drive config parameters",
Long: `This is a set command which will be used to update the various drive config Long: `This is a set command which will be used to update the various drive config parameters
parameters.
Usage examples: Usage Examples:
` + "```console" + ` rclone backend set drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
rclone backend set drive: [-o service_account_file=sa.json] [-o chunk_size=67108864] rclone rc backend/command command=set fs=drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
rclone rc backend/command command=set fs=drive: [-o service_account_file=sa.json] [-o chunk_size=67108864] `,
` + "```",
Opts: map[string]string{ Opts: map[string]string{
"chunk_size": "Update the current upload chunk size.", "chunk_size": "update the current upload chunk size",
"service_account_file": "Update the current service account file.", "service_account_file": "update the current service account file",
}, },
}, { }, {
Name: "shortcut", Name: "shortcut",
Short: "Create shortcuts from files or directories.", Short: "Create shortcuts from files or directories",
Long: `This command creates shortcuts from files or directories. Long: `This command creates shortcuts from files or directories.
Usage examples: Usage:
` + "```console" + ` rclone backend shortcut drive: source_item destination_shortcut
rclone backend shortcut drive: source_item destination_shortcut rclone backend shortcut drive: source_item -o target=drive2: destination_shortcut
rclone backend shortcut drive: source_item -o target=drive2: destination_shortcut
` + "```" + `
In the first example this creates a shortcut from the "source_item" In the first example this creates a shortcut from the "source_item"
which can be a file or a directory to the "destination_shortcut". The which can be a file or a directory to the "destination_shortcut". The
@@ -3714,100 +3693,90 @@ from "drive:"
In the second example this creates a shortcut from the "source_item" In the second example this creates a shortcut from the "source_item"
relative to "drive:" to the "destination_shortcut" relative to relative to "drive:" to the "destination_shortcut" relative to
"drive2:". This may fail with a permission error if the user "drive2:". This may fail with a permission error if the user
authenticated with "drive2:" can't read files from "drive:".`, authenticated with "drive2:" can't read files from "drive:".
`,
Opts: map[string]string{ Opts: map[string]string{
"target": "Optional target remote for the shortcut destination.", "target": "optional target remote for the shortcut destination",
}, },
}, { }, {
Name: "drives", Name: "drives",
Short: "List the Shared Drives available to this account.", Short: "List the Shared Drives available to this account",
Long: `This command lists the Shared Drives (Team Drives) available to this Long: `This command lists the Shared Drives (Team Drives) available to this
account. account.
Usage example: Usage:
` + "```console" + ` rclone backend [-o config] drives drive:
rclone backend [-o config] drives drive:
` + "```" + `
This will return a JSON list of objects like this: This will return a JSON list of objects like this
` + "```json" + ` [
[ {
{ "id": "0ABCDEF-01234567890",
"id": "0ABCDEF-01234567890", "kind": "drive#teamDrive",
"kind": "drive#teamDrive", "name": "My Drive"
"name": "My Drive" },
}, {
{ "id": "0ABCDEFabcdefghijkl",
"id": "0ABCDEFabcdefghijkl", "kind": "drive#teamDrive",
"kind": "drive#teamDrive", "name": "Test Drive"
"name": "Test Drive" }
} ]
]
` + "```" + `
With the -o config parameter it will output the list in a format With the -o config parameter it will output the list in a format
suitable for adding to a config file to make aliases for all the suitable for adding to a config file to make aliases for all the
drives found and a combined drive. drives found and a combined drive.
` + "```ini" + ` [My Drive]
[My Drive] type = alias
type = alias remote = drive,team_drive=0ABCDEF-01234567890,root_folder_id=:
remote = drive,team_drive=0ABCDEF-01234567890,root_folder_id=:
[Test Drive] [Test Drive]
type = alias type = alias
remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=: remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:
[AllDrives] [AllDrives]
type = combine type = combine
upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:" upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
` + "```" + `
Adding this to the rclone config file will cause those team drives to Adding this to the rclone config file will cause those team drives to
be accessible with the aliases shown. Any illegal characters will be be accessible with the aliases shown. Any illegal characters will be
substituted with "_" and duplicate names will have numbers suffixed. substituted with "_" and duplicate names will have numbers suffixed.
It will also add a remote called AllDrives which shows all the shared It will also add a remote called AllDrives which shows all the shared
drives combined into one directory tree.`, drives combined into one directory tree.
`,
}, { }, {
Name: "untrash", Name: "untrash",
Short: "Untrash files and directories.", Short: "Untrash files and directories",
Long: `This command untrashes all the files and directories in the directory Long: `This command untrashes all the files and directories in the directory
passed in recursively. passed in recursively.
Usage example: Usage:
` + "```console" + `
rclone backend untrash drive:directory
rclone backend --interactive untrash drive:directory subdir
` + "```" + `
This takes an optional directory to trash which makes this easier to This takes an optional directory to trash which makes this easier to
use via the API. use via the API.
Use the --interactive/-i or --dry-run flag to see what would be restored before rclone backend untrash drive:directory
restoring it. rclone backend --interactive untrash drive:directory subdir
Use the --interactive/-i or --dry-run flag to see what would be restored before restoring it.
Result: Result:
` + "```json" + ` {
{ "Untrashed": 17,
"Untrashed": 17, "Errors": 0
"Errors": 0 }
} `,
` + "```",
}, { }, {
Name: "copyid", Name: "copyid",
Short: "Copy files by ID.", Short: "Copy files by ID",
Long: `This command copies files by ID. Long: `This command copies files by ID
Usage examples: Usage:
` + "```console" + ` rclone backend copyid drive: ID path
rclone backend copyid drive: ID path rclone backend copyid drive: ID1 path1 ID2 path2
rclone backend copyid drive: ID1 path1 ID2 path2
` + "```" + `
It copies the drive file with ID given to the path (an rclone path which It copies the drive file with ID given to the path (an rclone path which
will be passed internally to rclone copyto). The ID and path pairs can be will be passed internally to rclone copyto). The ID and path pairs can be
@@ -3820,89 +3789,58 @@ component will be used as the file name.
If the destination is a drive backend then server-side copying will be If the destination is a drive backend then server-side copying will be
attempted if possible. attempted if possible.
Use the --interactive/-i or --dry-run flag to see what would be copied before Use the --interactive/-i or --dry-run flag to see what would be copied before copying.
copying.`, `,
}, {
Name: "moveid",
Short: "Move files by ID.",
Long: `This command moves files by ID.
Usage examples:
` + "```console" + `
rclone backend moveid drive: ID path
rclone backend moveid drive: ID1 path1 ID2 path2
` + "```" + `
It moves the drive file with ID given to the path (an rclone path which
will be passed internally to rclone moveto).
The path should end with a / to indicate move the file as named to
this directory. If it doesn't end with a / then the last path
component will be used as the file name.
If the destination is a drive backend then server-side moving will be
attempted if possible.
Use the --interactive/-i or --dry-run flag to see what would be moved beforehand.`,
}, { }, {
Name: "exportformats", Name: "exportformats",
Short: "Dump the export formats for debug purposes.", Short: "Dump the export formats for debug purposes",
}, { }, {
Name: "importformats", Name: "importformats",
Short: "Dump the import formats for debug purposes.", Short: "Dump the import formats for debug purposes",
}, { }, {
Name: "query", Name: "query",
Short: "List files using Google Drive query language.", Short: "List files using Google Drive query language",
Long: `This command lists files based on a query. Long: `This command lists files based on a query
Usage example: Usage:
` + "```console" + `
rclone backend query drive: query
` + "```" + `
rclone backend query drive: query
The query syntax is documented at [Google Drive Search query terms and The query syntax is documented at [Google Drive Search query terms and
operators](https://developers.google.com/drive/api/guides/ref-search-terms). operators](https://developers.google.com/drive/api/guides/ref-search-terms).
For example: For example:
` + "```console" + ` rclone backend query drive: "'0ABc9DEFGHIJKLMNop0QRatUVW3X' in parents and name contains 'foo'"
rclone backend query drive: "'0ABc9DEFGHIJKLMNop0QRatUVW3X' in parents and name contains 'foo'"
` + "```" + `
If the query contains literal ' or \ characters, these need to be escaped with If the query contains literal ' or \ characters, these need to be escaped with
\ characters. "'" becomes "\'" and "\" becomes "\\\", for example to match a \ characters. "'" becomes "\'" and "\" becomes "\\\", for example to match a
file named "foo ' \.txt": file named "foo ' \.txt":
` + "```console" + ` rclone backend query drive: "name = 'foo \' \\\.txt'"
rclone backend query drive: "name = 'foo \' \\\.txt'"
` + "```" + `
The result is a JSON array of matches, for example: The result is a JSON array of matches, for example:
` + "```json" + ` [
[ {
{ "createdTime": "2017-06-29T19:58:28.537Z",
"createdTime": "2017-06-29T19:58:28.537Z", "id": "0AxBe_CDEF4zkGHI4d0FjYko2QkD",
"id": "0AxBe_CDEF4zkGHI4d0FjYko2QkD", "md5Checksum": "68518d16be0c6fbfab918be61d658032",
"md5Checksum": "68518d16be0c6fbfab918be61d658032", "mimeType": "text/plain",
"mimeType": "text/plain", "modifiedTime": "2024-02-02T10:40:02.874Z",
"modifiedTime": "2024-02-02T10:40:02.874Z", "name": "foo ' \\.txt",
"name": "foo ' \\.txt", "parents": [
"parents": [ "0BxAe_BCDE4zkFGZpcWJGek0xbzC"
"0BxAe_BCDE4zkFGZpcWJGek0xbzC" ],
], "resourceKey": "0-ABCDEFGHIXJQpIGqBJq3MC",
"resourceKey": "0-ABCDEFGHIXJQpIGqBJq3MC", "sha1Checksum": "8f284fa768bfb4e45d076a579ab3905ab6bfa893",
"sha1Checksum": "8f284fa768bfb4e45d076a579ab3905ab6bfa893", "size": "311",
"size": "311", "webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC"
"webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC" }
} ]`,
]
` + "```console",
}, { }, {
Name: "rescue", Name: "rescue",
Short: "Rescue or delete any orphaned files.", Short: "Rescue or delete any orphaned files",
Long: `This command rescues or deletes any orphaned files or directories. Long: `This command rescues or deletes any orphaned files or directories.
Sometimes files can get orphaned in Google Drive. This means that they Sometimes files can get orphaned in Google Drive. This means that they
@@ -3911,31 +3849,26 @@ are no longer in any folder in Google Drive.
This command finds those files and either rescues them to a directory This command finds those files and either rescues them to a directory
you specify or deletes them. you specify or deletes them.
Usage:
This can be used in 3 ways. This can be used in 3 ways.
First, list all orphaned files: First, list all orphaned files
` + "```console" + ` rclone backend rescue drive:
rclone backend rescue drive:
` + "```" + `
Second rescue all orphaned files to the directory indicated: Second rescue all orphaned files to the directory indicated
` + "```console" + ` rclone backend rescue drive: "relative/path/to/rescue/directory"
rclone backend rescue drive: "relative/path/to/rescue/directory"
` + "```" + `
E.g. to rescue all orphans to a directory called "Orphans" in the top level: e.g. To rescue all orphans to a directory called "Orphans" in the top level
` + "```console" + ` rclone backend rescue drive: Orphans
rclone backend rescue drive: Orphans
` + "```" + `
Third delete all orphaned files to the trash: Third delete all orphaned files to the trash
` + "```console" + ` rclone backend rescue drive: -o delete
rclone backend rescue drive: -o delete `,
` + "```",
}} }}
// Command the backend to run a named command // Command the backend to run a named command
@@ -3947,7 +3880,7 @@ rclone backend rescue drive: -o delete
// The result should be capable of being JSON encoded // The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user // If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that // otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) { func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name { switch name {
case "get": case "get":
out := make(map[string]string) out := make(map[string]string)
@@ -4036,16 +3969,16 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
dir = arg[0] dir = arg[0]
} }
return f.unTrashDir(ctx, dir, true) return f.unTrashDir(ctx, dir, true)
case "copyid", "moveid": case "copyid":
if len(arg)%2 != 0 { if len(arg)%2 != 0 {
return nil, errors.New("need an even number of arguments") return nil, errors.New("need an even number of arguments")
} }
for len(arg) > 0 { for len(arg) > 0 {
id, dest := arg[0], arg[1] id, dest := arg[0], arg[1]
arg = arg[2:] arg = arg[2:]
err = f.copyOrMoveID(ctx, name, id, dest) err = f.copyID(ctx, id, dest)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed %s %q to %q: %w", name, id, dest, err) return nil, fmt.Errorf("failed copying %q to %q: %w", id, dest, err)
} }
} }
return nil, nil return nil, nil
@@ -4056,13 +3989,14 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
case "query": case "query":
if len(arg) == 1 { if len(arg) == 1 {
query := arg[0] query := arg[0]
results, err := f.query(ctx, query) var results, err = f.query(ctx, query)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to execute query: %q, error: %w", query, err) return nil, fmt.Errorf("failed to execute query: %q, error: %w", query, err)
} }
return results, nil return results, nil
} else {
return nil, errors.New("need a query argument")
} }
return nil, errors.New("need a query argument")
case "rescue": case "rescue":
dirID := "" dirID := ""
_, delete := opt["delete"] _, delete := opt["delete"]
@@ -4122,7 +4056,6 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
} }
return "", hash.ErrUnsupported return "", hash.ErrUnsupported
} }
func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) { func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 && t != hash.SHA1 && t != hash.SHA256 { if t != hash.MD5 && t != hash.SHA1 && t != hash.SHA256 {
return "", hash.ErrUnsupported return "", hash.ErrUnsupported
@@ -4137,8 +4070,7 @@ func (o *baseObject) Size() int64 {
// getRemoteInfoWithExport returns a drive.File and the export settings for the remote // getRemoteInfoWithExport returns a drive.File and the export settings for the remote
func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) ( func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error, info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) {
) {
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false) leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
if err != nil { if err != nil {
if err == fs.ErrorDirNotFound { if err == fs.ErrorDirNotFound {
@@ -4351,13 +4283,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
} }
return o.baseObject.open(ctx, o.url, options...) return o.baseObject.open(ctx, o.url, options...)
} }
func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
// Update the size with what we are reading as it can change from // Update the size with what we are reading as it can change from
// the HEAD in the listing to this GET. This stops rclone marking // the HEAD in the listing to this GET. This stops rclone marking
// the transfer as corrupted. // the transfer as corrupted.
var offset, end int64 = 0, -1 var offset, end int64 = 0, -1
newOptions := options[:0] var newOptions = options[:0]
for _, o := range options { for _, o := range options {
// Note that Range requests don't work on Google docs: // Note that Range requests don't work on Google docs:
// https://developers.google.com/drive/v3/web/manage-downloads#partial_download // https://developers.google.com/drive/v3/web/manage-downloads#partial_download
@@ -4384,10 +4315,9 @@ func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in
} }
return return
} }
func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
var offset, limit int64 = 0, -1 var offset, limit int64 = 0, -1
data := o.content var data = o.content
for _, option := range options { for _, option := range options {
switch x := option.(type) { switch x := option.(type) {
case *fs.SeekOption: case *fs.SeekOption:
@@ -4412,8 +4342,7 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
} }
func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader, func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
src fs.ObjectInfo, src fs.ObjectInfo) (info *drive.File, err error) {
) (info *drive.File, err error) {
// Make the API request to upload metadata and file data. // Make the API request to upload metadata and file data.
size := src.Size() size := src.Size()
if size >= 0 && size < int64(o.fs.opt.UploadCutoff) { if size >= 0 && size < int64(o.fs.opt.UploadCutoff) {
@@ -4491,7 +4420,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return nil return nil
} }
func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
srcMimeType := fs.MimeType(ctx, src) srcMimeType := fs.MimeType(ctx, src)
importMimeType := "" importMimeType := ""
@@ -4587,7 +4515,6 @@ func (o *baseObject) Metadata(ctx context.Context) (metadata fs.Metadata, err er
func (o *documentObject) ext() string { func (o *documentObject) ext() string {
return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:] return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
} }
func (o *linkObject) ext() string { func (o *linkObject) ext() string {
return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:] return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
} }
@@ -4671,7 +4598,6 @@ var (
_ fs.PutUncheckeder = (*Fs)(nil) _ fs.PutUncheckeder = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil) _ fs.PublicLinker = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil) _ fs.ListRer = (*Fs)(nil)
_ fs.ListPer = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil) _ fs.MergeDirser = (*Fs)(nil)
_ fs.DirSetModTimer = (*Fs)(nil) _ fs.DirSetModTimer = (*Fs)(nil)
_ fs.MkdirMetadataer = (*Fs)(nil) _ fs.MkdirMetadataer = (*Fs)(nil)
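The `_ fs.ListPer = (*Fs)(nil)` entry in the assertion block above follows the usual Go compile-time check idiom: assigning a typed nil pointer to a blank variable of the interface type makes the build fail if the type ever stops satisfying that interface. A minimal sketch with made-up `Lister`/`backend` names (not rclone's types):

```go
package main

import "fmt"

// Lister stands in for an interface such as fs.ListPer.
type Lister interface {
	List() []string
}

type backend struct{}

func (b *backend) List() []string { return []string{"a", "b"} }

// Compile-time check: the build fails here if *backend stops implementing Lister.
var _ Lister = (*backend)(nil)

func main() {
	fmt.Println((&backend{}).List())
}
```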


@@ -479,8 +479,8 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
require.NoError(t, f.Purge(ctx, "trashDir")) require.NoError(t, f.Purge(ctx, "trashDir"))
} }
// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyOrMoveID // TestIntegration/FsMkdir/FsPutFiles/Internal/CopyID
func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) { func (f *Fs) InternalTestCopyID(t *testing.T) {
ctx := context.Background() ctx := context.Background()
obj, err := f.NewObject(ctx, existingFile) obj, err := f.NewObject(ctx, existingFile)
require.NoError(t, err) require.NoError(t, err)
@@ -498,7 +498,7 @@ func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
} }
t.Run("BadID", func(t *testing.T) { t.Run("BadID", func(t *testing.T) {
err = f.copyOrMoveID(ctx, "moveid", "ID-NOT-FOUND", dir+"/") err = f.copyID(ctx, "ID-NOT-FOUND", dir+"/")
require.Error(t, err) require.Error(t, err)
assert.Contains(t, err.Error(), "couldn't find id") assert.Contains(t, err.Error(), "couldn't find id")
}) })
@@ -506,31 +506,19 @@ func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
t.Run("Directory", func(t *testing.T) { t.Run("Directory", func(t *testing.T) {
rootID, err := f.dirCache.RootID(ctx, false) rootID, err := f.dirCache.RootID(ctx, false)
require.NoError(t, err) require.NoError(t, err)
err = f.copyOrMoveID(ctx, "moveid", rootID, dir+"/") err = f.copyID(ctx, rootID, dir+"/")
require.Error(t, err) require.Error(t, err)
assert.Contains(t, err.Error(), "can't moveid directory") assert.Contains(t, err.Error(), "can't copy directory")
}) })
t.Run("MoveWithoutDestName", func(t *testing.T) { t.Run("WithoutDestName", func(t *testing.T) {
err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/") err = f.copyID(ctx, o.id, dir+"/")
require.NoError(t, err) require.NoError(t, err)
checkFile(path.Base(existingFile)) checkFile(path.Base(existingFile))
}) })
t.Run("CopyWithoutDestName", func(t *testing.T) { t.Run("WithDestName", func(t *testing.T) {
err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/") err = f.copyID(ctx, o.id, dir+"/potato.txt")
require.NoError(t, err)
checkFile(path.Base(existingFile))
})
t.Run("MoveWithDestName", func(t *testing.T) {
err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/potato.txt")
require.NoError(t, err)
checkFile("potato.txt")
})
t.Run("CopyWithDestName", func(t *testing.T) {
err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/potato.txt")
require.NoError(t, err) require.NoError(t, err)
checkFile("potato.txt") checkFile("potato.txt")
}) })
@@ -659,7 +647,7 @@ func (f *Fs) InternalTest(t *testing.T) {
}) })
t.Run("Shortcuts", f.InternalTestShortcuts) t.Run("Shortcuts", f.InternalTestShortcuts)
t.Run("UnTrash", f.InternalTestUnTrash) t.Run("UnTrash", f.InternalTestUnTrash)
t.Run("CopyOrMoveID", f.InternalTestCopyOrMoveID) t.Run("CopyID", f.InternalTestCopyID)
t.Run("Query", f.InternalTestQuery) t.Run("Query", f.InternalTestQuery)
t.Run("AgeQuery", f.InternalTestAgeQuery) t.Run("AgeQuery", f.InternalTestAgeQuery)
t.Run("ShouldRetry", f.InternalTestShouldRetry) t.Run("ShouldRetry", f.InternalTestShouldRetry)


@@ -4,7 +4,6 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"maps"
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
@@ -325,7 +324,9 @@ func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err e
metadata := make(fs.Metadata, 16) metadata := make(fs.Metadata, 16)
// Dump user metadata first as it overrides system metadata // Dump user metadata first as it overrides system metadata
maps.Copy(metadata, info.Properties) for k, v := range info.Properties {
metadata[k] = v
}
// System metadata // System metadata
metadata["copy-requires-writer-permission"] = fmt.Sprint(info.CopyRequiresWriterPermission) metadata["copy-requires-writer-permission"] = fmt.Sprint(info.CopyRequiresWriterPermission)
@@ -386,6 +387,7 @@ func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err e
g.SetLimit(o.fs.ci.Checkers) g.SetLimit(o.fs.ci.Checkers)
var mu sync.Mutex // protect the info.Permissions from concurrent writes var mu sync.Mutex // protect the info.Permissions from concurrent writes
for _, permissionID := range info.PermissionIds { for _, permissionID := range info.PermissionIds {
permissionID := permissionID
g.Go(func() error { g.Go(func() error {
// must fetch the team drive ones individually to check the inherited flag // must fetch the team drive ones individually to check the inherited flag
perm, inherited, err := o.fs.getPermission(gCtx, actualID(info.Id), permissionID, !o.fs.isTeamDrive) perm, inherited, err := o.fs.getPermission(gCtx, actualID(info.Id), permissionID, !o.fs.isTeamDrive)
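The only difference in the hunk above is the `permissionID := permissionID` re-declaration before the `g.Go` closure. Since Go 1.22 each iteration of a `for` loop gets its own copy of the loop variable, so that shadowing is no longer needed to avoid all goroutines capturing the final value. A minimal illustration (names are made up, not rclone's code):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	ids := []string{"perm-1", "perm-2", "perm-3"}
	var wg sync.WaitGroup
	for _, id := range ids {
		// Before Go 1.22 this needed `id := id`; now each iteration has its
		// own `id`, so capturing it in the goroutine below is safe.
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println("fetching", id)
		}()
	}
	wg.Wait()
}
```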
@@ -506,7 +508,7 @@ type updateMetadataFn func(context.Context, *drive.File) error
// //
// It returns a callback which should be called to finish the updates // It returns a callback which should be called to finish the updates
// after the data is uploaded. // after the data is uploaded.
func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update, isFolder bool) (callback updateMetadataFn, err error) { func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update bool) (callback updateMetadataFn, err error) {
callbackFns := []updateMetadataFn{} callbackFns := []updateMetadataFn{}
callback = func(ctx context.Context, info *drive.File) error { callback = func(ctx context.Context, info *drive.File) error {
for _, fn := range callbackFns { for _, fn := range callbackFns {
@@ -519,6 +521,7 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
} }
// merge metadata into request and user metadata // merge metadata into request and user metadata
for k, v := range meta { for k, v := range meta {
k, v := k, v
// parse a boolean from v and write into out // parse a boolean from v and write into out
parseBool := func(out *bool) error { parseBool := func(out *bool) error {
b, err := strconv.ParseBool(v) b, err := strconv.ParseBool(v)
@@ -530,9 +533,7 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
} }
switch k { switch k {
case "copy-requires-writer-permission": case "copy-requires-writer-permission":
if isFolder { if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
fs.Debugf(f, "Ignoring %s=%s as can't set on folders", k, v)
} else if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
return nil, err return nil, err
} }
case "writers-can-share": case "writers-can-share":
@@ -629,7 +630,7 @@ func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, opti
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to read metadata from source object: %w", err) return nil, fmt.Errorf("failed to read metadata from source object: %w", err)
} }
callback, err = f.updateMetadata(ctx, updateInfo, meta, update, false) callback, err = f.updateMetadata(ctx, updateInfo, meta, update)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to update metadata from source object: %w", err) return nil, fmt.Errorf("failed to update metadata from source object: %w", err)
} }


@@ -177,7 +177,10 @@ func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
if start >= rx.ContentLength { if start >= rx.ContentLength {
break break
} }
reqSize = min(rx.ContentLength-start, int64(rx.f.opt.ChunkSize)) reqSize = rx.ContentLength - start
if reqSize >= int64(rx.f.opt.ChunkSize) {
reqSize = int64(rx.f.opt.ChunkSize)
}
chunk = readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize) chunk = readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
} else { } else {
// If size unknown read into buffer // If size unknown read into buffer
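The single-line `reqSize = min(rx.ContentLength-start, int64(rx.f.opt.ChunkSize))` on one side of this hunk relies on the `min` builtin added in Go 1.21; the other side spells out the same clamp with an explicit `if`. A small sketch of the same chunk-size calculation, with made-up sizes for illustration:

```go
package main

import "fmt"

func main() {
	const chunkSize int64 = 8 * 1024 * 1024 // illustrative 8 MiB chunk size
	contentLength := int64(20 * 1024 * 1024)

	for start := int64(0); start < contentLength; start += chunkSize {
		// Go 1.21+ builtin: the smaller of the bytes remaining and the chunk size.
		reqSize := min(contentLength-start, chunkSize)
		fmt.Printf("upload bytes %d-%d (%d bytes)\n", start, start+reqSize-1, reqSize)
	}
}
```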


@@ -11,6 +11,7 @@ import (
"fmt" "fmt"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files" "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/rclone/rclone/fs/fserrors"
) )
// finishBatch commits the batch, returning a batch status to poll or maybe complete // finishBatch commits the batch, returning a batch status to poll or maybe complete
@@ -20,10 +21,14 @@ func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinish
} }
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
complete, err = f.srv.UploadSessionFinishBatchV2(arg) complete, err = f.srv.UploadSessionFinishBatchV2(arg)
if retry, err := shouldRetryExclude(ctx, err); !retry { // If error is insufficient space then don't retry
return retry, err if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
err = fserrors.NoRetryError(err)
return false, err
}
} }
// after the first chunk is uploaded, we retry everything except the excluded errors // after the first chunk is uploaded, we retry everything
return err != nil, err return err != nil, err
}) })
if err != nil { if err != nil {


@@ -55,7 +55,10 @@ func (d *digest) Write(p []byte) (n int, err error) {
n = len(p) n = len(p)
for len(p) > 0 { for len(p) > 0 {
d.writtenMore = true d.writtenMore = true
toWrite := min(bytesPerBlock-d.n, len(p)) toWrite := bytesPerBlock - d.n
if toWrite > len(p) {
toWrite = len(p)
}
_, err = d.blockHash.Write(p[:toWrite]) _, err = d.blockHash.Write(p[:toWrite])
if err != nil { if err != nil {
panic(hashReturnedError) panic(hashReturnedError)


@@ -11,7 +11,7 @@ import (
func testChunk(t *testing.T, chunk int) { func testChunk(t *testing.T, chunk int) {
data := make([]byte, chunk) data := make([]byte, chunk)
for i := range chunk { for i := 0; i < chunk; i++ {
data[i] = 'A' data[i] = 'A'
} }
for _, test := range []struct { for _, test := range []struct {


@@ -47,8 +47,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/batcher" "github.com/rclone/rclone/lib/batcher"
"github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/oauthutil"
@@ -93,12 +91,9 @@ const (
maxFileNameLength = 255 maxFileNameLength = 255
) )
type exportAPIFormat string
type exportExtension string // dotless
var ( var (
// Description of how to auth for this app // Description of how to auth for this app
dropboxConfig = &oauthutil.Config{ dropboxConfig = &oauth2.Config{
Scopes: []string{ Scopes: []string{
"files.metadata.write", "files.metadata.write",
"files.content.write", "files.content.write",
@@ -113,8 +108,7 @@ var (
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize", // AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
// TokenURL: "https://api.dropboxapi.com/1/oauth2/token", // TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
// }, // },
AuthURL: dropbox.OAuthEndpoint("").AuthURL, Endpoint: dropbox.OAuthEndpoint(""),
TokenURL: dropbox.OAuthEndpoint("").TokenURL,
ClientID: rcloneClientID, ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL, RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -136,20 +130,10 @@ var (
DefaultTimeoutAsync: 10 * time.Second, DefaultTimeoutAsync: 10 * time.Second,
DefaultBatchSizeAsync: 100, DefaultBatchSizeAsync: 100,
} }
exportKnownAPIFormats = map[exportAPIFormat]exportExtension{
"markdown": "md",
"html": "html",
}
// Populated based on exportKnownAPIFormats
exportKnownExtensions = map[exportExtension]exportAPIFormat{}
paperExtension = ".paper"
paperTemplateExtension = ".papert"
) )
// Gets an oauth config with the right scopes // Gets an oauth config with the right scopes
func getOauthConfig(m configmap.Mapper) *oauthutil.Config { func getOauthConfig(m configmap.Mapper) *oauth2.Config {
// If not impersonating, use standard scopes // If not impersonating, use standard scopes
if impersonate, _ := m.Get("impersonate"); impersonate == "" { if impersonate, _ := m.Get("impersonate"); impersonate == "" {
return dropboxConfig return dropboxConfig
@@ -261,61 +245,23 @@ folders.`,
Help: "Specify a different Dropbox namespace ID to use as the root for all paths.", Help: "Specify a different Dropbox namespace ID to use as the root for all paths.",
Default: "", Default: "",
Advanced: true, Advanced: true,
}, { }}...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...),
Name: "export_formats",
Help: `Comma separated list of preferred formats for exporting files
Certain Dropbox files can only be accessed by exporting them to another format.
These include Dropbox Paper documents.
For each such file, rclone will choose the first format on this list that Dropbox
considers valid. If none is valid, it will choose Dropbox's default format.
Known formats include: "html", "md" (markdown)`,
Default: fs.CommaSepList{"html", "md"},
Advanced: true,
}, {
Name: "skip_exports",
Help: "Skip exportable files in all listings.\n\nIf given, exportable files practically become invisible to rclone.",
Default: false,
Advanced: true,
}, {
Name: "show_all_exports",
Default: false,
Help: `Show all exportable files in listings.
Adding this flag will allow all exportable files to be server side copied.
Note that rclone doesn't add extensions to the exportable file names in this mode.
Do **not** use this flag when trying to download exportable files - rclone
will fail to download them.
`,
Advanced: true,
},
}...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...),
}) })
for apiFormat, ext := range exportKnownAPIFormats {
exportKnownExtensions[ext] = apiFormat
}
} }
// Options defines the configuration for this backend // Options defines the configuration for this backend
type Options struct { type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"` ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"` Impersonate string `config:"impersonate"`
SharedFiles bool `config:"shared_files"` SharedFiles bool `config:"shared_files"`
SharedFolders bool `config:"shared_folders"` SharedFolders bool `config:"shared_folders"`
BatchMode string `config:"batch_mode"` BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"` BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"` BatchTimeout fs.Duration `config:"batch_timeout"`
AsyncBatch bool `config:"async_batch"` AsyncBatch bool `config:"async_batch"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"` PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
RootNsid string `config:"root_namespace"` RootNsid string `config:"root_namespace"`
ExportFormats fs.CommaSepList `config:"export_formats"`
SkipExports bool `config:"skip_exports"`
ShowAllExports bool `config:"show_all_exports"`
} }
// Fs represents a remote dropbox server // Fs represents a remote dropbox server
@@ -335,18 +281,8 @@ type Fs struct {
pacer *fs.Pacer // To pace the API calls pacer *fs.Pacer // To pace the API calls
ns string // The namespace we are using or "" for none ns string // The namespace we are using or "" for none
batcher *batcher.Batcher[*files.UploadSessionFinishArg, *files.FileMetadata] batcher *batcher.Batcher[*files.UploadSessionFinishArg, *files.FileMetadata]
exportExts []exportExtension
} }
type exportType int
const (
notExport exportType = iota // a regular file
exportHide // should be hidden
exportListOnly // listable, but can't export
exportExportable // can export
)
// Object describes a dropbox object // Object describes a dropbox object
// //
// Dropbox Objects always have full metadata // Dropbox Objects always have full metadata
@@ -358,9 +294,6 @@ type Object struct {
bytes int64 // size of the object bytes int64 // size of the object
modTime time.Time // time it was last modified modTime time.Time // time it was last modified
hash string // content_hash of the object hash string // content_hash of the object
exportType exportType
exportAPIFormat exportAPIFormat
} }
// Name of the remote (as passed into NewFs) // Name of the remote (as passed into NewFs)
@@ -383,46 +316,32 @@ func (f *Fs) Features() *fs.Features {
return f.features return f.features
} }
// Some specific errors which should be excluded from retries // shouldRetry returns a boolean as to whether this err deserves to be
func shouldRetryExclude(ctx context.Context, err error) (bool, error) { // retried. It returns the err as a convenience
if err == nil { func shouldRetry(ctx context.Context, err error) (bool, error) {
return false, err
}
if fserrors.ContextError(ctx, &err) { if fserrors.ContextError(ctx, &err) {
return false, err return false, err
} }
// First check for specific errors if err == nil {
// return false, err
// These come back from the SDK in a whole host of different }
// error types, but there doesn't seem to be a consistent way
// of reading the error cause, so here we just check using the
// error string which isn't perfect but does the job.
errString := err.Error() errString := err.Error()
// First check for specific errors
if strings.Contains(errString, "insufficient_space") { if strings.Contains(errString, "insufficient_space") {
return false, fserrors.FatalError(err) return false, fserrors.FatalError(err)
} else if strings.Contains(errString, "malformed_path") { } else if strings.Contains(errString, "malformed_path") {
return false, fserrors.NoRetryError(err) return false, fserrors.NoRetryError(err)
} }
return true, err
}
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if retry, err := shouldRetryExclude(ctx, err); !retry {
return retry, err
}
// Then handle any official Retry-After header from Dropbox's SDK // Then handle any official Retry-After header from Dropbox's SDK
switch e := err.(type) { switch e := err.(type) {
case auth.RateLimitAPIError: case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 { if e.RateLimitError.RetryAfter > 0 {
fs.Logf(nil, "Error %v. Too many requests or write operations. Trying again in %d seconds.", err, e.RateLimitError.RetryAfter) fs.Logf(errString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second) err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
} }
return true, err return true, err
} }
// Keep old behavior for backward compatibility // Keep old behavior for backward compatibility
errString := err.Error()
if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" { if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" {
return true, err return true, err
} }
@@ -501,14 +420,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
HeaderGenerator: f.headerGenerator, HeaderGenerator: f.headerGenerator,
} }
for _, e := range opt.ExportFormats {
ext := exportExtension(e)
if exportKnownExtensions[ext] == "" {
return nil, fmt.Errorf("dropbox: unknown export format '%s'", e)
}
f.exportExts = append(f.exportExts, ext)
}
// unauthorized config for endpoints that fail with auth // unauthorized config for endpoints that fail with auth
ucfg := dropbox.Config{ ucfg := dropbox.Config{
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
@@ -661,126 +572,38 @@ func (f *Fs) setRoot(root string) {
} }
} }
type getMetadataResult struct {
entry files.IsMetadata
notFound bool
err error
}
// getMetadata gets the metadata for a file or directory // getMetadata gets the metadata for a file or directory
func (f *Fs) getMetadata(ctx context.Context, objPath string) (res getMetadataResult) { func (f *Fs) getMetadata(ctx context.Context, objPath string) (entry files.IsMetadata, notFound bool, err error) {
res.err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
res.entry, res.err = f.srv.GetMetadata(&files.GetMetadataArg{ entry, err = f.srv.GetMetadata(&files.GetMetadataArg{
Path: f.opt.Enc.FromStandardPath(objPath), Path: f.opt.Enc.FromStandardPath(objPath),
}) })
return shouldRetry(ctx, res.err) return shouldRetry(ctx, err)
}) })
if res.err != nil { if err != nil {
switch e := res.err.(type) { switch e := err.(type) {
case files.GetMetadataAPIError: case files.GetMetadataAPIError:
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound { if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
res.notFound = true notFound = true
res.err = nil err = nil
} }
} }
} }
return return
} }
// Get metadata such that the result would be exported with the given extension
// Return a channel that will eventually receive the metadata
func (f *Fs) getMetadataForExt(ctx context.Context, filePath string, wantExportExtension exportExtension) chan getMetadataResult {
ch := make(chan getMetadataResult, 1)
wantDownloadable := (wantExportExtension == "")
go func() {
defer close(ch)
res := f.getMetadata(ctx, filePath)
info, ok := res.entry.(*files.FileMetadata)
if !ok { // Can't check anything about file, just return what we have
ch <- res
return
}
// Return notFound if downloadability or extension doesn't match
if wantDownloadable != info.IsDownloadable {
ch <- getMetadataResult{notFound: true}
return
}
if !info.IsDownloadable {
_, ext := f.chooseExportFormat(info)
if ext != wantExportExtension {
ch <- getMetadataResult{notFound: true}
return
}
}
// Return our real result or error
ch <- res
}()
return ch
}
// For a given rclone-path, figure out what the Dropbox-path may be, in order of preference.
// Multiple paths might be plausible, due to export path munging.
func (f *Fs) possibleMetadatas(ctx context.Context, filePath string) (ret []<-chan getMetadataResult) {
ret = []<-chan getMetadataResult{}
// Prefer an exact match
ret = append(ret, f.getMetadataForExt(ctx, filePath, ""))
// Check if we're plausibly an export path, otherwise we're done
if f.opt.SkipExports || f.opt.ShowAllExports {
return
}
dotted := path.Ext(filePath)
if dotted == "" {
return
}
ext := exportExtension(dotted[1:])
if exportKnownExtensions[ext] == "" {
return
}
// We might be an export path! Try all possibilities
base := strings.TrimSuffix(filePath, dotted)
// `foo.papert.md` will only come from `foo.papert`. Never check something like `foo.papert.paper`
if strings.HasSuffix(base, paperTemplateExtension) {
ret = append(ret, f.getMetadataForExt(ctx, base, ext))
return
}
// Otherwise, try both `foo.md` coming from `foo`, or from `foo.paper`
ret = append(ret, f.getMetadataForExt(ctx, base, ext))
ret = append(ret, f.getMetadataForExt(ctx, base+paperExtension, ext))
return
}
// getFileMetadata gets the metadata for a file // getFileMetadata gets the metadata for a file
func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (*files.FileMetadata, error) { func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (fileInfo *files.FileMetadata, err error) {
var res getMetadataResult entry, notFound, err := f.getMetadata(ctx, filePath)
if err != nil {
// Try all possible metadatas return nil, err
possibleMetadatas := f.possibleMetadatas(ctx, filePath)
for _, ch := range possibleMetadatas {
res = <-ch
if res.err != nil {
return nil, res.err
}
if !res.notFound {
break
}
} }
if notFound {
if res.notFound {
return nil, fs.ErrorObjectNotFound return nil, fs.ErrorObjectNotFound
} }
fileInfo, ok := entry.(*files.FileMetadata)
fileInfo, ok := res.entry.(*files.FileMetadata)
if !ok { if !ok {
if _, ok = res.entry.(*files.FolderMetadata); ok { if _, ok = entry.(*files.FolderMetadata); ok {
return nil, fs.ErrorIsDir return nil, fs.ErrorIsDir
} }
return nil, fs.ErrorNotAFile return nil, fs.ErrorNotAFile
@@ -789,15 +612,15 @@ func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (*files.FileM
} }
// getDirMetadata gets the metadata for a directory // getDirMetadata gets the metadata for a directory
func (f *Fs) getDirMetadata(ctx context.Context, dirPath string) (*files.FolderMetadata, error) { func (f *Fs) getDirMetadata(ctx context.Context, dirPath string) (dirInfo *files.FolderMetadata, err error) {
res := f.getMetadata(ctx, dirPath) entry, notFound, err := f.getMetadata(ctx, dirPath)
if res.err != nil { if err != nil {
return nil, res.err return nil, err
} }
if res.notFound { if notFound {
return nil, fs.ErrorDirNotFound return nil, fs.ErrorDirNotFound
} }
dirInfo, ok := res.entry.(*files.FolderMetadata) dirInfo, ok := entry.(*files.FolderMetadata)
if !ok { if !ok {
return nil, fs.ErrorIsFile return nil, fs.ErrorIsFile
} }
@@ -835,7 +658,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// listSharedFolders lists all available shared folders mounted and not mounted // listSharedFolders lists all available shared folders mounted and not mounted
// we'll need the id later so we have to return them in original format // we'll need the id later so we have to return them in original format
func (f *Fs) listSharedFolders(ctx context.Context, callback func(fs.DirEntry) error) (err error) { func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err error) {
started := false started := false
var res *sharing.ListFoldersResult var res *sharing.ListFoldersResult
for { for {
@@ -848,7 +671,7 @@ func (f *Fs) listSharedFolders(ctx context.Context, callback func(fs.DirEntry) e
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
return err return nil, err
} }
started = true started = true
} else { } else {
@@ -860,15 +683,15 @@ func (f *Fs) listSharedFolders(ctx context.Context, callback func(fs.DirEntry) e
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
return fmt.Errorf("list continue: %w", err) return nil, fmt.Errorf("list continue: %w", err)
} }
} }
for _, entry := range res.Entries { for _, entry := range res.Entries {
leaf := f.opt.Enc.ToStandardName(entry.Name) leaf := f.opt.Enc.ToStandardName(entry.Name)
d := fs.NewDir(leaf, time.Time{}).SetID(entry.SharedFolderId) d := fs.NewDir(leaf, time.Time{}).SetID(entry.SharedFolderId)
err = callback(d) entries = append(entries, d)
if err != nil { if err != nil {
return err return nil, err
} }
} }
if res.Cursor == "" { if res.Cursor == "" {
@@ -876,26 +699,22 @@ func (f *Fs) listSharedFolders(ctx context.Context, callback func(fs.DirEntry) e
} }
} }
return nil return entries, nil
} }
// findSharedFolder finds the id for a given shared folder name // findSharedFolder finds the id for a given shared folder name
// somewhat annoyingly there is no endpoint to query a shared folder by its name // somewhat annoyingly there is no endpoint to query a shared folder by its name
// so our only option is to iterate over all shared folders // so our only option is to iterate over all shared folders
func (f *Fs) findSharedFolder(ctx context.Context, name string) (id string, err error) { func (f *Fs) findSharedFolder(ctx context.Context, name string) (id string, err error) {
errFoundFile := errors.New("found file") entries, err := f.listSharedFolders(ctx)
err = f.listSharedFolders(ctx, func(entry fs.DirEntry) error { if err != nil {
if entry.(*fs.Dir).Remote() == name {
id = entry.(*fs.Dir).ID()
return errFoundFile
}
return nil
})
if errors.Is(err, errFoundFile) {
return id, nil
} else if err != nil {
return "", err return "", err
} }
for _, entry := range entries {
if entry.(*fs.Dir).Remote() == name {
return entry.(*fs.Dir).ID(), nil
}
}
return "", fs.ErrorDirNotFound return "", fs.ErrorDirNotFound
} }
@@ -913,7 +732,7 @@ func (f *Fs) mountSharedFolder(ctx context.Context, id string) error {
// listReceivedFiles lists shared files the user has access to (note this means individual // listReceivedFiles lists shared files the user has access to (note this means individual
// files not files contained in shared folders) // files not files contained in shared folders)
func (f *Fs) listReceivedFiles(ctx context.Context, callback func(fs.DirEntry) error) (err error) { func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err error) {
func (f *Fs) listReceivedFiles(ctx context.Context, callback func(fs.DirEntry) error) (err error) { func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err error) {
started := false started := false
var res *sharing.ListFilesResult var res *sharing.ListFilesResult
for { for {
@@ -926,7 +745,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context, callback func(fs.DirEntry) e
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
return err return nil, err
} }
started = true started = true
} else { } else {
@@ -938,7 +757,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context, callback func(fs.DirEntry) e
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
return fmt.Errorf("list continue: %w", err) return nil, fmt.Errorf("list continue: %w", err)
} }
} }
for _, entry := range res.Entries { for _, entry := range res.Entries {
@@ -951,34 +770,27 @@ func (f *Fs) listReceivedFiles(ctx context.Context, callback func(fs.DirEntry) e
modTime: *entry.TimeInvited, modTime: *entry.TimeInvited,
} }
if err != nil { if err != nil {
return err return nil, err
}
err = callback(o)
if err != nil {
return err
} }
entries = append(entries, o)
} }
if res.Cursor == "" { if res.Cursor == "" {
break break
} }
} }
return nil return entries, nil
} }
func (f *Fs) findSharedFile(ctx context.Context, name string) (o *Object, err error) { func (f *Fs) findSharedFile(ctx context.Context, name string) (o *Object, err error) {
errFoundFile := errors.New("found file") files, err := f.listReceivedFiles(ctx)
err = f.listReceivedFiles(ctx, func(entry fs.DirEntry) error { if err != nil {
if entry.(*Object).remote == name {
o = entry.(*Object)
return errFoundFile
}
return nil
})
if errors.Is(err, errFoundFile) {
return o, nil
} else if err != nil {
return nil, err return nil, err
} }
for _, entry := range files {
if entry.(*Object).remote == name {
return entry.(*Object), nil
}
}
return nil, fs.ErrorObjectNotFound return nil, fs.ErrorObjectNotFound
} }
@@ -992,37 +804,11 @@ func (f *Fs) findSharedFile(ctx context.Context, name string) (o *Object, err er
// This should return ErrDirNotFound if the directory isn't // This should return ErrDirNotFound if the directory isn't
// found. // found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
list := list.NewHelper(callback)
if f.opt.SharedFiles { if f.opt.SharedFiles {
err := f.listReceivedFiles(ctx, list.Add) return f.listReceivedFiles(ctx)
if err != nil {
return err
}
return list.Flush()
} }
if f.opt.SharedFolders { if f.opt.SharedFolders {
err := f.listSharedFolders(ctx, list.Add) return f.listSharedFolders(ctx)
if err != nil {
return err
}
return list.Flush()
} }
root := f.slashRoot root := f.slashRoot
@@ -1034,15 +820,16 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) (
var res *files.ListFolderResult var res *files.ListFolderResult
for { for {
if !started { if !started {
arg := files.NewListFolderArg(f.opt.Enc.FromStandardPath(root)) arg := files.ListFolderArg{
arg.Recursive = false Path: f.opt.Enc.FromStandardPath(root),
arg.Limit = 1000 Recursive: false,
Limit: 1000,
}
if root == "/" { if root == "/" {
arg.Path = "" // Specify root folder as empty string arg.Path = "" // Specify root folder as empty string
} }
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.ListFolder(arg) res, err = f.srv.ListFolder(&arg)
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
@@ -1052,7 +839,7 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) (
err = fs.ErrorDirNotFound err = fs.ErrorDirNotFound
} }
} }
return err return nil, err
} }
started = true started = true
} else { } else {
@@ -1064,7 +851,7 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) (
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
return fmt.Errorf("list continue: %w", err) return nil, fmt.Errorf("list continue: %w", err)
} }
} }
for _, entry := range res.Entries { for _, entry := range res.Entries {
@@ -1089,28 +876,20 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) (
remote := path.Join(dir, leaf) remote := path.Join(dir, leaf)
if folderInfo != nil { if folderInfo != nil {
d := fs.NewDir(remote, time.Time{}).SetID(folderInfo.Id) d := fs.NewDir(remote, time.Time{}).SetID(folderInfo.Id)
err = list.Add(d) entries = append(entries, d)
if err != nil {
return err
}
} else if fileInfo != nil { } else if fileInfo != nil {
o, err := f.newObjectWithInfo(ctx, remote, fileInfo) o, err := f.newObjectWithInfo(ctx, remote, fileInfo)
if err != nil { if err != nil {
return err return nil, err
}
if o.(*Object).exportType.listable() {
err = list.Add(o)
if err != nil {
return err
}
} }
entries = append(entries, o)
} }
} }
if !res.HasMore { if !res.HasMore {
break break
} }
} }
return list.Flush() return entries, nil
} }
// Put the object // Put the object
@@ -1189,14 +968,16 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
} }
// check directory empty // check directory empty
arg := files.NewListFolderArg(encRoot) arg := files.ListFolderArg{
arg.Recursive = false Path: encRoot,
Recursive: false,
}
if root == "/" { if root == "/" {
arg.Path = "" // Specify root folder as empty string arg.Path = "" // Specify root folder as empty string
} }
var res *files.ListFolderResult var res *files.ListFolderResult
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.ListFolder(arg) res, err = f.srv.ListFolder(&arg)
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
@@ -1239,20 +1020,13 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
// If it isn't possible then return fs.ErrorCantCopy // If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) { func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object) srcObj, ok := src.(*Object)
if !ok { if !ok {
fs.Debugf(src, "Can't copy - not same remote type") fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy return nil, fs.ErrorCantCopy
} }
// Find and remove existing object
cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
if err != nil {
return nil, err
}
defer cleanup(&err)
// Temporary Object under construction // Temporary Object under construction
dstObj := &Object{ dstObj := &Object{
fs: f, fs: f,
@@ -1266,6 +1040,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
ToPath: f.opt.Enc.FromStandardPath(dstObj.remotePath()), ToPath: f.opt.Enc.FromStandardPath(dstObj.remotePath()),
}, },
} }
var err error
var result *files.RelocationResult var result *files.RelocationResult
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
result, err = f.srv.CopyV2(&arg) result, err = f.srv.CopyV2(&arg)
@@ -1330,16 +1105,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
var result *files.RelocationResult var result *files.RelocationResult
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
result, err = f.srv.MoveV2(&arg) result, err = f.srv.MoveV2(&arg)
switch e := err.(type) {
case files.MoveV2APIError:
// There seems to be a bit of eventual consistency here which causes this to
// fail on just created objects
// See: https://github.com/rclone/rclone/issues/8881
if e.EndpointError != nil && e.EndpointError.FromLookup != nil && e.EndpointError.FromLookup.Tag == files.LookupErrorNotFound {
fs.Debugf(srcObj, "Retrying move on %v error", err)
return true, err
}
}
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
@@ -1387,16 +1152,6 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil && createArg.Settings.Expires != nil && strings.Contains(err.Error(), sharing.SharedLinkSettingsErrorNotAuthorized) {
// Some plans can't create links with expiry
fs.Debugf(absPath, "can't create link with expiry, trying without")
createArg.Settings.Expires = nil
err = f.pacer.Call(func() (bool, error) {
linkRes, err = f.sharing.CreateSharedLinkWithSettings(&createArg)
return shouldRetry(ctx, err)
})
}
if err != nil && strings.Contains(err.Error(), if err != nil && strings.Contains(err.Error(),
sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) { sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
fs.Debugf(absPath, "has a public link already, attempting to retrieve it") fs.Debugf(absPath, "has a public link already, attempting to retrieve it")
@@ -1500,9 +1255,9 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
} }
} }
usage = &fs.Usage{ usage = &fs.Usage{
Total: fs.NewUsageValue(total), // quota of bytes that can be used Total: fs.NewUsageValue(int64(total)), // quota of bytes that can be used
Used: fs.NewUsageValue(used), // bytes in use Used: fs.NewUsageValue(int64(used)), // bytes in use
Free: fs.NewUsageValue(total - used), // bytes which can be uploaded before reaching the quota Free: fs.NewUsageValue(int64(total - used)), // bytes which can be uploaded before reaching the quota
} }
return usage, nil return usage, nil
} }
@@ -1561,14 +1316,16 @@ func (f *Fs) changeNotifyCursor(ctx context.Context) (cursor string, err error)
var startCursor *files.ListFolderGetLatestCursorResult var startCursor *files.ListFolderGetLatestCursorResult
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
arg := files.NewListFolderArg(f.opt.Enc.FromStandardPath(f.slashRoot)) arg := files.ListFolderArg{
arg.Recursive = true Path: f.opt.Enc.FromStandardPath(f.slashRoot),
Recursive: true,
}
if arg.Path == "/" { if arg.Path == "/" {
arg.Path = "" arg.Path = ""
} }
startCursor, err = f.srv.ListFolderGetLatestCursor(arg) startCursor, err = f.srv.ListFolderGetLatestCursor(&arg)
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
@@ -1672,50 +1429,8 @@ func (f *Fs) Shutdown(ctx context.Context) error {
return nil return nil
} }
func (f *Fs) chooseExportFormat(info *files.FileMetadata) (exportAPIFormat, exportExtension) {
// Find API export formats Dropbox supports for this file
// Sometimes Dropbox lists a format in ExportAs but not ExportOptions, so check both
ei := info.ExportInfo
dropboxFormatStrings := append([]string{ei.ExportAs}, ei.ExportOptions...)
// Find which extensions these correspond to
exportExtensions := map[exportExtension]exportAPIFormat{}
var dropboxPreferredAPIFormat exportAPIFormat
var dropboxPreferredExtension exportExtension
for _, format := range dropboxFormatStrings {
apiFormat := exportAPIFormat(format)
// Only consider formats we know about
if ext, ok := exportKnownAPIFormats[apiFormat]; ok {
if dropboxPreferredAPIFormat == "" {
dropboxPreferredAPIFormat = apiFormat
dropboxPreferredExtension = ext
}
exportExtensions[ext] = apiFormat
}
}
// See if the user picked a valid extension
for _, ext := range f.exportExts {
if apiFormat, ok := exportExtensions[ext]; ok {
return apiFormat, ext
}
}
// If no matches, prefer the first valid format Dropbox lists
return dropboxPreferredAPIFormat, dropboxPreferredExtension
}
// ------------------------------------------------------------ // ------------------------------------------------------------
func (et exportType) listable() bool {
return et != exportHide
}
// something we should _try_ to export
func (et exportType) exportable() bool {
return et == exportExportable || et == exportListOnly
}
// Fs returns the parent Fs // Fs returns the parent Fs
func (o *Object) Fs() fs.Info { func (o *Object) Fs() fs.Info {
return o.fs return o.fs
@@ -1759,32 +1474,6 @@ func (o *Object) Size() int64 {
return o.bytes return o.bytes
} }
func (o *Object) setMetadataForExport(info *files.FileMetadata) {
o.bytes = -1
o.hash = ""
if o.fs.opt.SkipExports {
o.exportType = exportHide
return
}
if o.fs.opt.ShowAllExports {
o.exportType = exportListOnly
return
}
var exportExt exportExtension
o.exportAPIFormat, exportExt = o.fs.chooseExportFormat(info)
if o.exportAPIFormat == "" {
o.exportType = exportHide
} else {
o.exportType = exportExportable
// get rid of any paper extension, if present
o.remote = strings.TrimSuffix(o.remote, paperExtension)
// add the export extension
o.remote += "." + string(exportExt)
}
}
// setMetadataFromEntry sets the fs data from a files.FileMetadata // setMetadataFromEntry sets the fs data from a files.FileMetadata
// //
// This isn't a complete set of metadata and has an inaccurate date // This isn't a complete set of metadata and has an inaccurate date
@@ -1793,10 +1482,6 @@ func (o *Object) setMetadataFromEntry(info *files.FileMetadata) error {
o.bytes = int64(info.Size) o.bytes = int64(info.Size)
o.modTime = info.ClientModified o.modTime = info.ClientModified
o.hash = info.ContentHash o.hash = info.ContentHash
if !info.IsDownloadable {
o.setMetadataForExport(info)
}
return nil return nil
} }
@@ -1860,27 +1545,6 @@ func (o *Object) Storable() bool {
return true return true
} }
func (o *Object) export(ctx context.Context) (in io.ReadCloser, err error) {
if o.exportType == exportListOnly || o.exportAPIFormat == "" {
fs.Debugf(o.remote, "No export format found")
return nil, fs.ErrorObjectNotFound
}
arg := files.ExportArg{Path: o.id, ExportFormat: string(o.exportAPIFormat)}
var exportResult *files.ExportResult
err = o.fs.pacer.Call(func() (bool, error) {
exportResult, in, err = o.fs.srv.Export(&arg)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}
o.bytes = int64(exportResult.ExportMetadata.Size)
o.hash = exportResult.ExportMetadata.ExportHash
return
}
// Open an object for read // Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.fs.opt.SharedFiles { if o.fs.opt.SharedFiles {
@@ -1900,10 +1564,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return return
} }
if o.exportType.exportable() {
return o.export(ctx)
}
fs.FixRangeOption(options, o.bytes) fs.FixRangeOption(options, o.bytes)
headers := fs.OpenOptionHeaders(options) headers := fs.OpenOptionHeaders(options)
arg := files.DownloadArg{ arg := files.DownloadArg{
@@ -2032,10 +1692,14 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
entry, err = o.fs.srv.UploadSessionFinish(args, nil) entry, err = o.fs.srv.UploadSessionFinish(args, nil)
if retry, err := shouldRetryExclude(ctx, err); !retry { // If error is insufficient space then don't retry
return retry, err if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
err = fserrors.NoRetryError(err)
return false, err
}
} }
// after the first chunk is uploaded, we retry everything except the excluded errors // after the first chunk is uploaded, we retry everything
return err != nil, err return err != nil, err
}) })
if err != nil { if err != nil {
@@ -2141,7 +1805,6 @@ var (
_ fs.Mover = (*Fs)(nil) _ fs.Mover = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil) _ fs.PublicLinker = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil) _ fs.DirMover = (*Fs)(nil)
_ fs.ListPer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil) _ fs.Abouter = (*Fs)(nil)
_ fs.Shutdowner = &Fs{} _ fs.Shutdowner = &Fs{}
_ fs.Object = (*Object)(nil) _ fs.Object = (*Object)(nil)
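A cut-down model of the chooseExportFormat logic added above, for illustration only: the format-to-extension mapping and the sample inputs below are invented, while the real table (exportKnownAPIFormats) and the user's preferred extensions (f.exportExts) come from the backend.

package main

import "fmt"

// choose mirrors the selection order above: collect the Dropbox-advertised
// formats we recognise, prefer the first extension the user asked for, and
// otherwise fall back to Dropbox's first known format.
func choose(dropboxFormats []string, known map[string]string, userExts []string) (format, ext string) {
	byExt := map[string]string{}
	var firstFormat, firstExt string
	for _, f := range dropboxFormats {
		if e, ok := known[f]; ok {
			if firstFormat == "" {
				firstFormat, firstExt = f, e
			}
			byExt[e] = f
		}
	}
	for _, e := range userExts {
		if f, ok := byExt[e]; ok {
			return f, e
		}
	}
	return firstFormat, firstExt
}

func main() {
	known := map[string]string{"html": "html", "markdown": "md"} // illustrative mapping only
	fmt.Println(choose([]string{"markdown", "html"}, known, []string{"html"})) // html html
	fmt.Println(choose([]string{"markdown", "html"}, known, nil))              // markdown md
}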


@@ -1,16 +1,9 @@
package dropbox package dropbox
import ( import (
"context"
"io"
"strings"
"testing" "testing"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
) )
func TestInternalCheckPathLength(t *testing.T) { func TestInternalCheckPathLength(t *testing.T) {
@@ -49,54 +42,3 @@ func TestInternalCheckPathLength(t *testing.T) {
assert.Equal(t, test.ok, err == nil, test.in) assert.Equal(t, test.ok, err == nil, test.in)
} }
} }
func (f *Fs) importPaperForTest(t *testing.T) {
content := `# test doc
Lorem ipsum __dolor__ sit amet
[link](http://google.com)
`
arg := files.PaperCreateArg{
Path: f.slashRootSlash + "export.paper",
ImportFormat: &files.ImportFormat{Tagged: dropbox.Tagged{Tag: files.ImportFormatMarkdown}},
}
var err error
err = f.pacer.Call(func() (bool, error) {
reader := strings.NewReader(content)
_, err = f.srv.PaperCreate(&arg, reader)
return shouldRetry(context.Background(), err)
})
require.NoError(t, err)
}
func (f *Fs) InternalTestPaperExport(t *testing.T) {
ctx := context.Background()
f.importPaperForTest(t)
f.exportExts = []exportExtension{"html"}
obj, err := f.NewObject(ctx, "export.html")
require.NoError(t, err)
rc, err := obj.Open(ctx)
require.NoError(t, err)
defer func() { require.NoError(t, rc.Close()) }()
buf, err := io.ReadAll(rc)
require.NoError(t, err)
text := string(buf)
for _, excerpt := range []string{
"Lorem ipsum",
"<b>dolor</b>",
`href="http://google.com"`,
} {
require.Contains(t, text, excerpt)
}
}
func (f *Fs) InternalTest(t *testing.T) {
t.Run("PaperExport", f.InternalTestPaperExport)
}
var _ fstests.InternalTester = (*Fs)(nil)


@@ -216,11 +216,11 @@ var ItemFields = mustFields(Item{})
// fields returns the JSON fields in use by opt as a | separated // fields returns the JSON fields in use by opt as a | separated
// string. // string.
func fields(opt any) (pipeTags string, err error) { func fields(opt interface{}) (pipeTags string, err error) {
var tags []string var tags []string
def := reflect.ValueOf(opt) def := reflect.ValueOf(opt)
defType := def.Type() defType := def.Type()
for i := range def.NumField() { for i := 0; i < def.NumField(); i++ {
field := defType.Field(i) field := defType.Field(i)
tag, ok := field.Tag.Lookup("json") tag, ok := field.Tag.Lookup("json")
if !ok { if !ok {
@@ -239,7 +239,7 @@ func fields(opt any) (pipeTags string, err error) {
// mustFields returns the JSON fields in use by opt as a | separated // mustFields returns the JSON fields in use by opt as a | separated
// string. It panics on failure. // string. It panics on failure.
func mustFields(opt any) string { func mustFields(opt interface{}) string {
tags, err := fields(opt) tags, err := fields(opt)
if err != nil { if err != nil {
panic(err) panic(err)
@@ -351,12 +351,12 @@ type SpaceInfo struct {
// DeleteResponse is returned from doDeleteFile // DeleteResponse is returned from doDeleteFile
type DeleteResponse struct { type DeleteResponse struct {
Status Status
Deleted []string `json:"deleted"` Deleted []string `json:"deleted"`
Errors []any `json:"errors"` Errors []interface{} `json:"errors"`
ID string `json:"fi_id"` ID string `json:"fi_id"`
BackgroundTask int `json:"backgroundtask"` BackgroundTask int `json:"backgroundtask"`
UsSize string `json:"us_size"` UsSize string `json:"us_size"`
PaSize string `json:"pa_size"` PaSize string `json:"pa_size"`
//SpaceInfo SpaceInfo `json:"spaceinfo"` //SpaceInfo SpaceInfo `json:"spaceinfo"`
} }
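The fields helper above collects the json struct tags by reflection and joins them with "|" to build the field lists sent to the File Fabric API. A minimal sketch of that behaviour with a hypothetical struct (tag options such as ",omitempty" are not handled here):

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// fieldsDemo gathers the json tag of every struct field and joins them
// with "|", in the same spirit as the fields helper above.
func fieldsDemo(opt any) string {
	var tags []string
	def := reflect.ValueOf(opt)
	defType := def.Type()
	for i := 0; i < def.NumField(); i++ {
		if tag, ok := defType.Field(i).Tag.Lookup("json"); ok {
			tags = append(tags, tag)
		}
	}
	return strings.Join(tags, "|")
}

func main() {
	// Hypothetical struct, used only to show the output shape.
	type item struct {
		ID   string `json:"fi_id"`
		Name string `json:"fi_name"`
		Size int64  `json:"fi_size"`
	}
	fmt.Println(fieldsDemo(item{})) // fi_id|fi_name|fi_size
}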


@@ -371,7 +371,7 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
} }
// params for rpc // params for rpc
type params map[string]any type params map[string]interface{}
// rpc calls the rpc.php method of the SME file fabric // rpc calls the rpc.php method of the SME file fabric
// //


@@ -1,81 +0,0 @@
// Package api defines types for interacting with the FileLu API.
package api
import "encoding/json"
// CreateFolderResponse represents the response for creating a folder.
type CreateFolderResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
Result struct {
FldID any `json:"fld_id"`
} `json:"result"`
}
// DeleteFolderResponse represents the response for deleting a folder.
type DeleteFolderResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
}
// FolderListResponse represents the response for listing folders.
type FolderListResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
Result struct {
Files []struct {
Name string `json:"name"`
FldID json.Number `json:"fld_id"`
Path string `json:"path"`
FileCode string `json:"file_code"`
Size int64 `json:"size"`
} `json:"files"`
Folders []struct {
Name string `json:"name"`
FldID json.Number `json:"fld_id"`
Path string `json:"path"`
} `json:"folders"`
} `json:"result"`
}
// FileDirectLinkResponse represents the response for a direct link to a file.
type FileDirectLinkResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
Result struct {
URL string `json:"url"`
Size int64 `json:"size"`
} `json:"result"`
}
// FileInfoResponse represents the response for file information.
type FileInfoResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
Result []struct {
Size string `json:"size"`
Name string `json:"name"`
FileCode string `json:"filecode"`
Hash string `json:"hash"`
Status int `json:"status"`
} `json:"result"`
}
// DeleteFileResponse represents the response for deleting a file.
type DeleteFileResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
}
// AccountInfoResponse represents the response for account information.
type AccountInfoResponse struct {
Status int `json:"status"` // HTTP status code of the response.
Msg string `json:"msg"` // Message describing the response.
Result struct {
PremiumExpire string `json:"premium_expire"` // Expiration date of premium access.
Email string `json:"email"` // User's email address.
UType string `json:"utype"` // User type (e.g., premium or free).
Storage string `json:"storage"` // Total storage available to the user.
StorageUsed string `json:"storage_used"` // Amount of storage used.
} `json:"result"` // Nested result structure containing account details.
}
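These types mirror the JSON returned by the FileLu endpoints. A small sketch of decoding a folder listing into FolderListResponse, assuming the api package above; the payload is invented for illustration:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/rclone/rclone/backend/filelu/api"
)

func main() {
	// Hypothetical API payload shaped like FolderListResponse above.
	payload := []byte(`{
		"status": 200,
		"msg": "OK",
		"result": {
			"files":   [{"name": "notes.txt", "fld_id": 12, "path": "/docs", "file_code": "abc123def456", "size": 42}],
			"folders": [{"name": "docs", "fld_id": 12, "path": "/docs"}]
		}
	}`)

	var resp api.FolderListResponse
	if err := json.Unmarshal(payload, &resp); err != nil {
		panic(err)
	}
	fmt.Println(resp.Result.Folders[0].Path)         // /docs
	fmt.Println(resp.Result.Files[0].FldID.String()) // 12
}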


@@ -1,366 +0,0 @@
// Package filelu provides an interface to the FileLu storage system.
package filelu
import (
"context"
"fmt"
"io"
"net/http"
"os"
"path"
"strings"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
// Register the backend with Rclone
func init() {
fs.Register(&fs.RegInfo{
Name: "filelu",
Description: "FileLu Cloud Storage",
NewFs: NewFs,
Options: []fs.Option{{
Name: "key",
Help: "Your FileLu Rclone key from My Account",
Required: true,
Sensitive: true,
},
{
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.Base | // Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
encoder.EncodeSlash |
encoder.EncodeLtGt |
encoder.EncodeExclamation |
encoder.EncodeDoubleQuote |
encoder.EncodeSingleQuote |
encoder.EncodeBackQuote |
encoder.EncodeQuestion |
encoder.EncodeDollar |
encoder.EncodeColon |
encoder.EncodeAsterisk |
encoder.EncodePipe |
encoder.EncodeHash |
encoder.EncodePercent |
encoder.EncodeBackSlash |
encoder.EncodeCrLf |
encoder.EncodeDel |
encoder.EncodeCtl |
encoder.EncodeLeftSpace |
encoder.EncodeLeftPeriod |
encoder.EncodeLeftTilde |
encoder.EncodeLeftCrLfHtVt |
encoder.EncodeRightPeriod |
encoder.EncodeRightCrLfHtVt |
encoder.EncodeSquareBracket |
encoder.EncodeSemicolon |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8 |
encoder.EncodeDot),
},
}})
}
// Options defines the configuration for the FileLu backend
type Options struct {
Key string `config:"key"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents the FileLu file system
type Fs struct {
name string
root string
opt Options
features *fs.Features
endpoint string
pacer *pacer.Pacer
srv *rest.Client
client *http.Client
targetFile string
}
// NewFs creates a new Fs object for FileLu
func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, fmt.Errorf("failed to parse config: %w", err)
}
if opt.Key == "" {
return nil, fmt.Errorf("FileLu Rclone Key is required")
}
client := fshttp.NewClient(ctx)
if strings.TrimSpace(root) == "" {
root = ""
}
root = strings.Trim(root, "/")
filename := ""
f := &Fs{
name: name,
opt: *opt,
endpoint: "https://filelu.com/rclone",
client: client,
srv: rest.NewClient(client).SetRoot("https://filelu.com/rclone"),
pacer: pacer.New(),
targetFile: filename,
root: root,
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
WriteMetadata: false,
SlowHash: true,
}).Fill(ctx, f)
rootContainer, rootDirectory := rootSplit(f.root)
if rootContainer != "" && rootDirectory != "" {
// Check to see if the (container,directory) is actually an existing file
oldRoot := f.root
newRoot, leaf := path.Split(oldRoot)
f.root = strings.Trim(newRoot, "/")
_, err := f.NewObject(ctx, leaf)
if err != nil {
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
// File doesn't exist or is a directory so return old f
f.root = strings.Trim(oldRoot, "/")
return f, nil
}
return nil, err
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
// Mkdir to create directory on remote server.
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
fullPath := path.Clean(f.root + "/" + dir)
_, err := f.createFolder(ctx, fullPath)
return err
}
// About provides usage statistics for the remote
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
accountInfo, err := f.getAccountInfo(ctx)
if err != nil {
return nil, err
}
totalStorage, err := parseStorageToBytes(accountInfo.Result.Storage)
if err != nil {
return nil, fmt.Errorf("failed to parse total storage: %w", err)
}
usedStorage, err := parseStorageToBytes(accountInfo.Result.StorageUsed)
if err != nil {
return nil, fmt.Errorf("failed to parse used storage: %w", err)
}
return &fs.Usage{
Total: fs.NewUsageValue(totalStorage), // Total bytes available
Used: fs.NewUsageValue(usedStorage), // Total bytes used
Free: fs.NewUsageValue(totalStorage - usedStorage),
}, nil
}
// Purge deletes the directory and all its contents
func (f *Fs) Purge(ctx context.Context, dir string) error {
fullPath := path.Join(f.root, dir)
if fullPath != "" {
fullPath = "/" + strings.Trim(fullPath, "/")
}
return f.deleteFolder(ctx, fullPath)
}
// List returns a list of files and folders for the given directory
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
// Compose full path for API call
fullPath := path.Join(f.root, dir)
fullPath = "/" + strings.Trim(fullPath, "/")
if fullPath == "/" {
fullPath = ""
}
var entries fs.DirEntries
result, err := f.getFolderList(ctx, fullPath)
if err != nil {
return nil, err
}
fldMap := map[string]bool{}
for _, folder := range result.Result.Folders {
fldMap[folder.FldID.String()] = true
if f.root == "" && dir == "" && strings.Contains(folder.Path, "/") {
continue
}
paths := strings.Split(folder.Path, fullPath+"/")
remote := paths[0]
if len(paths) > 1 {
remote = paths[1]
}
if strings.Contains(remote, "/") {
continue
}
pathsWithoutRoot := strings.Split(folder.Path, "/"+f.root+"/")
remotePathWithoutRoot := pathsWithoutRoot[0]
if len(pathsWithoutRoot) > 1 {
remotePathWithoutRoot = pathsWithoutRoot[1]
}
remotePathWithoutRoot = strings.TrimPrefix(remotePathWithoutRoot, "/")
entries = append(entries, fs.NewDir(remotePathWithoutRoot, time.Now()))
}
for _, file := range result.Result.Files {
if _, ok := fldMap[file.FldID.String()]; ok {
continue
}
remote := path.Join(dir, file.Name)
// trim leading slashes
remote = strings.TrimPrefix(remote, "/")
obj := &Object{
fs: f,
remote: remote,
size: file.Size,
modTime: time.Now(),
}
entries = append(entries, obj)
}
return entries, nil
}
// Put uploads a file directly to the destination folder in the FileLu storage system.
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if src.Size() == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
err := f.uploadFile(ctx, in, src.Remote())
if err != nil {
return nil, err
}
newObject := &Object{
fs: f,
remote: src.Remote(),
size: src.Size(),
modTime: src.ModTime(ctx),
}
fs.Infof(f, "Put: Successfully uploaded new file %q", src.Remote())
return newObject, nil
}
// Move moves the file to the specified location
func (f *Fs) Move(ctx context.Context, src fs.Object, destinationPath string) (fs.Object, error) {
if strings.HasPrefix(destinationPath, "/") || strings.Contains(destinationPath, ":\\") {
dir := path.Dir(destinationPath)
if err := os.MkdirAll(dir, 0755); err != nil {
return nil, fmt.Errorf("failed to create destination directory: %w", err)
}
reader, err := src.Open(ctx)
if err != nil {
return nil, fmt.Errorf("failed to open source file: %w", err)
}
defer func() {
if err := reader.Close(); err != nil {
fs.Logf(nil, "Failed to close file body: %v", err)
}
}()
dest, err := os.Create(destinationPath)
if err != nil {
return nil, fmt.Errorf("failed to create destination file: %w", err)
}
defer func() {
if err := dest.Close(); err != nil {
fs.Logf(nil, "Failed to close file body: %v", err)
}
}()
if _, err := io.Copy(dest, reader); err != nil {
return nil, fmt.Errorf("failed to copy file content: %w", err)
}
if err := src.Remove(ctx); err != nil {
return nil, fmt.Errorf("failed to remove source file: %w", err)
}
return nil, nil
}
reader, err := src.Open(ctx)
if err != nil {
return nil, fmt.Errorf("failed to open source object: %w", err)
}
defer func() {
if err := reader.Close(); err != nil {
fs.Logf(nil, "Failed to close file body: %v", err)
}
}()
err = f.uploadFile(ctx, reader, destinationPath)
if err != nil {
return nil, fmt.Errorf("failed to upload file to destination: %w", err)
}
if err := src.Remove(ctx); err != nil {
return nil, fmt.Errorf("failed to delete source file: %w", err)
}
return &Object{
fs: f,
remote: destinationPath,
size: src.Size(),
modTime: src.ModTime(ctx),
}, nil
}
// Rmdir removes a directory
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
fullPath := path.Join(f.root, dir)
if fullPath != "" {
fullPath = "/" + strings.Trim(fullPath, "/")
}
// Step 1: Check if folder is empty
listResp, err := f.getFolderList(ctx, fullPath)
if err != nil {
return err
}
if len(listResp.Result.Files) > 0 || len(listResp.Result.Folders) > 0 {
return fmt.Errorf("Rmdir: directory %q is not empty", fullPath)
}
// Step 2: Delete the folder
return f.deleteFolder(ctx, fullPath)
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
)


@@ -1,324 +0,0 @@
package filelu
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"github.com/rclone/rclone/backend/filelu/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/rest"
)
// createFolder creates a folder at the specified path.
func (f *Fs) createFolder(ctx context.Context, dirPath string) (*api.CreateFolderResponse, error) {
encodedDir := f.fromStandardPath(dirPath)
apiURL := fmt.Sprintf("%s/folder/create?folder_path=%s&key=%s",
f.endpoint,
url.QueryEscape(encodedDir),
url.QueryEscape(f.opt.Key), // assuming f.opt.Key is the correct field
)
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
if err != nil {
return nil, fmt.Errorf("failed to create request: %w", err)
}
var resp *http.Response
result := api.CreateFolderResponse{}
err = f.pacer.Call(func() (bool, error) {
var innerErr error
resp, innerErr = f.client.Do(req)
return fserrors.ShouldRetry(innerErr), innerErr
})
if err != nil {
return nil, fmt.Errorf("request failed: %w", err)
}
defer func() {
if err := resp.Body.Close(); err != nil {
fs.Logf(nil, "Failed to close response body: %v", err)
}
}()
err = json.NewDecoder(resp.Body).Decode(&result)
if err != nil {
return nil, fmt.Errorf("error decoding response: %w", err)
}
if result.Status != 200 {
return nil, fmt.Errorf("error: %s", result.Msg)
}
fs.Infof(f, "Successfully created folder %q with ID %v", dirPath, result.Result.FldID)
return &result, nil
}
// getFolderList List both files and folders in a directory.
func (f *Fs) getFolderList(ctx context.Context, path string) (*api.FolderListResponse, error) {
encodedDir := f.fromStandardPath(path)
apiURL := fmt.Sprintf("%s/folder/list?folder_path=%s&key=%s",
f.endpoint,
url.QueryEscape(encodedDir),
url.QueryEscape(f.opt.Key),
)
var body []byte
err := f.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
if err != nil {
return false, fmt.Errorf("failed to create request: %w", err)
}
resp, err := f.client.Do(req)
if err != nil {
return shouldRetry(err), fmt.Errorf("failed to list directory: %w", err)
}
defer func() {
if err := resp.Body.Close(); err != nil {
fs.Logf(nil, "Failed to close response body: %v", err)
}
}()
body, err = io.ReadAll(resp.Body)
if err != nil {
return false, fmt.Errorf("error reading response body: %w", err)
}
return shouldRetryHTTP(resp.StatusCode), nil
})
if err != nil {
return nil, err
}
var response api.FolderListResponse
if err := json.NewDecoder(bytes.NewReader(body)).Decode(&response); err != nil {
return nil, fmt.Errorf("error decoding response: %w", err)
}
if response.Status != 200 {
if strings.Contains(response.Msg, "Folder not found") {
return nil, fs.ErrorDirNotFound
}
return nil, fmt.Errorf("API error: %s", response.Msg)
}
for index := range response.Result.Folders {
response.Result.Folders[index].Path = f.toStandardPath(response.Result.Folders[index].Path)
}
for index := range response.Result.Files {
response.Result.Files[index].Name = f.toStandardPath(response.Result.Files[index].Name)
}
return &response, nil
}
// deleteFolder deletes a folder at the specified path.
func (f *Fs) deleteFolder(ctx context.Context, fullPath string) error {
fullPath = f.fromStandardPath(fullPath)
deleteURL := fmt.Sprintf("%s/folder/delete?folder_path=%s&key=%s",
f.endpoint,
url.QueryEscape(fullPath),
url.QueryEscape(f.opt.Key),
)
delResp := api.DeleteFolderResponse{}
err := f.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, "GET", deleteURL, nil)
if err != nil {
return false, err
}
resp, err := f.client.Do(req)
if err != nil {
return fserrors.ShouldRetry(err), err
}
defer func() {
if err := resp.Body.Close(); err != nil {
fs.Logf(nil, "Failed to close response body: %v", err)
}
}()
body, err := io.ReadAll(resp.Body)
if err != nil {
return false, err
}
if err := json.Unmarshal(body, &delResp); err != nil {
return false, fmt.Errorf("error decoding delete response: %w", err)
}
if delResp.Status != 200 {
return false, fmt.Errorf("delete error: %s", delResp.Msg)
}
return false, nil
})
if err != nil {
return err
}
fs.Infof(f, "Rmdir: successfully deleted %q", fullPath)
return nil
}
// getDirectLink of file from FileLu to download.
func (f *Fs) getDirectLink(ctx context.Context, filePath string) (string, int64, error) {
filePath = f.fromStandardPath(filePath)
apiURL := fmt.Sprintf("%s/file/direct_link?file_path=%s&key=%s",
f.endpoint,
url.QueryEscape(filePath),
url.QueryEscape(f.opt.Key),
)
result := api.FileDirectLinkResponse{}
err := f.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
if err != nil {
return false, fmt.Errorf("failed to create request: %w", err)
}
resp, err := f.client.Do(req)
if err != nil {
return shouldRetry(err), fmt.Errorf("failed to fetch direct link: %w", err)
}
defer func() {
if err := resp.Body.Close(); err != nil {
fs.Logf(nil, "Failed to close response body: %v", err)
}
}()
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return false, fmt.Errorf("error decoding response: %w", err)
}
if result.Status != 200 {
return false, fmt.Errorf("API error: %s", result.Msg)
}
return shouldRetryHTTP(resp.StatusCode), nil
})
if err != nil {
return "", 0, err
}
return result.Result.URL, result.Result.Size, nil
}
// deleteFile deletes a file based on filePath
func (f *Fs) deleteFile(ctx context.Context, filePath string) error {
filePath = f.fromStandardPath(filePath)
apiURL := fmt.Sprintf("%s/file/remove?file_path=%s&key=%s",
f.endpoint,
url.QueryEscape(filePath),
url.QueryEscape(f.opt.Key),
)
result := api.DeleteFileResponse{}
err := f.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
if err != nil {
return false, fmt.Errorf("failed to create request: %w", err)
}
resp, err := f.client.Do(req)
if err != nil {
return shouldRetry(err), fmt.Errorf("failed to fetch direct link: %w", err)
}
defer func() {
if err := resp.Body.Close(); err != nil {
fs.Logf(nil, "Failed to close response body: %v", err)
}
}()
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return false, fmt.Errorf("error decoding response: %w", err)
}
if result.Status != 200 {
return false, fmt.Errorf("API error: %s", result.Msg)
}
return shouldRetryHTTP(resp.StatusCode), nil
})
return err
}
// getAccountInfo retrieves account information
func (f *Fs) getAccountInfo(ctx context.Context) (*api.AccountInfoResponse, error) {
opts := rest.Opts{
Method: "GET",
Path: "/account/info",
Parameters: url.Values{
"key": {f.opt.Key},
},
}
var result api.AccountInfoResponse
err := f.pacer.Call(func() (bool, error) {
_, callErr := f.srv.CallJSON(ctx, &opts, nil, &result)
return fserrors.ShouldRetry(callErr), callErr
})
if err != nil {
return nil, err
}
if result.Status != 200 {
return nil, fmt.Errorf("error: %s", result.Msg)
}
return &result, nil
}
// getFileInfo retrieves file information based on file code
func (f *Fs) getFileInfo(ctx context.Context, fileCode string) (*api.FileInfoResponse, error) {
u, _ := url.Parse(f.endpoint + "/file/info2")
q := u.Query()
q.Set("file_code", fileCode) // raw path — Go handles escaping properly here
q.Set("key", f.opt.Key)
u.RawQuery = q.Encode()
apiURL := f.endpoint + "/file/info2?" + u.RawQuery
var body []byte
err := f.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
if err != nil {
return false, fmt.Errorf("failed to create request: %w", err)
}
resp, err := f.client.Do(req)
if err != nil {
return shouldRetry(err), fmt.Errorf("failed to fetch file info: %w", err)
}
defer func() {
if err := resp.Body.Close(); err != nil {
fs.Logf(nil, "Failed to close response body: %v", err)
}
}()
body, err = io.ReadAll(resp.Body)
if err != nil {
return false, fmt.Errorf("error reading response body: %w", err)
}
return shouldRetryHTTP(resp.StatusCode), nil
})
if err != nil {
return nil, err
}
result := api.FileInfoResponse{}
if err := json.NewDecoder(bytes.NewReader(body)).Decode(&result); err != nil {
return nil, fmt.Errorf("error decoding response: %w", err)
}
if result.Status != 200 || len(result.Result) == 0 {
return nil, fs.ErrorObjectNotFound
}
return &result, nil
}


@@ -1,193 +0,0 @@
package filelu
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"mime/multipart"
"net/http"
"net/url"
"path"
"strings"
"github.com/rclone/rclone/fs"
)
// uploadFile uploads a file to FileLu
func (f *Fs) uploadFile(ctx context.Context, fileContent io.Reader, fileFullPath string) error {
directory := path.Dir(fileFullPath)
fileName := path.Base(fileFullPath)
if directory == "." {
directory = ""
}
destinationFolderPath := path.Join(f.root, directory)
if destinationFolderPath != "" {
destinationFolderPath = "/" + strings.Trim(destinationFolderPath, "/")
}
existingEntries, err := f.List(ctx, path.Dir(fileFullPath))
if err != nil {
if errors.Is(err, fs.ErrorDirNotFound) {
err = f.Mkdir(ctx, path.Dir(fileFullPath))
if err != nil {
return fmt.Errorf("failed to create directory: %w", err)
}
} else {
return fmt.Errorf("failed to list existing files: %w", err)
}
}
for _, entry := range existingEntries {
if entry.Remote() == fileFullPath {
_, ok := entry.(fs.Object)
if !ok {
continue
}
// If the file exists but is different, remove it
filePath := "/" + strings.Trim(destinationFolderPath+"/"+fileName, "/")
err = f.deleteFile(ctx, filePath)
if err != nil {
return fmt.Errorf("failed to delete existing file: %w", err)
}
}
}
uploadURL, sessID, err := f.getUploadServer(ctx)
if err != nil {
return fmt.Errorf("failed to retrieve upload server: %w", err)
}
// Since the fileCode isn't used, just handle the error
if _, err := f.uploadFileWithDestination(ctx, uploadURL, sessID, fileName, fileContent, destinationFolderPath); err != nil {
return fmt.Errorf("failed to upload file: %w", err)
}
return nil
}
// getUploadServer gets the upload server URL with proper key authentication
func (f *Fs) getUploadServer(ctx context.Context) (string, string, error) {
apiURL := fmt.Sprintf("%s/upload/server?key=%s", f.endpoint, url.QueryEscape(f.opt.Key))
var result struct {
Status int `json:"status"`
SessID string `json:"sess_id"`
Result string `json:"result"`
Msg string `json:"msg"`
}
err := f.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
if err != nil {
return false, fmt.Errorf("failed to create request: %w", err)
}
resp, err := f.client.Do(req)
if err != nil {
return shouldRetry(err), fmt.Errorf("failed to get upload server: %w", err)
}
defer func() {
if err := resp.Body.Close(); err != nil {
fs.Logf(nil, "Failed to close response body: %v", err)
}
}()
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return false, fmt.Errorf("error decoding response: %w", err)
}
if result.Status != 200 {
return false, fmt.Errorf("API error: %s", result.Msg)
}
return shouldRetryHTTP(resp.StatusCode), nil
})
if err != nil {
return "", "", err
}
return result.Result, result.SessID, nil
}
// uploadFileWithDestination uploads a file directly to a specified folder using file content reader.
func (f *Fs) uploadFileWithDestination(ctx context.Context, uploadURL, sessID, fileName string, fileContent io.Reader, dirPath string) (string, error) {
destinationPath := f.fromStandardPath(dirPath)
encodedFileName := f.fromStandardPath(fileName)
pr, pw := io.Pipe()
writer := multipart.NewWriter(pw)
isDeletionRequired := false
go func() {
defer func() {
if err := pw.Close(); err != nil {
fs.Logf(nil, "Failed to close: %v", err)
}
}()
_ = writer.WriteField("sess_id", sessID)
_ = writer.WriteField("utype", "prem")
_ = writer.WriteField("fld_path", destinationPath)
part, err := writer.CreateFormFile("file_0", encodedFileName)
if err != nil {
pw.CloseWithError(fmt.Errorf("failed to create form file: %w", err))
return
}
if _, err := io.Copy(part, fileContent); err != nil {
isDeletionRequired = true
pw.CloseWithError(fmt.Errorf("failed to copy file content: %w", err))
return
}
if err := writer.Close(); err != nil {
pw.CloseWithError(fmt.Errorf("failed to close writer: %w", err))
}
}()
var fileCode string
err := f.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, "POST", uploadURL, pr)
if err != nil {
return false, fmt.Errorf("failed to create upload request: %w", err)
}
req.Header.Set("Content-Type", writer.FormDataContentType())
resp, err := f.client.Do(req)
if err != nil {
return shouldRetry(err), fmt.Errorf("failed to send upload request: %w", err)
}
defer respBodyClose(resp.Body)
var result []struct {
FileCode string `json:"file_code"`
FileStatus string `json:"file_status"`
}
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return false, fmt.Errorf("failed to parse upload response: %w", err)
}
if len(result) == 0 || result[0].FileStatus != "OK" {
return false, fmt.Errorf("upload failed with status: %s", result[0].FileStatus)
}
fileCode = result[0].FileCode
return shouldRetryHTTP(resp.StatusCode), nil
})
if err != nil && isDeletionRequired {
// Attempt to delete the file if upload fails
_ = f.deleteFile(ctx, destinationPath+"/"+fileName)
}
return fileCode, err
}
// respBodyClose to check body response.
func respBodyClose(responseBody io.Closer) {
if cerr := responseBody.Close(); cerr != nil {
fmt.Printf("Error closing response body: %v\n", cerr)
}
}


@@ -1,112 +0,0 @@
package filelu
import (
"context"
"errors"
"fmt"
"path"
"strings"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
)
// errFileNotFound represent file not found error
var errFileNotFound = errors.New("file not found")
// getFileCode retrieves the file code for a given file path
func (f *Fs) getFileCode(ctx context.Context, filePath string) (string, error) {
// Prepare parent directory
parentDir := path.Dir(filePath)
// Call List to get all the files
result, err := f.getFolderList(ctx, parentDir)
if err != nil {
return "", err
}
for _, file := range result.Result.Files {
filePathFromServer := parentDir + "/" + file.Name
if parentDir == "/" {
filePathFromServer = "/" + file.Name
}
if filePath == filePathFromServer {
return file.FileCode, nil
}
}
return "", errFileNotFound
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
func (f *Fs) fromStandardPath(remote string) string {
return f.opt.Enc.FromStandardPath(remote)
}
func (f *Fs) toStandardPath(remote string) string {
return f.opt.Enc.ToStandardPath(remote)
}
// Hashes returns an empty hash set, indicating no hash support
func (f *Fs) Hashes() hash.Set {
return hash.NewHashSet() // Properly creates an empty hash set
}
// Name returns the remote name
func (f *Fs) Name() string {
return f.name
}
// Root returns the root path
func (f *Fs) Root() string {
return f.root
}
// Precision returns the precision of the remote
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
func (f *Fs) String() string {
return fmt.Sprintf("FileLu root '%s'", f.root)
}
// isFileCode checks if a string looks like a file code
func isFileCode(s string) bool {
if len(s) != 12 {
return false
}
for _, c := range s {
if !((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9')) {
return false
}
}
return true
}
func shouldRetry(err error) bool {
return fserrors.ShouldRetry(err)
}
func shouldRetryHTTP(code int) bool {
return code == 429 || code >= 500
}
func rootSplit(absPath string) (bucket, bucketPath string) {
// No bucket
if absPath == "" {
return "", ""
}
slash := strings.IndexRune(absPath, '/')
// Bucket but no path
if slash < 0 {
return absPath, ""
}
return absPath[:slash], absPath[slash+1:]
}
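rootSplit only separates the first path segment from the remainder. A quick illustration with arbitrary example paths:

package main

import (
	"fmt"
	"strings"
)

// rootSplitDemo reproduces the rootSplit logic above for illustration.
func rootSplitDemo(absPath string) (bucket, bucketPath string) {
	if absPath == "" {
		return "", ""
	}
	slash := strings.IndexRune(absPath, '/')
	if slash < 0 {
		return absPath, ""
	}
	return absPath[:slash], absPath[slash+1:]
}

func main() {
	fmt.Println(rootSplitDemo(""))                 // ("", "")
	fmt.Println(rootSplitDemo("backups"))          // ("backups", "")
	fmt.Println(rootSplitDemo("backups/2024/jan")) // ("backups", "2024/jan")
}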


@@ -1,259 +0,0 @@
package filelu
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"path"
"regexp"
"strconv"
"strings"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
)
// Object describes a FileLu object
type Object struct {
fs *Fs
remote string
size int64
modTime time.Time
}
// NewObject creates a new Object for the given remote path
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
var filePath string
filePath = path.Join(f.root, remote)
filePath = "/" + strings.Trim(filePath, "/")
// Get File code
fileCode, err := f.getFileCode(ctx, filePath)
if err != nil {
return nil, fs.ErrorObjectNotFound
}
// Get File info
fileInfos, err := f.getFileInfo(ctx, fileCode)
if err != nil {
return nil, fmt.Errorf("failed to get file info: %w", err)
}
fileInfo := fileInfos.Result[0]
size, _ := strconv.ParseInt(fileInfo.Size, 10, 64)
returnedRemote := remote
return &Object{
fs: f,
remote: returnedRemote,
size: size,
modTime: time.Now(),
}, nil
}
// Open opens the object for reading
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
filePath := path.Join(o.fs.root, o.remote)
// Get direct link
directLink, size, err := o.fs.getDirectLink(ctx, filePath)
if err != nil {
return nil, fmt.Errorf("failed to get direct link: %w", err)
}
o.size = size
// Offset and Count for range download
var offset int64
var count int64
fs.FixRangeOption(options, o.size)
for _, option := range options {
switch x := option.(type) {
case *fs.RangeOption:
offset, count = x.Decode(o.size)
if count < 0 {
count = o.size - offset
}
case *fs.SeekOption:
offset = x.Offset
count = o.size
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
var reader io.ReadCloser
err = o.fs.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, "GET", directLink, nil)
if err != nil {
return false, fmt.Errorf("failed to create download request: %w", err)
}
resp, err := o.fs.client.Do(req)
if err != nil {
return shouldRetry(err), fmt.Errorf("failed to download file: %w", err)
}
if resp.StatusCode != http.StatusOK {
defer func() {
if err := resp.Body.Close(); err != nil {
fs.Logf(nil, "Failed to close response body: %v", err)
}
}()
return false, fmt.Errorf("failed to download file: HTTP %d", resp.StatusCode)
}
// Wrap the response body to handle offset and count
currentContents, err := io.ReadAll(resp.Body)
if err != nil {
return false, fmt.Errorf("failed to read response body: %w", err)
}
if offset > 0 {
if offset > int64(len(currentContents)) {
return false, fmt.Errorf("offset %d exceeds file size %d", offset, len(currentContents))
}
currentContents = currentContents[offset:]
}
if count > 0 && count < int64(len(currentContents)) {
currentContents = currentContents[:count]
}
reader = io.NopCloser(bytes.NewReader(currentContents))
return false, nil
})
if err != nil {
return nil, err
}
return reader, nil
}
// Update updates the object with new data
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
if src.Size() <= 0 {
return fs.ErrorCantUploadEmptyFiles
}
err := o.fs.uploadFile(ctx, in, o.remote)
if err != nil {
return fmt.Errorf("failed to upload file: %w", err)
}
o.size = src.Size()
return nil
}
// Remove deletes the object from FileLu
func (o *Object) Remove(ctx context.Context) error {
fullPath := "/" + strings.Trim(path.Join(o.fs.root, o.remote), "/")
err := o.fs.deleteFile(ctx, fullPath)
if err != nil {
return err
}
fs.Infof(o.fs, "Successfully deleted file: %s", fullPath)
return nil
}
// Hash returns the MD5 hash of an object
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
var fileCode string
if isFileCode(o.fs.root) {
fileCode = o.fs.root
} else {
matches := regexp.MustCompile(`\((.*?)\)`).FindAllStringSubmatch(o.remote, -1)
for _, match := range matches {
if len(match) > 1 && len(match[1]) == 12 {
fileCode = match[1]
break
}
}
}
if fileCode == "" {
return "", fmt.Errorf("no valid file code found in the remote path")
}
apiURL := fmt.Sprintf("%s/file/info?file_code=%s&key=%s",
o.fs.endpoint, url.QueryEscape(fileCode), url.QueryEscape(o.fs.opt.Key))
var result struct {
Status int `json:"status"`
Msg string `json:"msg"`
Result []struct {
Hash string `json:"hash"`
} `json:"result"`
}
err := o.fs.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
if err != nil {
return false, err
}
resp, err := o.fs.client.Do(req)
if err != nil {
return shouldRetry(err), err
}
defer func() {
if err := resp.Body.Close(); err != nil {
fs.Logf(nil, "Failed to close response body: %v", err)
}
}()
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return false, err
}
return shouldRetryHTTP(resp.StatusCode), nil
})
if err != nil {
return "", err
}
if result.Status != 200 || len(result.Result) == 0 {
return "", fmt.Errorf("error: unable to fetch hash: %s", result.Msg)
}
return result.Result[0].Hash, nil
}
// String returns a string representation of the object
func (o *Object) String() string {
return o.remote
}
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Size returns the size of the object
func (o *Object) Size() int64 {
return o.size
}
// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// SetModTime sets the modification time of the object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return fs.ErrorCantSetModTime
}
// Storable indicates whether the object is storable
func (o *Object) Storable() bool {
return true
}
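The Hash method above falls back to scanning the remote name for a 12-character file code, and isFileCode (defined in the helpers file) accepts only lowercase letters and digits. A few sample checks with invented inputs:

package main

import "fmt"

// isFileCodeDemo mirrors the isFileCode helper shown earlier: exactly 12
// characters, each a lowercase letter or digit.
func isFileCodeDemo(s string) bool {
	if len(s) != 12 {
		return false
	}
	for _, c := range s {
		if !((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9')) {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(isFileCodeDemo("abc123def456")) // true
	fmt.Println(isFileCodeDemo("ABC123DEF456")) // false: uppercase
	fmt.Println(isFileCodeDemo("abc123"))       // false: too short
}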


@@ -1,16 +0,0 @@
package filelu_test
import (
"testing"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests for the FileLu backend
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFileLu:",
NilObject: nil,
SkipInvalidUTF8: true,
})
}


@@ -1,15 +0,0 @@
package filelu
import (
"fmt"
)
// parseStorageToBytes converts a storage string (e.g., "10") to bytes
func parseStorageToBytes(storage string) (int64, error) {
var gb float64
_, err := fmt.Sscanf(storage, "%f", &gb)
if err != nil {
return 0, fmt.Errorf("failed to parse storage: %w", err)
}
return int64(gb * 1024 * 1024 * 1024), nil
}
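parseStorageToBytes treats the account storage figures as GB values parsed with %f. A quick check of the conversion with sample values:

package main

import "fmt"

// parseStorageToBytesDemo mirrors the helper above: a GB figure parsed
// with %f and scaled to bytes.
func parseStorageToBytesDemo(storage string) (int64, error) {
	var gb float64
	if _, err := fmt.Sscanf(storage, "%f", &gb); err != nil {
		return 0, fmt.Errorf("failed to parse storage: %w", err)
	}
	return int64(gb * 1024 * 1024 * 1024), nil
}

func main() {
	for _, s := range []string{"10", "0.5"} {
		b, err := parseStorageToBytesDemo(s)
		fmt.Println(s, b, err) // 10 -> 10737418240, 0.5 -> 536870912
	}
}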


@@ -10,7 +10,6 @@ import (
"net/http" "net/http"
"net/url" "net/url"
"path" "path"
"slices"
"strings" "strings"
"time" "time"
@@ -170,9 +169,11 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
} }
if apiErr, ok := err.(files_sdk.ResponseError); ok { if apiErr, ok := err.(files_sdk.ResponseError); ok {
if slices.Contains(retryErrorCodes, apiErr.HttpCode) { for _, e := range retryErrorCodes {
fs.Debugf(nil, "Retrying API error %v", err) if apiErr.HttpCode == e {
return true, err fs.Debugf(nil, "Retrying API error %v", err)
return true, err
}
} }
} }


@@ -9,7 +9,6 @@ import (
"io" "io"
"net" "net"
"net/textproto" "net/textproto"
"net/url"
"path" "path"
"runtime" "runtime"
"strings" "strings"
@@ -163,16 +162,6 @@ Enabled by default. Use 0 to disable.`,
Help: "Disable TLS 1.3 (workaround for FTP servers with buggy TLS)", Help: "Disable TLS 1.3 (workaround for FTP servers with buggy TLS)",
Default: false, Default: false,
Advanced: true, Advanced: true,
}, {
Name: "allow_insecure_tls_ciphers",
Help: `Allow insecure TLS ciphers
Setting this flag will allow the usage of the following TLS ciphers in addition to the secure defaults:
- TLS_RSA_WITH_AES_128_GCM_SHA256
`,
Default: false,
Advanced: true,
}, { }, {
Name: "shut_timeout", Name: "shut_timeout",
Help: "Maximum time to wait for data connection closing status.", Help: "Maximum time to wait for data connection closing status.",
@@ -196,14 +185,6 @@ Supports the format user:pass@host:port, user@host:port, host:port.
Example: Example:
myUser:myPass@localhost:9005 myUser:myPass@localhost:9005
`,
Advanced: true,
}, {
Name: "http_proxy",
Default: "",
Help: `URL for HTTP CONNECT proxy
Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.
`, `,
Advanced: true, Advanced: true,
}, { }, {
@@ -246,30 +227,28 @@ a write only folder.
// Options defines the configuration for this backend // Options defines the configuration for this backend
type Options struct { type Options struct {
Host string `config:"host"` Host string `config:"host"`
User string `config:"user"` User string `config:"user"`
Pass string `config:"pass"` Pass string `config:"pass"`
Port string `config:"port"` Port string `config:"port"`
TLS bool `config:"tls"` TLS bool `config:"tls"`
ExplicitTLS bool `config:"explicit_tls"` ExplicitTLS bool `config:"explicit_tls"`
TLSCacheSize int `config:"tls_cache_size"` TLSCacheSize int `config:"tls_cache_size"`
DisableTLS13 bool `config:"disable_tls13"` DisableTLS13 bool `config:"disable_tls13"`
AllowInsecureTLSCiphers bool `config:"allow_insecure_tls_ciphers"` Concurrency int `config:"concurrency"`
Concurrency int `config:"concurrency"` SkipVerifyTLSCert bool `config:"no_check_certificate"`
SkipVerifyTLSCert bool `config:"no_check_certificate"` DisableEPSV bool `config:"disable_epsv"`
DisableEPSV bool `config:"disable_epsv"` DisableMLSD bool `config:"disable_mlsd"`
DisableMLSD bool `config:"disable_mlsd"` DisableUTF8 bool `config:"disable_utf8"`
DisableUTF8 bool `config:"disable_utf8"` WritingMDTM bool `config:"writing_mdtm"`
WritingMDTM bool `config:"writing_mdtm"` ForceListHidden bool `config:"force_list_hidden"`
ForceListHidden bool `config:"force_list_hidden"` IdleTimeout fs.Duration `config:"idle_timeout"`
IdleTimeout fs.Duration `config:"idle_timeout"` CloseTimeout fs.Duration `config:"close_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"` ShutTimeout fs.Duration `config:"shut_timeout"`
ShutTimeout fs.Duration `config:"shut_timeout"` AskPassword bool `config:"ask_password"`
AskPassword bool `config:"ask_password"` Enc encoder.MultiEncoder `config:"encoding"`
Enc encoder.MultiEncoder `config:"encoding"` SocksProxy string `config:"socks_proxy"`
SocksProxy string `config:"socks_proxy"` NoCheckUpload bool `config:"no_check_upload"`
HTTPProxy string `config:"http_proxy"`
NoCheckUpload bool `config:"no_check_upload"`
} }
// Fs represents a remote FTP server // Fs represents a remote FTP server
@@ -283,12 +262,10 @@ type Fs struct {
user string user string
pass string pass string
dialAddr string dialAddr string
tlsConf *tls.Config // default TLS client config
poolMu sync.Mutex poolMu sync.Mutex
pool []*ftp.ServerConn pool []*ftp.ServerConn
drain *time.Timer // used to drain the pool when we stop using the connections drain *time.Timer // used to drain the pool when we stop using the connections
tokens *pacer.TokenDispenser tokens *pacer.TokenDispenser
proxyURL *url.URL // address of HTTP proxy read from environment
pacer *fs.Pacer // pacer for FTP connections pacer *fs.Pacer // pacer for FTP connections
fGetTime bool // true if the ftp library accepts GetTime fGetTime bool // true if the ftp library accepts GetTime
fSetTime bool // true if the ftp library accepts SetTime fSetTime bool // true if the ftp library accepts SetTime
@@ -409,14 +386,9 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
func (f *Fs) tlsConfig() *tls.Config { func (f *Fs) tlsConfig() *tls.Config {
var tlsConfig *tls.Config var tlsConfig *tls.Config
if f.opt.TLS || f.opt.ExplicitTLS { if f.opt.TLS || f.opt.ExplicitTLS {
if f.tlsConf != nil { tlsConfig = &tls.Config{
tlsConfig = f.tlsConf.Clone() ServerName: f.opt.Host,
} else { InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
tlsConfig = new(tls.Config)
}
tlsConfig.ServerName = f.opt.Host
if f.opt.SkipVerifyTLSCert {
tlsConfig.InsecureSkipVerify = true
} }
if f.opt.TLSCacheSize > 0 { if f.opt.TLSCacheSize > 0 {
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(f.opt.TLSCacheSize) tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(f.opt.TLSCacheSize)
@@ -424,14 +396,6 @@ func (f *Fs) tlsConfig() *tls.Config {
if f.opt.DisableTLS13 { if f.opt.DisableTLS13 {
tlsConfig.MaxVersion = tls.VersionTLS12 tlsConfig.MaxVersion = tls.VersionTLS12
} }
if f.opt.AllowInsecureTLSCiphers {
var ids []uint16
// Read default ciphers
for _, cs := range tls.CipherSuites() {
ids = append(ids, cs.ID)
}
tlsConfig.CipherSuites = append(ids, tls.TLS_RSA_WITH_AES_128_GCM_SHA256)
}
} }
return tlsConfig return tlsConfig
} }
@@ -449,28 +413,11 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
dial := func(network, address string) (conn net.Conn, err error) { dial := func(network, address string) (conn net.Conn, err error) {
fs.Debugf(f, "dial(%q,%q)", network, address) fs.Debugf(f, "dial(%q,%q)", network, address)
defer func() { defer func() {
if err != nil { fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
fs.Debugf(f, "> dial: conn=%v, err=%v", conn, err)
} else {
fs.Debugf(f, "> dial: conn=%s->%s, err=%v", conn.LocalAddr(), conn.RemoteAddr(), err)
}
}() }()
baseDialer := fshttp.NewDialer(ctx) baseDialer := fshttp.NewDialer(ctx)
if f.opt.SocksProxy != "" || f.proxyURL != nil { if f.opt.SocksProxy != "" {
// We need to make the onward connection to f.opt.Host. However the FTP conn, err = proxy.SOCKS5Dial(network, address, f.opt.SocksProxy, baseDialer)
// library sets the host to the proxy IP after using EPSV or PASV so we need
// to correct that here.
var dialPort string
_, dialPort, err = net.SplitHostPort(address)
if err != nil {
return nil, err
}
dialAddress := net.JoinHostPort(f.opt.Host, dialPort)
if f.opt.SocksProxy != "" {
conn, err = proxy.SOCKS5Dial(network, dialAddress, f.opt.SocksProxy, baseDialer)
} else {
conn, err = proxy.HTTPConnectDial(network, dialAddress, f.proxyURL, baseDialer)
}
} else { } else {
conn, err = baseDialer.Dial(network, address) conn, err = baseDialer.Dial(network, address)
} }
@@ -679,20 +626,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
dialAddr: dialAddr, dialAddr: dialAddr,
tokens: pacer.NewTokenDispenser(opt.Concurrency), tokens: pacer.NewTokenDispenser(opt.Concurrency),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
tlsConf: fshttp.NewTransport(ctx).TLSClientConfig,
} }
f.features = (&fs.Features{ f.features = (&fs.Features{
CanHaveEmptyDirectories: true, CanHaveEmptyDirectories: true,
PartialUploads: true, PartialUploads: true,
}).Fill(ctx, f) }).Fill(ctx, f)
// get proxy URL if set
if opt.HTTPProxy != "" {
proxyURL, err := url.Parse(opt.HTTPProxy)
if err != nil {
return nil, fmt.Errorf("failed to parse HTTP Proxy URL: %w", err)
}
f.proxyURL = proxyURL
}
// set the pool drainer timer going // set the pool drainer timer going
if f.opt.IdleTimeout > 0 { if f.opt.IdleTimeout > 0 {
f.drain = time.AfterFunc(time.Duration(opt.IdleTimeout), func() { _ = f.drainPool(ctx) }) f.drain = time.AfterFunc(time.Duration(opt.IdleTimeout), func() { _ = f.drainPool(ctx) })
@@ -1292,7 +1230,7 @@ func (f *ftpReadCloser) Close() error {
// See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257 // See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
if errX := textprotoError(err); errX != nil { if errX := textprotoError(err); errX != nil {
switch errX.Code { switch errX.Code {
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend, ftp.StatusRequestedFileActionOK: case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend:
err = nil err = nil
} }
} }
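The proxy dial code added above rewrites the address the FTP library reports after PASV/EPSV (which may point at the proxy) so the data connection is still made to the configured host. The rewrite is plain SplitHostPort/JoinHostPort; a sketch with hypothetical values:

package main

import (
	"fmt"
	"net"
)

func main() {
	const configuredHost = "ftp.example.com" // hypothetical f.opt.Host
	address := "10.0.0.5:30211"              // address reported after PASV, pointing at the proxy

	_, port, err := net.SplitHostPort(address)
	if err != nil {
		panic(err)
	}
	// Keep the advertised data port but dial the configured host through the proxy.
	fmt.Println(net.JoinHostPort(configuredHost, port)) // ftp.example.com:30211
}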


@@ -17,7 +17,7 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
type settings map[string]any type settings map[string]interface{}
func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs { func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
fsName := strings.Split(f.Name(), "{")[0] // strip off hash fsName := strings.Split(f.Name(), "{")[0] // strip off hash
@@ -52,7 +52,7 @@ func (f *Fs) testUploadTimeout(t *testing.T) {
ci.Timeout = saveTimeout ci.Timeout = saveTimeout
}() }()
ci.LowLevelRetries = 1 ci.LowLevelRetries = 1
ci.Timeout = fs.Duration(idleTimeout) ci.Timeout = idleTimeout
upload := func(concurrency int, shutTimeout time.Duration) (obj fs.Object, err error) { upload := func(concurrency int, shutTimeout time.Duration) (obj fs.Object, err error) {
fixFs := deriveFs(ctx, t, f, settings{ fixFs := deriveFs(ctx, t, f, settings{


@@ -194,9 +194,33 @@ type DeleteResponse struct {
Data map[string]Error Data map[string]Error
} }
// DirectUploadURL returns the direct upload URL for Gofile // Server is an upload server
func DirectUploadURL() string { type Server struct {
return "https://upload.gofile.io/uploadfile" Name string `json:"name"`
Zone string `json:"zone"`
}
// String returns a string representation of the Server
func (s *Server) String() string {
return fmt.Sprintf("%s (%s)", s.Name, s.Zone)
}
// Root returns the root URL for the server
func (s *Server) Root() string {
return fmt.Sprintf("https://%s.gofile.io/", s.Name)
}
// URL returns the upload URL for the server
func (s *Server) URL() string {
return fmt.Sprintf("https://%s.gofile.io/contents/uploadfile", s.Name)
}
// ServersResponse is the output from /servers
type ServersResponse struct {
Error
Data struct {
Servers []Server `json:"servers"`
} `json:"data"`
} }
// UploadResponse is returned by POST /contents/uploadfile // UploadResponse is returned by POST /contents/uploadfile
