Mirror of https://github.com/rclone/rclone.git, synced 2026-01-26 14:23:22 +00:00

Compare commits: fix-metada...feat/cache (1 commit)

| Author | SHA1 | Date |
|---|---|---|
| | 2804f5068a | |
212 .github/workflows/build_android.yml vendored Normal file
@@ -0,0 +1,212 @@
---
# Github Actions build for rclone
# -*- compile-command: "yamllint -f parsable build_android.yml" -*-

name: Build & Push Android Builds

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.ref || github.run_id }}
  cancel-in-progress: true

# Trigger the workflow on push or pull request
on:
  push:
    branches:
      - '**'
    tags:
      - '**'
  pull_request:
  workflow_dispatch:
    inputs:
      manual:
        description: Manual run (bypass default conditions)
        type: boolean
        default: true

jobs:
  android:
    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        include:
          - job_name: android-all
            platform: linux/amd64/android/go1.24
            os: ubuntu-latest
            go: '>=1.24.0-rc.1'

    name: ${{ matrix.job_name }}
    runs-on: ${{ matrix.os }}

    steps:
      - name: Checkout Repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      # Upgrade together with NDK version
      - name: Install Go
        uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go }}
          check-latest: true
          cache: false

      - name: Set Environment Variables
        shell: bash
        run: |
          echo "GOMODCACHE=$(go env GOMODCACHE)" >> $GITHUB_ENV
          echo "GOCACHE=$(go env GOCACHE)" >> $GITHUB_ENV
          echo "VERSION=$(make version)" >> $GITHUB_ENV

      - name: Set PLATFORM Variable
        shell: bash
        run: |
          platform=${{ matrix.platform }}
          echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV

      - name: Get ImageOS
        # There's no way around this, because "ImageOS" is only available to
        # processes, but the setup-go action uses it in its key.
        id: imageos
        uses: actions/github-script@v7
        with:
          result-encoding: string
          script: |
            return process.env.ImageOS

      - name: Set CACHE_PREFIX Variable
        shell: bash
        run: |
          cache_prefix=${{ runner.os }}-${{ steps.imageos.outputs.result }}-${{ env.PLATFORM }}
          echo "CACHE_PREFIX=${cache_prefix}" >> $GITHUB_ENV

      - name: Load Go Module Cache
        uses: actions/cache@v4
        with:
          path: |
            ${{ env.GOMODCACHE }}
          key: ${{ env.CACHE_PREFIX }}-modcache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ env.CACHE_PREFIX }}-modcache

      # Both load & update the cache when on default branch
      - name: Load Go Build & Test Cache
        id: go-cache
        uses: actions/cache@v4
        if: github.ref_name == github.event.repository.default_branch && github.event_name != 'pull_request'
        with:
          path: |
            ${{ env.GOCACHE }}
          key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
          restore-keys: |
            ${{ env.CACHE_PREFIX }}-cache

      # Only load the cache when not on default branch
      - name: Load Go Build & Test Cache
        id: go-cache-restore
        uses: actions/cache/restore@v4
        if: github.ref_name != github.event.repository.default_branch || github.event_name == 'pull_request'
        with:
          path: |
            ${{ env.GOCACHE }}
          key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
          restore-keys: |
            ${{ env.CACHE_PREFIX }}-cache

      - name: Build Native rclone
        shell: bash
        run: |
          make

      - name: Install gomobile
        shell: bash
        run: |
          go install golang.org/x/mobile/cmd/gobind@latest
          go install golang.org/x/mobile/cmd/gomobile@latest
          env PATH=$PATH:~/go/bin gomobile init
          echo "RCLONE_NDK_VERSION=21" >> $GITHUB_ENV

      - name: arm-v7a - gomobile build
        shell: bash
        run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile

      - name: arm-v7a - Set Environment Variables
        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=arm' >> $GITHUB_ENV
          echo 'GOARM=7' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

      - name: arm-v7a - Build
        shell: bash
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a .

      - name: arm64-v8a - Set Environment Variables
        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=arm64' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

      - name: arm64-v8a - Build
        shell: bash
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .

      - name: x86 - Set Environment Variables
        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=386' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

      - name: x86 - Build
        shell: bash
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 .

      - name: x64 - Set Environment Variables
        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=amd64' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

      - name: x64 - Build
        shell: bash
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x64 .

      - name: Delete Existing Cache
        continue-on-error: true
        shell: bash
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          cache_ids=($(gh cache list --key "${{ env.CACHE_PREFIX }}-cache" --json id | jq '.[].id'))
          for cache_id in "${cache_ids[@]}"; do
            echo "Deleting Cache: $cache_id"
            gh cache delete "$cache_id"
          done
        if: github.ref_name == github.event.repository.default_branch && github.event_name != 'pull_request' && steps.go-cache.outputs.cache-hit != 'true'

      - name: Deploy Built Binaries
        shell: bash
        run: |
          make ci_upload
        env:
          RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
        # Upload artifacts if not a PR && not a fork
        if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone'
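Each per-ABI step above follows the same recipe: point `CC` at the NDK's target-specific clang wrapper, set `GOOS`/`GOARCH`, enable cgo, and run a plain `go build`. A minimal local sketch of the same idea for a single ABI — assuming `$ANDROID_NDK` points at an installed NDK and that API level 21 matches the workflow's `RCLONE_NDK_VERSION`:

```bash
# Cross-compile rclone for Android arm64 (arm64-v8a) on a Linux x86_64 host,
# mirroring the "arm64-v8a" steps of the workflow above.
export RCLONE_NDK_VERSION=21
export CC=$ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang
export CC_FOR_TARGET=$CC
export GOOS=android GOARCH=arm64 CGO_ENABLED=1
export CGO_LDFLAGS='-fuse-ld=lld -s -w'
go build -v -tags android -trimpath -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .
```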
75 .github/workflows/build_publish_docker_image.yml vendored
@@ -4,6 +4,10 @@

name: Build & Push Docker Images

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.ref || github.run_id }}
  cancel-in-progress: true

# Trigger the workflow on push or pull request
on:
  push:
@@ -41,32 +45,26 @@ jobs:
    runs-on: ${{ matrix.runs-on }}

    steps:
      - name: Free Space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .

      - name: Checkout Repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set REPO_NAME Variable
        shell: bash
        run: |
          echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}

      - name: Set PLATFORM Variable
        shell: bash
        run: |
          platform=${{ matrix.platform }}
          echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV

      - name: Set CACHE_NAME Variable
        shell: python
        env:
          GITHUB_EVENT_REPOSITORY_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
        run: |
          import os, re

@@ -82,8 +80,11 @@ jobs:

          ref_name_slug = "cache"

          if os.environ.get("GITHUB_REF_NAME") and os.environ['GITHUB_EVENT_NAME'] == "pull_request":
              ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME'])
          if os.environ.get("GITHUB_REF_NAME"):
              if os.environ['GITHUB_EVENT_NAME'] == "pull_request":
                  ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME'])
              elif os.environ['GITHUB_REF_NAME'] != os.environ['GITHUB_EVENT_REPOSITORY_DEFAULT_BRANCH']:
                  ref_name_slug += "-ref-" + slugify(os.environ['GITHUB_REF_NAME'])

          with open(os.environ['GITHUB_ENV'], 'a') as env:
              env.write(f"CACHE_NAME={ref_name_slug}\n")
@@ -98,6 +99,12 @@ jobs:
          script: |
            return process.env.ImageOS

      - name: Set CACHE_PREFIX Variable
        shell: bash
        run: |
          cache_prefix=${{ runner.os }}-${{ steps.imageos.outputs.result }}-${{ env.PLATFORM }}-docker-go
          echo "CACHE_PREFIX=${cache_prefix}" >> $GITHUB_ENV

      - name: Extract Metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
@@ -130,22 +137,35 @@ jobs:
      - name: Load Go Build Cache for Docker
        id: go-cache
        uses: actions/cache@v4
        if: github.ref_name == github.event.repository.default_branch
        with:
          key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
          # Cache only the go builds, the module download is cached via the docker layer caching
          path: |
            go-build-cache
            /tmp/go-build-cache
          key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
          restore-keys: |
            ${{ env.CACHE_PREFIX }}-cache

      - name: Load Go Build Cache for Docker
        id: go-cache-restore
        uses: actions/cache/restore@v4
        if: github.ref_name != github.event.repository.default_branch
        with:
          # Cache only the go builds, the module download is cached via the docker layer caching
          path: |
            /tmp/go-build-cache
          key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
          restore-keys: |
            ${{ env.CACHE_PREFIX }}-cache

      - name: Inject Go Build Cache into Docker
        uses: reproducible-containers/buildkit-cache-dance@v3
        with:
          cache-map: |
            {
              "go-build-cache": "/root/.cache/go-build"
              "/tmp/go-build-cache": "/root/.cache/go-build"
            }
          skip-extraction: ${{ steps.go-cache.outputs.cache-hit }}
          skip-extraction: ${{ steps.go-cache.outputs.cache-hit || steps.go-cache-restore.outputs.cache-hit }}

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
@@ -172,9 +192,10 @@ jobs:
          outputs: |
            type=image,name=ghcr.io/${{ env.REPO_NAME }},push-by-digest=true,name-canonical=true,push=true
          cache-from: |
            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.PLATFORM }}-${{ env.CACHE_NAME }}
            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.PLATFORM }}-cache
          cache-to: |
            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }},image-manifest=true,mode=max,compression=zstd
            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.PLATFORM }}-${{ env.CACHE_NAME }},image-manifest=true,mode=max,compression=zstd

      - name: Export Image Digest
        run: |
@@ -190,6 +211,19 @@ jobs:
          retention-days: 1
          if-no-files-found: error

      - name: Delete Existing Cache
        if: github.ref_name == github.event.repository.default_branch && steps.go-cache.outputs.cache-hit != 'true'
        continue-on-error: true
        shell: bash
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          cache_ids=($(gh cache list --key "${{ env.CACHE_PREFIX }}-cache" --json id | jq '.[].id'))
          for cache_id in "${cache_ids[@]}"; do
            echo "Deleting Cache: $cache_id"
            gh cache delete "$cache_id"
          done

  merge-image:
    name: Merge & Push Final Docker Image
    runs-on: ubuntu-24.04
@@ -205,6 +239,7 @@ jobs:
          merge-multiple: true

      - name: Set REPO_NAME Variable
        shell: bash
        run: |
          echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}
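For orientation, the registry-backed BuildKit cache configured above corresponds to flags a manual `docker buildx build` would pass; a sketch with placeholder values (`OWNER`, `linux-amd64`, and the trailing `cache` slug are illustrative, not taken from this commit):

```bash
# Illustrative equivalent of the workflow's cache-from/cache-to registry cache.
# OWNER, linux-amd64 and the trailing "cache" slug are placeholders.
docker buildx build \
  --cache-from type=registry,ref=ghcr.io/OWNER/rclone:build-linux-amd64-cache \
  --cache-to type=registry,ref=ghcr.io/OWNER/rclone:build-linux-amd64-cache,image-manifest=true,mode=max,compression=zstd \
  --platform linux/amd64 .
```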
104 .github/workflows/lint.yml vendored Normal file
@@ -0,0 +1,104 @@
---
# Github Actions build for rclone
# -*- compile-command: "yamllint -f parsable lint.yml" -*-

name: Lint & Vulnerability Check

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.ref || github.run_id }}
  cancel-in-progress: true

# Trigger the workflow on push or pull request
on:
  push:
    branches:
      - '**'
    tags:
      - '**'
  pull_request:
  workflow_dispatch:
    inputs:
      manual:
        description: Manual run (bypass default conditions)
        type: boolean
        default: true

jobs:
  lint:
    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
    timeout-minutes: 30
    name: "lint"
    runs-on: ubuntu-latest

    steps:
      - name: Get runner parameters
        id: get-runner-parameters
        shell: bash
        run: |
          echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT
          echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT

      - name: Checkout
        uses: actions/checkout@v4

      - name: Install Go
        id: setup-go
        uses: actions/setup-go@v5
        with:
          go-version: '>=1.23.0-rc.1'
          check-latest: true
          cache: false

      - name: Cache
        uses: actions/cache@v4
        with:
          path: |
            ~/go/pkg/mod
            ~/.cache/go-build
            ~/.cache/golangci-lint
          key: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-${{ hashFiles('go.sum') }}
          restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-

      - name: Code quality test (Linux)
        uses: golangci/golangci-lint-action@v6
        with:
          version: latest
          skip-cache: true

      - name: Code quality test (Windows)
        uses: golangci/golangci-lint-action@v6
        env:
          GOOS: "windows"
        with:
          version: latest
          skip-cache: true

      - name: Code quality test (macOS)
        uses: golangci/golangci-lint-action@v6
        env:
          GOOS: "darwin"
        with:
          version: latest
          skip-cache: true

      - name: Code quality test (FreeBSD)
        uses: golangci/golangci-lint-action@v6
        env:
          GOOS: "freebsd"
        with:
          version: latest
          skip-cache: true

      - name: Code quality test (OpenBSD)
        uses: golangci/golangci-lint-action@v6
        env:
          GOOS: "openbsd"
        with:
          version: latest
          skip-cache: true

      - name: Install govulncheck
        run: go install golang.org/x/vuln/cmd/govulncheck@latest

      - name: Scan for vulnerabilities
        run: govulncheck ./...
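The five golangci-lint steps differ only in the `GOOS` they export, so the same matrix is easy to reproduce locally; a sketch, assuming golangci-lint is installed:

```bash
# Re-run the per-OS lint passes locally; GOOS controls which
# platform-specific files golangci-lint type-checks.
for goos in linux windows darwin freebsd openbsd; do
  GOOS=$goos golangci-lint run
done

# Vulnerability scan, as in the final two steps above.
go install golang.org/x/vuln/cmd/govulncheck@latest
govulncheck ./...
```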
6 Makefile
@@ -88,13 +88,13 @@ test: rclone test_all

# Quick test
quicktest:
    RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) ./...
    RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) ./...

racequicktest:
    RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -cpu=2 -race ./...
    RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...

compiletest:
    RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -run XXX ./...
    RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -run XXX ./...

# Do source code quality checks
check: rclone
@@ -74,14 +74,13 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
* OVH [:page_facing_up:](https://rclone.org/swift/)
* Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
* Outscale [:page_facing_up:](https://rclone.org/s3/#outscale)
* OVHcloud Object Storage (Swift) [:page_facing_up:](https://rclone.org/swift/)
* OVHcloud Object Storage (S3-compatible) [:page_facing_up:](https://rclone.org/s3/#ovhcloud)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
@@ -979,24 +979,6 @@ func (f *Fs) deleteObjects(ctx context.Context, IDs []string, useTrash bool) (er
    return nil
}

// untrash a file or directory by ID
//
// If a name collision occurs in the destination folder, PikPak might automatically
// rename the restored item(s) by appending a numbered suffix. For example,
// foo.txt -> foo(1).txt or foo(2).txt if foo(1).txt already exists
func (f *Fs) untrashObjects(ctx context.Context, IDs []string) (err error) {
    if len(IDs) == 0 {
        return nil
    }
    req := api.RequestBatch{
        IDs: IDs,
    }
    if err := f.requestBatchAction(ctx, "batchUntrash", &req); err != nil {
        return fmt.Errorf("untrash object failed: %w", err)
    }
    return nil
}

// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
@@ -1081,14 +1063,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
    return f.waitTask(ctx, info.TaskID)
}

// Move the object to a new parent folder
//
// Objects cannot be moved to their current folder.
// "file_move_or_copy_to_cur" (9): Please don't move or copy to current folder or sub folder
//
// If a name collision occurs in the destination folder, PikPak might automatically
// rename the moved item(s) by appending a numbered suffix. For example,
// foo.txt -> foo(1).txt or foo(2).txt if foo(1).txt already exists
// Move the object
func (f *Fs) moveObjects(ctx context.Context, IDs []string, dirID string) (err error) {
    if len(IDs) == 0 {
        return nil
@@ -1104,12 +1079,6 @@ func (f *Fs) moveObjects(ctx context.Context, IDs []string, dirID string) (err e
}

// renames the object
//
// The new name must be different from the current name.
// "file_rename_to_same_name" (3): Name of file or folder is not changed
//
// Within the same folder, object names must be unique.
// "file_duplicated_name" (3): File name cannot be repeated
func (f *Fs) renameObject(ctx context.Context, ID, newName string) (info *api.File, err error) {
    req := api.File{
        Name: f.opt.Enc.FromStandardName(newName),
@@ -1194,13 +1163,18 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
    srcObj, ok := src.(*Object)
    if !ok {
        fs.Debugf(src, "Can't move - not same remote type")
        return nil, fs.ErrorCantMove
    }
    err = srcObj.readMetaData(ctx)
    err := srcObj.readMetaData(ctx)
    if err != nil {
        return nil, err
    }

    srcLeaf, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
    if err != nil {
        return nil, err
    }
@@ -1211,74 +1185,31 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
        return nil, err
    }

    if srcObj.parent != dstParentID {
    // Perform the move. A numbered copy might be generated upon name collision.
    if srcParentID != dstParentID {
        // Do the move
        if err = f.moveObjects(ctx, []string{srcObj.id}, dstParentID); err != nil {
            return nil, fmt.Errorf("move: failed to move object %s to new parent %s: %w", srcObj.id, dstParentID, err)
            return nil, err
        }
        defer func() {
            if err != nil {
                // FIXME: Restored file might have a numbered name if a conflict occurs
                if mvErr := f.moveObjects(ctx, []string{srcObj.id}, srcObj.parent); mvErr != nil {
                    fs.Logf(f, "move: couldn't restore original object %q to %q after move failure: %v", dstObj.id, src.Remote(), mvErr)
                }
            }
        }()
    }
    // Manually update info of moved object to save API calls
    dstObj.id = srcObj.id
    dstObj.mimeType = srcObj.mimeType
    dstObj.gcid = srcObj.gcid
    dstObj.md5sum = srcObj.md5sum
    dstObj.hasMetaData = true

    // Find the moved object and any conflict object with the same name.
    var moved, conflict *api.File
    _, err = f.listAll(ctx, dstParentID, api.KindOfFile, "false", func(item *api.File) bool {
        if item.ID == srcObj.id {
            moved = item
            if item.Name == dstLeaf {
                return true
            }
        } else if item.Name == dstLeaf {
            conflict = item
    if srcLeaf != dstLeaf {
        // Rename
        info, err := f.renameObject(ctx, srcObj.id, dstLeaf)
        if err != nil {
            return nil, fmt.Errorf("move: couldn't rename moved file: %w", err)
        }
        // Stop early if both found
        return moved != nil && conflict != nil
    })
    if err != nil {
        return nil, fmt.Errorf("move: couldn't locate moved file %q in destination directory %q: %w", srcObj.id, dstParentID, err)
        return dstObj, dstObj.setMetaData(info)
    }
    if moved == nil {
        return nil, fmt.Errorf("move: moved file %q not found in destination", srcObj.id)
    }

    // If moved object already has the correct name, return
    if moved.Name == dstLeaf {
        return dstObj, dstObj.setMetaData(moved)
    }
    // If name collision, delete conflicting file first
    if conflict != nil {
        if err = f.deleteObjects(ctx, []string{conflict.ID}, true); err != nil {
            return nil, fmt.Errorf("move: couldn't delete conflicting file: %w", err)
        }
        defer func() {
            if err != nil {
                if restoreErr := f.untrashObjects(ctx, []string{conflict.ID}); restoreErr != nil {
                    fs.Logf(f, "move: couldn't restore conflicting file: %v", restoreErr)
                }
            }
        }()
    }
    info, err := f.renameObject(ctx, srcObj.id, dstLeaf)
    if err != nil {
        return nil, fmt.Errorf("move: couldn't rename moved file %q to %q: %w", dstObj.id, dstLeaf, err)
    }
    return dstObj, dstObj.setMetaData(info)
    return dstObj, nil
}

// copy objects
//
// Objects cannot be copied to their current folder.
// "file_move_or_copy_to_cur" (9): Please don't move or copy to current folder or sub folder
//
// If a name collision occurs in the destination folder, PikPak might automatically
// rename the copied item(s) by appending a numbered suffix. For example,
// foo.txt -> foo(1).txt or foo(2).txt if foo(1).txt already exists
func (f *Fs) copyObjects(ctx context.Context, IDs []string, dirID string) (err error) {
    if len(IDs) == 0 {
        return nil
@@ -1302,13 +1233,13 @@ func (f *Fs) copyObjects(ctx context.Context, IDs []string, dirID string) (err e
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
    srcObj, ok := src.(*Object)
    if !ok {
        fs.Debugf(src, "Can't copy - not same remote type")
        return nil, fs.ErrorCantCopy
    }
    err = srcObj.readMetaData(ctx)
    err := srcObj.readMetaData(ctx)
    if err != nil {
        return nil, err
    }
@@ -1323,55 +1254,31 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
        fs.Debugf(src, "Can't copy - same parent")
        return nil, fs.ErrorCantCopy
    }

    // Check for possible conflicts: Pikpak creates numbered copies on name collision.
    var conflict *api.File
    _, srcLeaf := dircache.SplitPath(srcObj.remote)
    if srcLeaf == dstLeaf {
        if conflict, err = f.readMetaDataForPath(ctx, remote); err == nil {
            // delete conflicting file
            if err = f.deleteObjects(ctx, []string{conflict.ID}, true); err != nil {
                return nil, fmt.Errorf("copy: couldn't delete conflicting file: %w", err)
            }
            defer func() {
                if err != nil {
                    if restoreErr := f.untrashObjects(ctx, []string{conflict.ID}); restoreErr != nil {
                        fs.Logf(f, "copy: couldn't restore conflicting file: %v", restoreErr)
                    }
                }
            }()
        } else if err != fs.ErrorObjectNotFound {
            return nil, err
        }
    } else {
        dstDir, _ := dircache.SplitPath(remote)
        dstObj.remote = path.Join(dstDir, srcLeaf)
        if conflict, err = f.readMetaDataForPath(ctx, dstObj.remote); err == nil {
            tmpName := conflict.Name + "-rclone-copy-" + random.String(8)
            if _, err = f.renameObject(ctx, conflict.ID, tmpName); err != nil {
                return nil, fmt.Errorf("copy: couldn't rename conflicting file: %w", err)
            }
            defer func() {
                if _, renameErr := f.renameObject(ctx, conflict.ID, conflict.Name); renameErr != nil {
                    fs.Logf(f, "copy: couldn't rename conflicting file back to original: %v", renameErr)
                }
            }()
        } else if err != fs.ErrorObjectNotFound {
            return nil, err
        }
    }

    // Copy the object
    if err := f.copyObjects(ctx, []string{srcObj.id}, dstParentID); err != nil {
        return nil, fmt.Errorf("couldn't copy file: %w", err)
    }
    err = dstObj.readMetaData(ctx)
    if err != nil {
    // Update info of the copied object with new parent but source name
    if info, err := dstObj.fs.readMetaDataForPath(ctx, srcObj.remote); err != nil {
        return nil, fmt.Errorf("copy: couldn't locate copied file: %w", err)
    } else if err = dstObj.setMetaData(info); err != nil {
        return nil, err
    }

    // Can't copy and change name in one step so we have to check if we have
    // the correct name after copy
    srcLeaf, _, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
    if err != nil {
        return nil, err
    }

    if srcLeaf != dstLeaf {
        return f.Move(ctx, dstObj, remote)
        // Rename
        info, err := f.renameObject(ctx, dstObj.id, dstLeaf)
        if err != nil {
            return nil, fmt.Errorf("copy: couldn't rename copied file: %w", err)
        }
        return dstObj, dstObj.setMetaData(info)
    }
    return dstObj, nil
}
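Since the backend cannot copy and rename in one call, a server-side copy to a different leaf name is what exercises the copy-then-rename path in `Copy` above; a hedged illustration (the remote name `pikpak:` and the paths are hypothetical):

```bash
# Server-side copy that also changes the file name: the backend first
# copies src/foo.txt into dst/ keeping the source name, then renames
# the copy to bar.txt, per the logic in Copy above.
rclone copyto pikpak:src/foo.txt pikpak:dst/bar.txt -v
```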
129 backend/s3/s3.go
@@ -149,9 +149,6 @@ var providerOption = fs.Option{
}, {
    Value: "Outscale",
    Help:  "OUTSCALE Object Storage (OOS)",
}, {
    Value: "OVHcloud",
    Help:  "OVHcloud Object Storage",
}, {
    Value: "Petabox",
    Help:  "Petabox Object Storage",
@@ -538,59 +535,6 @@ func init() {
    Value: "ap-northeast-1",
    Help:  "Tokyo, Japan",
}},
}, {
    // References:
    // https://help.ovhcloud.com/csm/en-public-cloud-storage-s3-location?id=kb_article_view&sysparm_article=KB0047384
    // https://support.us.ovhcloud.com/hc/en-us/articles/10667991081107-Endpoints-and-Object-Storage-Geoavailability
    Name:     "region",
    Help:     "Region where your bucket will be created and your data stored.\n",
    Provider: "OVHcloud",
    Examples: []fs.OptionExample{{
        Value: "gra",
        Help:  "Gravelines, France",
    }, {
        Value: "rbx",
        Help:  "Roubaix, France",
    }, {
        Value: "sbg",
        Help:  "Strasbourg, France",
    }, {
        Value: "eu-west-par",
        Help:  "Paris, France (3AZ)",
    }, {
        Value: "de",
        Help:  "Frankfurt, Germany",
    }, {
        Value: "uk",
        Help:  "London, United Kingdom",
    }, {
        Value: "waw",
        Help:  "Warsaw, Poland",
    }, {
        Value: "bhs",
        Help:  "Beauharnois, Canada",
    }, {
        Value: "ca-east-tor",
        Help:  "Toronto, Canada",
    }, {
        Value: "sgp",
        Help:  "Singapore",
    }, {
        Value: "ap-southeast-syd",
        Help:  "Sydney, Australia",
    }, {
        Value: "ap-south-mum",
        Help:  "Mumbai, India",
    }, {
        Value: "us-east-va",
        Help:  "Vint Hill, Virginia, USA",
    }, {
        Value: "us-west-or",
        Help:  "Hillsboro, Oregon, USA",
    }, {
        Value: "rbx-archive",
        Help:  "Roubaix, France (Cold Archive)",
    }},
}, {
    Name: "region",
    Help: "Region where your bucket will be created and your data stored.\n",
@@ -643,7 +587,7 @@ func init() {
}, {
    Name:     "region",
    Help:     "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
    Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,FlashBlade,IONOS,Petabox,Liara,Linode,Magalu,OVHcloud,Qiniu,RackCorp,Scaleway,Selectel,Storj,Synology,TencentCOS,HuaweiOBS,IDrive,Mega,Zata",
    Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,FlashBlade,IONOS,Petabox,Liara,Linode,Magalu,Qiniu,RackCorp,Scaleway,Selectel,Storj,Synology,TencentCOS,HuaweiOBS,IDrive,Mega,Zata",
    Examples: []fs.OptionExample{{
        Value: "",
        Help:  "Use this if unsure.\nWill use v4 signatures and an empty region.",
@@ -1230,71 +1174,6 @@ func init() {
    Value: "obs.ru-northwest-2.myhuaweicloud.com",
    Help:  "RU-Moscow2",
}},
}, {
    Name:     "endpoint",
    Help:     "Endpoint for OVHcloud Object Storage.",
    Provider: "OVHcloud",
    Examples: []fs.OptionExample{{
        Value:    "s3.gra.io.cloud.ovh.net",
        Help:     "OVHcloud Gravelines, France",
        Provider: "OVHcloud",
    }, {
        Value:    "s3.rbx.io.cloud.ovh.net",
        Help:     "OVHcloud Roubaix, France",
        Provider: "OVHcloud",
    }, {
        Value:    "s3.sbg.io.cloud.ovh.net",
        Help:     "OVHcloud Strasbourg, France",
        Provider: "OVHcloud",
    }, {
        Value:    "s3.eu-west-par.io.cloud.ovh.net",
        Help:     "OVHcloud Paris, France (3AZ)",
        Provider: "OVHcloud",
    }, {
        Value:    "s3.de.io.cloud.ovh.net",
        Help:     "OVHcloud Frankfurt, Germany",
        Provider: "OVHcloud",
    }, {
        Value:    "s3.uk.io.cloud.ovh.net",
        Help:     "OVHcloud London, United Kingdom",
        Provider: "OVHcloud",
    }, {
        Value:    "s3.waw.io.cloud.ovh.net",
        Help:     "OVHcloud Warsaw, Poland",
        Provider: "OVHcloud",
    }, {
        Value:    "s3.bhs.io.cloud.ovh.net",
        Help:     "OVHcloud Beauharnois, Canada",
        Provider: "OVHcloud",
    }, {
        Value:    "s3.ca-east-tor.io.cloud.ovh.net",
        Help:     "OVHcloud Toronto, Canada",
        Provider: "OVHcloud",
    }, {
        Value:    "s3.sgp.io.cloud.ovh.net",
        Help:     "OVHcloud Singapore",
        Provider: "OVHcloud",
    }, {
        Value:    "s3.ap-southeast-syd.io.cloud.ovh.net",
        Help:     "OVHcloud Sydney, Australia",
        Provider: "OVHcloud",
    }, {
        Value:    "s3.ap-south-mum.io.cloud.ovh.net",
        Help:     "OVHcloud Mumbai, India",
        Provider: "OVHcloud",
    }, {
        Value:    "s3.us-east-va.io.cloud.ovh.us",
        Help:     "OVHcloud Vint Hill, Virginia, USA",
        Provider: "OVHcloud",
    }, {
        Value:    "s3.us-west-or.io.cloud.ovh.us",
        Help:     "OVHcloud Hillsboro, Oregon, USA",
        Provider: "OVHcloud",
    }, {
        Value:    "s3.rbx-archive.io.cloud.ovh.net",
        Help:     "OVHcloud Roubaix, France (Cold Archive)",
        Provider: "OVHcloud",
    }},
}, {
    Name: "endpoint",
    Help: "Endpoint for Scaleway Object Storage.",
@@ -1532,7 +1411,7 @@ func init() {
}, {
    Name:     "endpoint",
    Help:     "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
    Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,LyveCloud,Magalu,OVHcloud,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox,Zata",
    Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,LyveCloud,Magalu,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox,Zata",
    Examples: []fs.OptionExample{{
        Value: "objects-us-east-1.dream.io",
        Help:  "Dream Objects endpoint",
@@ -2067,7 +1946,7 @@ func init() {
}, {
    Name:     "location_constraint",
    Help:     "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
    Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,FlashBlade,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Outscale,OVHcloud,Qiniu,RackCorp,Scaleway,Selectel,StackPath,Storj,TencentCOS,Petabox,Mega",
    Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,FlashBlade,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Outscale,Qiniu,RackCorp,Scaleway,Selectel,StackPath,Storj,TencentCOS,Petabox,Mega",
}, {
    Name: "acl",
    Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -3710,8 +3589,6 @@ func setQuirks(opt *Options) {
        useAlreadyExists = false // untested
    case "Outscale":
        virtualHostStyle = false
    case "OVHcloud":
        // No quirks
    case "RackCorp":
        // No quirks
        useMultipartEtag = false // untested
3 bin/go-test-cache/go.mod Normal file
@@ -0,0 +1,3 @@
module go-test-cache

go 1.24
123 bin/go-test-cache/main.go Normal file
@@ -0,0 +1,123 @@
// This code was copied from:
// https://github.com/fastly/cli/blob/main/scripts/go-test-cache/main.go
// which in turn is based on the following script and was generated using AI.
// https://github.com/airplanedev/blog-examples/blob/main/go-test-caching/update_file_timestamps.py?ref=airplane.ghost.io
//
// REFERENCE ARTICLE:
// https://web.archive.org/web/20240308061717/https://www.airplane.dev/blog/caching-golang-tests-in-ci
//
// It updates the mtime of the files to a mtime derived from the sha1 hash of their contents.
package main

import (
    "crypto/sha1"
    "io"
    "log"
    "os"
    "path/filepath"
    "sort"
    "strings"
    "time"
)

const (
    bufSize    = 65536
    baseDate   = 1684178360
    timeFormat = "2006-01-02 15:04:05"
)

func main() {
    repoRoot := "."
    allDirs := make([]string, 0)

    err := filepath.Walk(repoRoot, func(path string, info os.FileInfo, err error) error {
        if err != nil {
            return err
        }

        if info.IsDir() {
            dirPath := filepath.Join(repoRoot, path)
            relPath, _ := filepath.Rel(repoRoot, dirPath)

            if strings.HasPrefix(relPath, ".") {
                return nil
            }

            allDirs = append(allDirs, dirPath)
        } else {
            filePath := filepath.Join(repoRoot, path)
            relPath, _ := filepath.Rel(repoRoot, filePath)

            if strings.HasPrefix(relPath, ".") {
                return nil
            }

            sha1Hash, err := getFileSHA1(filePath)
            if err != nil {
                return err
            }

            modTime := getModifiedTime(sha1Hash)

            log.Printf("Setting modified time of file %s to %s\n", relPath, modTime.Format(timeFormat))
            err = os.Chtimes(filePath, modTime, modTime)
            if err != nil {
                return err
            }
        }

        return nil
    })
    if err != nil {
        log.Fatal("Error:", err)
    }

    sort.Slice(allDirs, func(i, j int) bool {
        return len(allDirs[i]) > len(allDirs[j]) || (len(allDirs[i]) == len(allDirs[j]) && allDirs[i] < allDirs[j])
    })

    for _, dirPath := range allDirs {
        relPath, _ := filepath.Rel(repoRoot, dirPath)

        log.Printf("Setting modified time of directory %s to %s\n", relPath, time.Unix(baseDate, 0).Format(timeFormat))
        err := os.Chtimes(dirPath, time.Unix(baseDate, 0), time.Unix(baseDate, 0))
        if err != nil {
            log.Fatal("Error:", err)
        }
    }

    log.Println("Done")
}

func getFileSHA1(filePath string) (string, error) {
    file, err := os.Open(filePath)
    if err != nil {
        return "", err
    }
    defer file.Close()

    // G401: Use of weak cryptographic primitive
    // Disabling as the hash is not used for security reasons.
    // The hash is used as a cache key to improve test run times.
    // #nosec
    // nosemgrep: go.lang.security.audit.crypto.use_of_weak_crypto.use-of-sha1
    hash := sha1.New()
    if _, err := io.CopyBuffer(hash, file, make([]byte, bufSize)); err != nil {
        return "", err
    }

    return string(hash.Sum(nil)), nil
}

func getModifiedTime(sha1Hash string) time.Time {
    hashBytes := []byte(sha1Hash)
    lastFiveBytes := hashBytes[:5]
    lastFiveValue := int64(0)

    for _, b := range lastFiveBytes {
        lastFiveValue = (lastFiveValue << 8) + int64(b)
    }

    modTime := baseDate - (lastFiveValue % 10000)
    return time.Unix(modTime, 0)
}
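Per the referenced article, the intended use is to run the tool over a fresh checkout before `go test`, so mtimes are content-derived and Go's test cache keys stay stable across CI runs. A sketch of the invocation — the tool lives in its own module, so running it from its own directory is an assumption about usage rather than something this commit spells out:

```bash
# Normalize mtimes to content-derived values, then run the tests so
# unchanged packages can hit Go's test cache even on a fresh checkout.
(cd bin/go-test-cache && go run .)
go test ./...
```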
@@ -316,10 +316,10 @@ See the [VFS File Caching](#vfs-file-caching) section for more info.
When using NFS mount on macOS, if you don't specify |--vfs-cache-mode|
the mount point will be read-only.

Bucket-based remotes - Azure Blob, Swift, S3, Google Cloud Storage and B2 -
can't store empty directories. Of these, only Azure Blob, Google Cloud Storage
and S3 can preserve them when you add `--xxx-directory_markers`; otherwise,
empty directories will vanish once they drop out of the directory cache.
The bucket-based remotes (e.g. Swift, S3, Google Compute Storage, B2)
do not support the concept of empty directories, so empty
directories will have a tendency to disappear once they fall out of
the directory cache.

When `rclone mount` is invoked on Unix with `--daemon` flag, the main rclone
program will wait for the background mount to become ready or until the timeout
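As an illustration of the directory-marker option mentioned above: for S3 the flag would be spelled `--s3-directory-markers` (inferred from the `--xxx-directory_markers` pattern in the text; check your backend's docs for the exact name):

```bash
# Mount an S3 bucket; directory markers let empty directories survive
# instead of vanishing when they drop out of the directory cache.
rclone mount remote:bucket /mnt/bucket --s3-directory-markers --vfs-cache-mode writes
```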
@@ -158,14 +158,13 @@ WebDAV or S3, that work out of the box.)
{{< provider name="Microsoft OneDrive" home="https://onedrive.live.com/" config="/onedrive/" >}}
{{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}}
{{< provider name="Nextcloud" home="https://nextcloud.com/" config="/webdav/#nextcloud" >}}
{{< provider name="OVH" home="https://www.ovh.co.uk/public-cloud/storage/object-storage/" config="/swift/" >}}
{{< provider name="Blomp Cloud Storage" home="https://rclone.org/swift/" config="/swift/" >}}
{{< provider name="OpenDrive" home="https://www.opendrive.com/" config="/opendrive/" >}}
{{< provider name="OpenStack Swift" home="https://docs.openstack.org/swift/latest/" config="/swift/" >}}
{{< provider name="Oracle Cloud Storage Swift" home="https://docs.oracle.com/en-us/iaas/integration/doc/configure-object-storage.html" config="/swift/" >}}
{{< provider name="Oracle Object Storage" home="https://www.oracle.com/cloud/storage/object-storage" config="/oracleobjectstorage/" >}}
{{< provider name="Outscale" home="https://en.outscale.com/storage/outscale-object-storage/" config="/s3/#outscale" >}}
{{< provider name="OVHcloud Object Storage (Swift)" home="https://www.ovhcloud.com/en/public-cloud/object-storage/" config="/swift/" >}}
{{< provider name="OVHcloud Object Storage (S3-compatible)" home="https://www.ovhcloud.com/en/public-cloud/object-storage/" config="/s3/#ovhcloud" >}}
{{< provider name="ownCloud" home="https://owncloud.org/" config="/webdav/#owncloud" >}}
{{< provider name="pCloud" home="https://www.pcloud.com/" config="/pcloud/" >}}
{{< provider name="Petabox" home="https://petabox.io/" config="/s3/#petabox" >}}
@@ -29,7 +29,6 @@ The S3 backend can be used with a number of different providers:
{{< provider name="MEGA S4 Object Storage" home="https://mega.io/objectstorage" config="/s3/#mega" >}}
{{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}}
{{< provider name="Outscale" home="https://en.outscale.com/storage/outscale-object-storage/" config="/s3/#outscale" >}}
{{< provider name="OVHcloud" home="https://www.ovhcloud.com/en/public-cloud/object-storage/" config="/s3/#ovhcloud" >}}
{{< provider name="Petabox" home="https://petabox.io/" config="/s3/#petabox" >}}
{{< provider name="Pure Storage FlashBlade" home="https://www.purestorage.com/products/unstructured-data-storage.html" config="/s3/#pure-storage-flashblade" >}}
{{< provider name="Qiniu Cloud Object Storage (Kodo)" home="https://www.qiniu.com/en/products/kodo" config="/s3/#qiniu" >}}
@@ -3612,206 +3611,6 @@ d) Delete this remote
y/e/d> y
```

### OVHcloud {#ovhcloud}

[OVHcloud Object Storage](https://www.ovhcloud.com/en-ie/public-cloud/object-storage/)
is an S3-compatible general-purpose object storage platform available in all OVHcloud regions.
To use the platform, you will need an access key and secret key. To know more about it and how
to interact with the platform, take a look at the [documentation](https://ovh.to/8stqhuo).

Here is an example of making an OVHcloud Object Storage configuration with `rclone config`:

```
No remotes found, make a new one?
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n

Enter name for new remote.
name> ovhcloud-rbx

Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
[...]
XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, ChinaMobile, Cloudflare, DigitalOcean, Dreamhost, GCS, HuaweiOBS, IBMCOS, IDrive, IONOS, LyveCloud, Leviia, Liara, Linode, Magalu, Minio, Netease, Outscale, OVHcloud, Petabox, RackCorp, Rclone, Scaleway, SeaweedFS, Selectel, StackPath, Storj, Synology, TencentCOS, Wasabi, Qiniu and others
   \ (s3)
[...]
Storage> s3

Option provider.
Choose your S3 provider.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
[...]
XX / OVHcloud Object Storage
   \ (OVHcloud)
[...]
provider> OVHcloud

Option env_auth.
Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
Only applies if access_key_id and secret_access_key is blank.
Choose a number from below, or type in your own boolean value (true or false).
Press Enter for the default (false).
 1 / Enter AWS credentials in the next step.
   \ (false)
 2 / Get AWS credentials from the environment (env vars or IAM).
   \ (true)
env_auth> 1

Option access_key_id.
AWS Access Key ID.
Leave blank for anonymous access or runtime credentials.
Enter a value. Press Enter to leave empty.
access_key_id> my_access

Option secret_access_key.
AWS Secret Access Key (password).
Leave blank for anonymous access or runtime credentials.
Enter a value. Press Enter to leave empty.
secret_access_key> my_secret

Option region.
Region where your bucket will be created and your data stored.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
 1 / Gravelines, France
   \ (gra)
 2 / Roubaix, France
   \ (rbx)
 3 / Strasbourg, France
   \ (sbg)
 4 / Paris, France (3AZ)
   \ (eu-west-par)
 5 / Frankfurt, Germany
   \ (de)
 6 / London, United Kingdom
   \ (uk)
 7 / Warsaw, Poland
   \ (waw)
 8 / Beauharnois, Canada
   \ (bhs)
 9 / Toronto, Canada
   \ (ca-east-tor)
10 / Singapore
   \ (sgp)
11 / Sydney, Australia
   \ (ap-southeast-syd)
12 / Mumbai, India
   \ (ap-south-mum)
13 / Vint Hill, Virginia, USA
   \ (us-east-va)
14 / Hillsboro, Oregon, USA
   \ (us-west-or)
15 / Roubaix, France (Cold Archive)
   \ (rbx-archive)
region> 2

Option endpoint.
Endpoint for OVHcloud Object Storage.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
 1 / OVHcloud Gravelines, France
   \ (s3.gra.io.cloud.ovh.net)
 2 / OVHcloud Roubaix, France
   \ (s3.rbx.io.cloud.ovh.net)
 3 / OVHcloud Strasbourg, France
   \ (s3.sbg.io.cloud.ovh.net)
 4 / OVHcloud Paris, France (3AZ)
   \ (s3.eu-west-par.io.cloud.ovh.net)
 5 / OVHcloud Frankfurt, Germany
   \ (s3.de.io.cloud.ovh.net)
 6 / OVHcloud London, United Kingdom
   \ (s3.uk.io.cloud.ovh.net)
 7 / OVHcloud Warsaw, Poland
   \ (s3.waw.io.cloud.ovh.net)
 8 / OVHcloud Beauharnois, Canada
   \ (s3.bhs.io.cloud.ovh.net)
 9 / OVHcloud Toronto, Canada
   \ (s3.ca-east-tor.io.cloud.ovh.net)
10 / OVHcloud Singapore
   \ (s3.sgp.io.cloud.ovh.net)
11 / OVHcloud Sydney, Australia
   \ (s3.ap-southeast-syd.io.cloud.ovh.net)
12 / OVHcloud Mumbai, India
   \ (s3.ap-south-mum.io.cloud.ovh.net)
13 / OVHcloud Vint Hill, Virginia, USA
   \ (s3.us-east-va.io.cloud.ovh.us)
14 / OVHcloud Hillsboro, Oregon, USA
   \ (s3.us-west-or.io.cloud.ovh.us)
15 / OVHcloud Roubaix, France (Cold Archive)
   \ (s3.rbx-archive.io.cloud.ovh.net)
endpoint> 2

Option acl.
Canned ACL used when creating buckets and storing or copying objects.
This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too.
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
Note that this ACL is applied when server-side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.
If the acl is an empty string then no X-Amz-Acl: header is added and
the default (private) will be used.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
   / Owner gets FULL_CONTROL.
 1 | No one else has access rights (default).
   \ (private)
   / Owner gets FULL_CONTROL.
 2 | The AllUsers group gets READ access.
   \ (public-read)
   / Owner gets FULL_CONTROL.
 3 | The AllUsers group gets READ and WRITE access.
   | Granting this on a bucket is generally not recommended.
   \ (public-read-write)
   / Owner gets FULL_CONTROL.
 4 | The AuthenticatedUsers group gets READ access.
   \ (authenticated-read)
   / Object owner gets FULL_CONTROL.
 5 | Bucket owner gets READ access.
   | If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
   \ (bucket-owner-read)
   / Both the object owner and the bucket owner get FULL_CONTROL over the object.
 6 | If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
   \ (bucket-owner-full-control)
acl> 1

Edit advanced config?
y) Yes
n) No (default)
y/n> n

Configuration complete.
Options:
- type: s3
- provider: OVHcloud
- access_key_id: my_access
- secret_access_key: my_secret
- region: rbx
- endpoint: s3.rbx.io.cloud.ovh.net
- acl: private
Keep this "ovhcloud-rbx" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
```

Your configuration file should now look like this:

```
[ovhcloud-rbx]
type = s3
provider = OVHcloud
access_key_id = my_access
secret_access_key = my_secret
region = rbx
endpoint = s3.rbx.io.cloud.ovh.net
acl = private
```


### Qiniu Cloud Object Storage (Kodo) {#qiniu}

[Qiniu Cloud Object Storage (Kodo)](https://www.qiniu.com/en/products/kodo) is built on fully self-developed core technology which, proven by repeated customer experience, holds a leading position in the market. Kodo can be widely applied to mass data management.
@@ -11,7 +11,7 @@ Commercial implementations of that being:

* [Rackspace Cloud Files](https://www.rackspace.com/cloud/files/)
* [Memset Memstore](https://www.memset.com/cloud/storage/)
* [OVH Object Storage](https://www.ovhcloud.com/en/public-cloud/object-storage/)
* [OVH Object Storage](https://www.ovh.co.uk/public-cloud/storage/object-storage/)
* [Oracle Cloud Storage](https://docs.oracle.com/en-us/iaas/integration/doc/configure-object-storage.html)
* [Blomp Cloud Storage](https://www.blomp.com/cloud-storage/)
* [IBM Bluemix Cloud ObjectStorage Swift](https://console.bluemix.net/docs/infrastructure/objectstorage-swift/index.html)
@@ -6,12 +6,6 @@
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<meta name="description" content="{{ .Description }}">
<meta name="author" content="Nick Craig-Wood">
<meta property="og:site_name" content="Rclone" />
<meta property="og:type" content="website" />
<meta property="og:image" content="/img/rclone-1200x630.png">
<meta property="og:url" content="{{ .Permalink }}" />
<meta property="og:title" content="{{ .Title }}" />
<meta property="og:description" content="{{ .Description }}" />
<link rel="shortcut icon" type="image/png" href="/img/rclone-32x32.png"/>
<script defer data-domain="rclone.org" src="https://weblog.rclone.org/js/script.js"></script>
<title>{{ block "title" . }}{{ .Title }}{{ end }}</title>
BIN docs/static/img/rclone-1200x630.png vendored
Binary file not shown. (Before: 36 KiB)
@@ -391,7 +391,6 @@ func (s *StatsInfo) _stopAverageLoop() {
    if s.average.started {
        s.average.cancel()
        s.average.stopped.Wait()
        s.average.started = false
    }
}
@@ -67,44 +67,11 @@ func (m *Metadata) MergeOptions(options []OpenOption) {
//
// If the object has no metadata then metadata will be nil
func GetMetadata(ctx context.Context, o DirEntry) (metadata Metadata, err error) {
    if do, ok := o.(Metadataer); ok {
        metadata, err = do.Metadata(ctx)
        if err != nil {
            return nil, err
        }
    do, ok := o.(Metadataer)
    if !ok {
        return nil, nil
    }
    if f, ok := o.Fs().(Fs); ok {
        Debugf(o, "GetMetadata")
        features := f.Features()
        if _, isDir := o.(Directory); isDir {
            // if bucket-based remote listing the root mark directories as buckets
            isBucket := features.BucketBased && o.Remote() == "" && f.Root() == ""
            if isBucket {
                if metadata == nil {
                    metadata = make(Metadata, 1)
                }
                metadata["content-type"] = "inode/bucket"
            }
        } else if obj, isObj := o.(Object); isObj && !features.SlowHash {
            // If have hashes and they are not slow then add them here
            hashes := f.Hashes()
            if hashes.Count() > 0 {
                if metadata == nil {
                    metadata = make(Metadata, hashes.Count())
                }
                for _, hashType := range hashes.Array() {
                    hash, err := obj.Hash(ctx, hashType)
                    if err != nil {
                        Errorf(obj, "failed to read hash: %v", err)
                    } else if hash != "" {
                        metadata["hash:"+hashType.String()] = hash
                    }
                }
            }

        }
    }
    return metadata, err
    return do.Metadata(ctx)
}

// mapItem describes the item to be mapped
@@ -820,7 +820,7 @@ func rcCheck(ctx context.Context, in rc.Params) (out rc.Params, err error) {
        return nil, rc.NewErrParamInvalid(errors.New("need srcFs parameter when not using checkFileHash"))
    }

    oneway, _ := in.GetBool("oneWay")
    oneway, _ := in.GetBool("oneway")
    download, _ := in.GetBool("download")

    opt := &CheckOpt{
@@ -49,7 +49,7 @@ Parameters:

Note that these are the global options which are unaffected by use of
the _config and _filter parameters. If you wish to read the parameters
set in _config or _filter use options/local.
set in _config then use options/config and for _filter use options/filter.

This shows the internal names of the option within rclone which should
map to the external options very easily with a few exceptions.
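Taking the revised note at face value, reading back per-call overrides would look like this (a sketch; assumes a running rc server, e.g. `rclone rcd`):

```bash
# Global options, unaffected by _config/_filter:
rclone rc options/get
# Per-call overrides, per the note above:
rclone rc options/config
rclone rc options/filter
```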