Mirror of https://github.com/rclone/rclone.git, synced 2025-12-06 00:03:32 +00:00

Compare commits: feat/cache...fix-2732-a (62 commits)
| Author | SHA1 | Date |
|---|---|---|
| | dc78afb1b3 | |
| | d4e68bf66b | |
| | 743d160fdd | |
| | dc95f36bc1 | |
| | d3e3af377a | |
| | db4812fbfa | |
| | ff9cbab5fa | |
| | 30d8ab5f2f | |
| | d71a4195d6 | |
| | 64ed9b175f | |
| | 2b10340e4e | |
| | 3c596f8d11 | |
| | 6a9c221841 | |
| | c49b24ff90 | |
| | edbbfd1e86 | |
| | 0e0af7499c | |
| | eb4fe3ef4c | |
| | 70eb0f21d9 | |
| | 12378bae27 | |
| | 3c08c4df3a | |
| | 897509ae10 | |
| | 0eb7ee2e16 | |
| | c1ebfb7e04 | |
| | 3d62058693 | |
| | 122890799f | |
| | 65078d5846 | |
| | 92f304902d | |
| | 45477a6c7d | |
| | 79b549b5a4 | |
| | 318880b4ad | |
| | 75521dcf6e | |
| | 8bf20dd545 | |
| | 744bce1246 | |
| | c817fc5c57 | |
| | 0bb4d0a985 | |
| | a8605abd34 | |
| | 953fb4490b | |
| | b17c3d18af | |
| | b45580fa19 | |
| | 1c26f40078 | |
| | 667ad093eb | |
| | 2c369aedf5 | |
| | 7a0d5ab0b4 | |
| | 75582b804b | |
| | 73452551c6 | |
| | cb3cf5068b | |
| | 428f518771 | |
| | 0411a41e11 | |
| | 07b37bcd12 | |
| | 0506826ff5 | |
| | 4fcd36a5ab | |
| | b2f43f39ba | |
| | 074d73d12b | |
| | 6457bcf51e | |
| | 8d12519f3d | |
| | 8a7c401366 | |
| | 0aae8f346f | |
| | e991328967 | |
| | 614d02a673 | |
| | 018ebdded5 | |
| | fc08983d71 | |
| | 7b61084891 | |
.github/workflows/build.yml (vendored): 10 changed lines
@@ -282,6 +282,16 @@ jobs:
      - name: Scan for vulnerabilities
        run: govulncheck ./...

      - name: Check Markdown format
        uses: DavidAnson/markdownlint-cli2-action@v20
        with:
          globs: |
            CONTRIBUTING.md
            MAINTAINERS.md
            README.md
            RELEASE.md
            docs/content/{authors,bugs,changelog,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md

      - name: Scan edits of autogenerated files
        run: bin/check_autogenerated_edits.py 'origin/${{ github.base_ref }}'
        if: github.event_name == 'pull_request'
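The new `Check Markdown format` step above can be reproduced locally before pushing. A minimal sketch, assuming Node.js is available and using the markdownlint-cli2 CLI that backs the DavidAnson/markdownlint-cli2-action (the local invocation itself is not part of the diff):

```sh
# Install the linter once (local setup, not part of the workflow)
npm install -g markdownlint-cli2

# Lint the same files the CI step checks; rule settings should be picked up
# from the repository's .markdownlint.yml added in this branch
markdownlint-cli2 CONTRIBUTING.md MAINTAINERS.md README.md RELEASE.md \
  "docs/content/{authors,bugs,changelog,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md"
```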
.github/workflows/build_android.yml (vendored): 212 lines removed (file deleted)
@@ -1,212 +0,0 @@
---
# Github Actions build for rclone
# -*- compile-command: "yamllint -f parsable build_android.yml" -*-

name: Build & Push Android Builds

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.ref || github.run_id }}
  cancel-in-progress: true

# Trigger the workflow on push or pull request
on:
  push:
    branches:
      - '**'
    tags:
      - '**'
  pull_request:
  workflow_dispatch:
    inputs:
      manual:
        description: Manual run (bypass default conditions)
        type: boolean
        default: true

jobs:
  android:
    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        include:
          - job_name: android-all
            platform: linux/amd64/android/go1.24
            os: ubuntu-latest
            go: '>=1.24.0-rc.1'

    name: ${{ matrix.job_name }}
    runs-on: ${{ matrix.os }}

    steps:
      - name: Checkout Repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      # Upgrade together with NDK version
      - name: Install Go
        uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go }}
          check-latest: true
          cache: false

      - name: Set Environment Variables
        shell: bash
        run: |
          echo "GOMODCACHE=$(go env GOMODCACHE)" >> $GITHUB_ENV
          echo "GOCACHE=$(go env GOCACHE)" >> $GITHUB_ENV
          echo "VERSION=$(make version)" >> $GITHUB_ENV

      - name: Set PLATFORM Variable
        shell: bash
        run: |
          platform=${{ matrix.platform }}
          echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV

      - name: Get ImageOS
        # There's no way around this, because "ImageOS" is only available to
        # processes, but the setup-go action uses it in its key.
        id: imageos
        uses: actions/github-script@v7
        with:
          result-encoding: string
          script: |
            return process.env.ImageOS

      - name: Set CACHE_PREFIX Variable
        shell: bash
        run: |
          cache_prefix=${{ runner.os }}-${{ steps.imageos.outputs.result }}-${{ env.PLATFORM }}
          echo "CACHE_PREFIX=${cache_prefix}" >> $GITHUB_ENV

      - name: Load Go Module Cache
        uses: actions/cache@v4
        with:
          path: |
            ${{ env.GOMODCACHE }}
          key: ${{ env.CACHE_PREFIX }}-modcache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ env.CACHE_PREFIX }}-modcache

      # Both load & update the cache when on default branch
      - name: Load Go Build & Test Cache
        id: go-cache
        uses: actions/cache@v4
        if: github.ref_name == github.event.repository.default_branch && github.event_name != 'pull_request'
        with:
          path: |
            ${{ env.GOCACHE }}
          key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
          restore-keys: |
            ${{ env.CACHE_PREFIX }}-cache

      # Only load the cache when not on default branch
      - name: Load Go Build & Test Cache
        id: go-cache-restore
        uses: actions/cache/restore@v4
        if: github.ref_name != github.event.repository.default_branch || github.event_name == 'pull_request'
        with:
          path: |
            ${{ env.GOCACHE }}
          key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
          restore-keys: |
            ${{ env.CACHE_PREFIX }}-cache

      - name: Build Native rclone
        shell: bash
        run: |
          make

      - name: Install gomobile
        shell: bash
        run: |
          go install golang.org/x/mobile/cmd/gobind@latest
          go install golang.org/x/mobile/cmd/gomobile@latest
          env PATH=$PATH:~/go/bin gomobile init
          echo "RCLONE_NDK_VERSION=21" >> $GITHUB_ENV

      - name: arm-v7a - gomobile build
        shell: bash
        run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile

      - name: arm-v7a - Set Environment Variables
        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=arm' >> $GITHUB_ENV
          echo 'GOARM=7' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

      - name: arm-v7a - Build
        shell: bash
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a .

      - name: arm64-v8a - Set Environment Variables
        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=arm64' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

      - name: arm64-v8a - Build
        shell: bash
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .

      - name: x86 - Set Environment Variables
        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=386' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

      - name: x86 - Build
        shell: bash
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 .

      - name: x64 - Set Environment Variables
        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=amd64' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

      - name: x64 - Build
        shell: bash
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x64 .

      - name: Delete Existing Cache
        continue-on-error: true
        shell: bash
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          cache_ids=($(gh cache list --key "${{ env.CACHE_PREFIX }}-cache" --json id | jq '.[].id'))
          for cache_id in "${cache_ids[@]}"; do
            echo "Deleting Cache: $cache_id"
            gh cache delete "$cache_id"
          done
        if: github.ref_name == github.event.repository.default_branch && github.event_name != 'pull_request' && steps.go-cache.outputs.cache-hit != 'true'

      - name: Deploy Built Binaries
        shell: bash
        run: |
          make ci_upload
        env:
          RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
        # Upload artifacts if not a PR && not a fork
        if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone'
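For reference, the cross-compile recipe used by the deleted workflow can still be run by hand. A condensed sketch of the arm64-v8a variant, taken from the steps above (assumes a locally installed Android NDK with `$ANDROID_NDK` pointing at it; the NDK level and paths are as in the deleted file):

```sh
# Environment as set by the "arm64-v8a - Set Environment Variables" step
export RCLONE_NDK_VERSION=21
export CC=$ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang
export CC_FOR_TARGET=$CC
export GOOS=android GOARCH=arm64 CGO_ENABLED=1
export CGO_LDFLAGS="-fuse-ld=lld -s -w"

# Build as in the "arm64-v8a - Build" step
VERSION=$(make version)
go build -v -tags android -trimpath \
  -ldflags "-s -X github.com/rclone/rclone/fs.Version=${VERSION}" \
  -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .
```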
.github/workflows/build_publish_docker_image.yml (vendored): 75 changed lines
@@ -4,10 +4,6 @@

name: Build & Push Docker Images

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.ref || github.run_id }}
  cancel-in-progress: true

# Trigger the workflow on push or pull request
on:
  push:

@@ -45,26 +41,32 @@ jobs:
    runs-on: ${{ matrix.runs-on }}

    steps:
      - name: Free Space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .

      - name: Checkout Repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set REPO_NAME Variable
        shell: bash
        run: |
          echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}

      - name: Set PLATFORM Variable
        shell: bash
        run: |
          platform=${{ matrix.platform }}
          echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV

      - name: Set CACHE_NAME Variable
        shell: python
        env:
          GITHUB_EVENT_REPOSITORY_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
        run: |
          import os, re

@@ -80,11 +82,8 @@ jobs:

          ref_name_slug = "cache"

          if os.environ.get("GITHUB_REF_NAME"):
              if os.environ['GITHUB_EVENT_NAME'] == "pull_request":
                  ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME'])
              elif os.environ['GITHUB_REF_NAME'] != os.environ['GITHUB_EVENT_REPOSITORY_DEFAULT_BRANCH']:
                  ref_name_slug += "-ref-" + slugify(os.environ['GITHUB_REF_NAME'])
          if os.environ.get("GITHUB_REF_NAME") and os.environ['GITHUB_EVENT_NAME'] == "pull_request":
              ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME'])

          with open(os.environ['GITHUB_ENV'], 'a') as env:
              env.write(f"CACHE_NAME={ref_name_slug}\n")

@@ -99,12 +98,6 @@ jobs:
          script: |
            return process.env.ImageOS

      - name: Set CACHE_PREFIX Variable
        shell: bash
        run: |
          cache_prefix=${{ runner.os }}-${{ steps.imageos.outputs.result }}-${{ env.PLATFORM }}-docker-go
          echo "CACHE_PREFIX=${cache_prefix}" >> $GITHUB_ENV

      - name: Extract Metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5

@@ -137,35 +130,22 @@ jobs:
      - name: Load Go Build Cache for Docker
        id: go-cache
        uses: actions/cache@v4
        if: github.ref_name == github.event.repository.default_branch
        with:
          key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
          # Cache only the go builds, the module download is cached via the docker layer caching
          path: |
            /tmp/go-build-cache
          key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
          restore-keys: |
            ${{ env.CACHE_PREFIX }}-cache

      - name: Load Go Build Cache for Docker
        id: go-cache-restore
        uses: actions/cache/restore@v4
        if: github.ref_name != github.event.repository.default_branch
        with:
          # Cache only the go builds, the module download is cached via the docker layer caching
          path: |
            /tmp/go-build-cache
          key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
          restore-keys: |
            ${{ env.CACHE_PREFIX }}-cache
            go-build-cache

      - name: Inject Go Build Cache into Docker
        uses: reproducible-containers/buildkit-cache-dance@v3
        with:
          cache-map: |
            {
              "/tmp/go-build-cache": "/root/.cache/go-build"
              "go-build-cache": "/root/.cache/go-build"
            }
          skip-extraction: ${{ steps.go-cache.outputs.cache-hit || steps.go-cache-restore.outputs.cache-hit }}
          skip-extraction: ${{ steps.go-cache.outputs.cache-hit }}

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3

@@ -192,10 +172,9 @@ jobs:
          outputs: |
            type=image,name=ghcr.io/${{ env.REPO_NAME }},push-by-digest=true,name-canonical=true,push=true
          cache-from: |
            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.PLATFORM }}-${{ env.CACHE_NAME }}
            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.PLATFORM }}-cache
            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
          cache-to: |
            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.PLATFORM }}-${{ env.CACHE_NAME }},image-manifest=true,mode=max,compression=zstd
            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }},image-manifest=true,mode=max,compression=zstd

      - name: Export Image Digest
        run: |

@@ -211,19 +190,6 @@ jobs:
          retention-days: 1
          if-no-files-found: error

      - name: Delete Existing Cache
        if: github.ref_name == github.event.repository.default_branch && steps.go-cache.outputs.cache-hit != 'true'
        continue-on-error: true
        shell: bash
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          cache_ids=($(gh cache list --key "${{ env.CACHE_PREFIX }}-cache" --json id | jq '.[].id'))
          for cache_id in "${cache_ids[@]}"; do
            echo "Deleting Cache: $cache_id"
            gh cache delete "$cache_id"
          done

  merge-image:
    name: Merge & Push Final Docker Image
    runs-on: ubuntu-24.04

@@ -239,7 +205,6 @@ jobs:
          merge-multiple: true

      - name: Set REPO_NAME Variable
        shell: bash
        run: |
          echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}
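The reworked cache steps lean on the GitHub CLI for housekeeping, as in the `Delete Existing Cache` step shown above. The same commands can be run interactively to inspect what is stored; a sketch, assuming `gh` is authenticated for the repository and `jq` is installed (the key prefix below is illustrative, not taken from a real run):

```sh
# List caches whose key starts with a given CACHE_PREFIX (same flags the workflow uses)
gh cache list --key "Linux-ubuntu24-linux-amd64-docker-go-cache" --json id | jq '.[].id'

# Delete one of them by id (same call the workflow loops over)
gh cache delete "$cache_id"
```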
.github/workflows/lint.yml (vendored): 104 lines removed (file deleted)
@@ -1,104 +0,0 @@
---
# Github Actions build for rclone
# -*- compile-command: "yamllint -f parsable lint.yml" -*-

name: Lint & Vulnerability Check

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.ref || github.run_id }}
  cancel-in-progress: true

# Trigger the workflow on push or pull request
on:
  push:
    branches:
      - '**'
    tags:
      - '**'
  pull_request:
  workflow_dispatch:
    inputs:
      manual:
        description: Manual run (bypass default conditions)
        type: boolean
        default: true

jobs:
  lint:
    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
    timeout-minutes: 30
    name: "lint"
    runs-on: ubuntu-latest

    steps:
      - name: Get runner parameters
        id: get-runner-parameters
        shell: bash
        run: |
          echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT
          echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT

      - name: Checkout
        uses: actions/checkout@v4

      - name: Install Go
        id: setup-go
        uses: actions/setup-go@v5
        with:
          go-version: '>=1.23.0-rc.1'
          check-latest: true
          cache: false

      - name: Cache
        uses: actions/cache@v4
        with:
          path: |
            ~/go/pkg/mod
            ~/.cache/go-build
            ~/.cache/golangci-lint
          key: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-${{ hashFiles('go.sum') }}
          restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-

      - name: Code quality test (Linux)
        uses: golangci/golangci-lint-action@v6
        with:
          version: latest
          skip-cache: true

      - name: Code quality test (Windows)
        uses: golangci/golangci-lint-action@v6
        env:
          GOOS: "windows"
        with:
          version: latest
          skip-cache: true

      - name: Code quality test (macOS)
        uses: golangci/golangci-lint-action@v6
        env:
          GOOS: "darwin"
        with:
          version: latest
          skip-cache: true

      - name: Code quality test (FreeBSD)
        uses: golangci/golangci-lint-action@v6
        env:
          GOOS: "freebsd"
        with:
          version: latest
          skip-cache: true

      - name: Code quality test (OpenBSD)
        uses: golangci/golangci-lint-action@v6
        env:
          GOOS: "openbsd"
        with:
          version: latest
          skip-cache: true

      - name: Install govulncheck
        run: go install golang.org/x/vuln/cmd/govulncheck@latest

      - name: Scan for vulnerabilities
        run: govulncheck ./...
.markdownlint.yml (new file): 43 lines added
@@ -0,0 +1,43 @@
default: true

# Use specific styles, to be consistent accross all documents.
# Default is to accept any as long as it is consistent within the same document.
heading-style: # MD003
  style: atx
ul-style: # MD004
  style: dash
hr-style: # MD035
  style: ---
code-block-style: # MD046
  style: fenced
code-fence-style: # MD048
  style: backtick
emphasis-style: # MD049
  style: asterisk
strong-style: # MD050
  style: asterisk

# Allow multiple headers with same text as long as they are not siblings.
no-duplicate-heading: # MD024
  siblings_only: true

# Allow long lines in code blocks and tables.
line-length: # MD013
  code_blocks: false
  tables: false

# The Markdown files used to generated docs with Hugo contain a top level
# header, even though the YAML front matter has a title property (which is
# used for the HTML document title only). Suppress Markdownlint warning:
# Multiple top-level headings in the same document.
single-title: # MD025
  level: 1
  front_matter_title:

# The HTML docs generated by Hugo from Markdown files may have slightly
# different header anchors than GitHub rendered Markdown, e.g. Hugo trims
# leading dashes so "--config string" becomes "#config-string" while it is
# "#--config-string" in GitHub preview. When writing links to headers in the
# Markdown files we must use whatever works in the final HTML generated docs.
# Suppress Markdownlint warning: Link fragments should be valid.
link-fragments: false # MD051
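While editing any of the Markdown sources, the new rule set can be exercised on a single file without a global install. A sketch (assumes Node.js; markdownlint-cli2 is expected to discover the repository's `.markdownlint.yml` on its own):

```sh
# One-off run against a single document; npx fetches markdownlint-cli2 on demand
npx markdownlint-cli2 docs/content/docs.md
```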
CONTRIBUTING.md: 503 changed lines
@@ -15,61 +15,81 @@ with the [latest beta of rclone](https://beta.rclone.org/):
- Rclone version (e.g. output from `rclone version`)
- Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
- The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
- A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
- if the log contains secrets then edit the file with a text editor first to obscure them
- A log of the command with the `-vv` flag (e.g. output from
`rclone -vv copy /tmp remote:tmp`)
- if the log contains secrets then edit the file with a text editor first to
obscure them

## Submitting a new feature or bug fix

If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via GitHub.

If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues) first so it can be discussed.
If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues)
first so it can be discussed.

To prepare your pull request first press the fork button on [rclone's GitHub
page](https://github.com/rclone/rclone).

Then [install Git](https://git-scm.com/downloads) and set your public contribution [name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git) and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).
Then [install Git](https://git-scm.com/downloads) and set your public contribution
[name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git)
and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).

Next open your terminal, change directory to your preferred folder and initialise your local rclone project:
Next open your terminal, change directory to your preferred folder and initialise
your local rclone project:

git clone https://github.com/rclone/rclone.git
cd rclone
git remote rename origin upstream
# if you have SSH keys setup in your GitHub account:
git remote add origin git@github.com:YOURUSER/rclone.git
# otherwise:
git remote add origin https://github.com/YOURUSER/rclone.git
```sh
git clone https://github.com/rclone/rclone.git
cd rclone
git remote rename origin upstream
# if you have SSH keys setup in your GitHub account:
git remote add origin git@github.com:YOURUSER/rclone.git
# otherwise:
git remote add origin https://github.com/YOURUSER/rclone.git
```

Note that most of the terminal commands in the rest of this guide must be executed from the rclone folder created above.
Note that most of the terminal commands in the rest of this guide must be
executed from the rclone folder created above.

Now [install Go](https://golang.org/doc/install) and verify your installation:

go version
```sh
go version
```

Great, you can now compile and execute your own version of rclone:

go build
./rclone version
```sh
go build
./rclone version
```

(Note that you can also replace `go build` with `make`, which will include a
more accurate version number in the executable as well as enable you to specify
more build options.) Finally make a branch to add your new feature

git checkout -b my-new-feature
```sh
git checkout -b my-new-feature
```

And get hacking.

You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins) and a quick view on the rclone [code organisation](#code-organisation).
You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins)
and a quick view on the rclone [code organisation](#code-organisation).

When ready - test the affected functionality and run the unit tests for the code you changed
When ready - test the affected functionality and run the unit tests for the
code you changed

cd folder/with/changed/files
go test -v
```sh
cd folder/with/changed/files
go test -v
```

Note that you may need to make a test remote, e.g. `TestSwift` for some
of the unit tests.

This is typically enough if you made a simple bug fix, otherwise please read the rclone [testing](#testing) section too.
This is typically enough if you made a simple bug fix, otherwise please read
the rclone [testing](#testing) section too.

Make sure you

@@ -79,14 +99,19 @@ Make sure you

When you are done with that push your changes to GitHub:

git push -u origin my-new-feature
```sh
git push -u origin my-new-feature
```

and open the GitHub website to [create your pull
request](https://help.github.com/articles/creating-a-pull-request/).

Your changes will then get reviewed and you might get asked to fix some stuff. If so, then make the changes in the same branch, commit and push your updates to GitHub.
Your changes will then get reviewed and you might get asked to fix some stuff.
If so, then make the changes in the same branch, commit and push your updates to
GitHub.

You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master)
or [squash your commits](#squashing-your-commits).

## Using Git and GitHub

@@ -94,87 +119,118 @@ You may sometimes be asked to [base your changes on the latest master](#basing-y

Follow the guideline for [commit messages](#commit-messages) and then:

git checkout my-new-feature # To switch to your branch
git status # To see the new and changed files
git add FILENAME # To select FILENAME for the commit
git status # To verify the changes to be committed
git commit # To do the commit
git log # To verify the commit. Use q to quit the log
```sh
git checkout my-new-feature # To switch to your branch
git status # To see the new and changed files
git add FILENAME # To select FILENAME for the commit
git status # To verify the changes to be committed
git commit # To do the commit
git log # To verify the commit. Use q to quit the log
```

You can modify the message or changes in the latest commit using:

git commit --amend
```sh
git commit --amend
```

If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
If you amend to commits that have been pushed to GitHub, then you will have to
[replace your previously pushed commits](#replacing-your-previously-pushed-commits).

### Replacing your previously pushed commits

Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.
Note that you are about to rewrite the GitHub history of your branch. It is good
practice to involve your collaborators before modifying commits that have been
pushed to GitHub.

Your previously pushed commits are replaced by:

git push --force origin my-new-feature
```sh
git push --force origin my-new-feature
```

### Basing your changes on the latest master

To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
To base your changes on the latest version of the
[rclone master](https://github.com/rclone/rclone/tree/master) (upstream):

git checkout master
git fetch upstream
git merge --ff-only
git push origin --follow-tags # optional update of your fork in GitHub
git checkout my-new-feature
git rebase master
```sh
git checkout master
git fetch upstream
git merge --ff-only
git push origin --follow-tags # optional update of your fork in GitHub
git checkout my-new-feature
git rebase master
```

If you rebase commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
If you rebase commits that have been pushed to GitHub, then you will have to
[replace your previously pushed commits](#replacing-your-previously-pushed-commits).

### Squashing your commits ###
### Squashing your commits

To combine your commits into one commit:

git log # To count the commits to squash, e.g. the last 2
git reset --soft HEAD~2 # To undo the 2 latest commits
git status # To check everything is as expected
```sh
git log # To count the commits to squash, e.g. the last 2
git reset --soft HEAD~2 # To undo the 2 latest commits
git status # To check everything is as expected
```

If everything is fine, then make the new combined commit:

git commit # To commit the undone commits as one
```sh
git commit # To commit the undone commits as one
```

otherwise, you may roll back using:

git reflog # To check that HEAD{1} is your previous state
git reset --soft 'HEAD@{1}' # To roll back to your previous state
```sh
git reflog # To check that HEAD{1} is your previous state
git reset --soft 'HEAD@{1}' # To roll back to your previous state
```

If you squash commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
If you squash commits that have been pushed to GitHub, then you will have to
[replace your previously pushed commits](#replacing-your-previously-pushed-commits).

Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.
Tip: You may like to use `git rebase -i master` if you are experienced or have a
more complex situation.

### GitHub Continuous Integration

rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions)
to build and test the project, which should be automatically available for your
fork too from the `Actions` tab in your repository.

## Testing

### Code quality tests

If you install [golangci-lint](https://github.com/golangci/golangci-lint) then you can run the same tests as get run in the CI which can be very helpful.
If you install [golangci-lint](https://github.com/golangci/golangci-lint) then
you can run the same tests as get run in the CI which can be very helpful.

You can run them with `make check` or with `golangci-lint run ./...`.

Using these tests ensures that the rclone codebase all uses the same coding standards. These tests also check for easy mistakes to make (like forgetting to check an error return).
Using these tests ensures that the rclone codebase all uses the same coding
standards. These tests also check for easy mistakes to make (like forgetting
to check an error return).

### Quick testing

rclone's tests are run from the go testing framework, so at the top
level you can run this to run all the tests.

go test -v ./...
```sh
go test -v ./...
```

You can also use `make`, if supported by your platform

make quicktest
```sh
make quicktest
```

The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.
The quicktest is [automatically run by GitHub](#github-continuous-integration)
when you push your branch to GitHub.

### Backend testing

@@ -190,41 +246,51 @@ need to make a remote called `TestDrive`.
You can then run the unit tests in the drive directory. These tests
are skipped if `TestDrive:` isn't defined.

cd backend/drive
go test -v
```sh
cd backend/drive
go test -v
```

You can then run the integration tests which test all of rclone's
operations. Normally these get run against the local file system,
but they can be run against any of the remotes.

cd fs/sync
go test -v -remote TestDrive:
go test -v -remote TestDrive: -fast-list
```sh
cd fs/sync
go test -v -remote TestDrive:
go test -v -remote TestDrive: -fast-list

cd fs/operations
go test -v -remote TestDrive:
cd fs/operations
go test -v -remote TestDrive:
```

If you want to use the integration test framework to run these tests
altogether with an HTML report and test retries then from the
project root:

go install github.com/rclone/rclone/fstest/test_all
test_all -backends drive
```sh
go install github.com/rclone/rclone/fstest/test_all
test_all -backends drive
```

### Full integration testing

If you want to run all the integration tests against all the remotes,
then change into the project root and run

make check
make test
```sh
make check
make test
```

The commands may require some extra go packages which you can install with

make build_dep
```sh
make build_dep
```

The full integration tests are run daily on the integration test server. You can
find the results at https://pub.rclone.org/integration-tests/
find the results at <https://pub.rclone.org/integration-tests/>

## Code Organisation

@@ -232,46 +298,48 @@ Rclone code is organised into a small number of top level directories
with modules beneath.

- backend - the rclone backends for interfacing to cloud providers -
- all - import this to load all the cloud providers
- ...providers
- all - import this to load all the cloud providers
- ...providers
- bin - scripts for use while building or maintaining rclone
- cmd - the rclone commands
- all - import this to load all the commands
- ...commands
- all - import this to load all the commands
- ...commands
- cmdtest - end-to-end tests of commands, flags, environment variables,...
- docs - the documentation and website
- content - adjust these docs only - everything else is autogenerated
- command - these are auto-generated - edit the corresponding .go file
- content - adjust these docs only, except those marked autogenerated
or portions marked autogenerated where the corresponding .go file must be
edited instead, and everything else is autogenerated
- commands - these are auto-generated, edit the corresponding .go file
- fs - main rclone definitions - minimal amount of code
- accounting - bandwidth limiting and statistics
- asyncreader - an io.Reader which reads ahead
- config - manage the config file and flags
- driveletter - detect if a name is a drive letter
- filter - implements include/exclude filtering
- fserrors - rclone specific error handling
- fshttp - http handling for rclone
- fspath - path handling for rclone
- hash - defines rclone's hash types and functions
- list - list a remote
- log - logging facilities
- march - iterates directories in lock step
- object - in memory Fs objects
- operations - primitives for sync, e.g. Copy, Move
- sync - sync directories
- walk - walk a directory
- accounting - bandwidth limiting and statistics
- asyncreader - an io.Reader which reads ahead
- config - manage the config file and flags
- driveletter - detect if a name is a drive letter
- filter - implements include/exclude filtering
- fserrors - rclone specific error handling
- fshttp - http handling for rclone
- fspath - path handling for rclone
- hash - defines rclone's hash types and functions
- list - list a remote
- log - logging facilities
- march - iterates directories in lock step
- object - in memory Fs objects
- operations - primitives for sync, e.g. Copy, Move
- sync - sync directories
- walk - walk a directory
- fstest - provides integration test framework
- fstests - integration tests for the backends
- mockdir - mocks an fs.Directory
- mockobject - mocks an fs.Object
- test_all - Runs integration tests for everything
- fstests - integration tests for the backends
- mockdir - mocks an fs.Directory
- mockobject - mocks an fs.Object
- test_all - Runs integration tests for everything
- graphics - the images used in the website, etc.
- lib - libraries used by the backend
- atexit - register functions to run when rclone exits
- dircache - directory ID to name caching
- oauthutil - helpers for using oauth
- pacer - retries with backoff and paces operations
- readers - a selection of useful io.Readers
- rest - a thin abstraction over net/http for REST
- atexit - register functions to run when rclone exits
- dircache - directory ID to name caching
- oauthutil - helpers for using oauth
- pacer - retries with backoff and paces operations
- readers - a selection of useful io.Readers
- rest - a thin abstraction over net/http for REST
- librclone - in memory interface to rclone's API for embedding rclone
- vfs - Virtual FileSystem layer for implementing rclone mount and similar

@@ -279,6 +347,36 @@ with modules beneath.

If you are adding a new feature then please update the documentation.

The documentation sources are generally in Markdown format, in conformance
with the CommonMark specification and compatible with GitHub Flavored
Markdown (GFM). The markdown format is checked as part of the lint operation
that runs automatically on pull requests, to enforce standards and consistency.
This is based on the [markdownlint](https://github.com/DavidAnson/markdownlint)
tool, which can also be integrated into editors so you can perform the same
checks while writing.

HTML pages, served as website <rclone.org>, are generated from the Markdown,
using [Hugo](https://gohugo.io). Note that when generating the HTML pages,
there is currently used a different algorithm for generating header anchors
than what GitHub uses for its Markdown rendering. For example, in the HTML docs
generated by Hugo any leading `-` characters are ignored, which means when
linking to a header with text `--config string` we therefore need to use the
link `#config-string` in our Markdown source, which will not work in GitHub's
preview where `#--config-string` would be the correct link.

Most of the documentation are written directly in text files with extension
`.md`, mainly within folder `docs/content`. Note that several of such files
are autogenerated (e.g. the command documentation, and `docs/content/flags.md`),
or contain autogenerated portions (e.g. the backend documentation under
`docs/content/commands`). These are marked with an `autogenerated` comment.
The sources of the autogenerated text are usually Markdown formatted text
embedded as string values in the Go source code, so you need to locate these
and edit the `.go` file instead. The `MANUAL.*`, `rclone.1` and other text
files in the root of the repository are also autogenerated. The autogeneration
of files, and the website, will be done during the release process. See the
`make doc` and `make website` targets in the Makefile if you are interested in
how. You don't need to run these when adding a feature.

If you add a new general flag (not for a backend), then document it in
`docs/content/docs.md` - the flags there are supposed to be in
alphabetical order.

@@ -287,39 +385,40 @@ If you add a new backend option/flag, then it should be documented in
the source file in the `Help:` field.

- Start with the most important information about the option,
as a single sentence on a single line.
- This text will be used for the command-line flag help.
- It will be combined with other information, such as any default value,
and the result will look odd if not written as a single sentence.
- It should end with a period/full stop character, which will be shown
in docs but automatically removed when producing the flag help.
- Try to keep it below 80 characters, to reduce text wrapping in the terminal.
as a single sentence on a single line.
- This text will be used for the command-line flag help.
- It will be combined with other information, such as any default value,
and the result will look odd if not written as a single sentence.
- It should end with a period/full stop character, which will be shown
in docs but automatically removed when producing the flag help.
- Try to keep it below 80 characters, to reduce text wrapping in the terminal.
- More details can be added in a new paragraph, after an empty line (`"\n\n"`).
- Like with docs generated from Markdown, a single line break is ignored
and two line breaks creates a new paragraph.
- This text will be shown to the user in `rclone config`
and in the docs (where it will be added by `make backenddocs`,
normally run some time before next release).
- Like with docs generated from Markdown, a single line break is ignored
and two line breaks creates a new paragraph.
- This text will be shown to the user in `rclone config`
and in the docs (where it will be added by `make backenddocs`,
normally run some time before next release).
- To create options of enumeration type use the `Examples:` field.
- Each example value have their own `Help:` field, but they are treated
a bit different than the main option help text. They will be shown
as an unordered list, therefore a single line break is enough to
create a new list item. Also, for enumeration texts like name of
countries, it looks better without an ending period/full stop character.
- Each example value have their own `Help:` field, but they are treated
a bit different than the main option help text. They will be shown
as an unordered list, therefore a single line break is enough to
create a new list item. Also, for enumeration texts like name of
countries, it looks better without an ending period/full stop character.

The only documentation you need to edit are the `docs/content/*.md`
files. The `MANUAL.*`, `rclone.1`, website, etc. are all auto-generated
from those during the release process. See the `make doc` and `make
website` targets in the Makefile if you are interested in how. You
don't need to run these when adding a feature.
When writing documentation for an entirely new backend,
see [backend documentation](#backend-documentation).

Documentation for rclone sub commands is with their code, e.g.
`cmd/ls/ls.go`. Write flag help strings as a single sentence on a single
line, without a period/full stop character at the end, as it will be
combined unmodified with other information (such as any default value).
If you are updating documentation for a command, you must do that in the
command source code, e.g. `cmd/ls/ls.go`. Write flag help strings as a single
sentence on a single line, without a period/full stop character at the end,
as it will be combined unmodified with other information (such as any default
value).

Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
for small changes in the docs which makes it very easy.
Note that you can use
[GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
for small changes in the docs which makes it very easy. Just remember the
caveat when linking to header anchors, noted above, which means that GitHub's
Markdown preview may not be an entirely reliable verification of the results.

## Making a release

@@ -350,13 +449,13 @@ change will get linked into the issue.

Here is an example of a short commit message:

```
```text
drive: add team drive support - fixes #885
```

And here is an example of a longer one:

```
```text
mount: fix hang on errored upload

In certain circumstances, if an upload failed then the mount could hang

@@ -379,7 +478,9 @@ To add a dependency `github.com/ncw/new_dependency` see the
instructions below. These will fetch the dependency and add it to
`go.mod` and `go.sum`.

go get github.com/ncw/new_dependency
```sh
go get github.com/ncw/new_dependency
```

You can add constraints on that package when doing `go get` (see the
go docs linked above), but don't unless you really need to.

@@ -391,7 +492,9 @@ and `go.sum` in the same commit as your other changes.

If you need to update a dependency then run

go get golang.org/x/crypto
```sh
go get golang.org/x/crypto
```

Check in a single commit as above.

@@ -434,25 +537,38 @@ remote or an fs.
### Getting going

- Create `backend/remote/remote.go` (copy this from a similar remote)
- box is a good one to start from if you have a directory-based remote (and shows how to use the directory cache)
- b2 is a good one to start from if you have a bucket-based remote
- box is a good one to start from if you have a directory-based remote (and
shows how to use the directory cache)
- b2 is a good one to start from if you have a bucket-based remote
- Add your remote to the imports in `backend/all/all.go`
- HTTP based remotes are easiest to maintain if they use rclone's [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but if there is a really good Go SDK from the provider then use that instead.
- Try to implement as many optional methods as possible as it makes the remote more usable.
- Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to make sure we can encode any path name and `rclone info` to help determine the encodings needed
- `rclone purge -v TestRemote:rclone-info`
- `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
- `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
- open `remote.csv` in a spreadsheet and examine
- HTTP based remotes are easiest to maintain if they use rclone's
[lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but
if there is a really good Go SDK from the provider then use that instead.
- Try to implement as many optional methods as possible as it makes the remote
more usable.
- Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to
make sure we can encode any path name and `rclone info` to help determine the
encodings needed
- `rclone purge -v TestRemote:rclone-info`
- `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
- `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
- open `remote.csv` in a spreadsheet and examine

### Guidelines for a speedy merge

- **Do** use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) if you are implementing a REST like backend and parsing XML/JSON in the backend.
- **Do** use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp) if your backend is HTTP based - this adds features like `--dump bodies`, `--tpslimit`, `--user-agent` without you having to code anything!
- **Do** follow your example backend exactly - use the same code order, function names, layout, structure. **Don't** move stuff around and **Don't** delete the comments.
- **Do not** split your backend up into `fs.go` and `object.go` (there are a few backends like that - don't follow them!)
- **Do** use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest)
if you are implementing a REST like backend and parsing XML/JSON in the backend.
- **Do** use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp)
if your backend is HTTP based - this adds features like `--dump bodies`,
`--tpslimit`, `--user-agent` without you having to code anything!
- **Do** follow your example backend exactly - use the same code order, function
names, layout, structure. **Don't** move stuff around and **Don't** delete the
comments.
- **Do not** split your backend up into `fs.go` and `object.go` (there are a few
backends like that - don't follow them!)
- **Do** put your API type definitions in a separate file - by preference `api/types.go`
- **Remember** we have >50 backends to maintain so keeping them as similar as possible to each other is a high priority!
- **Remember** we have >50 backends to maintain so keeping them as similar as
possible to each other is a high priority!

### Unit tests

@@ -463,19 +579,20 @@ remote or an fs.
### Integration tests

- Add your backend to `fstest/test_all/config.yaml`
- Once you've done that then you can use the integration test framework from the project root:
- go install ./...
- test_all -backends remote
- Once you've done that then you can use the integration test framework from
the project root:
- go install ./...
- test_all -backends remote

Or if you want to run the integration tests manually:

- Make sure integration tests pass with
- `cd fs/operations`
- `go test -v -remote TestRemote:`
- `cd fs/sync`
- `go test -v -remote TestRemote:`
- `cd fs/operations`
- `go test -v -remote TestRemote:`
- `cd fs/sync`
- `go test -v -remote TestRemote:`
- If your remote defines `ListR` check with this also
- `go test -v -remote TestRemote: -fast-list`
- `go test -v -remote TestRemote: -fast-list`

See the [testing](#testing) section for more information on integration tests.

@@ -487,10 +604,13 @@ alphabetical order of full name of remote (e.g. `drive` is ordered as
`Google Drive`) but with the local file system last.

- `README.md` - main GitHub page
- `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
- make sure this has the `autogenerated options` comments in (see your reference backend docs)
- update them in your backend with `bin/make_backend_docs.py remote`
- `docs/content/overview.md` - overview docs - add an entry into the Features table and the Optional Features table.
- `docs/content/remote.md` - main docs page (note the backend options are
automatically added to this file with `make backenddocs`)
- make sure this has the `autogenerated options` comments in (see your
reference backend docs)
- update them in your backend with `bin/make_backend_docs.py remote`
- `docs/content/overview.md` - overview docs - add an entry into the Features
table and the Optional Features table.
- `docs/content/docs.md` - list of remotes in config section
- `docs/content/_index.md` - front page of rclone.org
- `docs/layouts/chrome/navbar.html` - add it to the website navigation

@@ -506,21 +626,21 @@ It is quite easy to add a new S3 provider to rclone.
You'll need to modify the following files

- `backend/s3/s3.go`
- Add the provider to `providerOption` at the top of the file
- Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
- Exclude your provider from generic config questions (eg `region` and `endpoint).
- Add the provider to the `setQuirks` function - see the documentation there.
- Add the provider to `providerOption` at the top of the file
- Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
- Exclude your provider from generic config questions (eg `region` and `endpoint).
- Add the provider to the `setQuirks` function - see the documentation there.
- `docs/content/s3.md`
- Add the provider at the top of the page.
- Add a section about the provider linked from there.
- Add a transcript of a trial `rclone config` session
- Edit the transcript to remove things which might change in subsequent versions
- **Do not** alter or add to the autogenerated parts of `s3.md`
- **Do not** run `make backenddocs` or `bin/make_backend_docs.py s3`
- Add the provider at the top of the page.
- Add a section about the provider linked from there.
- Add a transcript of a trial `rclone config` session
- Edit the transcript to remove things which might change in subsequent versions
- **Do not** alter or add to the autogenerated parts of `s3.md`
- **Do not** run `make backenddocs` or `bin/make_backend_docs.py s3`
- `README.md` - this is the home page in github
- Add the provider and a link to the section you wrote in `docs/contents/s3.md`
- Add the provider and a link to the section you wrote in `docs/contents/s3.md`
- `docs/content/_index.md` - this is the home page of rclone.org
- Add the provider and a link to the section you wrote in `docs/contents/s3.md`
- Add the provider and a link to the section you wrote in `docs/contents/s3.md`

When adding the provider, endpoints, quirks, docs etc keep them in
alphabetical order by `Provider` name, but with `AWS` first and

@@ -541,31 +661,34 @@ For an example of adding an s3 provider see [eb3082a1](https://github.com/rclone

## Writing a plugin

New features (backends, commands) can also be added "out-of-tree", through Go plugins.
Changes will be kept in a dynamically loaded file instead of being compiled into the main binary.
This is useful if you can't merge your changes upstream or don't want to maintain a fork of rclone.
New features (backends, commands) can also be added "out-of-tree", through Go
plugins. Changes will be kept in a dynamically loaded file instead of being
compiled into the main binary. This is useful if you can't merge your changes
upstream or don't want to maintain a fork of rclone.

### Usage

- Naming
- Plugins names must have the pattern `librcloneplugin_KIND_NAME.so`.
- `KIND` should be one of `backend`, `command` or `bundle`.
- Example: A plugin with backend support for PiFS would be called
`librcloneplugin_backend_pifs.so`.
- Loading
- Supported on macOS & Linux as of now. ([Go issue for Windows support](https://github.com/golang/go/issues/19282))
- Supported on rclone v1.50 or greater.
- All plugins in the folder specified by variable `$RCLONE_PLUGIN_PATH` are loaded.
- If this variable doesn't exist, plugin support is disabled.
- Plugins must be compiled against the exact version of rclone to work.
(The rclone used during building the plugin must be the same as the source of rclone)
- Naming
- Plugins names must have the pattern `librcloneplugin_KIND_NAME.so`.
- `KIND` should be one of `backend`, `command` or `bundle`.
- Example: A plugin with backend support for PiFS would be called
`librcloneplugin_backend_pifs.so`.
- Loading
- Supported on macOS & Linux as of now. ([Go issue for Windows support](https://github.com/golang/go/issues/19282))
- Supported on rclone v1.50 or greater.
- All plugins in the folder specified by variable `$RCLONE_PLUGIN_PATH` are loaded.
|
||||
- If this variable doesn't exist, plugin support is disabled.
|
||||
- Plugins must be compiled against the exact version of rclone to work.
|
||||
(The rclone used during building the plugin must be the same as the source
|
||||
of rclone)
|
||||
|
||||
### Building
|
||||
|
||||
To turn your existing additions into a Go plugin, move them to an external repository
|
||||
and change the top-level package name to `main`.
|
||||
|
||||
Check `rclone --version` and make sure that the plugin's rclone dependency and host Go version match.
|
||||
Check `rclone --version` and make sure that the plugin's rclone dependency and
|
||||
host Go version match.
|
||||
|
||||
Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.
|
||||
|
||||
@@ -583,6 +706,6 @@ add them out of tree.
|
||||
This may be easier than using a plugin and is supported on all
|
||||
platforms not just macOS and Linux.
|
||||
|
||||
This is explained further in https://github.com/rclone/rclone_out_of_tree_example
|
||||
This is explained further in <https://github.com/rclone/rclone_out_of_tree_example>
|
||||
which has an example of an out of tree backend `ram` (which is a
|
||||
renamed version of the `memory` backend).
|
||||
|
||||
118
MAINTAINERS.md
118
MAINTAINERS.md
@@ -1,4 +1,4 @@
|
||||
# Maintainers guide for rclone #
|
||||
# Maintainers guide for rclone
|
||||
|
||||
Current active maintainers of rclone are:
|
||||
|
||||
@@ -24,80 +24,108 @@ Current active maintainers of rclone are:
|
||||
| Dan McArdle | @dmcardle | gitannex |
|
||||
| Sam Harrison | @childish-sambino | filescom |
|
||||
|
||||
**This is a work in progress Draft**
|
||||
## This is a work in progress draft
|
||||
|
||||
This is a guide for how to be an rclone maintainer. This is mostly a write-up of what I (@ncw) attempt to do.
|
||||
This is a guide for how to be an rclone maintainer. This is mostly a write-up
|
||||
of what I (@ncw) attempt to do.
|
||||
|
||||
## Triaging Tickets ##
|
||||
## Triaging Tickets
|
||||
|
||||
When a ticket comes in it should be triaged. This means it should be classified by adding labels and placed into a milestone. Quite a lot of tickets need a bit of back and forth to determine whether it is a valid ticket so tickets may remain without labels or milestone for a while.
|
||||
When a ticket comes in it should be triaged. This means it should be classified
|
||||
by adding labels and placed into a milestone. Quite a lot of tickets need a bit
|
||||
of back and forth to determine whether it is a valid ticket so tickets may
|
||||
remain without labels or milestone for a while.
|
||||
|
||||
Rclone uses the labels like this:
|
||||
|
||||
* `bug` - a definitely verified bug
|
||||
* `can't reproduce` - a problem which we can't reproduce
|
||||
* `doc fix` - a bug in the documentation - if users need help understanding the docs add this label
|
||||
* `duplicate` - normally close these and ask the user to subscribe to the original
|
||||
* `enhancement: new remote` - a new rclone backend
|
||||
* `enhancement` - a new feature
|
||||
* `FUSE` - to do with `rclone mount` command
|
||||
* `good first issue` - mark these if you find a small self-contained issue - these get shown to new visitors to the project
|
||||
* `help` wanted - mark these if you find a self-contained issue - these get shown to new visitors to the project
|
||||
* `IMPORTANT` - note to maintainers not to forget to fix this for the release
|
||||
* `maintenance` - internal enhancement, code re-organisation, etc.
|
||||
* `Needs Go 1.XX` - waiting for that version of Go to be released
|
||||
* `question` - not a `bug` or `enhancement` - direct to the forum for next time
|
||||
* `Remote: XXX` - which rclone backend this affects
|
||||
* `thinking` - not decided on the course of action yet
|
||||
- `bug` - a definitely verified bug
|
||||
- `can't reproduce` - a problem which we can't reproduce
|
||||
- `doc fix` - a bug in the documentation - if users need help understanding the
|
||||
docs add this label
|
||||
- `duplicate` - normally close these and ask the user to subscribe to the original
|
||||
- `enhancement: new remote` - a new rclone backend
|
||||
- `enhancement` - a new feature
|
||||
- `FUSE` - to do with `rclone mount` command
|
||||
- `good first issue` - mark these if you find a small self-contained issue -
|
||||
these get shown to new visitors to the project
|
||||
- `help` wanted - mark these if you find a self-contained issue - these get
|
||||
shown to new visitors to the project
|
||||
- `IMPORTANT` - note to maintainers not to forget to fix this for the release
|
||||
- `maintenance` - internal enhancement, code re-organisation, etc.
|
||||
- `Needs Go 1.XX` - waiting for that version of Go to be released
|
||||
- `question` - not a `bug` or `enhancement` - direct to the forum for next time
|
||||
- `Remote: XXX` - which rclone backend this affects
|
||||
- `thinking` - not decided on the course of action yet
|
||||
|
||||
If it turns out to be a bug or an enhancement it should be tagged as such, with the appropriate other tags. Don't forget the "good first issue" tag to give new contributors something easy to do to get going.
|
||||
If it turns out to be a bug or an enhancement it should be tagged as such, with
|
||||
the appropriate other tags. Don't forget the "good first issue" tag to give new
|
||||
contributors something easy to do to get going.
|
||||
|
||||
When a ticket is tagged it should be added to a milestone, either the next release, the one after, Soon or Help Wanted. Bugs can be added to the "Known Bugs" milestone if they aren't planned to be fixed or need to wait for something (e.g. the next go release).
|
||||
When a ticket is tagged it should be added to a milestone, either the next
|
||||
release, the one after, Soon or Help Wanted. Bugs can be added to the
|
||||
"Known Bugs" milestone if they aren't planned to be fixed or need to wait for
|
||||
something (e.g. the next go release).
|
||||
|
||||
The milestones have these meanings:
|
||||
|
||||
* v1.XX - stuff we would like to fit into this release
|
||||
* v1.XX+1 - stuff we are leaving until the next release
|
||||
* Soon - stuff we think is a good idea - waiting to be scheduled for a release
|
||||
* Help wanted - blue sky stuff that might get moved up, or someone could help with
|
||||
* Known bugs - bugs waiting on external factors or we aren't going to fix for the moment
|
||||
- v1.XX - stuff we would like to fit into this release
|
||||
- v1.XX+1 - stuff we are leaving until the next release
|
||||
- Soon - stuff we think is a good idea - waiting to be scheduled for a release
|
||||
- Help wanted - blue sky stuff that might get moved up, or someone could help with
|
||||
- Known bugs - bugs waiting on external factors or we aren't going to fix for
|
||||
the moment
|
||||
|
||||
Tickets [with no milestone](https://github.com/rclone/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.
|
||||
Tickets [with no milestone](https://github.com/rclone/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile)
|
||||
are good candidates for ones that have slipped between the gaps and need
|
||||
following up.
|
||||
|
||||
## Closing Tickets ##
|
||||
## Closing Tickets
|
||||
|
||||
Close tickets as soon as you can - make sure they are tagged with a release. Post a link to a beta in the ticket with the fix in, asking for feedback.
|
||||
Close tickets as soon as you can - make sure they are tagged with a release.
|
||||
Post a link to a beta in the ticket with the fix in, asking for feedback.
|
||||
|
||||
## Pull requests ##
|
||||
## Pull requests
|
||||
|
||||
Try to process pull requests promptly!
|
||||
|
||||
Merging pull requests on GitHub itself works quite well nowadays so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
|
||||
Merging pull requests on GitHub itself works quite well nowadays so you can
|
||||
squash and rebase or rebase pull requests. rclone doesn't use merge commits.
|
||||
Use the squash and rebase option if you need to edit the commit message.
|
||||
|
||||
After merging the commit, in your local master branch, do `git pull` then run `bin/update-authors.py` to update the authors file then `git push`.
|
||||
After merging the commit, in your local master branch, do `git pull` then run
|
||||
`bin/update-authors.py` to update the authors file then `git push`.
|
||||
|
||||
Sometimes pull requests need to be left open for a while - this especially true of contributions of new backends which take a long time to get right.
|
||||
Sometimes pull requests need to be left open for a while - this especially true
|
||||
of contributions of new backends which take a long time to get right.
|
||||
|
||||
## Merges ##
|
||||
## Merges
|
||||
|
||||
If you are merging a branch locally then do `git merge --ff-only branch-name` to avoid a merge commit. You'll need to rebase the branch if it doesn't merge cleanly.
|
||||
If you are merging a branch locally then do `git merge --ff-only branch-name` to
|
||||
avoid a merge commit. You'll need to rebase the branch if it doesn't merge cleanly.
|
||||
|
||||
## Release cycle ##
|
||||
## Release cycle
|
||||
|
||||
Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer if there is something big to merge that didn't stabilize properly or for personal reasons.
|
||||
Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer
|
||||
if there is something big to merge that didn't stabilize properly or for personal
|
||||
reasons.
|
||||
|
||||
High impact regressions should be fixed before the next release.
|
||||
|
||||
Near the start of the release cycle, the dependencies should be updated with `make update` to give time for bugs to surface.
|
||||
Near the start of the release cycle, the dependencies should be updated with
|
||||
`make update` to give time for bugs to surface.
|
||||
|
||||
Towards the end of the release cycle try not to merge anything too big so let things settle down.
|
||||
Towards the end of the release cycle try not to merge anything too big so let
|
||||
things settle down.
|
||||
|
||||
Follow the instructions in RELEASE.md for making the release. Note that the testing part is the most time-consuming often needing several rounds of test and fix depending on exactly how many new features rclone has gained.
|
||||
Follow the instructions in RELEASE.md for making the release. Note that the
|
||||
testing part is the most time-consuming often needing several rounds of test
|
||||
and fix depending on exactly how many new features rclone has gained.
|
||||
|
||||
## Mailing list ##
|
||||
## Mailing list
|
||||
|
||||
There is now an invite-only mailing list for rclone developers `rclone-dev` on google groups.
|
||||
There is now an invite-only mailing list for rclone developers `rclone-dev` on
|
||||
google groups.
|
||||
|
||||
## TODO ##
|
||||
## TODO
|
||||
|
||||
I should probably make a dev@rclone.org to register with cloud providers.
|
||||
I should probably make a <dev@rclone.org> to register with cloud providers.
|
||||
|
||||
8
Makefile
8
Makefile
@@ -88,13 +88,13 @@ test: rclone test_all
|
||||
|
||||
# Quick test
|
||||
quicktest:
|
||||
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) ./...
|
||||
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) ./...
|
||||
|
||||
racequicktest:
|
||||
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...
|
||||
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -cpu=2 -race ./...
|
||||
|
||||
compiletest:
|
||||
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -run XXX ./...
|
||||
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -run XXX ./...
|
||||
|
||||
# Do source code quality checks
|
||||
check: rclone
|
||||
@@ -243,7 +243,7 @@ fetch_binaries:
|
||||
rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/
|
||||
|
||||
serve: website
|
||||
cd docs && hugo server --logLevel info -w --disableFastRender
|
||||
cd docs && hugo server --logLevel info -w --disableFastRender --ignoreCache
|
||||
|
||||
tag: retag doc
|
||||
bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new
|
||||
|
||||
260
README.md
260
README.md
@@ -1,6 +1,6 @@
|
||||
|
||||
|
||||
<!-- markdownlint-disable-next-line first-line-heading no-inline-html -->
|
||||
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
|
||||
<!-- markdownlint-disable-next-line no-inline-html -->
|
||||
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
|
||||
|
||||
[Website](https://rclone.org) |
|
||||
@@ -18,102 +18,104 @@
|
||||
|
||||
# Rclone
|
||||
|
||||
Rclone *("rsync for cloud storage")* is a command-line program to sync files and directories to and from different cloud storage providers.
|
||||
Rclone *("rsync for cloud storage")* is a command-line program to sync files and
|
||||
directories to and from different cloud storage providers.
|
||||
|
||||
## Storage providers
|
||||
|
||||
* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
|
||||
* Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
|
||||
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
|
||||
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
|
||||
* ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
|
||||
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
|
||||
* Box [:page_facing_up:](https://rclone.org/box/)
|
||||
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
|
||||
* China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
|
||||
* Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
|
||||
* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
|
||||
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
|
||||
* Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
|
||||
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
|
||||
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
|
||||
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
|
||||
* Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
|
||||
* FileLu [:page_facing_up:](https://rclone.org/filelu/)
|
||||
* Files.com [:page_facing_up:](https://rclone.org/filescom/)
|
||||
* FlashBlade [:page_facing_up:](https://rclone.org/s3/#pure-storage-flashblade)
|
||||
* FTP [:page_facing_up:](https://rclone.org/ftp/)
|
||||
* GoFile [:page_facing_up:](https://rclone.org/gofile/)
|
||||
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
|
||||
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
|
||||
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
|
||||
* HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
|
||||
* Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
|
||||
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
|
||||
* HTTP [:page_facing_up:](https://rclone.org/http/)
|
||||
* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
|
||||
* iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/)
|
||||
* ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
|
||||
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
|
||||
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
|
||||
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
|
||||
* IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
|
||||
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
|
||||
* Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
|
||||
* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
|
||||
* Linkbox [:page_facing_up:](https://rclone.org/linkbox)
|
||||
* Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode)
|
||||
* Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu)
|
||||
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
|
||||
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
|
||||
* MEGA [:page_facing_up:](https://rclone.org/mega/)
|
||||
* MEGA S4 Object Storage [:page_facing_up:](https://rclone.org/s3/#mega)
|
||||
* Memory [:page_facing_up:](https://rclone.org/memory/)
|
||||
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
|
||||
* Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/)
|
||||
* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
|
||||
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
|
||||
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
|
||||
* OVH [:page_facing_up:](https://rclone.org/swift/)
|
||||
* Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
|
||||
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
|
||||
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
|
||||
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
|
||||
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
|
||||
* Outscale [:page_facing_up:](https://rclone.org/s3/#outscale)
|
||||
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
|
||||
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
|
||||
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
|
||||
* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
|
||||
* Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/)
|
||||
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
|
||||
* put.io [:page_facing_up:](https://rclone.org/putio/)
|
||||
* Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
|
||||
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
|
||||
* Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
|
||||
* Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
|
||||
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
|
||||
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
|
||||
* rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
|
||||
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
|
||||
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
|
||||
* Seagate Lyve Cloud [:page_facing_up:](https://rclone.org/s3/#lyve)
|
||||
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
|
||||
* Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
|
||||
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
|
||||
* SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
|
||||
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
|
||||
* Storj [:page_facing_up:](https://rclone.org/storj/)
|
||||
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
|
||||
* Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
|
||||
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
|
||||
* Uloz.to [:page_facing_up:](https://rclone.org/ulozto/)
|
||||
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
|
||||
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
|
||||
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
|
||||
* Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
|
||||
* Zata.ai [:page_facing_up:](https://rclone.org/s3/#Zata)
|
||||
* The local filesystem [:page_facing_up:](https://rclone.org/local/)
|
||||
- 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
|
||||
- Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
|
||||
- Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
|
||||
- Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
|
||||
- ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
|
||||
- Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
|
||||
- Box [:page_facing_up:](https://rclone.org/box/)
|
||||
- Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
|
||||
- China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
|
||||
- Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
|
||||
- Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
|
||||
- DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
|
||||
- Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
|
||||
- Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
|
||||
- Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
|
||||
- Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
|
||||
- Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
|
||||
- FileLu [:page_facing_up:](https://rclone.org/filelu/)
|
||||
- Files.com [:page_facing_up:](https://rclone.org/filescom/)
|
||||
- FlashBlade [:page_facing_up:](https://rclone.org/s3/#pure-storage-flashblade)
|
||||
- FTP [:page_facing_up:](https://rclone.org/ftp/)
|
||||
- GoFile [:page_facing_up:](https://rclone.org/gofile/)
|
||||
- Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
|
||||
- Google Drive [:page_facing_up:](https://rclone.org/drive/)
|
||||
- Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
|
||||
- HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
|
||||
- Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
|
||||
- HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
|
||||
- HTTP [:page_facing_up:](https://rclone.org/http/)
|
||||
- Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
|
||||
- iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/)
|
||||
- ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
|
||||
- Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
|
||||
- Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
|
||||
- IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
|
||||
- IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
|
||||
- Koofr [:page_facing_up:](https://rclone.org/koofr/)
|
||||
- Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
|
||||
- Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
|
||||
- Linkbox [:page_facing_up:](https://rclone.org/linkbox)
|
||||
- Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode)
|
||||
- Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu)
|
||||
- Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
|
||||
- Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
|
||||
- MEGA [:page_facing_up:](https://rclone.org/mega/)
|
||||
- MEGA S4 Object Storage [:page_facing_up:](https://rclone.org/s3/#mega)
|
||||
- Memory [:page_facing_up:](https://rclone.org/memory/)
|
||||
- Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
|
||||
- Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/)
|
||||
- Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
|
||||
- Minio [:page_facing_up:](https://rclone.org/s3/#minio)
|
||||
- Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
|
||||
- Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
|
||||
- OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
|
||||
- OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
|
||||
- Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
|
||||
- Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
|
||||
- Outscale [:page_facing_up:](https://rclone.org/s3/#outscale)
|
||||
- OVHcloud Object Storage (Swift) [:page_facing_up:](https://rclone.org/swift/)
|
||||
- OVHcloud Object Storage (S3-compatible) [:page_facing_up:](https://rclone.org/s3/#ovhcloud)
|
||||
- ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
|
||||
- pCloud [:page_facing_up:](https://rclone.org/pcloud/)
|
||||
- Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
|
||||
- PikPak [:page_facing_up:](https://rclone.org/pikpak/)
|
||||
- Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/)
|
||||
- premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
|
||||
- put.io [:page_facing_up:](https://rclone.org/putio/)
|
||||
- Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
|
||||
- QingStor [:page_facing_up:](https://rclone.org/qingstor/)
|
||||
- Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
|
||||
- Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
|
||||
- Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
|
||||
- RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
|
||||
- rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
|
||||
- Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
|
||||
- Seafile [:page_facing_up:](https://rclone.org/seafile/)
|
||||
- Seagate Lyve Cloud [:page_facing_up:](https://rclone.org/s3/#lyve)
|
||||
- SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
|
||||
- Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
|
||||
- SFTP [:page_facing_up:](https://rclone.org/sftp/)
|
||||
- SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
|
||||
- StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
|
||||
- Storj [:page_facing_up:](https://rclone.org/storj/)
|
||||
- SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
|
||||
- Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
|
||||
- Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
|
||||
- Uloz.to [:page_facing_up:](https://rclone.org/ulozto/)
|
||||
- Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
|
||||
- WebDAV [:page_facing_up:](https://rclone.org/webdav/)
|
||||
- Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
|
||||
- Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
|
||||
- Zata.ai [:page_facing_up:](https://rclone.org/s3/#Zata)
|
||||
- The local filesystem [:page_facing_up:](https://rclone.org/local/)
|
||||
|
||||
Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
|
||||
|
||||
@@ -121,50 +123,54 @@ Please see [the full list of all storage providers and their features](https://r
|
||||
|
||||
These backends adapt or modify other storage providers
|
||||
|
||||
* Alias: rename existing remotes [:page_facing_up:](https://rclone.org/alias/)
|
||||
* Cache: cache remotes (DEPRECATED) [:page_facing_up:](https://rclone.org/cache/)
|
||||
* Chunker: split large files [:page_facing_up:](https://rclone.org/chunker/)
|
||||
* Combine: combine multiple remotes into a directory tree [:page_facing_up:](https://rclone.org/combine/)
|
||||
* Compress: compress files [:page_facing_up:](https://rclone.org/compress/)
|
||||
* Crypt: encrypt files [:page_facing_up:](https://rclone.org/crypt/)
|
||||
* Hasher: hash files [:page_facing_up:](https://rclone.org/hasher/)
|
||||
* Union: join multiple remotes to work together [:page_facing_up:](https://rclone.org/union/)
|
||||
- Alias: rename existing remotes [:page_facing_up:](https://rclone.org/alias/)
|
||||
- Cache: cache remotes (DEPRECATED) [:page_facing_up:](https://rclone.org/cache/)
|
||||
- Chunker: split large files [:page_facing_up:](https://rclone.org/chunker/)
|
||||
- Combine: combine multiple remotes into a directory tree [:page_facing_up:](https://rclone.org/combine/)
|
||||
- Compress: compress files [:page_facing_up:](https://rclone.org/compress/)
|
||||
- Crypt: encrypt files [:page_facing_up:](https://rclone.org/crypt/)
|
||||
- Hasher: hash files [:page_facing_up:](https://rclone.org/hasher/)
|
||||
- Union: join multiple remotes to work together [:page_facing_up:](https://rclone.org/union/)
|
||||
|
||||
## Features
|
||||
|
||||
* MD5/SHA-1 hashes checked at all times for file integrity
|
||||
* Timestamps preserved on files
|
||||
* Partial syncs supported on a whole file basis
|
||||
* [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
|
||||
* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
|
||||
* [Bisync](https://rclone.org/bisync/) (two way) to keep two directories in sync bidirectionally
|
||||
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
|
||||
* Can sync to and from network, e.g. two different cloud accounts
|
||||
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
|
||||
* Optional transparent compression ([Compress](https://rclone.org/compress/))
|
||||
* Optional encryption ([Crypt](https://rclone.org/crypt/))
|
||||
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
|
||||
* Multi-threaded downloads to local disk
|
||||
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDAV/FTP/SFTP/DLNA
|
||||
- MD5/SHA-1 hashes checked at all times for file integrity
|
||||
- Timestamps preserved on files
|
||||
- Partial syncs supported on a whole file basis
|
||||
- [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed
|
||||
files
|
||||
- [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory
|
||||
identical
|
||||
- [Bisync](https://rclone.org/bisync/) (two way) to keep two directories in sync
|
||||
bidirectionally
|
||||
- [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash
|
||||
equality
|
||||
- Can sync to and from network, e.g. two different cloud accounts
|
||||
- Optional large file chunking ([Chunker](https://rclone.org/chunker/))
|
||||
- Optional transparent compression ([Compress](https://rclone.org/compress/))
|
||||
- Optional encryption ([Crypt](https://rclone.org/crypt/))
|
||||
- Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
|
||||
- Multi-threaded downloads to local disk
|
||||
- Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files
|
||||
over HTTP/WebDAV/FTP/SFTP/DLNA
|
||||
|
||||
## Installation & documentation
|
||||
|
||||
Please see the [rclone website](https://rclone.org/) for:
|
||||
|
||||
* [Installation](https://rclone.org/install/)
|
||||
* [Documentation & configuration](https://rclone.org/docs/)
|
||||
* [Changelog](https://rclone.org/changelog/)
|
||||
* [FAQ](https://rclone.org/faq/)
|
||||
* [Storage providers](https://rclone.org/overview/)
|
||||
* [Forum](https://forum.rclone.org/)
|
||||
* ...and more
|
||||
- [Installation](https://rclone.org/install/)
|
||||
- [Documentation & configuration](https://rclone.org/docs/)
|
||||
- [Changelog](https://rclone.org/changelog/)
|
||||
- [FAQ](https://rclone.org/faq/)
|
||||
- [Storage providers](https://rclone.org/overview/)
|
||||
- [Forum](https://forum.rclone.org/)
|
||||
- ...and more
|
||||
|
||||
## Downloads
|
||||
|
||||
* https://rclone.org/downloads/
|
||||
- <https://rclone.org/downloads/>
|
||||
|
||||
License
|
||||
-------
|
||||
## License
|
||||
|
||||
This is free software under the terms of the MIT license (check the
|
||||
[COPYING file](/COPYING) included in this package).
|
||||
|
||||
155
RELEASE.md
155
RELEASE.md
@@ -4,52 +4,55 @@ This file describes how to make the various kinds of releases
|
||||
|
||||
## Extra required software for making a release
|
||||
|
||||
* [gh the github cli](https://github.com/cli/cli) for uploading packages
|
||||
* pandoc for making the html and man pages
|
||||
- [gh the github cli](https://github.com/cli/cli) for uploading packages
|
||||
- pandoc for making the html and man pages
|
||||
|
||||
## Making a release
|
||||
|
||||
* git checkout master # see below for stable branch
|
||||
* git pull # IMPORTANT
|
||||
* git status - make sure everything is checked in
|
||||
* Check GitHub actions build for master is Green
|
||||
* make test # see integration test server or run locally
|
||||
* make tag
|
||||
* edit docs/content/changelog.md # make sure to remove duplicate logs from point releases
|
||||
* make tidy
|
||||
* make doc
|
||||
* git status - to check for new man pages - git add them
|
||||
* git commit -a -v -m "Version v1.XX.0"
|
||||
* make retag
|
||||
* git push origin # without --follow-tags so it doesn't push the tag if it fails
|
||||
* git push --follow-tags origin
|
||||
* # Wait for the GitHub builds to complete then...
|
||||
* make fetch_binaries
|
||||
* make tarball
|
||||
* make vendorball
|
||||
* make sign_upload
|
||||
* make check_sign
|
||||
* make upload
|
||||
* make upload_website
|
||||
* make upload_github
|
||||
* make startdev # make startstable for stable branch
|
||||
* # announce with forum post, twitter post, patreon post
|
||||
- git checkout master # see below for stable branch
|
||||
- git pull # IMPORTANT
|
||||
- git status - make sure everything is checked in
|
||||
- Check GitHub actions build for master is Green
|
||||
- make test # see integration test server or run locally
|
||||
- make tag
|
||||
- edit docs/content/changelog.md # make sure to remove duplicate logs from point
|
||||
releases
|
||||
- make tidy
|
||||
- make doc
|
||||
- git status - to check for new man pages - git add them
|
||||
- git commit -a -v -m "Version v1.XX.0"
|
||||
- make retag
|
||||
- git push origin # without --follow-tags so it doesn't push the tag if it fails
|
||||
- git push --follow-tags origin
|
||||
- \# Wait for the GitHub builds to complete then...
|
||||
- make fetch_binaries
|
||||
- make tarball
|
||||
- make vendorball
|
||||
- make sign_upload
|
||||
- make check_sign
|
||||
- make upload
|
||||
- make upload_website
|
||||
- make upload_github
|
||||
- make startdev # make startstable for stable branch
|
||||
- \# announce with forum post, twitter post, patreon post
|
||||
|
||||
## Update dependencies
|
||||
|
||||
Early in the next release cycle update the dependencies.
|
||||
|
||||
* Review any pinned packages in go.mod and remove if possible
|
||||
* `make updatedirect`
|
||||
* `make GOTAGS=cmount`
|
||||
* `make compiletest`
|
||||
* Fix anything which doesn't compile at this point and commit changes here
|
||||
* `git commit -a -v -m "build: update all dependencies"`
|
||||
- Review any pinned packages in go.mod and remove if possible
|
||||
- `make updatedirect`
|
||||
- `make GOTAGS=cmount`
|
||||
- `make compiletest`
|
||||
- Fix anything which doesn't compile at this point and commit changes here
|
||||
- `git commit -a -v -m "build: update all dependencies"`
|
||||
|
||||
If the `make updatedirect` upgrades the version of go in the `go.mod`
|
||||
|
||||
go 1.22.0
|
||||
|
||||
```text
|
||||
go 1.22.0
|
||||
```
|
||||
|
||||
then go to manual mode. `go1.22` here is the lowest supported version
|
||||
in the `go.mod`.
|
||||
|
||||
@@ -57,7 +60,7 @@ If `make updatedirect` added a `toolchain` directive then remove it.
|
||||
We don't want to force a toolchain on our users. Linux packagers are
|
||||
often using a version of Go that is a few versions out of date.
|
||||
|
||||
```
|
||||
```sh
|
||||
go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
|
||||
go get -d $(cat /tmp/potential-upgrades)
|
||||
go mod tidy -go=1.22 -compat=1.22
|
||||
@@ -67,7 +70,7 @@ If the `go mod tidy` fails use the output from it to remove the
|
||||
package which can't be upgraded from `/tmp/potential-upgrades` when
|
||||
done
|
||||
|
||||
```
|
||||
```sh
|
||||
git co go.mod go.sum
|
||||
```
|
||||
|
||||
@@ -77,12 +80,12 @@ Optionally upgrade the direct and indirect dependencies. This is very
|
||||
likely to fail if the manual method was used abve - in that case
|
||||
ignore it as it is too time consuming to fix.
|
||||
|
||||
* `make update`
|
||||
* `make GOTAGS=cmount`
|
||||
* `make compiletest`
|
||||
* roll back any updates which didn't compile
|
||||
* `git commit -a -v --amend`
|
||||
* **NB** watch out for this changing the default go version in `go.mod`
|
||||
- `make update`
|
||||
- `make GOTAGS=cmount`
|
||||
- `make compiletest`
|
||||
- roll back any updates which didn't compile
|
||||
- `git commit -a -v --amend`
|
||||
- **NB** watch out for this changing the default go version in `go.mod`
|
||||
|
||||
Note that `make update` updates all direct and indirect dependencies
|
||||
and there can occasionally be forwards compatibility problems with
|
||||
@@ -99,7 +102,9 @@ The above procedure will not upgrade major versions, so v2 to v3.
|
||||
However this tool can show which major versions might need to be
|
||||
upgraded:
|
||||
|
||||
go run github.com/icholy/gomajor@latest list -major
|
||||
```sh
|
||||
go run github.com/icholy/gomajor@latest list -major
|
||||
```
|
||||
|
||||
Expect API breakage when updating major versions.
|
||||
|
||||
@@ -107,7 +112,9 @@ Expect API breakage when updating major versions.
|
||||
|
||||
At some point after the release run
|
||||
|
||||
bin/tidy-beta v1.55
|
||||
```sh
|
||||
bin/tidy-beta v1.55
|
||||
```
|
||||
|
||||
where the version number is that of a couple ago to remove old beta binaries.
|
||||
|
||||
@@ -117,54 +124,64 @@ If rclone needs a point release due to some horrendous bug:
|
||||
|
||||
Set vars
|
||||
|
||||
* BASE_TAG=v1.XX # e.g. v1.52
|
||||
* NEW_TAG=${BASE_TAG}.Y # e.g. v1.52.1
|
||||
* echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1
|
||||
- BASE_TAG=v1.XX # e.g. v1.52
|
||||
- NEW_TAG=${BASE_TAG}.Y # e.g. v1.52.1
|
||||
- echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1
|
||||
|
||||
First make the release branch. If this is a second point release then
|
||||
this will be done already.
|
||||
|
||||
* git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
|
||||
* make startstable
|
||||
- git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
|
||||
- make startstable
|
||||
|
||||
Now
|
||||
|
||||
* git co ${BASE_TAG}-stable
|
||||
* git cherry-pick any fixes
|
||||
* make startstable
|
||||
* Do the steps as above
|
||||
* git co master
|
||||
* `#` cherry pick the changes to the changelog - check the diff to make sure it is correct
|
||||
* git checkout ${BASE_TAG}-stable docs/content/changelog.md
|
||||
* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
|
||||
* git push
|
||||
- git co ${BASE_TAG}-stable
|
||||
- git cherry-pick any fixes
|
||||
- make startstable
|
||||
- Do the steps as above
|
||||
- git co master
|
||||
- `#` cherry pick the changes to the changelog - check the diff to make sure it
|
||||
is correct
|
||||
- git checkout ${BASE_TAG}-stable docs/content/changelog.md
|
||||
- git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
|
||||
- git push
|
||||
|
||||
## Sponsor logos
|
||||
|
||||
If updating the website note that the sponsor logos have been moved out of the main repository.
|
||||
If updating the website note that the sponsor logos have been moved out of the
|
||||
main repository.
|
||||
|
||||
You will need to checkout `/docs/static/img/logos` from https://github.com/rclone/third-party-logos
|
||||
You will need to checkout `/docs/static/img/logos` from <https://github.com/rclone/third-party-logos>
|
||||
which is a private repo containing artwork from sponsors.
|
||||
|
||||
## Update the website between releases
|
||||
|
||||
Create an update website branch based off the last release
|
||||
|
||||
git co -b update-website
|
||||
```sh
|
||||
git co -b update-website
|
||||
```
|
||||
|
||||
If the branch already exists, double check there are no commits that need saving.
|
||||
|
||||
Now reset the branch to the last release
|
||||
|
||||
git reset --hard v1.64.0
|
||||
```sh
|
||||
git reset --hard v1.64.0
|
||||
```
|
||||
|
||||
Create the changes, check them in, test with `make serve` then
|
||||
|
||||
make upload_test_website
|
||||
```sh
|
||||
make upload_test_website
|
||||
```
|
||||
|
||||
Check out https://test.rclone.org and when happy
|
||||
Check out <https://test.rclone.org> and when happy
|
||||
|
||||
make upload_website
|
||||
```sh
|
||||
make upload_website
|
||||
```
|
||||
|
||||
Cherry pick any changes back to master and the stable branch if it is active.
|
||||
|
||||
@@ -172,14 +189,14 @@ Cherry pick any changes back to master and the stable branch if it is active.
|
||||
|
||||
To do a basic build of rclone's docker image to debug builds locally:
|
||||
|
||||
```
|
||||
```sh
|
||||
docker buildx build --load -t rclone/rclone:testing --progress=plain .
|
||||
docker run --rm rclone/rclone:testing version
|
||||
```
|
||||
|
||||
To test the multipatform build
|
||||
|
||||
```
|
||||
```sh
|
||||
docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 .
|
||||
```
|
||||
|
||||
@@ -187,6 +204,6 @@ To make a full build then set the tags correctly and add `--push`
|
||||
|
||||
Note that you can't only build one architecture - you need to build them all.
|
||||
|
||||
```
|
||||
```sh
|
||||
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
|
||||
```
|
||||
|
||||
@@ -271,9 +271,9 @@ type User struct {
|
||||
ModifiedAt time.Time `json:"modified_at"`
|
||||
Language string `json:"language"`
|
||||
Timezone string `json:"timezone"`
|
||||
SpaceAmount int64 `json:"space_amount"`
|
||||
SpaceUsed int64 `json:"space_used"`
|
||||
MaxUploadSize int64 `json:"max_upload_size"`
|
||||
SpaceAmount float64 `json:"space_amount"`
|
||||
SpaceUsed float64 `json:"space_used"`
|
||||
MaxUploadSize float64 `json:"max_upload_size"`
|
||||
Status string `json:"status"`
|
||||
JobTitle string `json:"job_title"`
|
||||
Phone string `json:"phone"`
|
||||
|
||||
@@ -1446,9 +1446,9 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
||||
}
|
||||
}
|
||||
usage = &fs.Usage{
|
||||
Total: fs.NewUsageValue(int64(total)), // quota of bytes that can be used
|
||||
Used: fs.NewUsageValue(int64(used)), // bytes in use
|
||||
Free: fs.NewUsageValue(int64(total - used)), // bytes which can be uploaded before reaching the quota
|
||||
Total: fs.NewUsageValue(total), // quota of bytes that can be used
|
||||
Used: fs.NewUsageValue(used), // bytes in use
|
||||
Free: fs.NewUsageValue(total - used), // bytes which can be uploaded before reaching the quota
|
||||
}
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
@@ -163,6 +163,16 @@ Enabled by default. Use 0 to disable.`,
|
||||
Help: "Disable TLS 1.3 (workaround for FTP servers with buggy TLS)",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "allow_insecure_tls_ciphers",
|
||||
Help: `Allow insecure TLS ciphers
|
||||
|
||||
Setting this flag will allow the usage of the following TLS ciphers in addition to the secure defaults:
|
||||
|
||||
- TLS_RSA_WITH_AES_128_GCM_SHA256
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "shut_timeout",
|
||||
Help: "Maximum time to wait for data connection closing status.",
|
||||
@@ -236,29 +246,30 @@ a write only folder.
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Host string `config:"host"`
|
||||
User string `config:"user"`
|
||||
Pass string `config:"pass"`
|
||||
Port string `config:"port"`
|
||||
TLS bool `config:"tls"`
|
||||
ExplicitTLS bool `config:"explicit_tls"`
|
||||
TLSCacheSize int `config:"tls_cache_size"`
|
||||
DisableTLS13 bool `config:"disable_tls13"`
|
||||
Concurrency int `config:"concurrency"`
|
||||
SkipVerifyTLSCert bool `config:"no_check_certificate"`
|
||||
DisableEPSV bool `config:"disable_epsv"`
|
||||
DisableMLSD bool `config:"disable_mlsd"`
|
||||
DisableUTF8 bool `config:"disable_utf8"`
|
||||
WritingMDTM bool `config:"writing_mdtm"`
|
||||
ForceListHidden bool `config:"force_list_hidden"`
|
||||
IdleTimeout fs.Duration `config:"idle_timeout"`
|
||||
CloseTimeout fs.Duration `config:"close_timeout"`
|
||||
ShutTimeout fs.Duration `config:"shut_timeout"`
|
||||
AskPassword bool `config:"ask_password"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
SocksProxy string `config:"socks_proxy"`
|
||||
HTTPProxy string `config:"http_proxy"`
|
||||
NoCheckUpload bool `config:"no_check_upload"`
|
||||
Host string `config:"host"`
|
||||
User string `config:"user"`
|
||||
Pass string `config:"pass"`
|
||||
Port string `config:"port"`
|
||||
TLS bool `config:"tls"`
|
||||
ExplicitTLS bool `config:"explicit_tls"`
|
||||
TLSCacheSize int `config:"tls_cache_size"`
|
||||
DisableTLS13 bool `config:"disable_tls13"`
|
||||
AllowInsecureTLSCiphers bool `config:"allow_insecure_tls_ciphers"`
|
||||
Concurrency int `config:"concurrency"`
|
||||
SkipVerifyTLSCert bool `config:"no_check_certificate"`
|
||||
DisableEPSV bool `config:"disable_epsv"`
|
||||
DisableMLSD bool `config:"disable_mlsd"`
|
||||
DisableUTF8 bool `config:"disable_utf8"`
|
||||
WritingMDTM bool `config:"writing_mdtm"`
|
||||
ForceListHidden bool `config:"force_list_hidden"`
|
||||
IdleTimeout fs.Duration `config:"idle_timeout"`
|
||||
CloseTimeout fs.Duration `config:"close_timeout"`
|
||||
ShutTimeout fs.Duration `config:"shut_timeout"`
|
||||
AskPassword bool `config:"ask_password"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
SocksProxy string `config:"socks_proxy"`
|
||||
HTTPProxy string `config:"http_proxy"`
|
||||
NoCheckUpload bool `config:"no_check_upload"`
|
||||
}
|
||||
|
||||
// Fs represents a remote FTP server
|
||||
@@ -407,6 +418,14 @@ func (f *Fs) tlsConfig() *tls.Config {
|
||||
if f.opt.DisableTLS13 {
|
||||
tlsConfig.MaxVersion = tls.VersionTLS12
|
||||
}
|
||||
if f.opt.AllowInsecureTLSCiphers {
|
||||
var ids []uint16
|
||||
// Read default ciphers
|
||||
for _, cs := range tls.CipherSuites() {
|
||||
ids = append(ids, cs.ID)
|
||||
}
|
||||
tlsConfig.CipherSuites = append(ids, tls.TLS_RSA_WITH_AES_128_GCM_SHA256)
|
||||
}
|
||||
}
|
||||
return tlsConfig
|
||||
}
|
||||
|
||||
@@ -371,9 +371,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
return nil, err
|
||||
}
|
||||
return &fs.Usage{
|
||||
Total: fs.NewUsageValue(int64(info.Capacity)),
|
||||
Used: fs.NewUsageValue(int64(info.Used)),
|
||||
Free: fs.NewUsageValue(int64(info.Remaining)),
|
||||
Total: fs.NewUsageValue(info.Capacity),
|
||||
Used: fs.NewUsageValue(info.Used),
|
||||
Free: fs.NewUsageValue(info.Remaining),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -946,9 +946,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
return nil, fmt.Errorf("failed to get Mega Quota: %w", err)
|
||||
}
|
||||
usage := &fs.Usage{
|
||||
Total: fs.NewUsageValue(int64(q.Mstrg)), // quota of bytes that can be used
|
||||
Used: fs.NewUsageValue(int64(q.Cstrg)), // bytes in use
|
||||
Free: fs.NewUsageValue(int64(q.Mstrg - q.Cstrg)), // bytes which can be uploaded before reaching the quota
|
||||
Total: fs.NewUsageValue(q.Mstrg), // quota of bytes that can be used
|
||||
Used: fs.NewUsageValue(q.Cstrg), // bytes in use
|
||||
Free: fs.NewUsageValue(q.Mstrg - q.Cstrg), // bytes which can be uploaded before reaching the quota
|
||||
}
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
@@ -979,6 +979,24 @@ func (f *Fs) deleteObjects(ctx context.Context, IDs []string, useTrash bool) (er
|
||||
return nil
|
||||
}
|
||||
|
||||
// untrash a file or directory by ID
|
||||
//
|
||||
// If a name collision occurs in the destination folder, PikPak might automatically
|
||||
// rename the restored item(s) by appending a numbered suffix. For example,
|
||||
// foo.txt -> foo(1).txt or foo(2).txt if foo(1).txt already exists
|
||||
func (f *Fs) untrashObjects(ctx context.Context, IDs []string) (err error) {
|
||||
if len(IDs) == 0 {
|
||||
return nil
|
||||
}
|
||||
req := api.RequestBatch{
|
||||
IDs: IDs,
|
||||
}
|
||||
if err := f.requestBatchAction(ctx, "batchUntrash", &req); err != nil {
|
||||
return fmt.Errorf("untrash object failed: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// purgeCheck removes the root directory, if check is set then it
|
||||
// refuses to do so if it has anything in
|
||||
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
||||
@@ -1063,7 +1081,14 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
|
||||
return f.waitTask(ctx, info.TaskID)
|
||||
}
|
||||
|
||||
// Move the object
|
||||
// Move the object to a new parent folder
|
||||
//
|
||||
// Objects cannot be moved to their current folder.
|
||||
// "file_move_or_copy_to_cur" (9): Please don't move or copy to current folder or sub folder
|
||||
//
|
||||
// If a name collision occurs in the destination folder, PikPak might automatically
|
||||
// rename the moved item(s) by appending a numbered suffix. For example,
|
||||
// foo.txt -> foo(1).txt or foo(2).txt if foo(1).txt already exists
|
||||
func (f *Fs) moveObjects(ctx context.Context, IDs []string, dirID string) (err error) {
|
||||
if len(IDs) == 0 {
|
||||
return nil
|
||||
@@ -1079,6 +1104,12 @@ func (f *Fs) moveObjects(ctx context.Context, IDs []string, dirID string) (err e
|
||||
}
|
||||
|
||||
// renames the object
|
||||
//
|
||||
// The new name must be different from the current name.
|
||||
// "file_rename_to_same_name" (3): Name of file or folder is not changed
|
||||
//
|
||||
// Within the same folder, object names must be unique.
|
||||
// "file_duplicated_name" (3): File name cannot be repeated
|
||||
func (f *Fs) renameObject(ctx context.Context, ID, newName string) (info *api.File, err error) {
|
||||
req := api.File{
|
||||
Name: f.opt.Enc.FromStandardName(newName),
|
||||
@@ -1163,18 +1194,13 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantMove
|
||||
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't move - not same remote type")
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
err := srcObj.readMetaData(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
srcLeaf, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
|
||||
err = srcObj.readMetaData(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1185,31 +1211,74 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if srcParentID != dstParentID {
|
||||
// Do the move
|
||||
if srcObj.parent != dstParentID {
|
||||
// Perform the move. A numbered copy might be generated upon name collision.
|
||||
if err = f.moveObjects(ctx, []string{srcObj.id}, dstParentID); err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("move: failed to move object %s to new parent %s: %w", srcObj.id, dstParentID, err)
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
// FIXME: Restored file might have a numbered name if a conflict occurs
|
||||
if mvErr := f.moveObjects(ctx, []string{srcObj.id}, srcObj.parent); mvErr != nil {
|
||||
fs.Logf(f, "move: couldn't restore original object %q to %q after move failure: %v", dstObj.id, src.Remote(), mvErr)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
// Manually update info of moved object to save API calls
|
||||
dstObj.id = srcObj.id
|
||||
dstObj.mimeType = srcObj.mimeType
|
||||
dstObj.gcid = srcObj.gcid
|
||||
dstObj.md5sum = srcObj.md5sum
|
||||
dstObj.hasMetaData = true
|
||||
|
||||
if srcLeaf != dstLeaf {
|
||||
// Rename
|
||||
info, err := f.renameObject(ctx, srcObj.id, dstLeaf)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("move: couldn't rename moved file: %w", err)
|
||||
// Find the moved object and any conflict object with the same name.
|
||||
var moved, conflict *api.File
|
||||
_, err = f.listAll(ctx, dstParentID, api.KindOfFile, "false", func(item *api.File) bool {
|
||||
if item.ID == srcObj.id {
|
||||
moved = item
|
||||
if item.Name == dstLeaf {
|
||||
return true
|
||||
}
|
||||
} else if item.Name == dstLeaf {
|
||||
conflict = item
|
||||
}
|
||||
return dstObj, dstObj.setMetaData(info)
|
||||
// Stop early if both found
|
||||
return moved != nil && conflict != nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("move: couldn't locate moved file %q in destination directory %q: %w", srcObj.id, dstParentID, err)
|
||||
}
|
||||
return dstObj, nil
|
||||
if moved == nil {
|
||||
return nil, fmt.Errorf("move: moved file %q not found in destination", srcObj.id)
|
||||
}
|
||||
|
||||
// If moved object already has the correct name, return
|
||||
if moved.Name == dstLeaf {
|
||||
return dstObj, dstObj.setMetaData(moved)
|
||||
}
|
||||
// If name collision, delete conflicting file first
|
||||
if conflict != nil {
|
||||
if err = f.deleteObjects(ctx, []string{conflict.ID}, true); err != nil {
|
||||
return nil, fmt.Errorf("move: couldn't delete conflicting file: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
if restoreErr := f.untrashObjects(ctx, []string{conflict.ID}); restoreErr != nil {
|
||||
fs.Logf(f, "move: couldn't restore conflicting file: %v", restoreErr)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
info, err := f.renameObject(ctx, srcObj.id, dstLeaf)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("move: couldn't rename moved file %q to %q: %w", dstObj.id, dstLeaf, err)
|
||||
}
|
||||
return dstObj, dstObj.setMetaData(info)
|
||||
}
|
||||
|
||||
// copy objects
|
||||
//
|
||||
// Objects cannot be copied to their current folder.
|
||||
// "file_move_or_copy_to_cur" (9): Please don't move or copy to current folder or sub folder
|
||||
//
|
||||
// If a name collision occurs in the destination folder, PikPak might automatically
|
||||
// rename the copied item(s) by appending a numbered suffix. For example,
|
||||
// foo.txt -> foo(1).txt or foo(2).txt if foo(1).txt already exists
|
||||
func (f *Fs) copyObjects(ctx context.Context, IDs []string, dirID string) (err error) {
|
||||
if len(IDs) == 0 {
|
||||
return nil
|
||||
@@ -1233,13 +1302,13 @@ func (f *Fs) copyObjects(ctx context.Context, IDs []string, dirID string) (err e
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't copy - not same remote type")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
err := srcObj.readMetaData(ctx)
|
||||
err = srcObj.readMetaData(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1254,31 +1323,55 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
fs.Debugf(src, "Can't copy - same parent")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
|
||||
// Check for possible conflicts: Pikpak creates numbered copies on name collision.
|
||||
var conflict *api.File
|
||||
_, srcLeaf := dircache.SplitPath(srcObj.remote)
|
||||
if srcLeaf == dstLeaf {
|
||||
if conflict, err = f.readMetaDataForPath(ctx, remote); err == nil {
|
||||
// delete conflicting file
|
||||
if err = f.deleteObjects(ctx, []string{conflict.ID}, true); err != nil {
|
||||
return nil, fmt.Errorf("copy: couldn't delete conflicting file: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
if restoreErr := f.untrashObjects(ctx, []string{conflict.ID}); restoreErr != nil {
|
||||
fs.Logf(f, "copy: couldn't restore conflicting file: %v", restoreErr)
|
||||
}
|
||||
}
|
||||
}()
|
||||
} else if err != fs.ErrorObjectNotFound {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
dstDir, _ := dircache.SplitPath(remote)
|
||||
dstObj.remote = path.Join(dstDir, srcLeaf)
|
||||
if conflict, err = f.readMetaDataForPath(ctx, dstObj.remote); err == nil {
|
||||
tmpName := conflict.Name + "-rclone-copy-" + random.String(8)
|
||||
if _, err = f.renameObject(ctx, conflict.ID, tmpName); err != nil {
|
||||
return nil, fmt.Errorf("copy: couldn't rename conflicting file: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if _, renameErr := f.renameObject(ctx, conflict.ID, conflict.Name); renameErr != nil {
|
||||
fs.Logf(f, "copy: couldn't rename conflicting file back to original: %v", renameErr)
|
||||
}
|
||||
}()
|
||||
} else if err != fs.ErrorObjectNotFound {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Copy the object
|
||||
if err := f.copyObjects(ctx, []string{srcObj.id}, dstParentID); err != nil {
|
||||
return nil, fmt.Errorf("couldn't copy file: %w", err)
|
||||
}
|
||||
// Update info of the copied object with new parent but source name
|
||||
if info, err := dstObj.fs.readMetaDataForPath(ctx, srcObj.remote); err != nil {
|
||||
return nil, fmt.Errorf("copy: couldn't locate copied file: %w", err)
|
||||
} else if err = dstObj.setMetaData(info); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Can't copy and change name in one step so we have to check if we have
|
||||
// the correct name after copy
|
||||
srcLeaf, _, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
|
||||
err = dstObj.readMetaData(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("copy: couldn't locate copied file: %w", err)
|
||||
}
|
||||
|
||||
if srcLeaf != dstLeaf {
|
||||
// Rename
|
||||
info, err := f.renameObject(ctx, dstObj.id, dstLeaf)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("copy: couldn't rename copied file: %w", err)
|
||||
}
|
||||
return dstObj, dstObj.setMetaData(info)
|
||||
return f.Move(ctx, dstObj, remote)
|
||||
}
|
||||
return dstObj, nil
|
||||
}
|
||||
|
||||
@@ -793,7 +793,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
||||
return nil, err
|
||||
}
|
||||
usage = &fs.Usage{
|
||||
Used: fs.NewUsageValue(int64(info.SpaceUsed)),
|
||||
Used: fs.NewUsageValue(info.SpaceUsed),
|
||||
}
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
131
backend/s3/s3.go
@@ -149,6 +149,9 @@ var providerOption = fs.Option{
|
||||
}, {
|
||||
Value: "Outscale",
|
||||
Help: "OUTSCALE Object Storage (OOS)",
|
||||
}, {
|
||||
Value: "OVHcloud",
|
||||
Help: "OVHcloud Object Storage",
|
||||
}, {
|
||||
Value: "Petabox",
|
||||
Help: "Petabox Object Storage",
|
||||
@@ -535,6 +538,59 @@ func init() {
|
||||
Value: "ap-northeast-1",
|
||||
Help: "Tokyo, Japan",
|
||||
}},
|
||||
}, {
|
||||
// References:
|
||||
// https://help.ovhcloud.com/csm/en-public-cloud-storage-s3-location?id=kb_article_view&sysparm_article=KB0047384
|
||||
// https://support.us.ovhcloud.com/hc/en-us/articles/10667991081107-Endpoints-and-Object-Storage-Geoavailability
|
||||
Name: "region",
|
||||
Help: "Region where your bucket will be created and your data stored.\n",
|
||||
Provider: "OVHcloud",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "gra",
|
||||
Help: "Gravelines, France",
|
||||
}, {
|
||||
Value: "rbx",
|
||||
Help: "Roubaix, France",
|
||||
}, {
|
||||
Value: "sbg",
|
||||
Help: "Strasbourg, France",
|
||||
}, {
|
||||
Value: "eu-west-par",
|
||||
Help: "Paris, France (3AZ)",
|
||||
}, {
|
||||
Value: "de",
|
||||
Help: "Frankfurt, Germany",
|
||||
}, {
|
||||
Value: "uk",
|
||||
Help: "London, United Kingdom",
|
||||
}, {
|
||||
Value: "waw",
|
||||
Help: "Warsaw, Poland",
|
||||
}, {
|
||||
Value: "bhs",
|
||||
Help: "Beauharnois, Canada",
|
||||
}, {
|
||||
Value: "ca-east-tor",
|
||||
Help: "Toronto, Canada",
|
||||
}, {
|
||||
Value: "sgp",
|
||||
Help: "Singapore",
|
||||
}, {
|
||||
Value: "ap-southeast-syd",
|
||||
Help: "Sydney, Australia",
|
||||
}, {
|
||||
Value: "ap-south-mum",
|
||||
Help: "Mumbai, India",
|
||||
}, {
|
||||
Value: "us-east-va",
|
||||
Help: "Vint Hill, Virginia, USA",
|
||||
}, {
|
||||
Value: "us-west-or",
|
||||
Help: "Hillsboro, Oregon, USA",
|
||||
}, {
|
||||
Value: "rbx-archive",
|
||||
Help: "Roubaix, France (Cold Archive)",
|
||||
}},
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region where your bucket will be created and your data stored.\n",
|
||||
@@ -587,7 +643,7 @@ func init() {
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
|
||||
Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,FlashBlade,IONOS,Petabox,Liara,Linode,Magalu,Qiniu,RackCorp,Scaleway,Selectel,Storj,Synology,TencentCOS,HuaweiOBS,IDrive,Mega,Zata",
|
||||
Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,FlashBlade,IONOS,Petabox,Liara,Linode,Magalu,OVHcloud,Qiniu,RackCorp,Scaleway,Selectel,Storj,Synology,TencentCOS,HuaweiOBS,IDrive,Mega,Zata",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "Use this if unsure.\nWill use v4 signatures and an empty region.",
|
||||
@@ -1174,6 +1230,71 @@ func init() {
|
||||
Value: "obs.ru-northwest-2.myhuaweicloud.com",
|
||||
Help: "RU-Moscow2",
|
||||
}},
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for OVHcloud Object Storage.",
|
||||
Provider: "OVHcloud",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "s3.gra.io.cloud.ovh.net",
|
||||
Help: "OVHcloud Gravelines, France",
|
||||
Provider: "OVHcloud",
|
||||
}, {
|
||||
Value: "s3.rbx.io.cloud.ovh.net",
|
||||
Help: "OVHcloud Roubaix, France",
|
||||
Provider: "OVHcloud",
|
||||
}, {
|
||||
Value: "s3.sbg.io.cloud.ovh.net",
|
||||
Help: "OVHcloud Strasbourg, France",
|
||||
Provider: "OVHcloud",
|
||||
}, {
|
||||
Value: "s3.eu-west-par.io.cloud.ovh.net",
|
||||
Help: "OVHcloud Paris, France (3AZ)",
|
||||
Provider: "OVHcloud",
|
||||
}, {
|
||||
Value: "s3.de.io.cloud.ovh.net",
|
||||
Help: "OVHcloud Frankfurt, Germany",
|
||||
Provider: "OVHcloud",
|
||||
}, {
|
||||
Value: "s3.uk.io.cloud.ovh.net",
|
||||
Help: "OVHcloud London, United Kingdom",
|
||||
Provider: "OVHcloud",
|
||||
}, {
|
||||
Value: "s3.waw.io.cloud.ovh.net",
|
||||
Help: "OVHcloud Warsaw, Poland",
|
||||
Provider: "OVHcloud",
|
||||
}, {
|
||||
Value: "s3.bhs.io.cloud.ovh.net",
|
||||
Help: "OVHcloud Beauharnois, Canada",
|
||||
Provider: "OVHcloud",
|
||||
}, {
|
||||
Value: "s3.ca-east-tor.io.cloud.ovh.net",
|
||||
Help: "OVHcloud Toronto, Canada",
|
||||
Provider: "OVHcloud",
|
||||
}, {
|
||||
Value: "s3.sgp.io.cloud.ovh.net",
|
||||
Help: "OVHcloud Singapore",
|
||||
Provider: "OVHcloud",
|
||||
}, {
|
||||
Value: "s3.ap-southeast-syd.io.cloud.ovh.net",
|
||||
Help: "OVHcloud Sydney, Australia",
|
||||
Provider: "OVHcloud",
|
||||
}, {
|
||||
Value: "s3.ap-south-mum.io.cloud.ovh.net",
|
||||
Help: "OVHcloud Mumbai, India",
|
||||
Provider: "OVHcloud",
|
||||
}, {
|
||||
Value: "s3.us-east-va.io.cloud.ovh.us",
|
||||
Help: "OVHcloud Vint Hill, Virginia, USA",
|
||||
Provider: "OVHcloud",
|
||||
}, {
|
||||
Value: "s3.us-west-or.io.cloud.ovh.us",
|
||||
Help: "OVHcloud Hillsboro, Oregon, USA",
|
||||
Provider: "OVHcloud",
|
||||
}, {
|
||||
Value: "s3.rbx-archive.io.cloud.ovh.net",
|
||||
Help: "OVHcloud Roubaix, France (Cold Archive)",
|
||||
Provider: "OVHcloud",
|
||||
}},
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for Scaleway Object Storage.",
|
||||
@@ -1411,7 +1532,7 @@ func init() {
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
|
||||
Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,LyveCloud,Magalu,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox,Zata",
|
||||
Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,LyveCloud,Magalu,OVHcloud,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox,Zata",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "objects-us-east-1.dream.io",
|
||||
Help: "Dream Objects endpoint",
|
||||
@@ -1946,7 +2067,7 @@ func init() {
|
||||
}, {
|
||||
Name: "location_constraint",
|
||||
Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
|
||||
Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,FlashBlade,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Outscale,Qiniu,RackCorp,Scaleway,Selectel,StackPath,Storj,TencentCOS,Petabox,Mega",
|
||||
Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,FlashBlade,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Outscale,OVHcloud,Qiniu,RackCorp,Scaleway,Selectel,StackPath,Storj,TencentCOS,Petabox,Mega",
|
||||
}, {
|
||||
Name: "acl",
|
||||
Help: `Canned ACL used when creating buckets and storing or copying objects.
|
||||
@@ -3507,7 +3628,7 @@ func setQuirks(opt *Options) {
|
||||
useUnsignedPayload = false // AWS has trailer support which means it adds checksums in the trailer without seeking
|
||||
case "Alibaba":
|
||||
useMultipartEtag = false // Alibaba seems to calculate multipart Etags differently from AWS
|
||||
useAlreadyExists = true // returns 200 OK
|
||||
useAlreadyExists = false // returns BucketAlreadyExists
|
||||
case "HuaweiOBS":
|
||||
// Huawei OBS PFS does not support ListObjectsV2, and if urlEncodeListings is turned on, the marker will not work and the listing will keep returning the same page forever.
|
||||
urlEncodeListings = false
|
||||
@@ -3589,6 +3710,8 @@ func setQuirks(opt *Options) {
|
||||
useAlreadyExists = false // untested
|
||||
case "Outscale":
|
||||
virtualHostStyle = false
|
||||
case "OVHcloud":
|
||||
// No quirks
|
||||
case "RackCorp":
|
||||
// No quirks
|
||||
useMultipartEtag = false // untested
|
||||
|
||||
@@ -1863,9 +1863,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
free := vfsStats.FreeSpace()
|
||||
used := total - free
|
||||
return &fs.Usage{
|
||||
Total: fs.NewUsageValue(int64(total)),
|
||||
Used: fs.NewUsageValue(int64(used)),
|
||||
Free: fs.NewUsageValue(int64(free)),
|
||||
Total: fs.NewUsageValue(total),
|
||||
Used: fs.NewUsageValue(used),
|
||||
Free: fs.NewUsageValue(free),
|
||||
}, nil
|
||||
} else if err != nil {
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
|
||||
@@ -494,11 +494,11 @@ func (f *Fs) About(ctx context.Context) (_ *fs.Usage, err error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
bs := int64(stat.BlockSize())
|
||||
bs := stat.BlockSize()
|
||||
usage := &fs.Usage{
|
||||
Total: fs.NewUsageValue(bs * int64(stat.TotalBlockCount())),
|
||||
Used: fs.NewUsageValue(bs * int64(stat.TotalBlockCount()-stat.FreeBlockCount())),
|
||||
Free: fs.NewUsageValue(bs * int64(stat.AvailableBlockCount())),
|
||||
Total: fs.NewUsageValue(bs * stat.TotalBlockCount()),
|
||||
Used: fs.NewUsageValue(bs * (stat.TotalBlockCount() - stat.FreeBlockCount())),
|
||||
Free: fs.NewUsageValue(bs * stat.AvailableBlockCount()),
|
||||
}
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
@@ -1,3 +0,0 @@
|
||||
module go-test-cache
|
||||
|
||||
go 1.24
|
||||
@@ -1,123 +0,0 @@
|
||||
// This code was copied from:
|
||||
// https://github.com/fastly/cli/blob/main/scripts/go-test-cache/main.go
|
||||
// which in turn is based on the following script and was generated using AI.
|
||||
// https://github.com/airplanedev/blog-examples/blob/main/go-test-caching/update_file_timestamps.py?ref=airplane.ghost.io
|
||||
//
|
||||
// REFERENCE ARTICLE:
|
||||
// https://web.archive.org/web/20240308061717/https://www.airplane.dev/blog/caching-golang-tests-in-ci
|
||||
//
|
||||
// It updates the mtime of the files to an mtime derived from the sha1 hash of their contents.
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
bufSize = 65536
|
||||
baseDate = 1684178360
|
||||
timeFormat = "2006-01-02 15:04:05"
|
||||
)
|
||||
|
||||
func main() {
|
||||
repoRoot := "."
|
||||
allDirs := make([]string, 0)
|
||||
|
||||
err := filepath.Walk(repoRoot, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
dirPath := filepath.Join(repoRoot, path)
|
||||
relPath, _ := filepath.Rel(repoRoot, dirPath)
|
||||
|
||||
if strings.HasPrefix(relPath, ".") {
|
||||
return nil
|
||||
}
|
||||
|
||||
allDirs = append(allDirs, dirPath)
|
||||
} else {
|
||||
filePath := filepath.Join(repoRoot, path)
|
||||
relPath, _ := filepath.Rel(repoRoot, filePath)
|
||||
|
||||
if strings.HasPrefix(relPath, ".") {
|
||||
return nil
|
||||
}
|
||||
|
||||
sha1Hash, err := getFileSHA1(filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
modTime := getModifiedTime(sha1Hash)
|
||||
|
||||
log.Printf("Setting modified time of file %s to %s\n", relPath, modTime.Format(timeFormat))
|
||||
err = os.Chtimes(filePath, modTime, modTime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal("Error:", err)
|
||||
}
|
||||
|
||||
sort.Slice(allDirs, func(i, j int) bool {
|
||||
return len(allDirs[i]) > len(allDirs[j]) || (len(allDirs[i]) == len(allDirs[j]) && allDirs[i] < allDirs[j])
|
||||
})
|
||||
|
||||
for _, dirPath := range allDirs {
|
||||
relPath, _ := filepath.Rel(repoRoot, dirPath)
|
||||
|
||||
log.Printf("Setting modified time of directory %s to %s\n", relPath, time.Unix(baseDate, 0).Format(timeFormat))
|
||||
err := os.Chtimes(dirPath, time.Unix(baseDate, 0), time.Unix(baseDate, 0))
|
||||
if err != nil {
|
||||
log.Fatal("Error:", err)
|
||||
}
|
||||
}
|
||||
|
||||
log.Println("Done")
|
||||
}
|
||||
|
||||
func getFileSHA1(filePath string) (string, error) {
|
||||
file, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
// G401: Use of weak cryptographic primitive
|
||||
// Disabling as the hash is used not for security reasons.
|
||||
// The hash is used as a cache key to improve test run times.
|
||||
// #nosec
|
||||
// nosemgrep: go.lang.security.audit.crypto.use_of_weak_crypto.use-of-sha1
|
||||
hash := sha1.New()
|
||||
if _, err := io.CopyBuffer(hash, file, make([]byte, bufSize)); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return string(hash.Sum(nil)), nil
|
||||
}
|
||||
|
||||
func getModifiedTime(sha1Hash string) time.Time {
|
||||
hashBytes := []byte(sha1Hash)
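// Note: despite its name, lastFiveBytes below holds the first five bytes of the hash, not the last.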
|
||||
lastFiveBytes := hashBytes[:5]
|
||||
lastFiveValue := int64(0)
|
||||
|
||||
for _, b := range lastFiveBytes {
|
||||
lastFiveValue = (lastFiveValue << 8) + int64(b)
|
||||
}
|
||||
|
||||
modTime := baseDate - (lastFiveValue % 10000)
|
||||
return time.Unix(modTime, 0)
|
||||
}
|
||||
@@ -57,11 +57,11 @@ def make_out(data, indent=""):
|
||||
return
|
||||
del(data[category])
|
||||
if indent != "" and len(lines) == 1:
|
||||
out_lines.append(indent+"* " + title+": " + lines[0])
|
||||
out_lines.append(indent+"- " + title+": " + lines[0])
|
||||
return
|
||||
out_lines.append(indent+"* " + title)
|
||||
out_lines.append(indent+"- " + title)
|
||||
for line in lines:
|
||||
out_lines.append(indent+" * " + line)
|
||||
out_lines.append(indent+" - " + line)
|
||||
return out, out_lines
|
||||
|
||||
|
||||
@@ -129,12 +129,12 @@ def main():
|
||||
new_features[name].append(message)
|
||||
|
||||
# Output new features
|
||||
out, new_features_lines = make_out(new_features, indent=" ")
|
||||
out, new_features_lines = make_out(new_features, indent=" ")
|
||||
for name in sorted(new_features.keys()):
|
||||
out(name)
|
||||
|
||||
# Output bugfixes
|
||||
out, bugfix_lines = make_out(bugfixes, indent=" ")
|
||||
out, bugfix_lines = make_out(bugfixes, indent=" ")
|
||||
for name in sorted(bugfixes.keys()):
|
||||
out(name)
|
||||
|
||||
@@ -163,15 +163,15 @@ def main():
|
||||
|
||||
[See commits](https://github.com/rclone/rclone/compare/%(version)s...%(next_version)s)
|
||||
|
||||
* New backends
|
||||
* New commands
|
||||
* New Features
|
||||
- New backends
|
||||
- New commands
|
||||
- New Features
|
||||
%(new_features)s
|
||||
* Bug Fixes
|
||||
- Bug Fixes
|
||||
%(bugfixes)s
|
||||
%(backend_changes)s""" % locals())
|
||||
sys.stdout.write(old_tail)
|
||||
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
||||
@@ -23,7 +23,7 @@ def add_email(name, email):
|
||||
"""
|
||||
print("Adding %s <%s>" % (name, email))
|
||||
with open(AUTHORS, "a+") as fd:
|
||||
print(" * %s <%s>" % (name, email), file=fd)
|
||||
print("- %s <%s>" % (name, email), file=fd)
|
||||
subprocess.check_call(["git", "commit", "-m", "Add %s to contributors" % name, AUTHORS])
|
||||
|
||||
def main():
|
||||
|
||||
@@ -316,10 +316,10 @@ See the [VFS File Caching](#vfs-file-caching) section for more info.
|
||||
When using NFS mount on macOS, if you don't specify |--vfs-cache-mode|
|
||||
the mount point will be read-only.
|
||||
|
||||
The bucket-based remotes (e.g. Swift, S3, Google Compute Storage, B2)
|
||||
do not support the concept of empty directories, so empty
|
||||
directories will have a tendency to disappear once they fall out of
|
||||
the directory cache.
|
||||
Bucket-based remotes - Azure Blob, Swift, S3, Google Cloud Storage and B2 -
can't store empty directories. Of these, only Azure Blob, Google Cloud Storage
and S3 can preserve them when you add `--xxx-directory-markers`; otherwise,
empty directories will vanish once they drop out of the directory cache.
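For example, a minimal sketch of keeping empty directories on an S3-backed mount,
assuming the S3 backend's `--s3-directory-markers` flag (remote name and mount
point are placeholders):

```sh
rclone mount s3remote:bucket /mnt/bucket --s3-directory-markers
```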
|
||||
|
||||
When `rclone mount` is invoked on Unix with `--daemon` flag, the main rclone
|
||||
program will wait for the background mount to become ready or until the timeout
|
||||
|
||||
@@ -158,13 +158,14 @@ WebDAV or S3, that work out of the box.)
|
||||
{{< provider name="Microsoft OneDrive" home="https://onedrive.live.com/" config="/onedrive/" >}}
|
||||
{{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}}
|
||||
{{< provider name="Nextcloud" home="https://nextcloud.com/" config="/webdav/#nextcloud" >}}
|
||||
{{< provider name="OVH" home="https://www.ovh.co.uk/public-cloud/storage/object-storage/" config="/swift/" >}}
|
||||
{{< provider name="Blomp Cloud Storage" home="https://rclone.org/swift/" config="/swift/" >}}
|
||||
{{< provider name="OpenDrive" home="https://www.opendrive.com/" config="/opendrive/" >}}
|
||||
{{< provider name="OpenStack Swift" home="https://docs.openstack.org/swift/latest/" config="/swift/" >}}
|
||||
{{< provider name="Oracle Cloud Storage Swift" home="https://docs.oracle.com/en-us/iaas/integration/doc/configure-object-storage.html" config="/swift/" >}}
|
||||
{{< provider name="Oracle Object Storage" home="https://www.oracle.com/cloud/storage/object-storage" config="/oracleobjectstorage/" >}}
|
||||
{{< provider name="Outscale" home="https://en.outscale.com/storage/outscale-object-storage/" config="/s3/#outscale" >}}
|
||||
{{< provider name="OVHcloud Object Storage (Swift)" home="https://www.ovhcloud.com/en/public-cloud/object-storage/" config="/swift/" >}}
|
||||
{{< provider name="OVHcloud Object Storage (S3-compatible)" home="https://www.ovhcloud.com/en/public-cloud/object-storage/" config="/s3/#ovhcloud" >}}
|
||||
{{< provider name="ownCloud" home="https://owncloud.org/" config="/webdav/#owncloud" >}}
|
||||
{{< provider name="pCloud" home="https://www.pcloud.com/" config="/pcloud/" >}}
|
||||
{{< provider name="Petabox" home="https://petabox.io/" config="/s3/#petabox" >}}
|
||||
|
||||
File diff suppressed because it is too large
@@ -41,6 +41,5 @@ flag/option).
|
||||
|
||||
Bugs are stored in rclone's GitHub project:
|
||||
|
||||
* [Reported bugs](https://github.com/rclone/rclone/issues?q=is%3Aopen+is%3Aissue+label%3Abug)
|
||||
* [Known issues](https://github.com/rclone/rclone/issues?q=is%3Aopen+is%3Aissue+milestone%3A%22Known+Problem%22)
|
||||
|
||||
- [Reported bugs](https://github.com/rclone/rclone/issues?q=is%3Aopen+is%3Aissue+label%3Abug)
|
||||
- [Known issues](https://github.com/rclone/rclone/issues?q=is%3Aopen+is%3Aissue+milestone%3A%22Known+Problem%22)
|
||||
|
||||
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -15,7 +15,7 @@ image](https://securebuild.com/images/rclone) through our partner
|
||||
|
||||
## Release {{% version %}} OS requirements {#osrequirements}
|
||||
|
||||
| OS | Minimum Version |
|
||||
| OS | Minimum Version |
|
||||
|:-------:|:-------:|
|
||||
| Linux | Kernel 3.2 |
|
||||
| macOS | 11 (Big Sur) |
|
||||
@@ -23,7 +23,10 @@ image](https://securebuild.com/images/rclone) through our partner
|
||||
| FreeBSD | 12.2 |
|
||||
| OpenBSD | 6.9 |
|
||||
|
||||
These requirements come from the Go version that rclone is compiled with and are simplified from [minimum requirements](https://go.dev/wiki/MinimumRequirements) and other [platform specific information](https://go.dev/wiki/#platform-specific-information) in the Go Wiki.
|
||||
These requirements come from the Go version that rclone is compiled with and are
|
||||
simplified from [minimum requirements](https://go.dev/wiki/MinimumRequirements)
|
||||
and other [platform specific information](https://go.dev/wiki/#platform-specific-information)
|
||||
in the Go Wiki.
|
||||
|
||||
## Release {{% version %}} {#release}
|
||||
|
||||
@@ -38,8 +41,10 @@ These requirements come from the Go version that rclone is compiled with and are
|
||||
| MIPS - Big Endian | - | - | {{< download linux mips >}} | {{< download linux mips deb >}} | {{< download linux mips rpm >}} | - | - | - | - | - |
|
||||
| MIPS - Little Endian | - | - | {{< download linux mipsle >}} | {{< download linux mipsle deb >}} | {{< download linux mipsle rpm >}} | - | - | - | - | - |
|
||||
|
||||
<!-- markdownlint-disable-next-line no-bare-urls line-length -->
|
||||
You can also find a [mirror of the downloads on GitHub](https://github.com/rclone/rclone/releases/tag/{{< version >}}).
|
||||
|
||||
<!-- markdownlint-disable-next-line no-bare-urls -->
|
||||
See also [Android builds](https://beta.rclone.org/{{% version %}}/testbuilds/).
|
||||
These are built as part of the official release, but haven't been
|
||||
adopted as first class builds yet.
|
||||
@@ -47,15 +52,19 @@ adopted as first class builds yet.
|
||||
See [the release signing docs](/release_signing/) for how to verify
|
||||
signatures on the release.
|
||||
|
||||
## Script download and install ##
|
||||
## Script download and install
|
||||
|
||||
To install rclone on Linux/macOS/BSD systems, run:
|
||||
|
||||
sudo -v ; curl https://rclone.org/install.sh | sudo bash
|
||||
```sh
|
||||
sudo -v ; curl https://rclone.org/install.sh | sudo bash
|
||||
```
|
||||
|
||||
For beta installation, run:
|
||||
|
||||
sudo -v ; curl https://rclone.org/install.sh | sudo bash -s beta
|
||||
```sh
|
||||
sudo -v ; curl https://rclone.org/install.sh | sudo bash -s beta
|
||||
```
|
||||
|
||||
Note that this script checks the version of rclone installed first and
|
||||
won't re-download if not needed.
|
||||
@@ -65,11 +74,15 @@ won't re-download if not needed.
|
||||
[Beta releases](https://beta.rclone.org) are generated from each commit
|
||||
to master. Note these are named like
|
||||
|
||||
{Version Tag}.beta.{Commit Number}.{Git Commit Hash}
|
||||
```text
|
||||
{Version Tag}.beta.{Commit Number}.{Git Commit Hash}
|
||||
```
|
||||
|
||||
e.g.
|
||||
|
||||
v1.53.0-beta.4677.b657a2204
|
||||
```text
|
||||
v1.53.0-beta.4677.b657a2204
|
||||
```
|
||||
|
||||
The `Version Tag` is the version that the beta release will become
|
||||
when it is released. You can match the `Git Commit Hash` up with the
|
||||
@@ -79,11 +92,15 @@ and will normally be at the end of the list.
|
||||
|
||||
Some beta releases may have a branch name also:
|
||||
|
||||
{Version Tag}-beta.{Commit Number}.{Git Commit Hash}.{Branch Name}
|
||||
```text
|
||||
{Version Tag}-beta.{Commit Number}.{Git Commit Hash}.{Branch Name}
|
||||
```
|
||||
|
||||
e.g.
|
||||
|
||||
v1.53.0-beta.4677.b657a2204.semver
|
||||
```text
|
||||
v1.53.0-beta.4677.b657a2204.semver
|
||||
```
|
||||
|
||||
The presence of `Branch Name` indicates that this is a feature under
|
||||
development which will at some point be merged into the normal betas
|
||||
@@ -115,10 +132,11 @@ script) from a URL which doesn't change then you can use these links.
|
||||
|
||||
## Older Downloads
|
||||
|
||||
Older downloads can be found [here](https://downloads.rclone.org/).
|
||||
Older downloads can be found at <https://downloads.rclone.org/>.
|
||||
|
||||
The latest `rclone` version working for:
|
||||
| OS | Maximum rclone version |
|
||||
|
||||
| OS | Maximum rclone version |
|
||||
|:-------:|:-------:|
|
||||
| Windows 7 | v1.63.1 |
|
||||
| Windows Server 2008 | v1.63.1 |
|
||||
|
||||
@@ -2,15 +2,16 @@
|
||||
title: "FAQ"
|
||||
description: "Rclone Frequently Asked Questions"
|
||||
---
|
||||
<!-- markdownlint-disable heading-increment -->
|
||||
|
||||
# Frequently Asked Questions
|
||||
|
||||
### Do all cloud storage systems support all rclone commands ###
|
||||
### Do all cloud storage systems support all rclone commands
|
||||
|
||||
Yes they do. All the rclone commands (e.g. `sync`, `copy`, etc.) will
|
||||
work on all the remote storage systems.
|
||||
|
||||
### Can I copy the config from one machine to another ###
|
||||
### Can I copy the config from one machine to another
|
||||
|
||||
Sure! Rclone stores all of its config in a single file. If you want
|
||||
to find this file, run `rclone config file` which will tell you where
|
||||
@@ -18,7 +19,7 @@ it is.
|
||||
|
||||
See the [remote setup docs](/remote_setup/) for more info.
|
||||
|
||||
### How do I configure rclone on a remote / headless box with no browser? ###
|
||||
### How do I configure rclone on a remote / headless box with no browser?
|
||||
|
||||
This has now been documented in its own [remote setup page](/remote_setup/).
|
||||
|
||||
@@ -32,11 +33,11 @@ If you need to configure a remote, see the [config help docs](/docs/#configure).
|
||||
If you are using rclone entirely with [on the fly remotes](/docs/#backend-path-to-dir),
|
||||
you can create an empty config file to get rid of this notice, for example:
|
||||
|
||||
```
|
||||
```sh
|
||||
rclone config touch
|
||||
```
|
||||
|
||||
### Can rclone sync directly from drive to s3 ###
|
||||
### Can rclone sync directly from drive to s3
|
||||
|
||||
Rclone can sync between two remote cloud storage systems just fine.
|
||||
|
||||
@@ -47,15 +48,16 @@ The syncs would be incremental (on a file by file basis).
|
||||
|
||||
e.g.
|
||||
|
||||
rclone sync --interactive drive:Folder s3:bucket
|
||||
```sh
|
||||
rclone sync --interactive drive:Folder s3:bucket
|
||||
```
|
||||
|
||||
|
||||
### Using rclone from multiple locations at the same time ###
|
||||
### Using rclone from multiple locations at the same time
|
||||
|
||||
You can use rclone from multiple places at the same time if you choose
|
||||
a different subdirectory for the output, e.g.
|
||||
|
||||
```
|
||||
```sh
|
||||
Server A> rclone sync --interactive /tmp/whatever remote:ServerA
|
||||
Server B> rclone sync --interactive /tmp/whatever remote:ServerB
|
||||
```
|
||||
@@ -63,7 +65,7 @@ Server B> rclone sync --interactive /tmp/whatever remote:ServerB
|
||||
If you sync to the same directory then you should use rclone copy
|
||||
otherwise the two instances of rclone may delete each other's files, e.g.
|
||||
|
||||
```
|
||||
```sh
|
||||
Server A> rclone copy /tmp/whatever remote:Backup
|
||||
Server B> rclone copy /tmp/whatever remote:Backup
|
||||
```
|
||||
@@ -72,7 +74,7 @@ The file names you upload from Server A and Server B should be
|
||||
different in this case, otherwise some file systems (e.g. Drive) may
|
||||
make duplicates.
|
||||
|
||||
### Why doesn't rclone support partial transfers / binary diffs like rsync? ###
|
||||
### Why doesn't rclone support partial transfers / binary diffs like rsync?
|
||||
|
||||
Rclone stores each file you transfer as a native object on the remote
|
||||
cloud storage system. This means that you can see the files you
|
||||
@@ -94,12 +96,12 @@ it would be possible to make partial downloads work. However to make
|
||||
this work efficiently this would require storing a significant amount
|
||||
of metadata, which breaks the desired 1:1 mapping of files to objects.
|
||||
|
||||
### Can rclone do bi-directional sync? ###
|
||||
### Can rclone do bi-directional sync?
|
||||
|
||||
Yes, since rclone v1.58.0, [bidirectional cloud sync](/bisync/) is
|
||||
available.
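A minimal sketch of a first bidirectional sync (remote and path names are
placeholders); `--resync` establishes the initial baseline, after which plain
runs keep both sides in step:

```sh
rclone bisync remoteA:path remoteB:path --resync
rclone bisync remoteA:path remoteB:path
```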
|
||||
|
||||
### Can I use rclone with an HTTP proxy? ###
|
||||
### Can I use rclone with an HTTP proxy?
|
||||
|
||||
Yes. rclone will follow the standard environment variables for
|
||||
proxies, similar to cURL and other programs.
|
||||
@@ -112,23 +114,26 @@ The content of the variable is `protocol://server:port`. The protocol
|
||||
value is the one used to talk to the proxy server, itself, and is commonly
|
||||
either `http` or `socks5`.
|
||||
|
||||
Slightly annoyingly, there is no _standard_ for the name; some applications
|
||||
Slightly annoyingly, there is no *standard* for the name; some applications
|
||||
may use `http_proxy` but another one `HTTP_PROXY`. The `Go` libraries
|
||||
used by `rclone` will try both variations, but you may wish to set all
|
||||
possibilities. So, on Linux, you may end up with code similar to
|
||||
|
||||
export http_proxy=http://proxyserver:12345
|
||||
export https_proxy=$http_proxy
|
||||
export HTTP_PROXY=$http_proxy
|
||||
export HTTPS_PROXY=$http_proxy
|
||||
|
||||
```sh
|
||||
export http_proxy=http://proxyserver:12345
|
||||
export https_proxy=$http_proxy
|
||||
export HTTP_PROXY=$http_proxy
|
||||
export HTTPS_PROXY=$http_proxy
|
||||
```
|
||||
|
||||
Note: If the proxy server requires a username and password, then use
|
||||
|
||||
export http_proxy=http://username:password@proxyserver:12345
|
||||
export https_proxy=$http_proxy
|
||||
export HTTP_PROXY=$http_proxy
|
||||
export HTTPS_PROXY=$http_proxy
|
||||
```sh
|
||||
export http_proxy=http://username:password@proxyserver:12345
|
||||
export https_proxy=$http_proxy
|
||||
export HTTP_PROXY=$http_proxy
|
||||
export HTTPS_PROXY=$http_proxy
|
||||
```
|
||||
|
||||
The `NO_PROXY` allows you to disable the proxy for specific hosts.
|
||||
Hosts must be comma separated, and can contain domains or parts.
|
||||
@@ -136,12 +141,24 @@ For instance "foo.com" also matches "bar.foo.com".
|
||||
|
||||
e.g.
|
||||
|
||||
export no_proxy=localhost,127.0.0.0/8,my.host.name
|
||||
export NO_PROXY=$no_proxy
|
||||
```sh
|
||||
export no_proxy=localhost,127.0.0.0/8,my.host.name
|
||||
export NO_PROXY=$no_proxy
|
||||
```
|
||||
|
||||
Note that the FTP backend does not support `ftp_proxy` yet.
|
||||
|
||||
### Rclone gives x509: failed to load system roots and no roots provided error ###
|
||||
You can use the command line argument `--http-proxy` to set the proxy,
and in turn use an override in the config file if you want it set for
a single backend, e.g. `override.http_proxy = http://...` in the config
file.
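For example, a sketch of the flag form (remote name and proxy address are
placeholders):

```sh
rclone lsd remote: --http-proxy http://proxyserver:12345
```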
|
||||
|
||||
The FTP and SFTP backends have their own `http_proxy` settings to
support an HTTP CONNECT proxy
([--ftp-http-proxy](https://rclone.org/ftp/#ftp-http-proxy) and
[--sftp-http-proxy](https://rclone.org/sftp/#sftp-http-proxy)).
|
||||
|
||||
### Rclone gives x509: failed to load system roots and no roots provided error
|
||||
|
||||
This means that `rclone` can't find the SSL root certificates. Likely
|
||||
you are running `rclone` on a NAS with a cut-down Linux OS, or
|
||||
@@ -150,30 +167,34 @@ possibly on Solaris.
|
||||
Rclone (via the Go runtime) tries to load the root certificates from
|
||||
these places on Linux.
|
||||
|
||||
"/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Gentoo etc.
|
||||
"/etc/pki/tls/certs/ca-bundle.crt", // Fedora/RHEL
|
||||
"/etc/ssl/ca-bundle.pem", // OpenSUSE
|
||||
"/etc/pki/tls/cacert.pem", // OpenELEC
|
||||
```sh
|
||||
"/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Gentoo etc.
|
||||
"/etc/pki/tls/certs/ca-bundle.crt", // Fedora/RHEL
|
||||
"/etc/ssl/ca-bundle.pem", // OpenSUSE
|
||||
"/etc/pki/tls/cacert.pem", // OpenELEC
|
||||
```
|
||||
|
||||
So doing something like this should fix the problem. It also sets the
|
||||
time which is important for SSL to work properly.
|
||||
|
||||
```
|
||||
```sh
|
||||
mkdir -p /etc/ssl/certs/
|
||||
curl -o /etc/ssl/certs/ca-certificates.crt https://raw.githubusercontent.com/bagder/ca-bundle/master/ca-bundle.crt
|
||||
ntpclient -s -h pool.ntp.org
|
||||
```
|
||||
|
||||
The two environment variables `SSL_CERT_FILE` and `SSL_CERT_DIR`, mentioned in the [x509 package](https://godoc.org/crypto/x509),
|
||||
provide an additional way to provide the SSL root certificates.
|
||||
The two environment variables `SSL_CERT_FILE` and `SSL_CERT_DIR`, mentioned in
|
||||
the [x509 package](https://godoc.org/crypto/x509), provide an additional way to
|
||||
provide the SSL root certificates.
|
||||
|
||||
Note that you may need to add the `--insecure` option to the `curl` command line if it doesn't work without.
|
||||
Note that you may need to add the `--insecure` option to the `curl` command line
|
||||
if it doesn't work without.
|
||||
|
||||
```
|
||||
```sh
|
||||
curl --insecure -o /etc/ssl/certs/ca-certificates.crt https://raw.githubusercontent.com/bagder/ca-bundle/master/ca-bundle.crt
|
||||
```
|
||||
|
||||
### Rclone gives Failed to load config file: function not implemented error ###
|
||||
### Rclone gives Failed to load config file: function not implemented error
|
||||
|
||||
Likely this means that you are running rclone on a Linux kernel version not
supported by the Go runtime, i.e. earlier than version 2.6.23.
|
||||
@@ -181,7 +202,7 @@ supported by the go runtime, ie earlier than version 2.6.23.
|
||||
See the [system requirements section in the go install
|
||||
docs](https://golang.org/doc/install) for full details.
|
||||
|
||||
### All my uploaded docx/xlsx/pptx files appear as archive/zip ###
|
||||
### All my uploaded docx/xlsx/pptx files appear as archive/zip
|
||||
|
||||
This is caused by uploading these files from a Windows computer which
|
||||
hasn't got the Microsoft Office suite installed. The easiest way to
|
||||
@@ -189,12 +210,12 @@ fix is to install the Word viewer and the Microsoft Office
|
||||
Compatibility Pack for Word, Excel, and PowerPoint 2007 and later
|
||||
versions' file formats
|
||||
|
||||
### tcp lookup some.domain.com no such host ###
|
||||
### tcp lookup some.domain.com no such host
|
||||
|
||||
This happens when rclone cannot resolve a domain. Please check that
|
||||
your DNS setup is generally working, e.g.
|
||||
|
||||
```
|
||||
```sh
|
||||
# both should print a long list of possible IP addresses
|
||||
dig www.googleapis.com # resolve using your default DNS
|
||||
dig www.googleapis.com @8.8.8.8 # resolve with Google's DNS server
|
||||
@@ -204,7 +225,6 @@ If you are using `systemd-resolved` (default on Arch Linux), ensure it
|
||||
is at version 233 or higher. Previous releases contain a bug which
|
||||
causes not all domains to be resolved properly.
|
||||
|
||||
|
||||
The Go resolver decision can be influenced with the `GODEBUG=netdns=...`
|
||||
environment variable. This also allows you to resolve certain issues with
DNS resolution. On Windows or macOS systems, try forcing use of the
|
||||
@@ -214,17 +234,20 @@ name resolver by setting `GODEBUG=netdns=cgo` (and recompile rclone
|
||||
from source with CGO enabled if necessary). See the
|
||||
[name resolution section in the go docs](https://golang.org/pkg/net/#hdr-Name_Resolution).
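For example, a one-off run with the cgo resolver forced (remote name is a
placeholder):

```sh
GODEBUG=netdns=cgo rclone lsd remote: -vv
```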
|
||||
|
||||
### Failed to start auth webserver on Windows ###
|
||||
```
|
||||
### Failed to start auth webserver on Windows
|
||||
|
||||
```text
|
||||
Error: config failed to refresh token: failed to start auth webserver: listen tcp 127.0.0.1:53682: bind: An attempt was made to access a socket in a way forbidden by its access permissions.
|
||||
...
|
||||
yyyy/mm/dd hh:mm:ss Fatal error: config failed to refresh token: failed to start auth webserver: listen tcp 127.0.0.1:53682: bind: An attempt was made to access a socket in a way forbidden by its access permissions.
|
||||
```
|
||||
|
||||
This is sometimes caused by the Host Network Service causing issues with opening the port on the host.
|
||||
This is sometimes caused by the Host Network Service causing issues with opening
|
||||
the port on the host.
|
||||
|
||||
A simple solution may be restarting the Host Network Service, e.g. with PowerShell
|
||||
```
|
||||
|
||||
```pwsh
|
||||
Restart-Service hns
|
||||
```
|
||||
|
||||
@@ -247,7 +270,7 @@ value, say `export GOGC=20`. This will make the garbage collector
|
||||
work harder, reducing memory size at the expense of CPU usage.
|
||||
|
||||
The most common cause of rclone using lots of memory is a single
|
||||
directory with millions of files in.
|
||||
directory with millions of files in.
|
||||
|
||||
Before rclone v1.70, rclone had to load this entirely into memory as rclone
objects. Each rclone object takes 0.5k-1k of memory. There is
|
||||
@@ -279,4 +302,4 @@ Unicode characters when transferring to one storage system, and replacing
|
||||
back again when transferring to a different storage system where the
|
||||
original characters are supported. When the same Unicode characters
|
||||
are intentionally used in file names, this replacement strategy leads
|
||||
to unwanted renames. Read more [here](/overview/#restricted-filenames-caveats).
|
||||
to unwanted renames. Read more under section [caveats](/overview/#restricted-filenames-caveats).
|
||||
|
||||
@@ -39,38 +39,50 @@ Here is a formal definition of the pattern syntax,
|
||||
|
||||
Rclone matching rules follow a glob style:
|
||||
|
||||
* matches any sequence of non-separator (/) characters
|
||||
** matches any sequence of characters including / separators
|
||||
? matches any single non-separator (/) character
|
||||
[ [ ! ] { character-range } ]
|
||||
character class (must be non-empty)
|
||||
{ pattern-list }
|
||||
pattern alternatives
|
||||
{{ regexp }}
|
||||
regular expression to match
|
||||
c matches character c (c != *, **, ?, \, [, {, })
|
||||
\c matches reserved character c (c = *, **, ?, \, [, {, }) or character class
|
||||
```text
|
||||
* matches any sequence of non-separator (/) characters
|
||||
** matches any sequence of characters including / separators
|
||||
? matches any single non-separator (/) character
|
||||
[ [ ! ] { character-range } ]
|
||||
character class (must be non-empty)
|
||||
{ pattern-list }
|
||||
pattern alternatives
|
||||
{{ regexp }}
|
||||
regular expression to match
|
||||
c matches character c (c != *, **, ?, \, [, {, })
|
||||
\c matches reserved character c (c = *, **, ?, \, [, {, }) or character class
|
||||
```
|
||||
|
||||
character-range:
|
||||
|
||||
c matches character c (c != \, -, ])
|
||||
\c matches reserved character c (c = \, -, ])
|
||||
lo - hi matches character c for lo <= c <= hi
|
||||
```text
|
||||
c matches character c (c != \, -, ])
|
||||
\c matches reserved character c (c = \, -, ])
|
||||
lo - hi matches character c for lo <= c <= hi
|
||||
```
|
||||
|
||||
pattern-list:
|
||||
|
||||
pattern { , pattern }
|
||||
comma-separated (without spaces) patterns
|
||||
```text
|
||||
pattern { , pattern }
|
||||
comma-separated (without spaces) patterns
|
||||
```
|
||||
|
||||
character classes (see [Go regular expression reference](https://golang.org/pkg/regexp/syntax/)) include:
|
||||
character classes (see [Go regular expression reference](https://golang.org/pkg/regexp/syntax/))
|
||||
include:
|
||||
|
||||
Named character classes (e.g. [\d], [^\d], [\D], [^\D])
|
||||
Perl character classes (e.g. \s, \S, \w, \W)
|
||||
ASCII character classes (e.g. [[:alnum:]], [[:alpha:]], [[:punct:]], [[:xdigit:]])
|
||||
```text
|
||||
Named character classes (e.g. [\d], [^\d], [\D], [^\D])
|
||||
Perl character classes (e.g. \s, \S, \w, \W)
|
||||
ASCII character classes (e.g. [[:alnum:]], [[:alpha:]], [[:punct:]], [[:xdigit:]])
|
||||
```
|
||||
|
||||
regexp for advanced users to insert a regular expression - see [below](#regexp) for more info:
|
||||
regexp for advanced users to insert a regular expression - see [below](#regexp)
|
||||
for more info:
|
||||
|
||||
Any re2 regular expression not containing `}}`
|
||||
```text
|
||||
Any re2 regular expression not containing `}}`
|
||||
```
|
||||
|
||||
If the filter pattern starts with a `/` then it only matches
|
||||
at the top level of the directory tree,
|
||||
@@ -80,29 +92,34 @@ starting at the **end of the path/file name** but it only matches
|
||||
a complete path element - it must match from a `/`
|
||||
separator or the beginning of the path/file.
|
||||
|
||||
file.jpg - matches "file.jpg"
|
||||
- matches "directory/file.jpg"
|
||||
- doesn't match "afile.jpg"
|
||||
- doesn't match "directory/afile.jpg"
|
||||
/file.jpg - matches "file.jpg" in the root directory of the remote
|
||||
- doesn't match "afile.jpg"
|
||||
- doesn't match "directory/file.jpg"
|
||||
```text
|
||||
file.jpg - matches "file.jpg"
|
||||
- matches "directory/file.jpg"
|
||||
- doesn't match "afile.jpg"
|
||||
- doesn't match "directory/afile.jpg"
|
||||
/file.jpg - matches "file.jpg" in the root directory of the remote
|
||||
- doesn't match "afile.jpg"
|
||||
- doesn't match "directory/file.jpg"
|
||||
```
|
||||
|
||||
The top level of the remote might not be the top level of the drive.
|
||||
|
||||
E.g. for a Microsoft Windows local directory structure
|
||||
|
||||
F:
|
||||
├── bkp
|
||||
├── data
|
||||
│ ├── excl
|
||||
│ │ ├── 123.jpg
|
||||
│ │ └── 456.jpg
|
||||
│ ├── incl
|
||||
│ │ └── document.pdf
|
||||
```text
|
||||
F:
|
||||
├── bkp
|
||||
├── data
|
||||
│ ├── excl
|
||||
│ │ ├── 123.jpg
|
||||
│ │ └── 456.jpg
|
||||
│ ├── incl
|
||||
│ │ └── document.pdf
|
||||
```
|
||||
|
||||
To copy the contents of folder `data` into folder `bkp` excluding the contents of subfolder
|
||||
`excl`the following command treats `F:\data` and `F:\bkp` as top level for filtering.
|
||||
To copy the contents of folder `data` into folder `bkp` excluding the contents
|
||||
of subfolder `excl`, the following command treats `F:\data` and `F:\bkp` as top
|
||||
level for filtering.
|
||||
|
||||
`rclone copy F:\data\ F:\bkp\ --exclude=/excl/**`
|
||||
|
||||
@@ -113,13 +130,17 @@ Simple patterns are case sensitive unless the `--ignore-case` flag is used.
|
||||
|
||||
Without `--ignore-case` (default)
|
||||
|
||||
potato - matches "potato"
|
||||
- doesn't match "POTATO"
|
||||
```text
|
||||
potato - matches "potato"
|
||||
- doesn't match "POTATO"
|
||||
```
|
||||
|
||||
With `--ignore-case`
|
||||
|
||||
potato - matches "potato"
|
||||
- matches "POTATO"
|
||||
```text
|
||||
potato - matches "potato"
|
||||
- matches "POTATO"
|
||||
```
|
||||
|
||||
## Using regular expressions in filter patterns {#regexp}
|
||||
|
||||
@@ -141,26 +162,36 @@ the supplied regular expression(s).
|
||||
Here is how the `{{regexp}}` is transformed into an full regular
|
||||
expression to match the entire path:
|
||||
|
||||
{{regexp}} becomes (^|/)(regexp)$
|
||||
/{{regexp}} becomes ^(regexp)$
|
||||
```text
|
||||
{{regexp}} becomes (^|/)(regexp)$
|
||||
/{{regexp}} becomes ^(regexp)$
|
||||
```
|
||||
|
||||
Regexp syntax can be mixed with glob syntax, for example
|
||||
|
||||
*.{{jpe?g}} to match file.jpg, file.jpeg but not file.png
|
||||
```text
|
||||
*.{{jpe?g}} to match file.jpg, file.jpeg but not file.png
|
||||
```
|
||||
|
||||
You can also use regexp flags - to set case insensitive, for example
|
||||
|
||||
*.{{(?i)jpg}} to match file.jpg, file.JPG but not file.png
|
||||
```text
|
||||
*.{{(?i)jpg}} to match file.jpg, file.JPG but not file.png
|
||||
```
|
||||
|
||||
Be careful with wildcards in regular expressions - you don't want them
|
||||
to match path separators normally. To match any file name starting
|
||||
with `start` and ending with `end` write
|
||||
|
||||
{{start[^/]*end\.jpg}}
|
||||
```text
|
||||
{{start[^/]*end\.jpg}}
|
||||
```
|
||||
|
||||
Not
|
||||
|
||||
{{start.*end\.jpg}}
|
||||
```text
|
||||
{{start.*end\.jpg}}
|
||||
```
|
||||
|
||||
Which will match a directory called `start` with a file called
|
||||
`end.jpg` in it as the `.*` will match `/` characters.
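To see how a pattern is compiled before relying on it, a sketch using
`--dump filters` (remote name is a placeholder):

```sh
rclone ls remote: --include '*.{{jpe?g}}' --dump filters -vv
```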
|
||||
@@ -198,12 +229,12 @@ them into regular expressions.
|
||||
|
||||
Rclone path/file name filters are made up of one or more of the following flags:
|
||||
|
||||
* `--include`
|
||||
* `--include-from`
|
||||
* `--exclude`
|
||||
* `--exclude-from`
|
||||
* `--filter`
|
||||
* `--filter-from`
|
||||
- `--include`
|
||||
- `--include-from`
|
||||
- `--exclude`
|
||||
- `--exclude-from`
|
||||
- `--filter`
|
||||
- `--filter-from`
|
||||
|
||||
There can be more than one instance of individual flags.
|
||||
|
||||
@@ -274,15 +305,16 @@ every path against the supplied regular expression(s).
|
||||
|
||||
Directory recursion optimisation occurs if either:
|
||||
|
||||
* A source remote does not support the rclone `ListR` primitive. local,
|
||||
- A source remote does not support the rclone `ListR` primitive. local,
|
||||
sftp, Microsoft OneDrive and WebDAV do not support `ListR`. Google
|
||||
Drive and most bucket type storage do. [Full list](https://rclone.org/overview/#optional-features)
|
||||
|
||||
* On other remotes (those that support `ListR`), if the rclone command is not naturally recursive, and
|
||||
provided it is not run with the `--fast-list` flag. `ls`, `lsf -R` and
|
||||
`size` are naturally recursive but `sync`, `copy` and `move` are not.
|
||||
- On other remotes (those that support `ListR`), if the rclone command is not
|
||||
naturally recursive, and provided it is not run with the `--fast-list` flag.
|
||||
`ls`, `lsf -R` and `size` are naturally recursive but `sync`, `copy` and `move`
|
||||
are not.
|
||||
|
||||
* Whenever the `--disable ListR` flag is applied to an rclone command.
|
||||
- Whenever the `--disable ListR` flag is applied to an rclone command.
|
||||
|
||||
Rclone commands imply directory filter rules from path/file filter
|
||||
rules. To view the directory filter rules rclone has implied for a
|
||||
@@ -290,11 +322,15 @@ command specify the `--dump filters` flag.
|
||||
|
||||
E.g. for an include rule
|
||||
|
||||
/a/*.jpg
|
||||
```text
|
||||
/a/*.jpg
|
||||
```
|
||||
|
||||
Rclone implies the directory include rule
|
||||
|
||||
/a/
|
||||
```text
|
||||
/a/
|
||||
```
|
||||
|
||||
Directory filter rules specified in an rclone command can limit
|
||||
the scope of an rclone command but path/file filters still have
|
||||
@@ -308,10 +344,12 @@ access to the remote by ignoring everything outside of that directory.
|
||||
E.g. `rclone ls remote: --filter-from filter-list.txt` with a file
|
||||
`filter-list.txt`:
|
||||
|
||||
- /dir1/
|
||||
- /dir2/
|
||||
+ *.pdf
|
||||
- **
|
||||
```text
|
||||
- /dir1/
|
||||
- /dir2/
|
||||
+ *.pdf
|
||||
- **
|
||||
```
|
||||
|
||||
All files in directories `dir1` or `dir2` or their subdirectories
|
||||
are completely excluded from the listing. Only files of suffix
|
||||
@@ -329,7 +367,9 @@ from this pattern list.
|
||||
|
||||
E.g. for an include rule
|
||||
|
||||
{dir1/**,dir2/**}
|
||||
```text
|
||||
{dir1/**,dir2/**}
|
||||
```
|
||||
|
||||
Rclone will match files below directories `dir1` or `dir2` only,
|
||||
but will not be able to use this filter to exclude a directory `dir3`
|
||||
@@ -381,9 +421,11 @@ named file. The file contains a list of remarks and pattern rules.
|
||||
|
||||
For an example `exclude-file.txt`:
|
||||
|
||||
# a sample exclude rule file
|
||||
*.bak
|
||||
file2.jpg
|
||||
```text
|
||||
# a sample exclude rule file
|
||||
*.bak
|
||||
file2.jpg
|
||||
```
|
||||
|
||||
`rclone ls remote: --exclude-from exclude-file.txt` lists the files on
|
||||
`remote:` except those named `file2.jpg` or with a suffix `.bak`. That is
|
||||
@@ -426,12 +468,16 @@ E.g. `rclone ls remote: --include "*.{png,jpg}"` lists the files on
|
||||
E.g. multiple rclone copy commands can be combined with `--include` and a
|
||||
pattern-list.
|
||||
|
||||
rclone copy /vol1/A remote:A
|
||||
rclone copy /vol1/B remote:B
|
||||
```sh
|
||||
rclone copy /vol1/A remote:A
|
||||
rclone copy /vol1/B remote:B
|
||||
```
|
||||
|
||||
is equivalent to:
|
||||
|
||||
rclone copy /vol1 remote: --include "{A,B}/**"
|
||||
```sh
|
||||
rclone copy /vol1 remote: --include "{A,B}/**"
|
||||
```
|
||||
|
||||
E.g. `rclone ls remote:/wheat --include "??[^[:punct:]]*"` lists the
|
||||
files `remote:` directory `wheat` (and subdirectories) whose third
|
||||
@@ -445,9 +491,11 @@ named file. The file contains a list of remarks and pattern rules.
|
||||
|
||||
For an example `include-file.txt`:
|
||||
|
||||
# a sample include rule file
|
||||
*.jpg
|
||||
file2.avi
|
||||
```text
|
||||
# a sample include rule file
|
||||
*.jpg
|
||||
file2.avi
|
||||
```
|
||||
|
||||
`rclone ls remote: --include-from include-file.txt` lists the files on
|
||||
`remote:` with name `file2.avi` or suffix `.jpg`. That is equivalent to
|
||||
@@ -496,6 +544,7 @@ from a list of `remote:`.
|
||||
|
||||
Adds path/file names to an rclone command based on rules in a
|
||||
named file. The file contains a list of remarks and pattern rules. Include
|
||||
<!-- markdownlint-disable-next-line no-space-in-code -->
|
||||
rules start with `+ ` and exclude rules with `- `. `!` clears existing
|
||||
rules. Rules are processed in the order they are defined.
|
||||
|
||||
@@ -505,20 +554,24 @@ processed in.
|
||||
Arrange the order of filter rules with the most restrictive first and
|
||||
work down.
|
||||
|
||||
Lines starting with # or ; are ignored, and can be used to write comments. Inline comments are not supported. _Use `-vv --dump filters` to see how they appear in the final regexp._
|
||||
Lines starting with # or ; are ignored, and can be used to write comments.
|
||||
Inline comments are not supported. *Use `-vv --dump filters` to see how they
|
||||
appear in the final regexp.*
|
||||
|
||||
E.g. for `filter-file.txt`:
|
||||
|
||||
# a sample filter rule file
|
||||
- secret*.jpg
|
||||
+ *.jpg
|
||||
+ *.png
|
||||
+ file2.avi
|
||||
- /dir/tmp/** # WARNING! This text will be treated as part of the path.
|
||||
- /dir/Trash/**
|
||||
+ /dir/**
|
||||
# exclude everything else
|
||||
- *
|
||||
```text
|
||||
# a sample filter rule file
|
||||
- secret*.jpg
|
||||
+ *.jpg
|
||||
+ *.png
|
||||
+ file2.avi
|
||||
- /dir/tmp/** # WARNING! This text will be treated as part of the path.
|
||||
- /dir/Trash/**
|
||||
+ /dir/**
|
||||
# exclude everything else
|
||||
- *
|
||||
```
|
||||
|
||||
`rclone ls remote: --filter-from filter-file.txt` lists the path/files on
|
||||
`remote:` including all `jpg` and `png` files, excluding any
|
||||
@@ -526,25 +579,28 @@ matching `secret*.jpg` and including `file2.avi`. It also includes
|
||||
everything in the directory `dir` at the root of `remote`, except
|
||||
`remote:dir/Trash` which it excludes. Everything else is excluded.
|
||||
|
||||
|
||||
E.g. for an alternative `filter-file.txt`:
|
||||
|
||||
- secret*.jpg
|
||||
+ *.jpg
|
||||
+ *.png
|
||||
+ file2.avi
|
||||
- *
|
||||
```text
|
||||
- secret*.jpg
|
||||
+ *.jpg
|
||||
+ *.png
|
||||
+ file2.avi
|
||||
- *
|
||||
```
|
||||
|
||||
Files `file1.jpg`, `file3.png` and `file2.avi` are listed whilst
|
||||
`secret17.jpg` and files without the suffix `.jpg` or `.png` are excluded.
|
||||
|
||||
E.g. for an alternative `filter-file.txt`:
|
||||
|
||||
+ *.jpg
|
||||
+ *.gif
|
||||
!
|
||||
+ 42.doc
|
||||
- *
|
||||
```text
|
||||
+ *.jpg
|
||||
+ *.gif
|
||||
!
|
||||
+ 42.doc
|
||||
- *
|
||||
```
|
||||
|
||||
Only file 42.doc is listed. Prior rules are cleared by the `!`.

@@ -582,67 +638,85 @@ to right along the command line.

Paths within the `--files-from` file are interpreted as starting
with the root specified in the rclone command. Leading `/` separators are
ignored. See [--files-from-raw](#files-from-raw-read-list-of-source-file-names-without-any-processing) if
you need the input to be processed in a raw manner.
ignored. See [--files-from-raw](#files-from-raw-read-list-of-source-file-names-without-any-processing)
if you need the input to be processed in a raw manner.

E.g. for a file `files-from.txt`:

# comment
file1.jpg
subdir/file2.jpg
```text
# comment
file1.jpg
subdir/file2.jpg
```

`rclone copy --files-from files-from.txt /home/me/pics remote:pics`
copies the following, if they exist, and only those files.

/home/me/pics/file1.jpg → remote:pics/file1.jpg
/home/me/pics/subdir/file2.jpg → remote:pics/subdir/file2.jpg
```text
/home/me/pics/file1.jpg → remote:pics/file1.jpg
/home/me/pics/subdir/file2.jpg → remote:pics/subdir/file2.jpg
```

E.g. to copy the following files referenced by their absolute paths:

/home/user1/42
/home/user1/dir/ford
/home/user2/prefect
```text
/home/user1/42
/home/user1/dir/ford
/home/user2/prefect
```

First find a common subdirectory - in this case `/home`
and put the remaining files in `files-from.txt` with or without
leading `/`, e.g.

user1/42
user1/dir/ford
user2/prefect
```text
user1/42
user1/dir/ford
user2/prefect
```

Then copy these to a remote:

rclone copy --files-from files-from.txt /home remote:backup
```sh
rclone copy --files-from files-from.txt /home remote:backup
```

The three files are transferred as follows:

/home/user1/42 → remote:backup/user1/important
/home/user1/dir/ford → remote:backup/user1/dir/file
/home/user2/prefect → remote:backup/user2/stuff
```text
/home/user1/42 → remote:backup/user1/important
/home/user1/dir/ford → remote:backup/user1/dir/file
/home/user2/prefect → remote:backup/user2/stuff
```

Alternatively if `/` is chosen as root `files-from.txt` will be:

/home/user1/42
/home/user1/dir/ford
/home/user2/prefect
```text
/home/user1/42
/home/user1/dir/ford
/home/user2/prefect
```

The copy command will be:

rclone copy --files-from files-from.txt / remote:backup
```sh
rclone copy --files-from files-from.txt / remote:backup
```

Then there will be an extra `home` directory on the remote:

/home/user1/42 → remote:backup/home/user1/42
/home/user1/dir/ford → remote:backup/home/user1/dir/ford
/home/user2/prefect → remote:backup/home/user2/prefect
```text
/home/user1/42 → remote:backup/home/user1/42
/home/user1/dir/ford → remote:backup/home/user1/dir/ford
/home/user2/prefect → remote:backup/home/user2/prefect
```

### `--files-from-raw` - Read list of source-file names without any processing

This flag is the same as `--files-from` except that input is read in a
raw manner. Lines with leading / trailing whitespace, and lines starting
with `;` or `#` are read without any processing. [rclone lsf](/commands/rclone_lsf/) has
a compatible format that can be used to export file lists from remotes for
with `;` or `#` are read without any processing. [rclone lsf](/commands/rclone_lsf/)
has a compatible format that can be used to export file lists from remotes for
input to `--files-from-raw`.
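
As a small illustration of that workflow (the remote names and paths here are only placeholders), a file list exported by `rclone lsf` can be fed straight back in via `--files-from-raw`:

```sh
# Export the file list from the source remote
rclone lsf --files-only -R src:path > files.txt

# Copy exactly those files, reading the list without any processing
rclone copy --files-from-raw files.txt src:path dst:path
```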

### `--ignore-case` - make searches case insensitive
@@ -661,9 +735,9 @@ not as work as expected in your shell and may require quoting.

E.g. linux, OSX (`*` metacharacter)

* `--include \*.jpg`
* `--include '*.jpg'`
* `--include='*.jpg'`
- `--include \*.jpg`
- `--include '*.jpg'`
- `--include='*.jpg'`

Microsoft Windows expansion is done by the command, not shell, so
`--include *.jpg` does not require quoting.
@@ -720,7 +794,8 @@ See [the time option docs](/docs/#time-options) for valid formats.

### `--hash-filter` - Deterministically select a subset of files {#hash-filter}

The `--hash-filter` flag enables selecting a deterministic subset of files, useful for:
The `--hash-filter` flag enables selecting a deterministic subset of files,
useful for:

1. Running large sync operations across multiple machines.
2. Checking a subset of files for bitrot.
@@ -730,7 +805,7 @@ The `--hash-filter` flag enables selecting a deterministic subset of files, usef

The flag takes two parameters expressed as a fraction:

```
```sh
--hash-filter K/N
```

@@ -738,8 +813,10 @@ The flag takes two parameters expressed as a fraction:
- `K`: The specific partition to select (an integer from `0` to `N`).

For example:

- `--hash-filter 1/3`: Selects the first third of the files.
- `--hash-filter 2/3` and `--hash-filter 3/3`: Select the second and third partitions, respectively.
- `--hash-filter 2/3` and `--hash-filter 3/3`: Select the second and third
partitions, respectively.

Each partition is non-overlapping, ensuring all files are covered without duplication.
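
For the first use case listed above, a sketch of splitting one sync across three machines would be to give each machine one partition of the same source and destination (the paths here are placeholders):

```sh
# Machine 1
rclone sync --hash-filter 1/3 source:path destination:path

# Machine 2
rclone sync --hash-filter 2/3 source:path destination:path

# Machine 3
rclone sync --hash-filter 3/3 source:path destination:path
```

Since each file is assigned to exactly one partition, the three runs together cover the whole tree without overlap.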

@@ -747,15 +824,17 @@ Each partition is non-overlapping, ensuring all files are covered without duplic

Use `@` as `K` to randomly select a partition:

```
```sh
--hash-filter @/M
```

For example, `--hash-filter @/3` will randomly select a number between 0 and 2. This will stay constant across retries.
For example, `--hash-filter @/3` will randomly select a number between 0 and 2.
This will stay constant across retries.

#### How It Works

- Rclone takes each file's full path, normalizes it to lowercase, and applies Unicode normalization.
- Rclone takes each file's full path, normalizes it to lowercase, and applies
Unicode normalization.
- It then hashes the normalized path into a 64 bit number.
- The hash result is reduced modulo `N` to assign the file to a partition.
- If the calculated partition does not match `K` the file is excluded.
@@ -775,7 +854,7 @@ For example, `--hash-filter @/3` will randomly select a number between 0 and 2.

Assuming the current directory contains `file1.jpg` through `file9.jpg`:

```
```sh
$ rclone lsf --hash-filter 0/4 .
file1.jpg
file5.jpg
@@ -800,13 +879,13 @@ file5.jpg

##### Syncing the first quarter of files

```
```sh
rclone sync --hash-filter 1/4 source:path destination:path
```

##### Checking a random 1% of files for integrity

```
```sh
rclone check --download --hash-filter @/100 source:path destination:path
```

@@ -822,7 +901,9 @@ on the destination which are excluded from the command.

E.g. the scope of `rclone sync --interactive A: B:` can be restricted:

rclone --min-size 50k --delete-excluded sync A: B:
```sh
rclone --min-size 50k --delete-excluded sync A: B:
```

All files on `B:` which are less than 50 KiB are deleted
because they are excluded from the rclone sync command.
@@ -846,10 +927,12 @@ This flag has a priority over other filter flags.

E.g. for the following directory structure:

dir1/file1
dir1/dir2/file2
dir1/dir2/dir3/file3
dir1/dir2/dir3/.ignore
```text
dir1/file1
dir1/dir2/file2
dir1/dir2/dir3/file3
dir1/dir2/dir3/.ignore
```

The command `rclone ls --exclude-if-present .ignore dir1` does
not list `dir3`, `file3` or `.ignore`.
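
A quick way to see the effect against the directory structure above (the output shown in comments is only indicative; sizes will vary):

```sh
rclone ls --exclude-if-present .ignore dir1
# e.g.
#        0 file1
#        0 dir2/file2
```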

@@ -867,11 +950,15 @@ expressions](#regexp).
For example if you wished to list only local files with a mode of
`100664` you could do that with:

rclone lsf -M --files-only --metadata-include "mode=100664" .
```sh
rclone lsf -M --files-only --metadata-include "mode=100664" .
```

Or if you wished to show files with an `atime`, `mtime` or `btime` at a given date:

rclone lsf -M --files-only --metadata-include "[abm]time=2022-12-16*" .
```sh
rclone lsf -M --files-only --metadata-include "[abm]time=2022-12-16*" .
```

Like file filtering, metadata filtering only applies to files not to
directories.
@@ -879,24 +966,25 @@ directories.
The filters can be applied using these flags.

- `--metadata-include` - Include metadatas matching pattern
- `--metadata-include-from` - Read metadata include patterns from file (use - to read from stdin)
- `--metadata-include-from` - Read metadata include patterns from file
(use - to read from stdin)
- `--metadata-exclude` - Exclude metadatas matching pattern
- `--metadata-exclude-from` - Read metadata exclude patterns from file (use - to read from stdin)
- `--metadata-exclude-from` - Read metadata exclude patterns from file
(use - to read from stdin)
- `--metadata-filter` - Add a metadata filtering rule
- `--metadata-filter-from` - Read metadata filtering patterns from a file (use - to read from stdin)
- `--metadata-filter-from` - Read metadata filtering patterns from a file
(use - to read from stdin)

Each flag can be repeated. See the section on [how filter rules are
applied](#how-filter-rules-work) for more details - these flags work
in an identical way to the file name filtering flags, but instead of
file name patterns have metadata patterns.
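
As a brief sketch of how these flags combine (the mode values are only examples, and the explicit rule form is written by analogy with the file name `--filter` flag):

```sh
# Exclude files whose mode is 100600, keep everything else
rclone lsf -M --files-only --metadata-exclude "mode=100600" .

# The same expressed as explicit filter rules
rclone lsf -M --files-only --metadata-filter "- mode=100600" --metadata-filter "+ *" .
```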

## Common pitfalls

The most frequent filter support issues on
the [rclone forum](https://forum.rclone.org/) are:

* Not using paths relative to the root of the remote
* Not using `/` to match from the root of a remote
* Not using `**` to match the contents of a directory

- Not using paths relative to the root of the remote
- Not using `/` to match from the root of a remote
- Not using `**` to match the contents of a directory

@@ -13,13 +13,14 @@ change.
Run this command in a terminal and rclone will download and then
display the GUI in a web browser.

```
```sh
rclone rcd --rc-web-gui
```

This will produce logs like this and rclone needs to continue to run to serve the GUI:
This will produce logs like this and rclone needs to continue to run to serve
the GUI:

```
```text
2019/08/25 11:40:14 NOTICE: A new release for gui is present at https://github.com/rclone/rclone-webui-react/releases/download/v0.0.6/currentbuild.zip
2019/08/25 11:40:14 NOTICE: Downloading webgui binary. Please wait. [Size: 3813937, Path : /home/USER/.cache/rclone/webgui/v0.0.6.zip]
2019/08/25 11:40:16 NOTICE: Unzipping
@@ -58,7 +59,8 @@ When you run the `rclone rcd --rc-web-gui` this is what happens
- Rclone starts but only runs the remote control API ("rc").
- The API is bound to localhost with an auto-generated username and password.
- If the API bundle is missing then rclone will download it.
- rclone will start serving the files from the API bundle over the same port as the API
- rclone will start serving the files from the API bundle over the same port as
the API
- rclone will open the browser with a `login_token` so it can log straight in.

## Advanced use
@@ -79,7 +81,8 @@ See also the [rclone rcd documentation](https://rclone.org/commands/rclone_rcd/)

### Example: Running a public GUI

For example the GUI could be served on a public port over SSL using an htpasswd file using the following flags:
For example the GUI could be served on a public port over SSL using an htpasswd
file using the following flags:

- `--rc-web-gui`
- `--rc-addr :443`
@@ -107,5 +110,3 @@ The GUI is being developed in the: [rclone/rclone-webui-react repository](https:
Bug reports and contributions are very welcome :-)

If you have questions then please ask them on the [rclone forum](https://forum.rclone.org/).

@@ -9,10 +9,10 @@ Rclone is a Go program and comes as a single binary file.

## Quickstart

* [Download](/downloads/) the relevant binary.
* Extract the `rclone` executable, `rclone.exe` on Windows, from the archive.
* Run `rclone config` to setup. See [rclone config docs](/docs/) for more details.
* Optionally configure [automatic execution](#autostart).
- [Download](/downloads/) the relevant binary.
- Extract the `rclone` executable, `rclone.exe` on Windows, from the archive.
- Run `rclone config` to setup. See [rclone config docs](/docs/) for more details.
- Optionally configure [automatic execution](#autostart).

See below for some expanded Linux / macOS / Windows instructions.

@@ -29,11 +29,15 @@ signatures on the release.

To install rclone on Linux/macOS/BSD systems, run:

sudo -v ; curl https://rclone.org/install.sh | sudo bash
```sh
sudo -v ; curl https://rclone.org/install.sh | sudo bash
```

For beta installation, run:

sudo -v ; curl https://rclone.org/install.sh | sudo bash -s beta
```sh
sudo -v ; curl https://rclone.org/install.sh | sudo bash -s beta
```

Note that this script checks the version of rclone installed first and
won't re-download if not needed.
@@ -44,31 +48,41 @@ won't re-download if not needed.

Fetch and unpack

curl -O https://downloads.rclone.org/rclone-current-linux-amd64.zip
unzip rclone-current-linux-amd64.zip
cd rclone-*-linux-amd64
```sh
curl -O https://downloads.rclone.org/rclone-current-linux-amd64.zip
unzip rclone-current-linux-amd64.zip
cd rclone-*-linux-amd64
```

Copy binary file

sudo cp rclone /usr/bin/
sudo chown root:root /usr/bin/rclone
sudo chmod 755 /usr/bin/rclone
```sh
sudo cp rclone /usr/bin/
sudo chown root:root /usr/bin/rclone
sudo chmod 755 /usr/bin/rclone
```

Install manpage

sudo mkdir -p /usr/local/share/man/man1
sudo cp rclone.1 /usr/local/share/man/man1/
sudo mandb
```sh
sudo mkdir -p /usr/local/share/man/man1
sudo cp rclone.1 /usr/local/share/man/man1/
sudo mandb
```

Run `rclone config` to setup. See [rclone config docs](/docs/) for more details.

rclone config
```sh
rclone config
```

## macOS installation {#macos}

### Installation with brew {#macos-brew}

brew install rclone
```sh
brew install rclone
```

NOTE: This version of rclone will not support `mount` any more (see
[#5373](https://github.com/rclone/rclone/issues/5373)). If mounting is wanted
@@ -84,14 +98,16 @@ developers so it may be out of date. Its current version is as below.

On macOS, rclone can also be installed via [MacPorts](https://www.macports.org):

sudo port install rclone
```sh
sudo port install rclone
```

Note that this is a third party installer not controlled by the rclone
developers so it may be out of date. Its current version is as below.

[](https://repology.org/project/rclone/versions)

More information [here](https://ports.macports.org/port/rclone/).
More information on [macports.org](https://ports.macports.org/port/rclone/).

### Precompiled binary, using curl {#macos-precompiled}

@@ -100,26 +116,36 @@ notarized it is enough to download with `curl`.

Download the latest version of rclone.

cd && curl -O https://downloads.rclone.org/rclone-current-osx-amd64.zip
```sh
cd && curl -O https://downloads.rclone.org/rclone-current-osx-amd64.zip
```

Unzip the download and cd to the extracted folder.

unzip -a rclone-current-osx-amd64.zip && cd rclone-*-osx-amd64
```sh
unzip -a rclone-current-osx-amd64.zip && cd rclone-*-osx-amd64
```

Move rclone to your $PATH. You will be prompted for your password.

sudo mkdir -p /usr/local/bin
sudo mv rclone /usr/local/bin/
```sh
sudo mkdir -p /usr/local/bin
sudo mv rclone /usr/local/bin/
```

(the `mkdir` command is safe to run, even if the directory already exists).

Remove the leftover files.

cd .. && rm -rf rclone-*-osx-amd64 rclone-current-osx-amd64.zip
```sh
cd .. && rm -rf rclone-*-osx-amd64 rclone-current-osx-amd64.zip
```

Run `rclone config` to setup. See [rclone config docs](/docs/) for more details.

rclone config
```sh
rclone config
```

### Precompiled binary, using a web browser {#macos-precompiled-web}

@@ -127,12 +153,16 @@ When downloading a binary with a web browser, the browser will set the macOS
gatekeeper quarantine attribute. Starting from Catalina, when attempting to run
`rclone`, a pop-up will appear saying:

"rclone" cannot be opened because the developer cannot be verified.
macOS cannot verify that this app is free from malware.
```sh
"rclone" cannot be opened because the developer cannot be verified.
macOS cannot verify that this app is free from malware.
```

The simplest fix is to run

xattr -d com.apple.quarantine rclone
```sh
xattr -d com.apple.quarantine rclone
```

## Windows installation {#windows}

@@ -160,14 +190,20 @@ feature then you will need to install the third party utility

### Windows package manager (Winget) {#windows-chocolatey}

[Winget](https://learn.microsoft.com/en-us/windows/package-manager/) comes pre-installed with the latest versions of Windows. If not, update the [App Installer](https://www.microsoft.com/p/app-installer/9nblggh4nns1) package from the Microsoft store.
[Winget](https://learn.microsoft.com/en-us/windows/package-manager/) comes
pre-installed with the latest versions of Windows. If not, update the
[App Installer](https://www.microsoft.com/p/app-installer/9nblggh4nns1) package
from the Microsoft store.

To install rclone
```

```bat
winget install Rclone.Rclone
```

To uninstall rclone
```

```bat
winget uninstall Rclone.Rclone --force
```

@@ -175,7 +211,7 @@ winget uninstall Rclone.Rclone --force

Make sure you have [Choco](https://chocolatey.org/) installed

```
```bat
choco search rclone
choco install rclone
```
@@ -183,7 +219,7 @@ choco install rclone
This will install rclone on your Windows machine. If you are planning
to use [rclone mount](/commands/rclone_mount/) then

```
```bat
choco install winfsp
```

@@ -198,7 +234,7 @@ developers so it may be out of date. Its current version is as below.

Make sure you have [Scoop](https://scoop.sh/) installed

```
```bat
scoop install rclone
```

@@ -238,7 +274,7 @@ The `:latest` tag will always point to the latest stable release. You
can use the `:beta` tag to get the latest build from master. You can
also use version tags, e.g. `:1.49.1`, `:1.49` or `:1`.

```
```sh
$ docker pull rclone/rclone:latest
latest: Pulling from rclone/rclone
Digest: sha256:0e0ced72671989bb837fea8e88578b3fc48371aa45d209663683e24cfdaa0e11
@@ -253,35 +289,37 @@ There are a few command line options to consider when starting an rclone Docker
from the rclone image.

- You need to mount the host rclone config dir at `/config/rclone` into the Docker
container. Due to the fact that rclone updates tokens inside its config file, and that
the update process involves a file rename, you need to mount the whole host rclone
config dir, not just the single host rclone config file.
container. Due to the fact that rclone updates tokens inside its config file,
and that the update process involves a file rename, you need to mount the whole
host rclone config dir, not just the single host rclone config file.

- You need to mount a host data dir at `/data` into the Docker container.

- By default, the rclone binary inside a Docker container runs with UID=0 (root).
As a result, all files created in a run will have UID=0. If your config and data files
reside on the host with a non-root UID:GID, you need to pass these on the container
start command line.
As a result, all files created in a run will have UID=0. If your config and
data files reside on the host with a non-root UID:GID, you need to pass these
on the container start command line.

- If you want to access the RC interface (either via the API or the Web UI), it is
required to set the `--rc-addr` to `:5572` in order to connect to it from outside
the container. An explanation about why this is necessary is present [here](https://web.archive.org/web/20200808071950/https://pythonspeed.com/articles/docker-connection-refused/).
* NOTE: Users running this container with the docker network set to `host` should
probably set it to listen to localhost only, with `127.0.0.1:5572` as the value for
`--rc-addr`
the container. An explanation about why this is necessary can be found in an old
[pythonspeed.com](https://web.archive.org/web/20200808071950/https://pythonspeed.com/articles/docker-connection-refused/)
article.
- NOTE: Users running this container with the docker network set to `host` should
probably set it to listen to localhost only, with `127.0.0.1:5572` as the
value for `--rc-addr`

- It is possible to use `rclone mount` inside a userspace Docker container, and expose
the resulting fuse mount to the host. The exact `docker run` options to do that might
vary slightly between hosts. See, e.g. the discussion in this
the resulting fuse mount to the host. The exact `docker run` options to do that
might vary slightly between hosts. See, e.g. the discussion in this
[thread](https://github.com/moby/moby/issues/9448).

You also need to mount the host `/etc/passwd` and `/etc/group` for fuse to work inside
the container.
You also need to mount the host `/etc/passwd` and `/etc/group` for fuse to work
inside the container.

Here are some commands tested on an Ubuntu 18.04.3 host:

```
```sh
# config on host at ~/.config/rclone/rclone.conf
# data on host at ~/data

@@ -319,23 +357,26 @@ kill %1

Make sure you have [Snapd installed](https://snapcraft.io/docs/installing-snapd)

```bash
$ sudo snap install rclone
```sh
sudo snap install rclone
```
Due to the strict confinement of Snap, rclone snap cannot access real /home/$USER/.config/rclone directory, default config path is as below.

Due to the strict confinement of Snap, rclone snap cannot access real
`/home/$USER/.config/rclone` directory, default config path is as below.

- Default config directory:
- /home/$USER/snap/rclone/current/.config/rclone
- /home/$USER/snap/rclone/current/.config/rclone

Note: Due to the strict confinement of Snap, `rclone mount` feature is `not` supported.

If mounting is wanted, either install a precompiled binary or enable the relevant option when [installing from source](#source).
If mounting is wanted, either install a precompiled binary or enable the relevant
option when [installing from source](#source).

Note that this is controlled by [community maintainer](https://github.com/boukendesho/rclone-snap) not the rclone developers so it may be out of date. Its current version is as below.
Note that this is controlled by [community maintainer](https://github.com/boukendesho/rclone-snap)
not the rclone developers so it may be out of date. Its current version is as below.

[](https://snapcraft.io/rclone)

## Source installation {#source}

Make sure you have git and [Go](https://golang.org/) installed.
@@ -343,7 +384,7 @@ Go version 1.22 or newer is required, the latest release is recommended.
You can get it from your package manager, or download it from
[golang.org/dl](https://golang.org/dl/). Then you can run the following:

```
```sh
git clone https://github.com/rclone/rclone.git
cd rclone
go build
@@ -357,7 +398,7 @@ in the same folder. As an initial check you can now run `./rclone version`
Note that on macOS and Windows the [mount](https://rclone.org/commands/rclone_mount/)
command will not be available unless you specify an additional build tag `cmount`.

```
```sh
go build -tags cmount
```

@@ -383,7 +424,7 @@ You may add arguments `-ldflags -s` to omit symbol table and debug information,
making the executable file smaller, and `-trimpath` to remove references to
local file system paths. The official rclone releases are built with both of these.

```
```sh
go build -trimpath -ldflags -s -tags cmount
```

@@ -394,7 +435,7 @@ or `fs.VersionSuffix` (to keep default number but customize the suffix).
This can be done from the build command, by adding to the `-ldflags`
argument value as shown below.

```
```sh
go build -trimpath -ldflags "-s -X github.com/rclone/rclone/fs.Version=v9.9.9-test" -tags cmount
```

@@ -405,7 +446,7 @@ It generates a Windows resource system object file, with extension .syso, e.g.
`resource_windows_amd64.syso`, that will be automatically picked up by
future build commands.

```
```sh
go run bin/resource_windows.go
```

@@ -417,7 +458,7 @@ override this version variable in the build command as described above, you
need to do that also when generating the resource file, or else it will still
use the value from the source.

```
```sh
go run bin/resource_windows.go -version v9.9.9-test
```

@@ -427,13 +468,13 @@ followed by additional commit details, embeds version information binary resourc
on Windows, and copies the resulting rclone executable into your GOPATH bin folder
(`$(go env GOPATH)/bin`, which corresponds to `~/go/bin/rclone` by default).

```
```sh
make
```

To include mount command on macOS and Windows with Makefile build:

```
```sh
make GOTAGS=cmount
```

@@ -450,7 +491,7 @@ The source will be stored it in the Go module cache, and the resulting
executable will be in your GOPATH bin folder (`$(go env GOPATH)/bin`,
which corresponds to `~/go/bin/rclone` by default).

```
```sh
go install github.com/rclone/rclone@latest
```

@@ -466,14 +507,15 @@ role](https://github.com/stefangweichinger/ansible-rclone).

Instructions

1. `git clone https://github.com/stefangweichinger/ansible-rclone.git` into your local roles-directory
2. add the role to the hosts you want rclone installed to:
1. `git clone https://github.com/stefangweichinger/ansible-rclone.git` into
your local roles-directory
2. add the role to the hosts you want rclone installed to:

```
```yml
- hosts: rclone-hosts
roles:
- rclone
```
- rclone
```

## Portable installation {#portable}

@@ -491,29 +533,31 @@ the locations that rclone will use.

To override them set the corresponding options (as command-line arguments, or as
[environment variables](https://rclone.org/docs/#environment-variables)):
- [--config](https://rclone.org/docs/#config-string)
- [--cache-dir](https://rclone.org/docs/#cache-dir-string)
- [--temp-dir](https://rclone.org/docs/#temp-dir-string)

- [--config](https://rclone.org/docs/#config-string)
- [--cache-dir](https://rclone.org/docs/#cache-dir-string)
- [--temp-dir](https://rclone.org/docs/#temp-dir-string)

## Autostart

After installing and configuring rclone, as described above, you are ready to use rclone
as an interactive command line utility. If your goal is to perform *periodic* operations,
such as a regular [sync](https://rclone.org/commands/rclone_sync/), you will probably want
to configure your rclone command in your operating system's scheduler. If you need to
expose *service*-like features, such as [remote control](https://rclone.org/rc/),
[GUI](https://rclone.org/gui/), [serve](https://rclone.org/commands/rclone_serve/)
or [mount](https://rclone.org/commands/rclone_mount/), you will often want an rclone
command always running in the background, and configuring it to run in a service infrastructure
may be a better option. Below are some alternatives on how to achieve this on
different operating systems.
After installing and configuring rclone, as described above, you are ready to use
rclone as an interactive command line utility. If your goal is to perform *periodic*
operations, such as a regular [sync](https://rclone.org/commands/rclone_sync/), you
will probably want to configure your rclone command in your operating system's
scheduler. If you need to expose *service*-like features, such as
[remote control](https://rclone.org/rc/), [GUI](https://rclone.org/gui/),
[serve](https://rclone.org/commands/rclone_serve/) or [mount](https://rclone.org/commands/rclone_mount/),
you will often want an rclone command always running in the background, and
configuring it to run in a service infrastructure may be a better option. Below
are some alternatives on how to achieve this on different operating systems.

NOTE: Before setting up autorun it is highly recommended that you have tested your command
manually from a Command Prompt first.
NOTE: Before setting up autorun it is highly recommended that you have tested
your command manually from a Command Prompt first.

### Autostart on Windows

The most relevant alternatives for autostart on Windows are:

- Run at user log on using the Startup folder
- Run at user log on, at system startup or at schedule using Task Scheduler
- Run at system startup using Windows service
@@ -523,22 +567,23 @@ The most relevant alternatives for autostart on Windows are:
Rclone is a console application, so if not starting from an existing Command Prompt,
e.g. when starting rclone.exe from a shortcut, it will open a Command Prompt window.
When configuring rclone to run from task scheduler and windows service you are able
to set it to run hidden in background. From rclone version 1.54 you can also make it
run hidden from anywhere by adding option `--no-console` (it may still flash briefly
when the program starts). Since rclone normally writes information and any error
messages to the console, you must redirect this to a file to be able to see it.
Rclone has a built-in option `--log-file` for that.
to set it to run hidden in background. From rclone version 1.54 you can also make
it run hidden from anywhere by adding option `--no-console` (it may still flash
briefly when the program starts). Since rclone normally writes information and any
error messages to the console, you must redirect this to a file to be able to see
it. Rclone has a built-in option `--log-file` for that.

Example command to run a sync in background:
```

```bat
c:\rclone\rclone.exe sync c:\files remote:/files --no-console --log-file c:\rclone\logs\sync_files.txt
```

#### User account

As mentioned in the [mount](https://rclone.org/commands/rclone_mount/) documentation,
mounted drives created as Administrator are not visible to other accounts, not even the
account that was elevated as Administrator. By running the mount command as the
mounted drives created as Administrator are not visible to other accounts, not even
the account that was elevated as Administrator. By running the mount command as the
built-in `SYSTEM` user account, it will create drives accessible for everyone on
the system. Both scheduled task and Windows service can be used to achieve this.

@@ -575,8 +620,7 @@ configure rclone to be started automatically in a highly configurable way, e.g.
periodically on a schedule, on user log on, or at system startup. It can run
be configured to run as the current user, or for a mount command that needs to
be available to all users it can run as the `SYSTEM` user.
For technical information, see
https://docs.microsoft.com/windows/win32/taskschd/task-scheduler-start-page.
For technical information, see [Task Scheduler for developers](https://docs.microsoft.com/windows/win32/taskschd/task-scheduler-start-page).

#### Run as service

@@ -585,15 +629,16 @@ your rclone command, as an alternative to scheduled task configured to run at st

##### Mount command built-in service integration

For mount commands, rclone has a built-in Windows service integration via the third-party
WinFsp library it uses. Registering as a regular Windows service easy, as you just have to
execute the built-in PowerShell command `New-Service` (requires administrative privileges).
For mount commands, rclone has a built-in Windows service integration via the
third-party WinFsp library it uses. Registering as a regular Windows service
easy, as you just have to execute the built-in PowerShell command `New-Service`
(requires administrative privileges).

Example of a PowerShell command that creates a Windows service for mounting
some `remote:/files` as drive letter `X:`, for *all* users (service will be running as the
local system account):
some `remote:/files` as drive letter `X:`, for *all* users (service will be
running as the local system account):

```
```pwsh
New-Service -Name Rclone -BinaryPathName 'c:\rclone\rclone.exe mount remote:/files X: --config c:\rclone\config\rclone.conf --log-file c:\rclone\logs\mount.txt'
```

@@ -603,7 +648,7 @@ into its own launcher service, as kind of "child services". This has the additio
advantage that it also implements a network provider that integrates into
Windows standard methods for managing network drives. This is currently not
officially supported by Rclone, but with WinFsp version 2019.3 B2 / v1.5B2 or later
it should be possible through path rewriting as described [here](https://github.com/rclone/rclone/issues/3340).
it should be possible through path rewriting as described in [#3340](https://github.com/rclone/rclone/issues/3340).

##### Third-party service integration

@@ -615,15 +660,15 @@ customized response to different exit codes, with a GUI to configure everything
(although it can also be used from command line ).

There are also several other alternatives. To mention one more,
[WinSW](https://github.com/winsw/winsw), "Windows Service Wrapper", is worth checking out.
It requires .NET Framework, but it is preinstalled on newer versions of Windows, and it
also provides alternative standalone distributions which includes necessary runtime (.NET 5).
WinSW is a command-line only utility, where you have to manually create an XML file with
service configuration. This may be a drawback for some, but it can also be an advantage
as it is easy to back up and reuse the configuration
[WinSW](https://github.com/winsw/winsw), "Windows Service Wrapper", is worth checking
out. It requires .NET Framework, but it is preinstalled on newer versions of Windows,
and it also provides alternative standalone distributions which includes necessary
runtime (.NET 5). WinSW is a command-line only utility, where you have to manually
create an XML file with service configuration. This may be a drawback for some, but
it can also be an advantage as it is easy to back up and reuse the configuration
settings, without having go through manual steps in a GUI. One thing to note is that
by default it does not restart the service on error, one have to explicit enable this
in the configuration file (via the "onfailure" parameter).
by default it does not restart the service on error, one have to explicit enable
this in the configuration file (via the "onfailure" parameter).

### Autostart on Linux

@@ -8,7 +8,7 @@ description: "Rclone Licence"
This is free software under the terms of the MIT license (check the
COPYING file included with the source code).

```
```text
Copyright (C) 2019 by Nick Craig-Wood https://www.craig-wood.com/nick/

Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -29,4 +29,3 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
```

@@ -4,13 +4,13 @@ description: "Overview of cloud storage systems"
type: page
---

# Overview of cloud storage systems #
# Overview of cloud storage systems

Each cloud storage system is slightly different. Rclone attempts to
provide a unified interface to them, but some underlying differences
show through.

## Features ##
## Features

Here is an overview of the major features of each cloud storage system.

@@ -79,9 +79,11 @@ This is an SHA256 sum of all the 4 MiB block SHA256s.

³ WebDAV supports hashes when used with Fastmail Files, Owncloud and Nextcloud only.

⁴ WebDAV supports modtimes when used with Fastmail Files, Owncloud and Nextcloud only.
⁴ WebDAV supports modtimes when used with Fastmail Files, Owncloud and Nextcloud
only.

⁵ [QuickXorHash](https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash) is Microsoft's own hash.
⁵ [QuickXorHash](https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash)
is Microsoft's own hash.

⁶ Mail.ru uses its own modified SHA1 hash

@@ -110,7 +112,7 @@ top-level sum.
¹³ Uloz.to provides server-calculated MD5 hash upon file upload. MD5 and SHA256
hashes are client-calculated and stored as metadata fields.

### Hash ###
### Hash

The cloud storage system supports various hash types of the objects.
The hashes are used when transferring data as an integrity check and
@@ -120,7 +122,7 @@ the `check` command.
To use the verify checksums when transferring between cloud storage
systems they must support a common hash type.

### ModTime ###
### ModTime

Almost all cloud storage systems store some sort of timestamp
on objects, but several of them not something that is appropriate
@@ -164,7 +166,7 @@ means they do also support modtime-only operations.
Storage systems with `D` in the ModTime column means that the
following symbols apply to directories as well as files.

### Case Insensitive ###
### Case Insensitive

If a cloud storage systems is case sensitive then it is possible to
have two files which differ only in case, e.g. `file.txt` and
@@ -178,15 +180,16 @@ matter how many times you run the sync it never completes fully.
The local filesystem and SFTP may or may not be case sensitive
depending on OS.

* Windows - usually case insensitive, though case is preserved
* OSX - usually case insensitive, though it is possible to format case sensitive
* Linux - usually case sensitive, but there are case insensitive file systems (e.g. FAT formatted USB keys)
- Windows - usually case insensitive, though case is preserved
- OSX - usually case insensitive, though it is possible to format case sensitive
- Linux - usually case sensitive, but there are case insensitive file systems
(e.g. FAT formatted USB keys)

Most of the time this doesn't cause any problems as people tend to
avoid files whose name differs only by case even on case sensitive
systems.

### Duplicate files ###
### Duplicate files

If a cloud storage system allows duplicate files then it can have two
objects with the same name.
@@ -194,7 +197,7 @@ objects with the same name.
This confuses rclone greatly when syncing - use the `rclone dedupe`
command to rename or remove duplicates.

### Restricted filenames ###
### Restricted filenames

Some cloud storage systems might have restrictions on the characters
that are usable in file or directory names.
@@ -402,20 +405,27 @@ and to maintain backward compatibility, its behavior has not been changed.

To take a specific example, the FTP backend's default encoding is

--ftp-encoding "Slash,Del,Ctl,RightSpace,Dot"
```sh
--ftp-encoding "Slash,Del,Ctl,RightSpace,Dot"
```

However, let's say the FTP server is running on Windows and can't have
any of the invalid Windows characters in file names. You are backing
up Linux servers to this FTP server which do have those characters in
file names. So you would add the Windows set which are

Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot
```text
Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot
```

to the existing ones, giving:

Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot,Del,RightSpace
```text
Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot,Del,RightSpace
```

This can be specified using the `--ftp-encoding` flag or using an `encoding` parameter in the config file.
This can be specified using the `--ftp-encoding` flag or using an `encoding`
parameter in the config file.
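
For instance (with a hypothetical remote called `myftp:` and a placeholder source path), the combined set could be passed on the command line:

```sh
rclone copy /srv/files myftp:backup \
  --ftp-encoding "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot,Del,RightSpace"
```

The same value could instead be stored as an `encoding = ...` line in the `[myftp]` section of the config file.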
|
||||
|
||||
##### Encoding example: Windows
|
||||
|
||||
@@ -429,7 +439,7 @@ To avoid this you can change the set of characters rclone should convert
|
||||
for the local filesystem, using command-line argument `--local-encoding`.
|
||||
Rclone's default behavior on Windows corresponds to
|
||||
|
||||
```
|
||||
```sh
|
||||
--local-encoding "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot"
|
||||
```
|
||||
|
||||
@@ -437,11 +447,12 @@ If you want to use fullwidth characters `:`, `*` and `?` in your filenames
|
||||
without rclone changing them when uploading to a remote, then set the same as
|
||||
the default value but without `Colon,Question,Asterisk`:
|
||||
|
||||
```
|
||||
```sh
|
||||
--local-encoding "Slash,LtGt,DoubleQuote,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot"
|
||||
```
|
||||
|
||||
Alternatively, you can disable the conversion of any characters with `--local-encoding Raw`.
|
||||
Alternatively, you can disable the conversion of any characters with
|
||||
`--local-encoding Raw`.
|
||||
|
||||
Instead of using command-line argument `--local-encoding`, you may also set it
|
||||
as [environment variable](/docs/#environment-variables) `RCLONE_LOCAL_ENCODING`,
|
||||
@@ -454,7 +465,7 @@ it to your Windows filesystem, this will fail. These characters are not
|
||||
valid in filenames on Windows, and you have told rclone not to work around
|
||||
this by converting them to valid fullwidth variants.
|
||||
|
||||
### MIME Type ###
|
||||
### MIME Type
|
||||
|
||||
MIME types (also known as media types) classify types of documents
|
||||
using a simple text classification, e.g. `text/html` or
|
||||
@@ -490,7 +501,7 @@ The levels of metadata support are
|
||||
|
||||
See [the metadata docs](/docs/#metadata) for more info.
|
||||
|
||||
## Optional Features ##
|
||||
## Optional Features
|
||||
|
||||
All rclone remotes support a base command set. Other features depend
|
||||
upon backend-specific capabilities.
|
||||
@@ -563,12 +574,12 @@ purging a directory inside a bucket, files are deleted individually.
|
||||
|
||||
⁵ Use the `--onedrive-delta` flag to enable.
|
||||
|
||||
### Purge ###
|
||||
### Purge
|
||||
|
||||
This deletes a directory quicker than just deleting all the files in
|
||||
the directory.
|
||||
|
||||
### Copy ###
|
||||
### Copy
|
||||
|
||||
Used when copying an object to and from the same remote. This known
|
||||
as a server-side copy so you can copy a file without downloading it
|
||||
@@ -578,7 +589,7 @@ and uploading it again. It is used if you use `rclone copy` or
|
||||
If the server doesn't support `Copy` directly then for copy operations
|
||||
the file is downloaded then re-uploaded.
|
||||
|
||||
### Move ###
|
||||
### Move
|
||||
|
||||
Used when moving/renaming an object on the same remote. This is known
|
||||
as a server-side move of a file. This is used in `rclone move` if the
|
||||
@@ -588,13 +599,13 @@ If the server isn't capable of `Move` then rclone simulates it with
|
||||
`Copy` then delete. If the server doesn't support `Copy` then rclone
|
||||
will download the file and re-upload it.
|
||||
|
||||
### DirMove ###
|
||||
### DirMove
|
||||
|
||||
This is used to implement `rclone move` to move a directory if
|
||||
possible. If it isn't then it will use `Move` on each file (which
|
||||
falls back to `Copy` then download and upload - see `Move` section).
|
||||
|
||||
### CleanUp ###
|
||||
### CleanUp
|
||||
|
||||
This is used for emptying the trash for a remote by `rclone cleanup`.
|
||||
|
||||
@@ -604,31 +615,31 @@ error.
|
||||
‡‡ Note that while Box implements this it has to delete every file
|
||||
individually so it will be slower than emptying the trash via the WebUI
|
||||
|
||||
### ListR ###
|
||||
### ListR
|
||||
|
||||
The remote supports a recursive list to list all the contents beneath
|
||||
a directory quickly. This enables the `--fast-list` flag to work.
|
||||
See the [rclone docs](/docs/#fast-list) for more details.
|
||||
|
||||
### StreamUpload ###
|
||||
### StreamUpload
|
||||
|
||||
Some remotes allow files to be uploaded without knowing the file size
|
||||
in advance. This allows certain operations to work without spooling the
|
||||
file to local disk first, e.g. `rclone rcat`.
|
||||
|
||||
### MultithreadUpload ###
|
||||
### MultithreadUpload
|
||||
|
||||
Some remotes allow transfers to the remote to be sent as chunks in
|
||||
parallel. If this is supported then rclone will use multi-thread
|
||||
copying to transfer files much faster.
|
||||
|
||||
### LinkSharing ###
|
||||
### LinkSharing
|
||||
|
||||
Sets the necessary permissions on a file or folder and prints a link
|
||||
that allows others to access them, even if they don't have an account
|
||||
on the particular cloud provider.
|
||||
|
||||
### About ###
|
||||
### About
|
||||
|
||||
Rclone `about` prints quota information for a remote. Typical output
|
||||
includes bytes used, free, quota and in trash.
|
||||
@@ -642,7 +653,7 @@ rclone union remote.
|
||||
|
||||
See [rclone about command](https://rclone.org/commands/rclone_about/)
|
||||
|
||||
### EmptyDir ###
|
||||
### EmptyDir
|
||||
|
||||
The remote supports empty directories. See [Limitations](/bugs/#limitations)
|
||||
for details. Most Object/Bucket-based remotes do not support this.
|
||||
|
||||
@@ -3,71 +3,141 @@ title: "Privacy Policy"
|
||||
description: "Rclone Privacy Policy"
|
||||
---
|
||||
|
||||
# Rclone Privacy Policy #
|
||||
# Rclone Privacy Policy
|
||||
|
||||
## What is this Privacy Policy for? ##
|
||||
## What is this Privacy Policy for?
|
||||
|
||||
This privacy policy is for this website https://rclone.org and governs the privacy of its users who choose to use it.
|
||||
This privacy policy is for this website <https://rclone.org> and governs the
|
||||
privacy of its users who choose to use it.
|
||||
|
||||
The policy sets out the different areas where user privacy is concerned and outlines the obligations & requirements of the users, the website and website owners. Furthermore the way this website processes, stores and protects user data and information will also be detailed within this policy.
|
||||
The policy sets out the different areas where user privacy is concerned and
|
||||
outlines the obligations & requirements of the users, the website and website
|
||||
owners. Furthermore the way this website processes, stores and protects user
|
||||
data and information will also be detailed within this policy.
|
||||
|
||||
## The Website ##
|
||||
## The Website
|
||||
|
||||
This website and its owners take a proactive approach to user privacy and ensure the necessary steps are taken to protect the privacy of its users throughout their visiting experience. This website complies to all UK national laws and requirements for user privacy.
|
||||
This website and its owners take a proactive approach to user privacy and
|
||||
ensure the necessary steps are taken to protect the privacy of its users
|
||||
throughout their visiting experience. This website complies to all UK national
|
||||
laws and requirements for user privacy.
|
||||
|
||||
## Use of Cookies ##
|
||||
## Use of Cookies
|
||||
|
||||
This website uses cookies to better the users experience while visiting the website. Where applicable this website uses a cookie control system allowing the user on their first visit to the website to allow or disallow the use of cookies on their computer / device. This complies with recent legislation requirements for websites to obtain explicit consent from users before leaving behind or reading files such as cookies on a user's computer / device.
|
||||
This website uses cookies to better the users experience while visiting the
|
||||
website. Where applicable this website uses a cookie control system allowing
|
||||
the user on their first visit to the website to allow or disallow the use of
|
||||
cookies on their computer / device. This complies with recent legislation
|
||||
requirements for websites to obtain explicit consent from users before leaving
|
||||
behind or reading files such as cookies on a user's computer / device.
|
||||
|
||||
Cookies are small files saved to the user's computers hard drive that track, save and store information about the user's interactions and usage of the website. This allows the website, through its server to provide the users with a tailored experience within this website.
|
||||
Cookies are small files saved to the user's computers hard drive that track,
|
||||
save and store information about the user's interactions and usage of the
|
||||
website. This allows the website, through its server to provide the users with
|
||||
a tailored experience within this website.
|
||||
|
||||
Users are advised that if they wish to deny the use and saving of cookies from this website on to their computers hard drive they should take necessary steps within their web browsers security settings to block all cookies from this website and its external serving vendors.
|
||||
Users are advised that if they wish to deny the use and saving of cookies from
|
||||
this website on to their computers hard drive they should take necessary steps
|
||||
within their web browsers security settings to block all cookies from this
|
||||
website and its external serving vendors.
|
||||
|
||||
This website uses tracking software to monitor its visitors to better understand how they use it. This software is provided by Google Analytics which uses cookies to track visitor usage. The software will save a cookie to your computers hard drive in order to track and monitor your engagement and usage of the website, but will not store, save or collect personal information. You can read [Google's privacy policy here](https://www.google.com/privacy.html) for further information.
|
||||
This website uses tracking software to monitor its visitors to better
|
||||
understand how they use it. This software is provided by Google Analytics which
|
||||
uses cookies to track visitor usage. The software will save a cookie to your
|
||||
computers hard drive in order to track and monitor your engagement and usage of
|
||||
the website, but will not store, save or collect personal information. You can
|
||||
read [Google's privacy policy here](https://www.google.com/privacy.html) for
|
||||
further information.
|
||||
|
||||
Other cookies may be stored to your computers hard drive by external vendors when this website uses referral programs, sponsored links or adverts. Such cookies are used for conversion and referral tracking and typically expire after 30 days, though some may take longer. No personal information is stored, saved or collected.
|
||||
Other cookies may be stored to your computers hard drive by external vendors
|
||||
when this website uses referral programs, sponsored links or adverts. Such
|
||||
cookies are used for conversion and referral tracking and typically expire
|
||||
after 30 days, though some may take longer. No personal information is stored,
|
||||
saved or collected.
|
||||
|
||||
## Contact & Communication ##
|
||||
## Contact & Communication
|
||||
|
||||
Users contacting this website and/or its owners do so at their own discretion and provide any such personal details requested at their own risk. Your personal information is kept private and stored securely until a time it is no longer required or has no use, as detailed in the Data Protection Act 1998.
|
||||
Users contacting this website and/or its owners do so at their own discretion
|
||||
and provide any such personal details requested at their own risk. Your
|
||||
personal information is kept private and stored securely until a time it is no
|
||||
longer required or has no use, as detailed in the Data Protection Act 1998.
|
||||
|
||||
This website and its owners use any information submitted to provide you with further information about the products / services they offer or to assist you in answering any questions or queries you may have submitted.
|
||||
This website and its owners use any information submitted to provide you with
|
||||
further information about the products / services they offer or to assist you
|
||||
in answering any questions or queries you may have submitted.
|
||||
|
||||
## External Links ##
|
||||
## External Links
|
||||
|
||||
Although this website only looks to include quality, safe and relevant external links, users are advised adopt a policy of caution before clicking any external web links mentioned throughout this website.
|
||||
Although this website only looks to include quality, safe and relevant external
|
||||
links, users are advised to adopt a policy of caution before clicking any external
|
||||
web links mentioned throughout this website.
|
||||
|
||||
The owners of this website cannot guarantee or verify the contents of any externally linked website despite their best efforts. Users should therefore note they click on external links at their own risk and this website and its owners cannot be held liable for any damages or implications caused by visiting any external links mentioned.
|
||||
The owners of this website cannot guarantee or verify the contents of any
|
||||
externally linked website despite their best efforts. Users should therefore
|
||||
note they click on external links at their own risk and this website and its
|
||||
owners cannot be held liable for any damages or implications caused by visiting
|
||||
any external links mentioned.
|
||||
|
||||
## Adverts and Sponsored Links ##
|
||||
## Adverts and Sponsored Links
|
||||
|
||||
This website may contain sponsored links and adverts. These will typically be served through our advertising partners, who may have detailed privacy policies relating directly to the adverts they serve.
|
||||
This website may contain sponsored links and adverts. These will typically be
|
||||
served through our advertising partners, who may have detailed privacy
|
||||
policies relating directly to the adverts they serve.
|
||||
|
||||
Clicking on any such adverts will send you to the advertiser's website through a referral program which may use cookies and will track the number of referrals sent from this website. This may include the use of cookies which may in turn be saved on your computer's hard drive. Users should therefore note that they click on sponsored external links at their own risk and this website and its owners cannot be held liable for any damages or implications caused by visiting any external links mentioned.
|
||||
Clicking on any such adverts will send you to the advertiser's website through a
|
||||
referral program which may use cookies and will track the number of referrals
|
||||
sent from this website. This may include the use of cookies which may in turn
|
||||
be saved on your computer's hard drive. Users should therefore note that they click
|
||||
on sponsored external links at their own risk and this website and its owners
|
||||
cannot be held liable for any damages or implications caused by visiting any
|
||||
external links mentioned.
|
||||
|
||||
### Social Media Platforms ##
|
||||
### Social Media Platforms
|
||||
|
||||
Communication, engagement and actions taken through external social media platforms that this website and its owners participate on are subject to the terms and conditions as well as the privacy policies held with each social media platform respectively.
|
||||
Communication, engagement and actions taken through external social media
|
||||
platforms that this website and its owners participate on are subject to the
|
||||
terms and conditions as well as the privacy policies held with each social media
|
||||
platform respectively.
|
||||
|
||||
Users are advised to use social media platforms wisely and communicate / engage upon them with due care and caution in regard to their own privacy and personal details. Neither this website nor its owners will ever ask for personal or sensitive information through social media platforms, and they encourage users wishing to discuss sensitive details to contact them through primary communication channels such as email.
|
||||
Users are advised to use social media platforms wisely and communicate / engage
|
||||
upon them with due care and caution in regard to their own privacy and personal
|
||||
details. Neither this website nor its owners will ever ask for personal or sensitive
|
||||
information through social media platforms, and they encourage users wishing to
|
||||
discuss sensitive details to contact them through primary communication channels
|
||||
such as email.
|
||||
|
||||
This website may use social sharing buttons which help share web content directly from web pages to the social media platform in question. Users are advised before using such social sharing buttons that they do so at their own discretion and note that the social media platform may track and save your request to share a web page respectively through your social media platform account.
|
||||
This website may use social sharing buttons which help share web content
|
||||
directly from web pages to the social media platform in question. Users are
|
||||
advised before using such social sharing buttons that they do so at their own
|
||||
discretion and note that the social media platform may track and save your
|
||||
request to share a web page respectively through your social media platform
|
||||
account.
|
||||
|
||||
## Use of Cloud API User Data ##
|
||||
## Use of Cloud API User Data
|
||||
|
||||
Rclone is a command-line program to manage files on cloud storage. Its sole purpose is to access and manipulate user content in the [supported](/overview/) cloud storage systems from a local machine of the end user. For accessing the user content via the cloud provider API, Rclone uses authentication mechanisms, such as OAuth or HTTP Cookies, depending on the particular cloud provider offerings. Use of these authentication mechanisms and user data is governed by the privacy policies mentioned in the [Resources & Further Information](/privacy/#resources-further-information) section and followed by the privacy policy of Rclone.
|
||||
Rclone is a command-line program to manage files on cloud storage. Its sole
|
||||
purpose is to access and manipulate user content in the [supported](/overview/)
|
||||
cloud storage systems from a local machine of the end user. For accessing the
|
||||
user content via the cloud provider API, Rclone uses authentication mechanisms,
|
||||
such as OAuth or HTTP Cookies, depending on the particular cloud provider
|
||||
offerings. Use of these authentication mechanisms and user data is governed by
|
||||
the privacy policies mentioned in the [Resources & Further Information](/privacy/#resources-further-information)
|
||||
section and followed by the privacy policy of Rclone.
|
||||
|
||||
* Rclone provides the end user with access to their files available in a storage system associated by the authentication credentials via the publicly exposed API of the storage system.
|
||||
* Rclone allows storing the authentication credentials on the user machine in the local configuration file.
|
||||
* Rclone does not share any user data with third parties.
|
||||
- Rclone provides the end user with access to their files available in a storage
|
||||
system associated by the authentication credentials via the publicly exposed API
|
||||
of the storage system.
|
||||
- Rclone allows storing the authentication credentials on the user machine in the
|
||||
local configuration file.
|
||||
- Rclone does not share any user data with third parties.
|
||||
|
||||
## Resources & Further Information ##
|
||||
## Resources & Further Information
|
||||
|
||||
* [Data Protection Act 1998](http://www.legislation.gov.uk/ukpga/1998/29/contents)
|
||||
* [Privacy and Electronic Communications Regulations 2003](http://www.legislation.gov.uk/uksi/2003/2426/contents/made)
|
||||
* [Privacy and Electronic Communications Regulations 2003 - The Guide](https://ico.org.uk/for-organisations/guide-to-pecr/)
|
||||
* [Twitter Privacy Policy](https://twitter.com/privacy)
|
||||
* [Facebook Privacy Policy](https://www.facebook.com/about/privacy/)
|
||||
* [Google Privacy Policy](https://www.google.com/privacy.html)
|
||||
* [Google API Services User Data Policy](https://developers.google.com/terms/api-services-user-data-policy)
|
||||
* [Sample Website Privacy Policy](http://www.jamieking.co.uk/resources/free_sample_privacy_policy.html)
|
||||
- [Data Protection Act 1998](http://www.legislation.gov.uk/ukpga/1998/29/contents)
|
||||
- [Privacy and Electronic Communications Regulations 2003](http://www.legislation.gov.uk/uksi/2003/2426/contents/made)
|
||||
- [Privacy and Electronic Communications Regulations 2003 - The Guide](https://ico.org.uk/for-organisations/guide-to-pecr/)
|
||||
- [Twitter Privacy Policy](https://twitter.com/privacy)
|
||||
- [Facebook Privacy Policy](https://www.facebook.com/about/privacy/)
|
||||
- [Google Privacy Policy](https://www.google.com/privacy.html)
|
||||
- [Google API Services User Data Policy](https://developers.google.com/terms/api-services-user-data-policy)
|
||||
- [Sample Website Privacy Policy](http://www.jamieking.co.uk/resources/free_sample_privacy_policy.html)
|
||||
|
||||
@@ -12,14 +12,15 @@ which can be used to remote control rclone using its API.
|
||||
You can either use the [rc](#api-rc) command to access the API
|
||||
or [use HTTP directly](#api-http).
|
||||
|
||||
If you just want to run a remote control then see the [rcd](/commands/rclone_rcd/) command.
|
||||
If you just want to run a remote control then see the [rcd](/commands/rclone_rcd/)
|
||||
command.
|
||||
|
||||
## Supported parameters
|
||||
|
||||
### --rc
|
||||
|
||||
Flag to start the HTTP server to listen for remote requests.
|
||||
|
||||
|
||||
### --rc-addr=IP
|
||||
|
||||
IPaddress:Port or :Port to bind server to. (default "localhost:5572").
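
For example, a standalone remote control server could be started on an explicit
address like this (the port here is arbitrary and just for illustration):

```sh
rclone rcd --rc-addr localhost:5573
```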
|
||||
@@ -71,11 +72,11 @@ Timeout for server writing data (default 1h0m0s).
|
||||
|
||||
### --rc-serve
|
||||
|
||||
Enable the serving of remote objects via the HTTP interface. This
|
||||
means objects will be accessible at http://127.0.0.1:5572/ by default,
|
||||
so you can browse to http://127.0.0.1:5572/ or http://127.0.0.1:5572/*
|
||||
Enable the serving of remote objects via the HTTP interface. This
|
||||
means objects will be accessible at `http://127.0.0.1:5572/` by default,
|
||||
so you can browse to `http://127.0.0.1:5572/` or `http://127.0.0.1:5572/*`
|
||||
to see a listing of the remotes. Objects may be requested from
|
||||
remotes using this syntax http://127.0.0.1:5572/[remote:path]/path/to/object
|
||||
remotes using this syntax `http://127.0.0.1:5572/[remote:path]/path/to/object`
|
||||
|
||||
Default Off.
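
With the flag enabled, a single object could then be fetched over plain HTTP,
for example as below; the remote name and object path are placeholders, and
`-g` stops curl from interpreting the literal square brackets:

```sh
curl -g -o object.bin 'http://127.0.0.1:5572/[remote:path]/path/to/object'
```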
|
||||
|
||||
@@ -102,7 +103,9 @@ Default Off.
|
||||
### --rc-enable-metrics
|
||||
|
||||
Enable OpenMetrics/Prometheus compatible endpoint at `/metrics`.
|
||||
If more control over the metrics is desired (for example running it on a different port or with different auth) then endpoint can be enabled with the `--metrics-*` flags instead.
|
||||
If more control over the metrics is desired (for example running it on a
|
||||
different port or with different auth) then endpoint can be enabled with
|
||||
the `--metrics-*` flags instead.
|
||||
|
||||
Default Off.
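
A quick way to confirm the endpoint is up is to scrape it once by hand
(this sketch assumes the default rc address and no authentication):

```sh
curl http://localhost:5572/metrics
```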
|
||||
|
||||
@@ -124,7 +127,7 @@ Default is IP address on which rc is running.
|
||||
|
||||
Set the URL to fetch the rclone-web-gui files from.
|
||||
|
||||
Default https://api.github.com/repos/rclone/rclone-webui-react/releases/latest.
|
||||
Default <https://api.github.com/repos/rclone/rclone-webui-react/releases/latest>.
|
||||
|
||||
### --rc-web-gui-update
|
||||
|
||||
@@ -182,26 +185,26 @@ rc` command.
|
||||
|
||||
You can use it like this:
|
||||
|
||||
```
|
||||
```sh
|
||||
$ rclone rc rc/noop param1=one param2=two
|
||||
{
|
||||
"param1": "one",
|
||||
"param2": "two"
|
||||
"param1": "one",
|
||||
"param2": "two"
|
||||
}
|
||||
```
|
||||
|
||||
If the remote is running on a different URL than the default
|
||||
`http://localhost:5572/`, use the `--url` option to specify it:
|
||||
|
||||
```
|
||||
$ rclone rc --url http://some.remote:1234/ rc/noop
|
||||
```sh
|
||||
rclone rc --url http://some.remote:1234/ rc/noop
|
||||
```
|
||||
|
||||
Or, if the remote is listening on a Unix socket, use the `--unix-socket` option
|
||||
instead:
|
||||
|
||||
```
|
||||
$ rclone rc --unix-socket /tmp/rclone.sock rc/noop
|
||||
```sh
|
||||
rclone rc --unix-socket /tmp/rclone.sock rc/noop
|
||||
```
|
||||
|
||||
Run `rclone rc` on its own, without any commands, to see the help for the
|
||||
@@ -213,19 +216,19 @@ remote server.
|
||||
`rclone rc` also supports a `--json` flag which can be used to send
|
||||
more complicated input parameters.
|
||||
|
||||
```
|
||||
```sh
|
||||
$ rclone rc --json '{ "p1": [1,"2",null,4], "p2": { "a":1, "b":2 } }' rc/noop
|
||||
{
|
||||
"p1": [
|
||||
1,
|
||||
"2",
|
||||
null,
|
||||
4
|
||||
],
|
||||
"p2": {
|
||||
"a": 1,
|
||||
"b": 2
|
||||
}
|
||||
"p1": [
|
||||
1,
|
||||
"2",
|
||||
null,
|
||||
4
|
||||
],
|
||||
"p2": {
|
||||
"a": 1,
|
||||
"b": 2
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
@@ -233,13 +236,13 @@ If the parameter being passed is an object then it can be passed as a
|
||||
JSON string rather than using the `--json` flag which simplifies the
|
||||
command line.
|
||||
|
||||
```
|
||||
```sh
|
||||
rclone rc operations/list fs=/tmp remote=test opt='{"showHash": true}'
|
||||
```
|
||||
|
||||
Rather than
|
||||
|
||||
```
|
||||
```sh
|
||||
rclone rc operations/list --json '{"fs": "/tmp", "remote": "test", "opt": {"showHash": true}}'
|
||||
```
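
The same call can also be made over HTTP directly, as a sketch that assumes the
default rc address and no authentication (see the HTTP examples later in this
document):

```sh
curl -H "Content-Type: application/json" -X POST \
    -d '{"fs": "/tmp", "remote": "test", "opt": {"showHash": true}}' \
    http://localhost:5572/operations/list
```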
|
||||
|
||||
@@ -266,50 +269,50 @@ response timing out.
|
||||
|
||||
Starting a job with the `_async` flag:
|
||||
|
||||
```
|
||||
```sh
|
||||
$ rclone rc --json '{ "p1": [1,"2",null,4], "p2": { "a":1, "b":2 }, "_async": true }' rc/noop
|
||||
{
|
||||
"jobid": 2
|
||||
"jobid": 2
|
||||
}
|
||||
```
|
||||
|
||||
Query the status to see if the job has finished. For more information
|
||||
on the meaning of these return parameters see the `job/status` call.
|
||||
|
||||
```
|
||||
```sh
|
||||
$ rclone rc --json '{ "jobid":2 }' job/status
|
||||
{
|
||||
"duration": 0.000124163,
|
||||
"endTime": "2018-10-27T11:38:07.911245881+01:00",
|
||||
"error": "",
|
||||
"finished": true,
|
||||
"id": 2,
|
||||
"output": {
|
||||
"_async": true,
|
||||
"p1": [
|
||||
1,
|
||||
"2",
|
||||
null,
|
||||
4
|
||||
],
|
||||
"p2": {
|
||||
"a": 1,
|
||||
"b": 2
|
||||
}
|
||||
},
|
||||
"startTime": "2018-10-27T11:38:07.911121728+01:00",
|
||||
"success": true
|
||||
"duration": 0.000124163,
|
||||
"endTime": "2018-10-27T11:38:07.911245881+01:00",
|
||||
"error": "",
|
||||
"finished": true,
|
||||
"id": 2,
|
||||
"output": {
|
||||
"_async": true,
|
||||
"p1": [
|
||||
1,
|
||||
"2",
|
||||
null,
|
||||
4
|
||||
],
|
||||
"p2": {
|
||||
"a": 1,
|
||||
"b": 2
|
||||
}
|
||||
},
|
||||
"startTime": "2018-10-27T11:38:07.911121728+01:00",
|
||||
"success": true
|
||||
}
|
||||
```
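
If you want to block until the job completes, a minimal polling loop along
these lines works; it assumes the `jobid` of 2 from the example above and a
simple `grep` on the `finished` field, which is crude but serviceable:

```sh
while ! rclone rc --json '{ "jobid": 2 }' job/status | grep -q '"finished": true'; do
    sleep 1
done
```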
|
||||
|
||||
`job/list` can be used to show the running or recently completed jobs
|
||||
|
||||
```
|
||||
```sh
|
||||
$ rclone rc job/list
|
||||
{
|
||||
"jobids": [
|
||||
2
|
||||
]
|
||||
"jobids": [
|
||||
2
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
@@ -321,21 +324,29 @@ duration of an rc call only then pass in the `_config` parameter.
|
||||
This should be in the same format as the `main` key returned by
|
||||
[options/get](#options-get).
|
||||
|
||||
rclone rc --loopback options/get blocks=main
|
||||
```sh
|
||||
rclone rc --loopback options/get blocks=main
|
||||
```
|
||||
|
||||
You can see more help on these options with this command (see [the
|
||||
options blocks section](#option-blocks) for more info).
|
||||
|
||||
rclone rc --loopback options/info blocks=main
|
||||
```sh
|
||||
rclone rc --loopback options/info blocks=main
|
||||
```
|
||||
|
||||
For example, if you wished to run a sync with the `--checksum`
|
||||
parameter, you would pass this parameter in your JSON blob.
|
||||
|
||||
"_config":{"CheckSum": true}
|
||||
```json
|
||||
"_config":{"CheckSum": true}
|
||||
```
|
||||
|
||||
If using `rclone rc` this could be passed as
|
||||
|
||||
rclone rc sync/sync ... _config='{"CheckSum": true}'
|
||||
```sh
|
||||
rclone rc sync/sync ... _config='{"CheckSum": true}'
|
||||
```
|
||||
|
||||
Any config parameters you don't set will inherit the global defaults
|
||||
which were set with command line flags or environment variables.
|
||||
@@ -344,8 +355,10 @@ Note that it is possible to set some values as strings or integers -
|
||||
see [data types](#data-types) for more info. Here is an example
|
||||
setting the equivalent of `--buffer-size` in string or integer format.
|
||||
|
||||
"_config":{"BufferSize": "42M"}
|
||||
"_config":{"BufferSize": 44040192}
|
||||
```json
|
||||
"_config":{"BufferSize": "42M"}
|
||||
"_config":{"BufferSize": 44040192}
|
||||
```
|
||||
|
||||
If you wish to check the `_config` assignment has worked properly then
|
||||
calling `options/local` will show what the value got set to.
|
||||
@@ -358,24 +371,34 @@ pass in the `_filter` parameter.
|
||||
This should be in the same format as the `filter` key returned by
|
||||
[options/get](#options-get).
|
||||
|
||||
rclone rc --loopback options/get blocks=filter
|
||||
```sh
|
||||
rclone rc --loopback options/get blocks=filter
|
||||
```
|
||||
|
||||
You can see more help on these options with this command (see [the
|
||||
options blocks section](#option-blocks) for more info).
|
||||
|
||||
rclone rc --loopback options/info blocks=filter
|
||||
```sh
|
||||
rclone rc --loopback options/info blocks=filter
|
||||
```
|
||||
|
||||
For example, if you wished to run a sync with these flags
|
||||
|
||||
--max-size 1M --max-age 42s --include "a" --include "b"
|
||||
```sh
|
||||
--max-size 1M --max-age 42s --include "a" --include "b"
|
||||
```
|
||||
|
||||
you would pass this parameter in your JSON blob.
|
||||
|
||||
"_filter":{"MaxSize":"1M", "IncludeRule":["a","b"], "MaxAge":"42s"}
|
||||
```json
|
||||
"_filter":{"MaxSize":"1M", "IncludeRule":["a","b"], "MaxAge":"42s"}
|
||||
```
|
||||
|
||||
If using `rclone rc` this could be passed as
|
||||
|
||||
rclone rc ... _filter='{"MaxSize":"1M", "IncludeRule":["a","b"], "MaxAge":"42s"}'
|
||||
```sh
|
||||
rclone rc ... _filter='{"MaxSize":"1M", "IncludeRule":["a","b"], "MaxAge":"42s"}'
|
||||
```
|
||||
|
||||
Any filter parameters you don't set will inherit the global defaults
|
||||
which were set with command line flags or environment variables.
|
||||
@@ -384,8 +407,10 @@ Note that it is possible to set some values as strings or integers -
|
||||
see [data types](#data-types) for more info. Here is an example
|
||||
setting the equivalent of `--buffer-size` in string or integer format.
|
||||
|
||||
"_filter":{"MinSize": "42M"}
|
||||
"_filter":{"MinSize": 44040192}
|
||||
```json
|
||||
"_filter":{"MinSize": "42M"}
|
||||
"_filter":{"MinSize": 44040192}
|
||||
```
|
||||
|
||||
If you wish to check the `_filter` assignment has worked properly then
|
||||
calling `options/local` will show what the value got set to.
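
As a sketch, `_config` and `_filter` can also be combined on a single call;
the `srcFs`/`dstFs` values below are placeholders only:

```sh
rclone rc sync/copy srcFs=/tmp/src dstFs=/tmp/dst \
    _config='{"CheckSum": true}' \
    _filter='{"MaxSize": "1M", "MaxAge": "42s", "IncludeRule": ["a","b"]}'
```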
|
||||
@@ -401,11 +426,11 @@ value. This allows caller to group stats under their own name.
|
||||
|
||||
Stats for a specific group can be accessed by passing `group` to `core/stats`:
|
||||
|
||||
```
|
||||
```sh
|
||||
$ rclone rc --json '{ "group": "job/1" }' core/stats
|
||||
{
|
||||
"speed": 12345
|
||||
...
|
||||
"speed": 12345
|
||||
...
|
||||
}
|
||||
```
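
To discover which groups exist before querying one, `core/group-list` should
list them; the group name below is illustrative only:

```sh
rclone rc core/group-list
rclone rc --json '{ "group": "job/1" }' core/stats
```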
|
||||
|
||||
@@ -466,7 +491,7 @@ An example of this might be the `--log-level` flag. Note that the
|
||||
`Name` of the option becomes the command line flag with `_` replaced
|
||||
with `-`.
|
||||
|
||||
```
|
||||
```json
|
||||
{
|
||||
"Advanced": false,
|
||||
"Default": 5,
|
||||
@@ -525,7 +550,7 @@ isn't specified then it defaults to the root of the remote.
|
||||
|
||||
For example this JSON is equivalent to `remote:/tmp`
|
||||
|
||||
```
|
||||
```json
|
||||
{
|
||||
"_name": "remote",
|
||||
"_root": "/tmp"
|
||||
@@ -534,7 +559,7 @@ For example this JSON is equivalent to `remote:/tmp`
|
||||
|
||||
And this is equivalent to `:sftp,host='example.com':/tmp`
|
||||
|
||||
```
|
||||
```json
|
||||
{
|
||||
"type": "sftp",
|
||||
"host": "example.com",
|
||||
@@ -544,7 +569,7 @@ And this is equivalent to `:sftp,host='example.com':/tmp`
|
||||
|
||||
And this is equivalent to `/tmp/dir`
|
||||
|
||||
```
|
||||
```json
|
||||
{
|
||||
"type": "local",
|
||||
"_root": "/tmp/dir"
|
||||
@@ -2352,7 +2377,7 @@ If an error occurs then there will be an HTTP error status (e.g. 500)
|
||||
and the body of the response will contain a JSON encoded error object,
|
||||
e.g.
|
||||
|
||||
```
|
||||
```json
|
||||
{
|
||||
"error": "Expecting string value for key \"remote\" (was float64)",
|
||||
"input": {
|
||||
@@ -2364,7 +2389,8 @@ e.g.
|
||||
}
|
||||
```
|
||||
|
||||
The keys in the error response are
|
||||
The keys in the error response are:
|
||||
|
||||
- error - error string
|
||||
- input - the input parameters to the call
|
||||
- status - the HTTP status code
|
||||
@@ -2373,42 +2399,43 @@ The keys in the error response are
|
||||
### CORS
|
||||
|
||||
The server implements basic CORS support and allows all origins.
|
||||
The response to a preflight OPTIONS request will echo the requested "Access-Control-Request-Headers" back.
|
||||
The response to a preflight OPTIONS request will echo the requested
|
||||
"Access-Control-Request-Headers" back.
|
||||
|
||||
### Using POST with URL parameters only
|
||||
|
||||
```
|
||||
```sh
|
||||
curl -X POST 'http://localhost:5572/rc/noop?potato=1&sausage=2'
|
||||
```
|
||||
|
||||
Response
|
||||
|
||||
```
|
||||
```json
|
||||
{
|
||||
"potato": "1",
|
||||
"sausage": "2"
|
||||
"potato": "1",
|
||||
"sausage": "2"
|
||||
}
|
||||
```
|
||||
|
||||
Here is what an error response looks like:
|
||||
|
||||
```
|
||||
```sh
|
||||
curl -X POST 'http://localhost:5572/rc/error?potato=1&sausage=2'
|
||||
```
|
||||
|
||||
```
|
||||
```json
|
||||
{
|
||||
"error": "arbitrary error on input map[potato:1 sausage:2]",
|
||||
"input": {
|
||||
"potato": "1",
|
||||
"sausage": "2"
|
||||
}
|
||||
"error": "arbitrary error on input map[potato:1 sausage:2]",
|
||||
"input": {
|
||||
"potato": "1",
|
||||
"sausage": "2"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Note that curl doesn't return errors to the shell unless you use the `-f` option
|
||||
|
||||
```
|
||||
```sh
|
||||
$ curl -f -X POST 'http://localhost:5572/rc/error?potato=1&sausage=2'
|
||||
curl: (22) The requested URL returned error: 400 Bad Request
|
||||
$ echo $?
|
||||
@@ -2417,68 +2444,68 @@ $ echo $?
|
||||
|
||||
### Using POST with a form
|
||||
|
||||
```
|
||||
```sh
|
||||
curl --data "potato=1" --data "sausage=2" http://localhost:5572/rc/noop
|
||||
```
|
||||
|
||||
Response
|
||||
|
||||
```
|
||||
```json
|
||||
{
|
||||
"potato": "1",
|
||||
"sausage": "2"
|
||||
"potato": "1",
|
||||
"sausage": "2"
|
||||
}
|
||||
```
|
||||
|
||||
Note that you can combine these with URL parameters too with the POST
|
||||
parameters taking precedence.
|
||||
|
||||
```
|
||||
```sh
|
||||
curl --data "potato=1" --data "sausage=2" "http://localhost:5572/rc/noop?rutabaga=3&sausage=4"
|
||||
```
|
||||
|
||||
Response
|
||||
|
||||
```
|
||||
```json
|
||||
{
|
||||
"potato": "1",
|
||||
"rutabaga": "3",
|
||||
"sausage": "4"
|
||||
"potato": "1",
|
||||
"rutabaga": "3",
|
||||
"sausage": "4"
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
### Using POST with a JSON blob
|
||||
|
||||
```
|
||||
```sh
|
||||
curl -H "Content-Type: application/json" -X POST -d '{"potato":2,"sausage":1}' http://localhost:5572/rc/noop
|
||||
```
|
||||
|
||||
Response
|
||||
|
||||
```
|
||||
```json
|
||||
{
|
||||
"password": "xyz",
|
||||
"username": "xyz"
|
||||
"password": "xyz",
|
||||
"username": "xyz"
|
||||
}
|
||||
```
|
||||
|
||||
This can be combined with URL parameters too if required. The JSON
|
||||
blob takes precedence.
|
||||
|
||||
```
|
||||
```sh
|
||||
curl -H "Content-Type: application/json" -X POST -d '{"potato":2,"sausage":1}' 'http://localhost:5572/rc/noop?rutabaga=3&potato=4'
|
||||
```
|
||||
|
||||
```
|
||||
```json
|
||||
{
|
||||
"potato": 2,
|
||||
"rutabaga": "3",
|
||||
"sausage": 1
|
||||
"potato": 2,
|
||||
"rutabaga": "3",
|
||||
"sausage": 1
|
||||
}
|
||||
```
|
||||
|
||||
## Debugging rclone with pprof ##
|
||||
## Debugging rclone with pprof
|
||||
|
||||
If you use the `--rc` flag this will also enable the use of the go
|
||||
profiling tools on the same port.
|
||||
@@ -2489,14 +2516,16 @@ To use these, first [install go](https://golang.org/doc/install).
|
||||
|
||||
To profile rclone's memory use you can run:
|
||||
|
||||
go tool pprof -web http://localhost:5572/debug/pprof/heap
|
||||
```sh
|
||||
go tool pprof -web http://localhost:5572/debug/pprof/heap
|
||||
```
|
||||
|
||||
This should open a page in your browser showing what is using what
|
||||
memory.
|
||||
|
||||
You can also use the `-text` flag to produce a textual summary
|
||||
|
||||
```
|
||||
```sh
|
||||
$ go tool pprof -text http://localhost:5572/debug/pprof/heap
|
||||
Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
|
||||
flat flat% sum% cum cum%
|
||||
@@ -2521,13 +2550,15 @@ alive which should have been garbage collected.
|
||||
|
||||
See all active goroutines using
|
||||
|
||||
curl http://localhost:5572/debug/pprof/goroutine?debug=1
|
||||
```sh
|
||||
curl http://localhost:5572/debug/pprof/goroutine?debug=1
|
||||
```
|
||||
|
||||
Or go to http://localhost:5572/debug/pprof/goroutine?debug=1 in your browser.
|
||||
Or go to <http://localhost:5572/debug/pprof/goroutine?debug=1> in your browser.
|
||||
|
||||
### Other profiles to look at
|
||||
|
||||
You can see a summary of profiles available at http://localhost:5572/debug/pprof/
|
||||
You can see a summary of profiles available at <http://localhost:5572/debug/pprof/>
|
||||
|
||||
Here is how to use some of them:
|
||||
|
||||
@@ -2536,15 +2567,14 @@ Here is how to use some of them:
|
||||
- 30-second CPU profile: `go tool pprof http://localhost:5572/debug/pprof/profile`
|
||||
- 5-second execution trace: `wget http://localhost:5572/debug/pprof/trace?seconds=5`
|
||||
- Goroutine blocking profile
|
||||
- Enable first with: `rclone rc debug/set-block-profile-rate rate=1` ([docs](#debug-set-block-profile-rate))
|
||||
- `go tool pprof http://localhost:5572/debug/pprof/block`
|
||||
- Enable first with: `rclone rc debug/set-block-profile-rate rate=1` ([docs](#debug-set-block-profile-rate))
|
||||
- `go tool pprof http://localhost:5572/debug/pprof/block`
|
||||
- Contended mutexes:
|
||||
- Enable first with: `rclone rc debug/set-mutex-profile-fraction rate=1` ([docs](#debug-set-mutex-profile-fraction))
|
||||
- `go tool pprof http://localhost:5572/debug/pprof/mutex`
|
||||
- Enable first with: `rclone rc debug/set-mutex-profile-fraction rate=1` ([docs](#debug-set-mutex-profile-fraction))
|
||||
- `go tool pprof http://localhost:5572/debug/pprof/mutex`
|
||||
|
||||
See the [net/http/pprof docs](https://golang.org/pkg/net/http/pprof/)
|
||||
for more info on how to use the profiling and for a general overview
|
||||
see [the Go team's blog post on profiling go programs](https://blog.golang.org/profiling-go-programs).
|
||||
|
||||
The profiling hook is [zero overhead unless it is used](https://stackoverflow.com/q/26545159/164234).
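
As a worked example of the block profile items above, the profile can be
switched on, captured and then switched off again roughly like this (the
rate values follow the suggestions above, and `rate=0` is assumed to disable
the profile again):

```sh
rclone rc debug/set-block-profile-rate rate=1
go tool pprof http://localhost:5572/debug/pprof/block
rclone rc debug/set-block-profile-rate rate=0
```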
|
||||
|
||||
|
||||
@@ -29,6 +29,7 @@ The S3 backend can be used with a number of different providers:
|
||||
{{< provider name="MEGA S4 Object Storage" home="https://mega.io/objectstorage" config="/s3/#mega" >}}
|
||||
{{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}}
|
||||
{{< provider name="Outscale" home="https://en.outscale.com/storage/outscale-object-storage/" config="/s3/#outscale" >}}
|
||||
{{< provider name="OVHcloud" home="https://www.ovhcloud.com/en/public-cloud/object-storage/" config="/s3/#ovhcloud" >}}
|
||||
{{< provider name="Petabox" home="https://petabox.io/" config="/s3/#petabox" >}}
|
||||
{{< provider name="Pure Storage FlashBlade" home="https://www.purestorage.com/products/unstructured-data-storage.html" config="/s3/#pure-storage-flashblade" >}}
|
||||
{{< provider name="Qiniu Cloud Object Storage (Kodo)" home="https://www.qiniu.com/en/products/kodo" config="/s3/#qiniu" >}}
|
||||
@@ -3611,6 +3612,206 @@ d) Delete this remote
|
||||
y/e/d> y
|
||||
```
|
||||
|
||||
### OVHcloud {#ovhcloud}
|
||||
|
||||
[OVHcloud Object Storage](https://www.ovhcloud.com/en-ie/public-cloud/object-storage/)
|
||||
is an S3-compatible general-purpose object storage platform available in all OVHcloud regions.
|
||||
To use the platform, you will need an access key and a secret key. To learn more about the platform and how
|
||||
to interact with it, see the [documentation](https://ovh.to/8stqhuo).
|
||||
|
||||
Here is an example of making an OVHcloud Object Storage configuration with `rclone config`:
|
||||
|
||||
```
|
||||
No remotes found, make a new one?
|
||||
n) New remote
|
||||
s) Set configuration password
|
||||
q) Quit config
|
||||
n/s/q> n
|
||||
|
||||
Enter name for new remote.
|
||||
name> ovhcloud-rbx
|
||||
|
||||
Option Storage.
|
||||
Type of storage to configure.
|
||||
Choose a number from below, or type in your own value.
|
||||
[...]
|
||||
XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, ChinaMobile, Cloudflare, DigitalOcean, Dreamhost, GCS, HuaweiOBS, IBMCOS, IDrive, IONOS, LyveCloud, Leviia, Liara, Linode, Magalu, Minio, Netease, Outscale, OVHcloud, Petabox, RackCorp, Rclone, Scaleway, SeaweedFS, Selectel, StackPath, Storj, Synology, TencentCOS, Wasabi, Qiniu and others
|
||||
\ (s3)
|
||||
[...]
|
||||
Storage> s3
|
||||
|
||||
Option provider.
|
||||
Choose your S3 provider.
|
||||
Choose a number from below, or type in your own value.
|
||||
Press Enter to leave empty.
|
||||
[...]
|
||||
XX / OVHcloud Object Storage
|
||||
\ (OVHcloud)
|
||||
[...]
|
||||
provider> OVHcloud
|
||||
|
||||
Option env_auth.
|
||||
Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
Only applies if access_key_id and secret_access_key is blank.
|
||||
Choose a number from below, or type in your own boolean value (true or false).
|
||||
Press Enter for the default (false).
|
||||
1 / Enter AWS credentials in the next step.
|
||||
\ (false)
|
||||
2 / Get AWS credentials from the environment (env vars or IAM).
|
||||
\ (true)
|
||||
env_auth> 1
|
||||
|
||||
Option access_key_id.
|
||||
AWS Access Key ID.
|
||||
Leave blank for anonymous access or runtime credentials.
|
||||
Enter a value. Press Enter to leave empty.
|
||||
access_key_id> my_access
|
||||
|
||||
Option secret_access_key.
|
||||
AWS Secret Access Key (password).
|
||||
Leave blank for anonymous access or runtime credentials.
|
||||
Enter a value. Press Enter to leave empty.
|
||||
secret_access_key> my_secret
|
||||
|
||||
Option region.
|
||||
Region where your bucket will be created and your data stored.
|
||||
Choose a number from below, or type in your own value.
|
||||
Press Enter to leave empty.
|
||||
1 / Gravelines, France
|
||||
\ (gra)
|
||||
2 / Roubaix, France
|
||||
\ (rbx)
|
||||
3 / Strasbourg, France
|
||||
\ (sbg)
|
||||
4 / Paris, France (3AZ)
|
||||
\ (eu-west-par)
|
||||
5 / Frankfurt, Germany
|
||||
\ (de)
|
||||
6 / London, United Kingdom
|
||||
\ (uk)
|
||||
7 / Warsaw, Poland
|
||||
\ (waw)
|
||||
8 / Beauharnois, Canada
|
||||
\ (bhs)
|
||||
9 / Toronto, Canada
|
||||
\ (ca-east-tor)
|
||||
10 / Singapore
|
||||
\ (sgp)
|
||||
11 / Sydney, Australia
|
||||
\ (ap-southeast-syd)
|
||||
12 / Mumbai, India
|
||||
\ (ap-south-mum)
|
||||
13 / Vint Hill, Virginia, USA
|
||||
\ (us-east-va)
|
||||
14 / Hillsboro, Oregon, USA
|
||||
\ (us-west-or)
|
||||
15 / Roubaix, France (Cold Archive)
|
||||
\ (rbx-archive)
|
||||
region> 2
|
||||
|
||||
Option endpoint.
|
||||
Endpoint for OVHcloud Object Storage.
|
||||
Choose a number from below, or type in your own value.
|
||||
Press Enter to leave empty.
|
||||
1 / OVHcloud Gravelines, France
|
||||
\ (s3.gra.io.cloud.ovh.net)
|
||||
2 / OVHcloud Roubaix, France
|
||||
\ (s3.rbx.io.cloud.ovh.net)
|
||||
3 / OVHcloud Strasbourg, France
|
||||
\ (s3.sbg.io.cloud.ovh.net)
|
||||
4 / OVHcloud Paris, France (3AZ)
|
||||
\ (s3.eu-west-par.io.cloud.ovh.net)
|
||||
5 / OVHcloud Frankfurt, Germany
|
||||
\ (s3.de.io.cloud.ovh.net)
|
||||
6 / OVHcloud London, United Kingdom
|
||||
\ (s3.uk.io.cloud.ovh.net)
|
||||
7 / OVHcloud Warsaw, Poland
|
||||
\ (s3.waw.io.cloud.ovh.net)
|
||||
8 / OVHcloud Beauharnois, Canada
|
||||
\ (s3.bhs.io.cloud.ovh.net)
|
||||
9 / OVHcloud Toronto, Canada
|
||||
\ (s3.ca-east-tor.io.cloud.ovh.net)
|
||||
10 / OVHcloud Singapore
|
||||
\ (s3.sgp.io.cloud.ovh.net)
|
||||
11 / OVHcloud Sydney, Australia
|
||||
\ (s3.ap-southeast-syd.io.cloud.ovh.net)
|
||||
12 / OVHcloud Mumbai, India
|
||||
\ (s3.ap-south-mum.io.cloud.ovh.net)
|
||||
13 / OVHcloud Vint Hill, Virginia, USA
|
||||
\ (s3.us-east-va.io.cloud.ovh.us)
|
||||
14 / OVHcloud Hillsboro, Oregon, USA
|
||||
\ (s3.us-west-or.io.cloud.ovh.us)
|
||||
15 / OVHcloud Roubaix, France (Cold Archive)
|
||||
\ (s3.rbx-archive.io.cloud.ovh.net)
|
||||
endpoint> 2
|
||||
|
||||
Option acl.
|
||||
Canned ACL used when creating buckets and storing or copying objects.
|
||||
This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too.
|
||||
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
|
||||
Note that this ACL is applied when server-side copying objects as S3
|
||||
doesn't copy the ACL from the source but rather writes a fresh one.
|
||||
If the acl is an empty string then no X-Amz-Acl: header is added and
|
||||
the default (private) will be used.
|
||||
Choose a number from below, or type in your own value.
|
||||
Press Enter to leave empty.
|
||||
/ Owner gets FULL_CONTROL.
|
||||
1 | No one else has access rights (default).
|
||||
\ (private)
|
||||
/ Owner gets FULL_CONTROL.
|
||||
2 | The AllUsers group gets READ access.
|
||||
\ (public-read)
|
||||
/ Owner gets FULL_CONTROL.
|
||||
3 | The AllUsers group gets READ and WRITE access.
|
||||
| Granting this on a bucket is generally not recommended.
|
||||
\ (public-read-write)
|
||||
/ Owner gets FULL_CONTROL.
|
||||
4 | The AuthenticatedUsers group gets READ access.
|
||||
\ (authenticated-read)
|
||||
/ Object owner gets FULL_CONTROL.
|
||||
5 | Bucket owner gets READ access.
|
||||
| If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
|
||||
\ (bucket-owner-read)
|
||||
/ Both the object owner and the bucket owner get FULL_CONTROL over the object.
|
||||
6 | If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
|
||||
\ (bucket-owner-full-control)
|
||||
acl> 1
|
||||
|
||||
Edit advanced config?
|
||||
y) Yes
|
||||
n) No (default)
|
||||
y/n> n
|
||||
|
||||
Configuration complete.
|
||||
Options:
|
||||
- type: s3
|
||||
- provider: OVHcloud
|
||||
- access_key_id: my_access
|
||||
- secret_access_key: my_secret
|
||||
- region: rbx
|
||||
- endpoint: s3.rbx.io.cloud.ovh.net
|
||||
- acl: private
|
||||
Keep this "ovhcloud-rbx" remote?
|
||||
y) Yes this is OK (default)
|
||||
e) Edit this remote
|
||||
d) Delete this remote
|
||||
y/e/d> y
|
||||
```
|
||||
|
||||
Your configuration file should now look like this:
|
||||
|
||||
```
|
||||
[ovhcloud-rbx]
|
||||
type = s3
|
||||
provider = OVHcloud
|
||||
access_key_id = my_access
|
||||
secret_access_key = my_secret
|
||||
region = rbx
|
||||
endpoint = s3.rbx.io.cloud.ovh.net
|
||||
acl = private
|
||||
```
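
A quick smoke test of the new remote might look like this (the bucket name is
a placeholder):

```sh
rclone mkdir ovhcloud-rbx:rclone-test-bucket
rclone lsd ovhcloud-rbx:
```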
|
||||
|
||||
|
||||
### Qiniu Cloud Object Storage (Kodo) {#qiniu}
|
||||
|
||||
[Qiniu Cloud Object Storage (Kodo)](https://www.qiniu.com/en/products/kodo) is an object storage service built on Qiniu's independently developed core technology. Proven by extensive customer use and holding a leading market position, Kodo can be widely applied to mass data management.
|
||||
|
||||
@@ -62,4 +62,6 @@ Thank you very much to our sponsors:
|
||||
{{< sponsor src="/img/logos/sia.svg" width="200" height="200" title="Visit our sponsor sia" link="https://sia.tech">}}
|
||||
{{< sponsor src="/img/logos/route4me.svg" width="400" height="200" title="Visit our sponsor Route4Me" link="https://route4me.com/">}}
|
||||
{{< sponsor src="/img/logos/rcloneview.svg" width="300" height="200" title="Visit our sponsor RcloneView" link="https://rcloneview.com/">}}
|
||||
{{< sponsor src="/img/logos/rcloneui.svg" width="300" height="200" title="Visit our sponsor RcloneUI" link="https://rcloneui.com">}}
|
||||
{{< sponsor src="/img/logos/filelu-rclone.svg" width="330" height="200" title="Visit our sponsor FileLu" link="https://filelu.com/">}}
|
||||
{{< sponsor src="/img/logos/torbox.png" width="200" height="200" title="Visit our sponsor TORBOX" link="https://www.torbox.app/">}}
|
||||
|
||||
@@ -11,7 +11,7 @@ Commercial implementations of that being:
|
||||
|
||||
* [Rackspace Cloud Files](https://www.rackspace.com/cloud/files/)
|
||||
* [Memset Memstore](https://www.memset.com/cloud/storage/)
|
||||
* [OVH Object Storage](https://www.ovh.co.uk/public-cloud/storage/object-storage/)
|
||||
* [OVH Object Storage](https://www.ovhcloud.com/en/public-cloud/object-storage/)
|
||||
* [Oracle Cloud Storage](https://docs.oracle.com/en-us/iaas/integration/doc/configure-object-storage.html)
|
||||
* [Blomp Cloud Storage](https://www.blomp.com/cloud-storage/)
|
||||
* [IBM Bluemix Cloud ObjectStorage Swift](https://console.bluemix.net/docs/infrastructure/objectstorage-swift/index.html)
|
||||
|
||||
@@ -6,6 +6,14 @@
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
|
||||
<meta name="description" content="{{ .Description }}">
|
||||
<meta name="author" content="Nick Craig-Wood">
|
||||
<meta property="og:site_name" content="Rclone" />
|
||||
<meta property="og:type" content="website" />
|
||||
<meta property="og:image" content="{{ "/img/rclone-1200x630.png" | absURL }}">
|
||||
<meta property="og:image:width" content="1200">
|
||||
<meta property="og:image:height" content="630">
|
||||
<meta property="og:url" content="{{ .Permalink }}" />
|
||||
<meta property="og:title" content="{{ .Title }}" />
|
||||
<meta property="og:description" content="{{ .Description }}" />
|
||||
<link rel="shortcut icon" type="image/png" href="/img/rclone-32x32.png"/>
|
||||
<script defer data-domain="rclone.org" src="https://weblog.rclone.org/js/script.js"></script>
|
||||
<title>{{ block "title" . }}{{ .Title }}{{ end }}</title>
|
||||
|
||||
@@ -1,17 +1,16 @@
|
||||
{{ if and (gt .WordCount 200 ) (not (.Params.notoc)) }}
|
||||
<div class="card">
|
||||
<div class="card-header" style="padding: 5px 10px;">
|
||||
<div class="card-header">
|
||||
Contents
|
||||
</div>
|
||||
<div class="card-body">
|
||||
<div class="card-body card-body-padded">
|
||||
{{ .TableOfContents }}
|
||||
<p></p>
|
||||
</div>
|
||||
</div>
|
||||
{{end}}
|
||||
|
||||
<div class="card">
|
||||
<div class="card-header" style="padding: 5px 15px;">
|
||||
<div class="card-header">
|
||||
Platinum Sponsor
|
||||
</div>
|
||||
<div class="card-body">
|
||||
@@ -20,7 +19,7 @@
|
||||
</div>
|
||||
|
||||
<div class="card">
|
||||
<div class="card-header" style="padding: 5px 15px;">
|
||||
<div class="card-header">
|
||||
Gold Sponsor
|
||||
</div>
|
||||
<div class="card-body">
|
||||
@@ -29,7 +28,7 @@
|
||||
</div>
|
||||
|
||||
<div class="card">
|
||||
<div class="card-header" style="padding: 5px 15px;">
|
||||
<div class="card-header">
|
||||
Gold Sponsor
|
||||
</div>
|
||||
<div class="card-body">
|
||||
@@ -37,9 +36,18 @@
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="card">
|
||||
<div class="card-header">
|
||||
Gold Sponsor
|
||||
</div>
|
||||
<div class="card-body">
|
||||
<a href="https://mega.io/objectstorage?utm_source=rclone&utm_medium=referral&utm_campaign=rclone-mega-s4&mct=rclonepromo" target="_blank" rel="noopener" title="MEGA S4: New S3 compatible object storage. High scale. Low cost. Free egress."><img style="max-width: 100%; height: auto;" src="/img/logos/mega-s4.svg"></a><br />
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{{if .IsHome}}
|
||||
<div class="card">
|
||||
<div class="card-header" style="padding: 5px 15px;">
|
||||
<div class="card-header">
|
||||
Silver Sponsor
|
||||
</div>
|
||||
<div class="card-body">
|
||||
@@ -47,7 +55,7 @@
|
||||
</div>
|
||||
</div>
|
||||
<div class="card">
|
||||
<div class="card-header" style="padding: 5px 15px;">
|
||||
<div class="card-header">
|
||||
Silver Sponsor
|
||||
</div>
|
||||
<div class="card-body">
|
||||
@@ -57,31 +65,31 @@
|
||||
{{end}}
|
||||
|
||||
<div class="card">
|
||||
<div class="card-header" style="padding: 5px 10px;">
|
||||
<div class="card-header">
|
||||
Share and Enjoy
|
||||
</div>
|
||||
<div class="card-body">
|
||||
<p class="menu">
|
||||
<div class="card-body card-body-padded">
|
||||
<div class="menu">
|
||||
<!-- Non tracking sharing links from: https://sharingbuttons.io/ -->
|
||||
<i class="fab fa-twitter fa-fw" aria-hidden="true"></i> <a href="https://twitter.com/intent/tweet/?text=rclone%20-%20rsync%20for%20cloud%20storage%20from%20%40njcw&url=https%3A%2F%2Frclone.org" target="_blank" rel="noopener" aria-label="Share on Twitter">Twitter</a><br />
|
||||
<i class="fab fa-facebook fa-fw" aria-hidden="true"></i> <a href="https://facebook.com/sharer/sharer.php?u=https%3A%2F%2Frclone.org" target="_blank" rel="noopener" aria-label="Share on Facebook">Facebook</a><br />
|
||||
<i class="fab fa-reddit fa-fw" aria-hidden="true"></i> <a href="https://reddit.com/submit/?url=https%3A%2F%2Frclone.org&resubmit=true&title=rclone%20-%20rsync%20for%20cloud%20storage" target="_blank" rel="noopener" aria-label="Share on Reddit">Reddit</a><br />
|
||||
<iframe src="//ghbtns.com/github-btn.html?user=rclone&repo=rclone&type=star&count=true" allowtransparency="true" frameborder="0" scrolling="no" width="120" height="20"></iframe>
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="card">
|
||||
<div class="card-header" style="padding: 5px 15px;">
|
||||
<div class="card-header">
|
||||
Links
|
||||
</div>
|
||||
<div class="card-body">
|
||||
<p class="menu">
|
||||
<div class="card-body card-body-padded">
|
||||
<div class="menu">
|
||||
<i class="fa fa-comments fa-fw" aria-hidden="true"></i> <a href="https://forum.rclone.org">Rclone forum</a><br />
|
||||
<i class="fab fa-github fa-fw" aria-hidden="true"></i> <a href="https://github.com/rclone/rclone">GitHub project</a><br />
|
||||
<i class="fa fa-book fa-fw" aria-hidden="true"></i> <a href="https://github.com/rclone/rclone/wiki">Rclone Wiki</a><br />
|
||||
<i class="fa fa-heart heart fa-fw" aria-hidden="true"></i> <a href="/sponsor/">Sponsor</a><br />
|
||||
<i class="fab fa-twitter fa-fw" aria-hidden="true"></i> <a href="https://twitter.com/njcw">@njcw</a>
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -85,6 +85,7 @@
|
||||
<a class="dropdown-item" href="/linkbox/"><i class="fa fa-infinity fa-fw"></i> Linkbox</a>
|
||||
<a class="dropdown-item" href="/mailru/"><i class="fa fa-at fa-fw"></i> Mail.ru Cloud</a>
|
||||
<a class="dropdown-item" href="/mega/"><i class="fa fa-archive fa-fw"></i> Mega</a>
|
||||
<a class="dropdown-item" href="/s3/#mega"><i class="fa fa-archive fa-fw"></i> Mega S4</a>
|
||||
<a class="dropdown-item" href="/memory/"><i class="fas fa-memory fa-fw"></i> Memory</a>
|
||||
<a class="dropdown-item" href="/azureblob/"><i class="fab fa-windows fa-fw"></i> Microsoft Azure Blob Storage</a>
|
||||
<a class="dropdown-item" href="/azurefiles/"><i class="fab fa-windows fa-fw"></i> Microsoft Azure Files Storage</a>
|
||||
|
||||
12
docs/static/css/custom.css
vendored
@@ -63,11 +63,19 @@ h1, h2, h3, h4, h5, h6 {
|
||||
|
||||
/* Fix spacing of info boxes */
|
||||
.card {
|
||||
margin-top: 0.75rem;
|
||||
margin-top: 0.5rem;
|
||||
}
|
||||
/* less padding on titles */
|
||||
.card-header {
|
||||
padding: 5px 15px;
|
||||
}
|
||||
/* less padding around info box items */
|
||||
.card-body {
|
||||
padding: 0.5rem;
|
||||
padding: 0px;
|
||||
}
|
||||
/* more padding around info box items */
|
||||
.card-body-padded {
|
||||
padding: 10px 10px 10px 10px;
|
||||
}
|
||||
|
||||
/* make menus longer */
|
||||
|
||||
BIN
docs/static/img/rclone-1200x630.png
vendored
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 36 KiB |
@@ -391,6 +391,7 @@ func (s *StatsInfo) _stopAverageLoop() {
|
||||
if s.average.started {
|
||||
s.average.cancel()
|
||||
s.average.stopped.Wait()
|
||||
s.average.started = false
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -555,6 +555,11 @@ var ConfigOptionsInfo = Options{{
|
||||
Default: []string{},
|
||||
Help: "Transform paths during the copy process.",
|
||||
Groups: "Copy",
|
||||
}, {
|
||||
Name: "http_proxy",
|
||||
Default: "",
|
||||
Help: "HTTP proxy URL.",
|
||||
Groups: "Networking",
|
||||
}}
|
||||
|
||||
// ConfigInfo is filesystem config options
|
||||
@@ -667,6 +672,7 @@ type ConfigInfo struct {
|
||||
MetadataMapper SpaceSepList `config:"metadata_mapper"`
|
||||
MaxConnections int `config:"max_connections"`
|
||||
NameTransform []string `config:"name_transform"`
|
||||
HTTPProxy string `config:"http_proxy"`
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
||||
@@ -51,6 +51,7 @@ func Decrypt(b io.ReadSeeker) (io.Reader, error) {
|
||||
ctx := context.Background()
|
||||
ci := fs.GetConfig(ctx)
|
||||
var usingPasswordCommand bool
|
||||
var usingEnvPassword bool
|
||||
|
||||
// Find first non-empty line
|
||||
r := bufio.NewReader(b)
|
||||
@@ -99,15 +100,18 @@ func Decrypt(b io.ReadSeeker) (io.Reader, error) {
|
||||
} else {
|
||||
usingPasswordCommand = false
|
||||
|
||||
envpw := os.Getenv("RCLONE_CONFIG_PASS")
|
||||
envPassword := os.Getenv("RCLONE_CONFIG_PASS")
|
||||
|
||||
if envpw != "" {
|
||||
err := SetConfigPassword(envpw)
|
||||
if envPassword != "" {
|
||||
usingEnvPassword = true
|
||||
err := SetConfigPassword(envPassword)
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "Using RCLONE_CONFIG_PASS returned: %v", err)
|
||||
} else {
|
||||
fs.Debugf(nil, "Using RCLONE_CONFIG_PASS password.")
|
||||
}
|
||||
} else {
|
||||
usingEnvPassword = false
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -144,6 +148,9 @@ func Decrypt(b io.ReadSeeker) (io.Reader, error) {
|
||||
if usingPasswordCommand {
|
||||
return nil, errors.New("using --password-command derived password, unable to decrypt configuration")
|
||||
}
|
||||
if usingEnvPassword {
|
||||
return nil, errors.New("using RCLONE_CONFIG_PASS env password, unable to decrypt configuration")
|
||||
}
|
||||
if !ci.AskPassword {
|
||||
return nil, errors.New("unable to decrypt configuration and not allowed to ask for password - set RCLONE_CONFIG_PASS to your configuration password")
|
||||
}
|
||||
|
||||
@@ -9,6 +9,37 @@ import (
|
||||
"github.com/rclone/rclone/fs/rc"
|
||||
)
|
||||
|
||||
func init() {
|
||||
rc.Add(rc.Call{
|
||||
Path: "config/unlock",
|
||||
Fn: rcConfigPassword,
|
||||
Title: "Unlock the config file.",
|
||||
AuthRequired: true,
|
||||
Help: `
|
||||
Unlocks the config file if it is locked.
|
||||
|
||||
Parameters:
|
||||
|
||||
- 'config_password' - password to unlock the config file
|
||||
|
||||
A good idea is to disable AskPassword before making this call
|
||||
`,
|
||||
})
|
||||
}
|
||||
|
||||
// Unlock the config file
|
||||
// A good idea is to disable AskPassword before making this call
|
||||
func rcConfigPassword(ctx context.Context, in rc.Params) (out rc.Params, err error) {
|
||||
configPass, err := in.GetString("config_password")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if SetConfigPassword(configPass) != nil {
|
||||
return nil, errors.New("failed to set config password")
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
rc.Add(rc.Call{
|
||||
Path: "config/dump",
|
||||
@@ -75,6 +106,9 @@ See the [listremotes](/commands/rclone_listremotes/) command for more informatio
|
||||
// including any defined by environment variables.
|
||||
func rcListRemotes(ctx context.Context, in rc.Params) (out rc.Params, err error) {
|
||||
remoteNames := GetRemoteNames()
|
||||
if remoteNames == nil {
|
||||
remoteNames = []string{}
|
||||
}
|
||||
out = rc.Params{
|
||||
"remotes": remoteNames,
|
||||
}
|
||||
|
||||
@@ -138,6 +138,22 @@ func TestRc(t *testing.T) {
|
||||
assert.Nil(t, out)
|
||||
assert.Equal(t, "", config.GetValue(testName, "type"))
|
||||
assert.Equal(t, "", config.GetValue(testName, "test_key"))
|
||||
|
||||
t.Run("ListRemotes empty not nil", func(t *testing.T) {
|
||||
call := rc.Calls.Get("config/listremotes")
|
||||
assert.NotNil(t, call)
|
||||
in := rc.Params{}
|
||||
out, err := call.Fn(context.Background(), in)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, out)
|
||||
|
||||
var remotes []string
|
||||
err = out.GetStruct("remotes", &remotes)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.NotNil(t, remotes)
|
||||
assert.Empty(t, remotes)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRcProviders(t *testing.T) {
|
||||
@@ -188,3 +204,17 @@ func TestRcPaths(t *testing.T) {
|
||||
assert.Equal(t, config.GetCacheDir(), out["cache"])
|
||||
assert.Equal(t, os.TempDir(), out["temp"])
|
||||
}
|
||||
|
||||
func TestRcConfigUnlock(t *testing.T) {
|
||||
call := rc.Calls.Get("config/unlock")
|
||||
assert.NotNil(t, call)
|
||||
in := rc.Params{
|
||||
"config_password": "test",
|
||||
}
|
||||
out, err := call.Fn(context.Background(), in)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.Nil(t, out)
|
||||
|
||||
}
|
||||
|
||||
@@ -6,10 +6,12 @@ import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/cookiejar"
|
||||
"net/http/httputil"
|
||||
"net/url"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -55,7 +57,18 @@ func NewTransportCustom(ctx context.Context, customize func(*http.Transport)) ht
|
||||
// This also means we get new stuff when it gets added to go
|
||||
t := new(http.Transport)
|
||||
structs.SetDefaults(t, http.DefaultTransport.(*http.Transport))
|
||||
t.Proxy = http.ProxyFromEnvironment
|
||||
if ci.HTTPProxy != "" {
|
||||
proxyURL, err := url.Parse(ci.HTTPProxy)
|
||||
if err != nil {
|
||||
t.Proxy = func(*http.Request) (*url.URL, error) {
|
||||
return nil, fmt.Errorf("failed to set --http-proxy from %q: %w", ci.HTTPProxy, err)
|
||||
}
|
||||
} else {
|
||||
t.Proxy = http.ProxyURL(proxyURL)
|
||||
}
|
||||
} else {
|
||||
t.Proxy = http.ProxyFromEnvironment
|
||||
}
|
||||
t.MaxIdleConnsPerHost = 2 * (ci.Checkers + ci.Transfers + 1)
|
||||
t.MaxIdleConns = 2 * t.MaxIdleConnsPerHost
|
||||
t.TLSHandshakeTimeout = time.Duration(ci.ConnectTimeout)
|
||||
|
||||
@@ -20,7 +20,7 @@ const (
|
||||
var (
|
||||
errInvalidCharacters = errors.New("config name contains invalid characters - may only contain numbers, letters, `_`, `-`, `.`, `+`, `@` and space, while not start with `-` or space, and not end with space")
|
||||
errCantBeEmpty = errors.New("can't use empty string as a path")
|
||||
errBadConfigParam = errors.New("config parameters may only contain `0-9`, `A-Z`, `a-z` and `_`")
|
||||
errBadConfigParam = errors.New("config parameters may only contain `0-9`, `A-Z`, `a-z`, `_` and `.`")
|
||||
errEmptyConfigParam = errors.New("config parameters can't be empty")
|
||||
errConfigNameEmpty = errors.New("config name can't be empty")
|
||||
errConfigName = errors.New("config name needs a trailing `:`")
|
||||
@@ -79,7 +79,8 @@ func isConfigParam(c rune) bool {
|
||||
return ((c >= 'a' && c <= 'z') ||
|
||||
(c >= 'A' && c <= 'Z') ||
|
||||
(c >= '0' && c <= '9') ||
|
||||
c == '_')
|
||||
c == '_' ||
|
||||
c == '.')
|
||||
}
|
||||
|
||||
// Parsed is returned from Parse with the results of the connection string decomposition
|
||||
|
||||
55
fs/newfs.go
@@ -7,12 +7,15 @@ import (
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"maps"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/fspath"
|
||||
)
|
||||
|
||||
@@ -65,6 +68,10 @@ func NewFs(ctx context.Context, path string) (Fs, error) {
|
||||
overriddenConfig[suffix] = extraConfig
|
||||
overriddenConfigMu.Unlock()
|
||||
}
|
||||
ctx, err = addConfigToContext(ctx, configName, config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f, err := fsInfo.NewFs(ctx, configName, fsPath, config)
|
||||
if f != nil && (err == nil || err == ErrorIsFile) {
|
||||
addReverse(f, fsInfo)
|
||||
@@ -72,6 +79,54 @@ func NewFs(ctx context.Context, path string) (Fs, error) {
|
||||
return f, err
|
||||
}
|
||||
|
||||
// Add "global" config or "override" to ctx and the global config if required.
|
||||
//
|
||||
// This looks through keys prefixed with "global." or "override." in
|
||||
// config and sets ctx and optionally the global context if "global.".
|
||||
func addConfigToContext(ctx context.Context, configName string, config configmap.Getter) (newCtx context.Context, err error) {
|
||||
overrideConfig := make(configmap.Simple)
|
||||
globalConfig := make(configmap.Simple)
|
||||
for i := range ConfigOptionsInfo {
|
||||
opt := &ConfigOptionsInfo[i]
|
||||
globalName := "global." + opt.Name
|
||||
value, isSet := config.Get(globalName)
|
||||
if isSet {
|
||||
// Set both override and global if global
|
||||
overrideConfig[opt.Name] = value
|
||||
globalConfig[opt.Name] = value
|
||||
}
|
||||
overrideName := "override." + opt.Name
|
||||
value, isSet = config.Get(overrideName)
|
||||
if isSet {
|
||||
overrideConfig[opt.Name] = value
|
||||
}
|
||||
}
|
||||
if len(overrideConfig) == 0 && len(globalConfig) == 0 {
|
||||
return ctx, nil
|
||||
}
|
||||
newCtx, ci := AddConfig(ctx)
|
||||
overrideKeys := slices.Collect(maps.Keys(overrideConfig))
|
||||
slices.Sort(overrideKeys)
|
||||
globalKeys := slices.Collect(maps.Keys(globalConfig))
|
||||
slices.Sort(globalKeys)
|
||||
// Set the config in the newCtx
|
||||
err = configstruct.Set(overrideConfig, ci)
|
||||
if err != nil {
|
||||
return ctx, fmt.Errorf("failed to set override config variables %q: %w", overrideKeys, err)
|
||||
}
|
||||
Debugf(configName, "Set overridden config %q for backend startup", overrideKeys)
|
||||
// Set the global context only
|
||||
if len(globalConfig) != 0 {
|
||||
globalCI := GetConfig(context.Background())
|
||||
err = configstruct.Set(globalConfig, globalCI)
|
||||
if err != nil {
|
||||
return ctx, fmt.Errorf("failed to set global config variables %q: %w", globalKeys, err)
|
||||
}
|
||||
Debugf(configName, "Set global config %q at backend startup", overrideKeys)
|
||||
}
|
||||
return newCtx, nil
|
||||
}
|
||||
|
||||
// ConfigFs makes the config for calling NewFs with.
|
||||
//
|
||||
// It parses the path which is of the form remote:path
|
||||
|
||||
55
fs/newfs_internal_test.go
Normal file
@@ -0,0 +1,55 @@
package fs

import (
	"context"
	"testing"

	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// When no override/global keys exist, ctx must be returned unchanged.
func TestAddConfigToContext_NoChanges(t *testing.T) {
	ctx := context.Background()
	newCtx, err := addConfigToContext(ctx, "unit-test", configmap.Simple{})
	require.NoError(t, err)
	assert.Equal(t, newCtx, ctx)
}

// A single override.key must create a new ctx, but leave the
// background ctx untouched.
func TestAddConfigToContext_OverrideOnly(t *testing.T) {
	override := configmap.Simple{
		"override.user_agent": "potato",
	}
	ctx := context.Background()
	globalCI := GetConfig(ctx)
	original := globalCI.UserAgent
	newCtx, err := addConfigToContext(ctx, "unit-test", override)
	require.NoError(t, err)
	assert.NotEqual(t, newCtx, ctx)
	assert.Equal(t, original, globalCI.UserAgent)
	ci := GetConfig(newCtx)
	assert.Equal(t, "potato", ci.UserAgent)
}

// A single global.key must create a new ctx and update the
// background/global config.
func TestAddConfigToContext_GlobalOnly(t *testing.T) {
	global := configmap.Simple{
		"global.user_agent": "potato2",
	}
	ctx := context.Background()
	globalCI := GetConfig(ctx)
	original := globalCI.UserAgent
	defer func() {
		globalCI.UserAgent = original
	}()
	newCtx, err := addConfigToContext(ctx, "unit-test", global)
	require.NoError(t, err)
	assert.NotEqual(t, newCtx, ctx)
	assert.Equal(t, "potato2", globalCI.UserAgent)
	ci := GetConfig(newCtx)
	assert.Equal(t, "potato2", ci.UserAgent)
}
@@ -42,4 +42,21 @@ func TestNewFs(t *testing.T) {
 
 	assert.Equal(t, ":mockfs{S_NHG}:/tmp", fs.ConfigString(f3))
 	assert.Equal(t, ":mockfs,potato='true':/tmp", fs.ConfigStringFull(f3))
+
+	// Check that the overrides work
+	globalCI := fs.GetConfig(ctx)
+	original := globalCI.UserAgent
+	defer func() {
+		globalCI.UserAgent = original
+	}()
+
+	f4, err := fs.NewFs(ctx, ":mockfs,global.user_agent='julian':/tmp")
+	require.NoError(t, err)
+	assert.Equal(t, ":mockfs", f4.Name())
+	assert.Equal(t, "/tmp", f4.Root())
+
+	assert.Equal(t, ":mockfs:/tmp", fs.ConfigString(f4))
+	assert.Equal(t, ":mockfs:/tmp", fs.ConfigStringFull(f4))
+
+	assert.Equal(t, "julian", globalCI.UserAgent)
 }
@@ -820,7 +820,7 @@ func rcCheck(ctx context.Context, in rc.Params) (out rc.Params, err error) {
 		return nil, rc.NewErrParamInvalid(errors.New("need srcFs parameter when not using checkFileHash"))
 	}
 
-	oneway, _ := in.GetBool("oneway")
+	oneway, _ := in.GetBool("oneWay")
 	download, _ := in.GetBool("download")
 
 	opt := &CheckOpt{
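For reference, a small sketch of driving the corrected parameter name through rc.Params, matching the GetBool call in the hunk above; the srcFs/dstFs values are placeholders.

package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/rc"
)

func main() {
	// "oneWay" (camelCase) is the name the handler now reads.
	in := rc.Params{
		"srcFs":  "source:path",
		"dstFs":  "dest:path",
		"oneWay": true,
	}
	oneWay, err := in.GetBool("oneWay")
	fmt.Println(oneWay, err) // true <nil>
}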
@@ -49,7 +49,7 @@ Parameters:
 
 Note that these are the global options which are unaffected by use of
 the _config and _filter parameters. If you wish to read the parameters
-set in _config then use options/config and for _filter use options/filter.
+set in _config or _filter use options/local.
 
 This shows the internal names of the option within rclone which should
 map to the external options very easily with a few exceptions.
@@ -658,7 +658,7 @@ func TestServerSideCopyOverSelf(t *testing.T) {
 	ctx = predictDstFromLogger(ctx)
 	err = CopyDir(ctx, FremoteCopy, r.Fremote, false)
 	require.NoError(t, err)
-	testLoggerVsLsf(ctx, r.Fremote, r.Flocal, operations.GetLoggerOpt(ctx).JSON, t)
+	testLoggerVsLsf(ctx, FremoteCopy, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
 	fstest.CheckItems(t, FremoteCopy, file1)
 
 	file2 := r.WriteObject(ctx, "sub dir/hello world", "hello world again", t2)
@@ -667,7 +667,7 @@ func TestServerSideCopyOverSelf(t *testing.T) {
 	ctx = predictDstFromLogger(ctx)
 	err = CopyDir(ctx, FremoteCopy, r.Fremote, false)
 	require.NoError(t, err)
-	testLoggerVsLsf(ctx, r.Fremote, r.Flocal, operations.GetLoggerOpt(ctx).JSON, t)
+	testLoggerVsLsf(ctx, FremoteCopy, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
 	fstest.CheckItems(t, FremoteCopy, file2)
 }
 
@@ -703,7 +703,7 @@ func TestServerSideMoveOverSelf(t *testing.T) {
 	ctx = predictDstFromLogger(ctx)
 	err = CopyDir(ctx, FremoteCopy, r.Fremote, false)
 	require.NoError(t, err)
-	testLoggerVsLsf(ctx, r.Fremote, r.Flocal, operations.GetLoggerOpt(ctx).JSON, t)
+	testLoggerVsLsf(ctx, FremoteCopy, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
 	fstest.CheckItems(t, FremoteCopy, file1)
 
 	file2 := r.WriteObject(ctx, "sub dir/hello world", "hello world again", t2)
@@ -3031,6 +3031,9 @@ func DstLsf(ctx context.Context, Fremote fs.Fs) *bytes.Buffer {
 
 	list.SetSeparator(";")
 	timeFormat := operations.FormatForLSFPrecision(Fremote.Precision())
+	if Fremote.Precision() == fs.ModTimeNotSupported {
+		timeFormat = "none"
+	}
 	list.AddModTime(timeFormat)
 	list.AddHash(hash.MD5)
 	list.AddSize()
@@ -3082,7 +3085,7 @@ func testLoggerVsLsf(ctx context.Context, fdst, fsrc fs.Fs, logger *bytes.Buffer
 		elements := bytes.Split(line, []byte(";"))
 		if len(elements) >= 2 {
 			if !canTestModtime {
-				elements[0] = []byte("")
+				elements[0] = []byte("none")
 			}
 			if !canTestHash {
 				elements[1] = []byte("")
11 fs/types.go
@@ -7,6 +7,7 @@ import (
 	"context"
 	"encoding/json"
 	"io"
+	"math"
 	"time"
 
 	"github.com/rclone/rclone/fs/hash"
@@ -335,9 +336,15 @@ type FlaggerNP interface {
 }
 
 // NewUsageValue makes a valid value
-func NewUsageValue(value int64) *int64 {
+func NewUsageValue[T interface {
+	int64 | uint64 | float64
+}](value T) *int64 {
 	p := new(int64)
-	*p = value
+	if value > T(int64(math.MaxInt64)) {
+		*p = math.MaxInt64
+	} else {
+		*p = int64(value)
+	}
 	return p
 }
 
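A short usage sketch of the widened NewUsageValue, illustrating the clamping that the new signature makes necessary; the values are arbitrary examples.

package main

import (
	"fmt"
	"math"

	"github.com/rclone/rclone/fs"
)

func main() {
	// An in-range int64 is stored unchanged.
	fmt.Println(*fs.NewUsageValue(int64(1 << 30))) // 1073741824

	// A uint64 above MaxInt64 is clamped instead of wrapping negative.
	fmt.Println(*fs.NewUsageValue(uint64(math.MaxUint64))) // 9223372036854775807
}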
@@ -120,8 +120,6 @@ backends:
       - TestCopyURL
       - TestMoveFileWithIgnoreExisting
       - TestCopyFileCompareDest
-      # fs/sync
-      - TestServerSideMoveOverSelf
       #vfs
       - TestFileSetModTime/cache=off,open=false,write=false
       - TestFileSetModTime/cache=off,open=true,write=false
@@ -692,6 +692,10 @@ version recommended):
 	newFormat := true
 	err := outM.Decode(code)
 	if err != nil {
+		if len(code) > 0 && code[0] != '{' {
+			fs.Errorf(nil, "Couldn't decode rclone authorize output as base64, trying JSON: %v", err)
+			fs.Errorf(nil, "Check the code is complete and didn't get truncated >>>%s<<<", code)
+		}
 		newFormat = false
 		err = json.Unmarshal([]byte(code), &token)
 	}
@@ -88,7 +88,9 @@ func (r *Renew) Shutdown() {
 	}
 	// closing a channel can only be done once
 	r.shutdown.Do(func() {
-		r.ts.expiryTimer.Stop()
+		if r.ts != nil {
+			r.ts.expiryTimer.Stop()
+		}
 		close(r.done)
 	})
 }
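A self-contained sketch of the guarded shutdown pattern above, using hypothetical types rather than the oauthutil ones: sync.Once keeps the channel from being closed twice, and the nil check covers the case where the token source was never created.

package main

import (
	"fmt"
	"sync"
	"time"
)

type renewer struct {
	shutdown sync.Once
	timer    *time.Timer // may be nil if renewal was never started
	done     chan struct{}
}

// Shutdown is safe to call more than once and with a nil timer.
func (r *renewer) Shutdown() {
	r.shutdown.Do(func() {
		if r.timer != nil {
			r.timer.Stop()
		}
		close(r.done)
	})
}

func main() {
	r := &renewer{done: make(chan struct{})}
	r.Shutdown()
	r.Shutdown() // second call is a no-op
	_, open := <-r.done
	fmt.Println("done closed:", !open)
}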