1
0
mirror of https://github.com/rclone/rclone.git synced 2025-12-11 13:53:15 +00:00

Compare commits

..

1 Commits

Author SHA1 Message Date
Anagh Kumar Baranwal
2804f5068a build: cache go build & test
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2025-07-22 01:21:52 +05:30
64 changed files with 8087 additions and 9065 deletions

View File

@@ -282,16 +282,6 @@ jobs:
- name: Scan for vulnerabilities - name: Scan for vulnerabilities
run: govulncheck ./... run: govulncheck ./...
- name: Check Markdown format
uses: DavidAnson/markdownlint-cli2-action@v20
with:
globs: |
CONTRIBUTING.md
MAINTAINERS.md
README.md
RELEASE.md
docs/content/{authors,bugs,changelog,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md
- name: Scan edits of autogenerated files - name: Scan edits of autogenerated files
run: bin/check_autogenerated_edits.py 'origin/${{ github.base_ref }}' run: bin/check_autogenerated_edits.py 'origin/${{ github.base_ref }}'
if: github.event_name == 'pull_request' if: github.event_name == 'pull_request'

212
.github/workflows/build_android.yml vendored Normal file
View File

@@ -0,0 +1,212 @@
---
# Github Actions build for rclone
# -*- compile-command: "yamllint -f parsable build_android.yml" -*-
name: Build & Push Android Builds
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.ref || github.run_id }}
cancel-in-progress: true
# Trigger the workflow on push or pull request
on:
push:
branches:
- '**'
tags:
- '**'
pull_request:
workflow_dispatch:
inputs:
manual:
description: Manual run (bypass default conditions)
type: boolean
default: true
jobs:
android:
if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
include:
- job_name: android-all
platform: linux/amd64/android/go1.24
os: ubuntu-latest
go: '>=1.24.0-rc.1'
name: ${{ matrix.job_name }}
runs-on: ${{ matrix.os }}
steps:
- name: Checkout Repository
uses: actions/checkout@v4
with:
fetch-depth: 0
# Upgrade together with NDK version
- name: Install Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go }}
check-latest: true
cache: false
- name: Set Environment Variables
shell: bash
run: |
echo "GOMODCACHE=$(go env GOMODCACHE)" >> $GITHUB_ENV
echo "GOCACHE=$(go env GOCACHE)" >> $GITHUB_ENV
echo "VERSION=$(make version)" >> $GITHUB_ENV
- name: Set PLATFORM Variable
shell: bash
run: |
platform=${{ matrix.platform }}
echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV
- name: Get ImageOS
# There's no way around this, because "ImageOS" is only available to
# processes, but the setup-go action uses it in its key.
id: imageos
uses: actions/github-script@v7
with:
result-encoding: string
script: |
return process.env.ImageOS
- name: Set CACHE_PREFIX Variable
shell: bash
run: |
cache_prefix=${{ runner.os }}-${{ steps.imageos.outputs.result }}-${{ env.PLATFORM }}
echo "CACHE_PREFIX=${cache_prefix}" >> $GITHUB_ENV
- name: Load Go Module Cache
uses: actions/cache@v4
with:
path: |
${{ env.GOMODCACHE }}
key: ${{ env.CACHE_PREFIX }}-modcache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ env.CACHE_PREFIX }}-modcache
# Both load & update the cache when on default branch
- name: Load Go Build & Test Cache
id: go-cache
uses: actions/cache@v4
if: github.ref_name == github.event.repository.default_branch && github.event_name != 'pull_request'
with:
path: |
${{ env.GOCACHE }}
key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
restore-keys: |
${{ env.CACHE_PREFIX }}-cache
# Only load the cache when not on default branch
- name: Load Go Build & Test Cache
id: go-cache-restore
uses: actions/cache/restore@v4
if: github.ref_name != github.event.repository.default_branch || github.event_name == 'pull_request'
with:
path: |
${{ env.GOCACHE }}
key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
restore-keys: |
${{ env.CACHE_PREFIX }}-cache
- name: Build Native rclone
shell: bash
run: |
make
- name: Install gomobile
shell: bash
run: |
go install golang.org/x/mobile/cmd/gobind@latest
go install golang.org/x/mobile/cmd/gomobile@latest
env PATH=$PATH:~/go/bin gomobile init
echo "RCLONE_NDK_VERSION=21" >> $GITHUB_ENV
- name: arm-v7a - gomobile build
shell: bash
run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
- name: arm-v7a - Set Environment Variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm' >> $GITHUB_ENV
echo 'GOARM=7' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm-v7a - Build
shell: bash
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a .
- name: arm64-v8a - Set Environment Variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm64-v8a - Build
shell: bash
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .
- name: x86 - Set Environment Variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=386' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x86 - Build
shell: bash
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 .
- name: x64 - Set Environment Variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=amd64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x64 - Build
shell: bash
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x64 .
- name: Delete Existing Cache
continue-on-error: true
shell: bash
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
cache_ids=($(gh cache list --key "${{ env.CACHE_PREFIX }}-cache" --json id | jq '.[].id'))
for cache_id in "${cache_ids[@]}"; do
echo "Deleting Cache: $cache_id"
gh cache delete "$cache_id"
done
if: github.ref_name == github.event.repository.default_branch && github.event_name != 'pull_request' && steps.go-cache.outputs.cache-hit != 'true'
- name: Deploy Built Binaries
shell: bash
run: |
make ci_upload
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# Upload artifacts if not a PR && not a fork
if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone'

View File

@@ -4,6 +4,10 @@
name: Build & Push Docker Images name: Build & Push Docker Images
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.ref || github.run_id }}
cancel-in-progress: true
# Trigger the workflow on push or pull request # Trigger the workflow on push or pull request
on: on:
push: push:
@@ -41,32 +45,26 @@ jobs:
runs-on: ${{ matrix.runs-on }} runs-on: ${{ matrix.runs-on }}
steps: steps:
- name: Free Space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout Repository - name: Checkout Repository
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Set REPO_NAME Variable - name: Set REPO_NAME Variable
shell: bash
run: | run: |
echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV} echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}
- name: Set PLATFORM Variable - name: Set PLATFORM Variable
shell: bash
run: | run: |
platform=${{ matrix.platform }} platform=${{ matrix.platform }}
echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV
- name: Set CACHE_NAME Variable - name: Set CACHE_NAME Variable
shell: python shell: python
env:
GITHUB_EVENT_REPOSITORY_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
run: | run: |
import os, re import os, re
@@ -82,8 +80,11 @@ jobs:
ref_name_slug = "cache" ref_name_slug = "cache"
if os.environ.get("GITHUB_REF_NAME") and os.environ['GITHUB_EVENT_NAME'] == "pull_request": if os.environ.get("GITHUB_REF_NAME"):
ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME']) if os.environ['GITHUB_EVENT_NAME'] == "pull_request":
ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME'])
elif os.environ['GITHUB_REF_NAME'] != os.environ['GITHUB_EVENT_REPOSITORY_DEFAULT_BRANCH']:
ref_name_slug += "-ref-" + slugify(os.environ['GITHUB_REF_NAME'])
with open(os.environ['GITHUB_ENV'], 'a') as env: with open(os.environ['GITHUB_ENV'], 'a') as env:
env.write(f"CACHE_NAME={ref_name_slug}\n") env.write(f"CACHE_NAME={ref_name_slug}\n")
@@ -98,6 +99,12 @@ jobs:
script: | script: |
return process.env.ImageOS return process.env.ImageOS
- name: Set CACHE_PREFIX Variable
shell: bash
run: |
cache_prefix=${{ runner.os }}-${{ steps.imageos.outputs.result }}-${{ env.PLATFORM }}-docker-go
echo "CACHE_PREFIX=${cache_prefix}" >> $GITHUB_ENV
- name: Extract Metadata (tags, labels) for Docker - name: Extract Metadata (tags, labels) for Docker
id: meta id: meta
uses: docker/metadata-action@v5 uses: docker/metadata-action@v5
@@ -130,22 +137,35 @@ jobs:
- name: Load Go Build Cache for Docker - name: Load Go Build Cache for Docker
id: go-cache id: go-cache
uses: actions/cache@v4 uses: actions/cache@v4
if: github.ref_name == github.event.repository.default_branch
with: with:
key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
# Cache only the go builds, the module download is cached via the docker layer caching # Cache only the go builds, the module download is cached via the docker layer caching
path: | path: |
go-build-cache /tmp/go-build-cache
key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
restore-keys: |
${{ env.CACHE_PREFIX }}-cache
- name: Load Go Build Cache for Docker
id: go-cache-restore
uses: actions/cache/restore@v4
if: github.ref_name != github.event.repository.default_branch
with:
# Cache only the go builds, the module download is cached via the docker layer caching
path: |
/tmp/go-build-cache
key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
restore-keys: |
${{ env.CACHE_PREFIX }}-cache
- name: Inject Go Build Cache into Docker - name: Inject Go Build Cache into Docker
uses: reproducible-containers/buildkit-cache-dance@v3 uses: reproducible-containers/buildkit-cache-dance@v3
with: with:
cache-map: | cache-map: |
{ {
"go-build-cache": "/root/.cache/go-build" "/tmp/go-build-cache": "/root/.cache/go-build"
} }
skip-extraction: ${{ steps.go-cache.outputs.cache-hit }} skip-extraction: ${{ steps.go-cache.outputs.cache-hit || steps.go-cache-restore.outputs.cache-hit }}
- name: Login to GitHub Container Registry - name: Login to GitHub Container Registry
uses: docker/login-action@v3 uses: docker/login-action@v3
@@ -172,9 +192,10 @@ jobs:
outputs: | outputs: |
type=image,name=ghcr.io/${{ env.REPO_NAME }},push-by-digest=true,name-canonical=true,push=true type=image,name=ghcr.io/${{ env.REPO_NAME }},push-by-digest=true,name-canonical=true,push=true
cache-from: | cache-from: |
type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }} type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.PLATFORM }}-${{ env.CACHE_NAME }}
type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.PLATFORM }}-cache
cache-to: | cache-to: |
type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }},image-manifest=true,mode=max,compression=zstd type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.PLATFORM }}-${{ env.CACHE_NAME }},image-manifest=true,mode=max,compression=zstd
- name: Export Image Digest - name: Export Image Digest
run: | run: |
@@ -190,6 +211,19 @@ jobs:
retention-days: 1 retention-days: 1
if-no-files-found: error if-no-files-found: error
- name: Delete Existing Cache
if: github.ref_name == github.event.repository.default_branch && steps.go-cache.outputs.cache-hit != 'true'
continue-on-error: true
shell: bash
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
cache_ids=($(gh cache list --key "${{ env.CACHE_PREFIX }}-cache" --json id | jq '.[].id'))
for cache_id in "${cache_ids[@]}"; do
echo "Deleting Cache: $cache_id"
gh cache delete "$cache_id"
done
merge-image: merge-image:
name: Merge & Push Final Docker Image name: Merge & Push Final Docker Image
runs-on: ubuntu-24.04 runs-on: ubuntu-24.04
@@ -205,6 +239,7 @@ jobs:
merge-multiple: true merge-multiple: true
- name: Set REPO_NAME Variable - name: Set REPO_NAME Variable
shell: bash
run: | run: |
echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV} echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}

104
.github/workflows/lint.yml vendored Normal file
View File

@@ -0,0 +1,104 @@
---
# Github Actions build for rclone
# -*- compile-command: "yamllint -f parsable lint.yml" -*-
name: Lint & Vulnerability Check
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.ref || github.run_id }}
cancel-in-progress: true
# Trigger the workflow on push or pull request
on:
push:
branches:
- '**'
tags:
- '**'
pull_request:
workflow_dispatch:
inputs:
manual:
description: Manual run (bypass default conditions)
type: boolean
default: true
jobs:
lint:
if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
timeout-minutes: 30
name: "lint"
runs-on: ubuntu-latest
steps:
- name: Get runner parameters
id: get-runner-parameters
shell: bash
run: |
echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT
echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT
- name: Checkout
uses: actions/checkout@v4
- name: Install Go
id: setup-go
uses: actions/setup-go@v5
with:
go-version: '>=1.23.0-rc.1'
check-latest: true
cache: false
- name: Cache
uses: actions/cache@v4
with:
path: |
~/go/pkg/mod
~/.cache/go-build
~/.cache/golangci-lint
key: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-${{ hashFiles('go.sum') }}
restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-
- name: Code quality test (Linux)
uses: golangci/golangci-lint-action@v6
with:
version: latest
skip-cache: true
- name: Code quality test (Windows)
uses: golangci/golangci-lint-action@v6
env:
GOOS: "windows"
with:
version: latest
skip-cache: true
- name: Code quality test (macOS)
uses: golangci/golangci-lint-action@v6
env:
GOOS: "darwin"
with:
version: latest
skip-cache: true
- name: Code quality test (FreeBSD)
uses: golangci/golangci-lint-action@v6
env:
GOOS: "freebsd"
with:
version: latest
skip-cache: true
- name: Code quality test (OpenBSD)
uses: golangci/golangci-lint-action@v6
env:
GOOS: "openbsd"
with:
version: latest
skip-cache: true
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest
- name: Scan for vulnerabilities
run: govulncheck ./...

View File

@@ -1,43 +0,0 @@
default: true
# Use specific styles, to be consistent accross all documents.
# Default is to accept any as long as it is consistent within the same document.
heading-style: # MD003
style: atx
ul-style: # MD004
style: dash
hr-style: # MD035
style: ---
code-block-style: # MD046
style: fenced
code-fence-style: # MD048
style: backtick
emphasis-style: # MD049
style: asterisk
strong-style: # MD050
style: asterisk
# Allow multiple headers with same text as long as they are not siblings.
no-duplicate-heading: # MD024
siblings_only: true
# Allow long lines in code blocks and tables.
line-length: # MD013
code_blocks: false
tables: false
# The Markdown files used to generated docs with Hugo contain a top level
# header, even though the YAML front matter has a title property (which is
# used for the HTML document title only). Suppress Markdownlint warning:
# Multiple top-level headings in the same document.
single-title: # MD025
level: 1
front_matter_title:
# The HTML docs generated by Hugo from Markdown files may have slightly
# different header anchors than GitHub rendered Markdown, e.g. Hugo trims
# leading dashes so "--config string" becomes "#config-string" while it is
# "#--config-string" in GitHub preview. When writing links to headers in the
# Markdown files we must use whatever works in the final HTML generated docs.
# Suppress Markdownlint warning: Link fragments should be valid.
link-fragments: false # MD051

View File

@@ -15,81 +15,61 @@ with the [latest beta of rclone](https://beta.rclone.org/):
- Rclone version (e.g. output from `rclone version`) - Rclone version (e.g. output from `rclone version`)
- Which OS you are using and how many bits (e.g. Windows 10, 64 bit) - Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
- The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`) - The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
- A log of the command with the `-vv` flag (e.g. output from - A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
`rclone -vv copy /tmp remote:tmp`) - if the log contains secrets then edit the file with a text editor first to obscure them
- if the log contains secrets then edit the file with a text editor first to
obscure them
## Submitting a new feature or bug fix ## Submitting a new feature or bug fix
If you find a bug that you'd like to fix, or a new feature that you'd If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via GitHub. like to implement then please submit a pull request via GitHub.
If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues) If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues) first so it can be discussed.
first so it can be discussed.
To prepare your pull request first press the fork button on [rclone's GitHub To prepare your pull request first press the fork button on [rclone's GitHub
page](https://github.com/rclone/rclone). page](https://github.com/rclone/rclone).
Then [install Git](https://git-scm.com/downloads) and set your public contribution Then [install Git](https://git-scm.com/downloads) and set your public contribution [name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git) and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).
[name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git)
and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).
Next open your terminal, change directory to your preferred folder and initialise Next open your terminal, change directory to your preferred folder and initialise your local rclone project:
your local rclone project:
```sh git clone https://github.com/rclone/rclone.git
git clone https://github.com/rclone/rclone.git cd rclone
cd rclone git remote rename origin upstream
git remote rename origin upstream # if you have SSH keys setup in your GitHub account:
# if you have SSH keys setup in your GitHub account: git remote add origin git@github.com:YOURUSER/rclone.git
git remote add origin git@github.com:YOURUSER/rclone.git # otherwise:
# otherwise: git remote add origin https://github.com/YOURUSER/rclone.git
git remote add origin https://github.com/YOURUSER/rclone.git
```
Note that most of the terminal commands in the rest of this guide must be Note that most of the terminal commands in the rest of this guide must be executed from the rclone folder created above.
executed from the rclone folder created above.
Now [install Go](https://golang.org/doc/install) and verify your installation: Now [install Go](https://golang.org/doc/install) and verify your installation:
```sh go version
go version
```
Great, you can now compile and execute your own version of rclone: Great, you can now compile and execute your own version of rclone:
```sh go build
go build ./rclone version
./rclone version
```
(Note that you can also replace `go build` with `make`, which will include a (Note that you can also replace `go build` with `make`, which will include a
more accurate version number in the executable as well as enable you to specify more accurate version number in the executable as well as enable you to specify
more build options.) Finally make a branch to add your new feature more build options.) Finally make a branch to add your new feature
```sh git checkout -b my-new-feature
git checkout -b my-new-feature
```
And get hacking. And get hacking.
You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins) You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins) and a quick view on the rclone [code organisation](#code-organisation).
and a quick view on the rclone [code organisation](#code-organisation).
When ready - test the affected functionality and run the unit tests for the When ready - test the affected functionality and run the unit tests for the code you changed
code you changed
```sh cd folder/with/changed/files
cd folder/with/changed/files go test -v
go test -v
```
Note that you may need to make a test remote, e.g. `TestSwift` for some Note that you may need to make a test remote, e.g. `TestSwift` for some
of the unit tests. of the unit tests.
This is typically enough if you made a simple bug fix, otherwise please read This is typically enough if you made a simple bug fix, otherwise please read the rclone [testing](#testing) section too.
the rclone [testing](#testing) section too.
Make sure you Make sure you
@@ -99,19 +79,14 @@ Make sure you
When you are done with that push your changes to GitHub: When you are done with that push your changes to GitHub:
```sh git push -u origin my-new-feature
git push -u origin my-new-feature
```
and open the GitHub website to [create your pull and open the GitHub website to [create your pull
request](https://help.github.com/articles/creating-a-pull-request/). request](https://help.github.com/articles/creating-a-pull-request/).
Your changes will then get reviewed and you might get asked to fix some stuff. Your changes will then get reviewed and you might get asked to fix some stuff. If so, then make the changes in the same branch, commit and push your updates to GitHub.
If so, then make the changes in the same branch, commit and push your updates to
GitHub.
You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
or [squash your commits](#squashing-your-commits).
## Using Git and GitHub ## Using Git and GitHub
@@ -119,118 +94,87 @@ or [squash your commits](#squashing-your-commits).
Follow the guideline for [commit messages](#commit-messages) and then: Follow the guideline for [commit messages](#commit-messages) and then:
```sh git checkout my-new-feature # To switch to your branch
git checkout my-new-feature # To switch to your branch git status # To see the new and changed files
git status # To see the new and changed files git add FILENAME # To select FILENAME for the commit
git add FILENAME # To select FILENAME for the commit git status # To verify the changes to be committed
git status # To verify the changes to be committed git commit # To do the commit
git commit # To do the commit git log # To verify the commit. Use q to quit the log
git log # To verify the commit. Use q to quit the log
```
You can modify the message or changes in the latest commit using: You can modify the message or changes in the latest commit using:
```sh git commit --amend
git commit --amend
```
If you amend to commits that have been pushed to GitHub, then you will have to If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
[replace your previously pushed commits](#replacing-your-previously-pushed-commits).
### Replacing your previously pushed commits ### Replacing your previously pushed commits
Note that you are about to rewrite the GitHub history of your branch. It is good Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.
practice to involve your collaborators before modifying commits that have been
pushed to GitHub.
Your previously pushed commits are replaced by: Your previously pushed commits are replaced by:
```sh git push --force origin my-new-feature
git push --force origin my-new-feature
```
### Basing your changes on the latest master ### Basing your changes on the latest master
To base your changes on the latest version of the To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
[rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
```sh git checkout master
git checkout master git fetch upstream
git fetch upstream git merge --ff-only
git merge --ff-only git push origin --follow-tags # optional update of your fork in GitHub
git push origin --follow-tags # optional update of your fork in GitHub git checkout my-new-feature
git checkout my-new-feature git rebase master
git rebase master
```
If you rebase commits that have been pushed to GitHub, then you will have to If you rebase commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
[replace your previously pushed commits](#replacing-your-previously-pushed-commits).
### Squashing your commits ### Squashing your commits ###
To combine your commits into one commit: To combine your commits into one commit:
```sh git log # To count the commits to squash, e.g. the last 2
git log # To count the commits to squash, e.g. the last 2 git reset --soft HEAD~2 # To undo the 2 latest commits
git reset --soft HEAD~2 # To undo the 2 latest commits git status # To check everything is as expected
git status # To check everything is as expected
```
If everything is fine, then make the new combined commit: If everything is fine, then make the new combined commit:
```sh git commit # To commit the undone commits as one
git commit # To commit the undone commits as one
```
otherwise, you may roll back using: otherwise, you may roll back using:
```sh git reflog # To check that HEAD{1} is your previous state
git reflog # To check that HEAD{1} is your previous state git reset --soft 'HEAD@{1}' # To roll back to your previous state
git reset --soft 'HEAD@{1}' # To roll back to your previous state
```
If you squash commits that have been pushed to GitHub, then you will have to If you squash commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
[replace your previously pushed commits](#replacing-your-previously-pushed-commits).
Tip: You may like to use `git rebase -i master` if you are experienced or have a Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.
more complex situation.
### GitHub Continuous Integration ### GitHub Continuous Integration
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
to build and test the project, which should be automatically available for your
fork too from the `Actions` tab in your repository.
## Testing ## Testing
### Code quality tests ### Code quality tests
If you install [golangci-lint](https://github.com/golangci/golangci-lint) then If you install [golangci-lint](https://github.com/golangci/golangci-lint) then you can run the same tests as get run in the CI which can be very helpful.
you can run the same tests as get run in the CI which can be very helpful.
You can run them with `make check` or with `golangci-lint run ./...`. You can run them with `make check` or with `golangci-lint run ./...`.
Using these tests ensures that the rclone codebase all uses the same coding Using these tests ensures that the rclone codebase all uses the same coding standards. These tests also check for easy mistakes to make (like forgetting to check an error return).
standards. These tests also check for easy mistakes to make (like forgetting
to check an error return).
### Quick testing ### Quick testing
rclone's tests are run from the go testing framework, so at the top rclone's tests are run from the go testing framework, so at the top
level you can run this to run all the tests. level you can run this to run all the tests.
```sh go test -v ./...
go test -v ./...
```
You can also use `make`, if supported by your platform You can also use `make`, if supported by your platform
```sh make quicktest
make quicktest
```
The quicktest is [automatically run by GitHub](#github-continuous-integration) The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.
when you push your branch to GitHub.
### Backend testing ### Backend testing
@@ -246,51 +190,41 @@ need to make a remote called `TestDrive`.
You can then run the unit tests in the drive directory. These tests You can then run the unit tests in the drive directory. These tests
are skipped if `TestDrive:` isn't defined. are skipped if `TestDrive:` isn't defined.
```sh cd backend/drive
cd backend/drive go test -v
go test -v
```
You can then run the integration tests which test all of rclone's You can then run the integration tests which test all of rclone's
operations. Normally these get run against the local file system, operations. Normally these get run against the local file system,
but they can be run against any of the remotes. but they can be run against any of the remotes.
```sh cd fs/sync
cd fs/sync go test -v -remote TestDrive:
go test -v -remote TestDrive: go test -v -remote TestDrive: -fast-list
go test -v -remote TestDrive: -fast-list
cd fs/operations cd fs/operations
go test -v -remote TestDrive: go test -v -remote TestDrive:
```
If you want to use the integration test framework to run these tests If you want to use the integration test framework to run these tests
altogether with an HTML report and test retries then from the altogether with an HTML report and test retries then from the
project root: project root:
```sh go install github.com/rclone/rclone/fstest/test_all
go install github.com/rclone/rclone/fstest/test_all test_all -backends drive
test_all -backends drive
```
### Full integration testing ### Full integration testing
If you want to run all the integration tests against all the remotes, If you want to run all the integration tests against all the remotes,
then change into the project root and run then change into the project root and run
```sh make check
make check make test
make test
```
The commands may require some extra go packages which you can install with The commands may require some extra go packages which you can install with
```sh make build_dep
make build_dep
```
The full integration tests are run daily on the integration test server. You can The full integration tests are run daily on the integration test server. You can
find the results at <https://pub.rclone.org/integration-tests/> find the results at https://pub.rclone.org/integration-tests/
## Code Organisation ## Code Organisation
@@ -298,48 +232,46 @@ Rclone code is organised into a small number of top level directories
with modules beneath. with modules beneath.
- backend - the rclone backends for interfacing to cloud providers - - backend - the rclone backends for interfacing to cloud providers -
- all - import this to load all the cloud providers - all - import this to load all the cloud providers
- ...providers - ...providers
- bin - scripts for use while building or maintaining rclone - bin - scripts for use while building or maintaining rclone
- cmd - the rclone commands - cmd - the rclone commands
- all - import this to load all the commands - all - import this to load all the commands
- ...commands - ...commands
- cmdtest - end-to-end tests of commands, flags, environment variables,... - cmdtest - end-to-end tests of commands, flags, environment variables,...
- docs - the documentation and website - docs - the documentation and website
- content - adjust these docs only, except those marked autogenerated - content - adjust these docs only - everything else is autogenerated
or portions marked autogenerated where the corresponding .go file must be - command - these are auto-generated - edit the corresponding .go file
edited instead, and everything else is autogenerated
- commands - these are auto-generated, edit the corresponding .go file
- fs - main rclone definitions - minimal amount of code - fs - main rclone definitions - minimal amount of code
- accounting - bandwidth limiting and statistics - accounting - bandwidth limiting and statistics
- asyncreader - an io.Reader which reads ahead - asyncreader - an io.Reader which reads ahead
- config - manage the config file and flags - config - manage the config file and flags
- driveletter - detect if a name is a drive letter - driveletter - detect if a name is a drive letter
- filter - implements include/exclude filtering - filter - implements include/exclude filtering
- fserrors - rclone specific error handling - fserrors - rclone specific error handling
- fshttp - http handling for rclone - fshttp - http handling for rclone
- fspath - path handling for rclone - fspath - path handling for rclone
- hash - defines rclone's hash types and functions - hash - defines rclone's hash types and functions
- list - list a remote - list - list a remote
- log - logging facilities - log - logging facilities
- march - iterates directories in lock step - march - iterates directories in lock step
- object - in memory Fs objects - object - in memory Fs objects
- operations - primitives for sync, e.g. Copy, Move - operations - primitives for sync, e.g. Copy, Move
- sync - sync directories - sync - sync directories
- walk - walk a directory - walk - walk a directory
- fstest - provides integration test framework - fstest - provides integration test framework
- fstests - integration tests for the backends - fstests - integration tests for the backends
- mockdir - mocks an fs.Directory - mockdir - mocks an fs.Directory
- mockobject - mocks an fs.Object - mockobject - mocks an fs.Object
- test_all - Runs integration tests for everything - test_all - Runs integration tests for everything
- graphics - the images used in the website, etc. - graphics - the images used in the website, etc.
- lib - libraries used by the backend - lib - libraries used by the backend
- atexit - register functions to run when rclone exits - atexit - register functions to run when rclone exits
- dircache - directory ID to name caching - dircache - directory ID to name caching
- oauthutil - helpers for using oauth - oauthutil - helpers for using oauth
- pacer - retries with backoff and paces operations - pacer - retries with backoff and paces operations
- readers - a selection of useful io.Readers - readers - a selection of useful io.Readers
- rest - a thin abstraction over net/http for REST - rest - a thin abstraction over net/http for REST
- librclone - in memory interface to rclone's API for embedding rclone - librclone - in memory interface to rclone's API for embedding rclone
- vfs - Virtual FileSystem layer for implementing rclone mount and similar - vfs - Virtual FileSystem layer for implementing rclone mount and similar
@@ -347,36 +279,6 @@ with modules beneath.
If you are adding a new feature then please update the documentation. If you are adding a new feature then please update the documentation.
The documentation sources are generally in Markdown format, in conformance
with the CommonMark specification and compatible with GitHub Flavored
Markdown (GFM). The markdown format is checked as part of the lint operation
that runs automatically on pull requests, to enforce standards and consistency.
This is based on the [markdownlint](https://github.com/DavidAnson/markdownlint)
tool, which can also be integrated into editors so you can perform the same
checks while writing.
HTML pages, served as website <rclone.org>, are generated from the Markdown,
using [Hugo](https://gohugo.io). Note that when generating the HTML pages,
there is currently used a different algorithm for generating header anchors
than what GitHub uses for its Markdown rendering. For example, in the HTML docs
generated by Hugo any leading `-` characters are ignored, which means when
linking to a header with text `--config string` we therefore need to use the
link `#config-string` in our Markdown source, which will not work in GitHub's
preview where `#--config-string` would be the correct link.
Most of the documentation are written directly in text files with extension
`.md`, mainly within folder `docs/content`. Note that several of such files
are autogenerated (e.g. the command documentation, and `docs/content/flags.md`),
or contain autogenerated portions (e.g. the backend documentation under
`docs/content/commands`). These are marked with an `autogenerated` comment.
The sources of the autogenerated text are usually Markdown formatted text
embedded as string values in the Go source code, so you need to locate these
and edit the `.go` file instead. The `MANUAL.*`, `rclone.1` and other text
files in the root of the repository are also autogenerated. The autogeneration
of files, and the website, will be done during the release process. See the
`make doc` and `make website` targets in the Makefile if you are interested in
how. You don't need to run these when adding a feature.
If you add a new general flag (not for a backend), then document it in If you add a new general flag (not for a backend), then document it in
`docs/content/docs.md` - the flags there are supposed to be in `docs/content/docs.md` - the flags there are supposed to be in
alphabetical order. alphabetical order.
@@ -385,40 +287,39 @@ If you add a new backend option/flag, then it should be documented in
the source file in the `Help:` field. the source file in the `Help:` field.
- Start with the most important information about the option, - Start with the most important information about the option,
as a single sentence on a single line. as a single sentence on a single line.
- This text will be used for the command-line flag help. - This text will be used for the command-line flag help.
- It will be combined with other information, such as any default value, - It will be combined with other information, such as any default value,
and the result will look odd if not written as a single sentence. and the result will look odd if not written as a single sentence.
- It should end with a period/full stop character, which will be shown - It should end with a period/full stop character, which will be shown
in docs but automatically removed when producing the flag help. in docs but automatically removed when producing the flag help.
- Try to keep it below 80 characters, to reduce text wrapping in the terminal. - Try to keep it below 80 characters, to reduce text wrapping in the terminal.
- More details can be added in a new paragraph, after an empty line (`"\n\n"`). - More details can be added in a new paragraph, after an empty line (`"\n\n"`).
- Like with docs generated from Markdown, a single line break is ignored - Like with docs generated from Markdown, a single line break is ignored
and two line breaks creates a new paragraph. and two line breaks creates a new paragraph.
- This text will be shown to the user in `rclone config` - This text will be shown to the user in `rclone config`
and in the docs (where it will be added by `make backenddocs`, and in the docs (where it will be added by `make backenddocs`,
normally run some time before next release). normally run some time before next release).
- To create options of enumeration type use the `Examples:` field. - To create options of enumeration type use the `Examples:` field.
- Each example value has its own `Help:` field, but they are treated - Each example value has its own `Help:` field, but they are treated
a bit different than the main option help text. They will be shown a bit different than the main option help text. They will be shown
as an unordered list, therefore a single line break is enough to as an unordered list, therefore a single line break is enough to
create a new list item. Also, for enumeration texts like name of create a new list item. Also, for enumeration texts like name of
countries, it looks better without an ending period/full stop character. countries, it looks better without an ending period/full stop character.
When writing documentation for an entirely new backend, The only documentation you need to edit are the `docs/content/*.md`
see [backend documentation](#backend-documentation). files. The `MANUAL.*`, `rclone.1`, website, etc. are all auto-generated
from those during the release process. See the `make doc` and `make
website` targets in the Makefile if you are interested in how. You
don't need to run these when adding a feature.
If you are updating documentation for a command, you must do that in the Documentation for rclone sub commands is with their code, e.g.
command source code, e.g. `cmd/ls/ls.go`. Write flag help strings as a single `cmd/ls/ls.go`. Write flag help strings as a single sentence on a single
sentence on a single line, without a period/full stop character at the end, line, without a period/full stop character at the end, as it will be
as it will be combined unmodified with other information (such as any default combined unmodified with other information (such as any default value).
value).
Note that you can use Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
[GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository) for small changes in the docs which makes it very easy.
for small changes in the docs which makes it very easy. Just remember the
caveat when linking to header anchors, noted above, which means that GitHub's
Markdown preview may not be an entirely reliable verification of the results.
## Making a release ## Making a release
@@ -449,13 +350,13 @@ change will get linked into the issue.
Here is an example of a short commit message: Here is an example of a short commit message:
```text ```
drive: add team drive support - fixes #885 drive: add team drive support - fixes #885
``` ```
And here is an example of a longer one: And here is an example of a longer one:
```text ```
mount: fix hang on errored upload mount: fix hang on errored upload
In certain circumstances, if an upload failed then the mount could hang In certain circumstances, if an upload failed then the mount could hang
@@ -478,9 +379,7 @@ To add a dependency `github.com/ncw/new_dependency` see the
instructions below. These will fetch the dependency and add it to instructions below. These will fetch the dependency and add it to
`go.mod` and `go.sum`. `go.mod` and `go.sum`.
```sh go get github.com/ncw/new_dependency
go get github.com/ncw/new_dependency
```
You can add constraints on that package when doing `go get` (see the You can add constraints on that package when doing `go get` (see the
go docs linked above), but don't unless you really need to. go docs linked above), but don't unless you really need to.
@@ -492,9 +391,7 @@ and `go.sum` in the same commit as your other changes.
If you need to update a dependency then run If you need to update a dependency then run
```sh go get golang.org/x/crypto
go get golang.org/x/crypto
```
Check in a single commit as above. Check in a single commit as above.
@@ -537,38 +434,25 @@ remote or an fs.
### Getting going ### Getting going
- Create `backend/remote/remote.go` (copy this from a similar remote) - Create `backend/remote/remote.go` (copy this from a similar remote)
- box is a good one to start from if you have a directory-based remote (and - box is a good one to start from if you have a directory-based remote (and shows how to use the directory cache)
shows how to use the directory cache) - b2 is a good one to start from if you have a bucket-based remote
- b2 is a good one to start from if you have a bucket-based remote
- Add your remote to the imports in `backend/all/all.go` - Add your remote to the imports in `backend/all/all.go`
- HTTP based remotes are easiest to maintain if they use rclone's - HTTP based remotes are easiest to maintain if they use rclone's [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but if there is a really good Go SDK from the provider then use that instead.
[lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but - Try to implement as many optional methods as possible as it makes the remote more usable.
if there is a really good Go SDK from the provider then use that instead. - Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to make sure we can encode any path name and `rclone info` to help determine the encodings needed
- Try to implement as many optional methods as possible as it makes the remote - `rclone purge -v TestRemote:rclone-info`
more usable. - `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
- Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to - `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
make sure we can encode any path name and `rclone info` to help determine the - open `remote.csv` in a spreadsheet and examine
encodings needed
- `rclone purge -v TestRemote:rclone-info`
- `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
- `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
- open `remote.csv` in a spreadsheet and examine
### Guidelines for a speedy merge ### Guidelines for a speedy merge
- **Do** use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) - **Do** use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) if you are implementing a REST like backend and parsing XML/JSON in the backend.
if you are implementing a REST like backend and parsing XML/JSON in the backend. - **Do** use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp) if your backend is HTTP based - this adds features like `--dump bodies`, `--tpslimit`, `--user-agent` without you having to code anything!
- **Do** use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp) - **Do** follow your example backend exactly - use the same code order, function names, layout, structure. **Don't** move stuff around and **Don't** delete the comments.
if your backend is HTTP based - this adds features like `--dump bodies`, - **Do not** split your backend up into `fs.go` and `object.go` (there are a few backends like that - don't follow them!)
`--tpslimit`, `--user-agent` without you having to code anything!
- **Do** follow your example backend exactly - use the same code order, function
names, layout, structure. **Don't** move stuff around and **Don't** delete the
comments.
- **Do not** split your backend up into `fs.go` and `object.go` (there are a few
backends like that - don't follow them!)
- **Do** put your API type definitions in a separate file - by preference `api/types.go` - **Do** put your API type definitions in a separate file - by preference `api/types.go`
- **Remember** we have >50 backends to maintain so keeping them as similar as - **Remember** we have >50 backends to maintain so keeping them as similar as possible to each other is a high priority!
possible to each other is a high priority!
### Unit tests ### Unit tests
@@ -579,20 +463,19 @@ remote or an fs.
### Integration tests ### Integration tests
- Add your backend to `fstest/test_all/config.yaml` - Add your backend to `fstest/test_all/config.yaml`
- Once you've done that then you can use the integration test framework from - Once you've done that then you can use the integration test framework from the project root:
the project root: - go install ./...
- go install ./... - test_all -backends remote
- test_all -backends remote
Or if you want to run the integration tests manually: Or if you want to run the integration tests manually:
- Make sure integration tests pass with - Make sure integration tests pass with
- `cd fs/operations` - `cd fs/operations`
- `go test -v -remote TestRemote:` - `go test -v -remote TestRemote:`
- `cd fs/sync` - `cd fs/sync`
- `go test -v -remote TestRemote:` - `go test -v -remote TestRemote:`
- If your remote defines `ListR` check with this also - If your remote defines `ListR` check with this also
- `go test -v -remote TestRemote: -fast-list` - `go test -v -remote TestRemote: -fast-list`
See the [testing](#testing) section for more information on integration tests. See the [testing](#testing) section for more information on integration tests.
@@ -604,13 +487,10 @@ alphabetical order of full name of remote (e.g. `drive` is ordered as
`Google Drive`) but with the local file system last. `Google Drive`) but with the local file system last.
- `README.md` - main GitHub page - `README.md` - main GitHub page
- `docs/content/remote.md` - main docs page (note the backend options are - `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
automatically added to this file with `make backenddocs`) - make sure this has the `autogenerated options` comments in (see your reference backend docs)
- make sure this has the `autogenerated options` comments in (see your - update them in your backend with `bin/make_backend_docs.py remote`
reference backend docs) - `docs/content/overview.md` - overview docs - add an entry into the Features table and the Optional Features table.
- update them in your backend with `bin/make_backend_docs.py remote`
- `docs/content/overview.md` - overview docs - add an entry into the Features
table and the Optional Features table.
- `docs/content/docs.md` - list of remotes in config section - `docs/content/docs.md` - list of remotes in config section
- `docs/content/_index.md` - front page of rclone.org - `docs/content/_index.md` - front page of rclone.org
- `docs/layouts/chrome/navbar.html` - add it to the website navigation - `docs/layouts/chrome/navbar.html` - add it to the website navigation
@@ -626,21 +506,21 @@ It is quite easy to add a new S3 provider to rclone.
You'll need to modify the following files You'll need to modify the following files
- `backend/s3/s3.go` - `backend/s3/s3.go`
- Add the provider to `providerOption` at the top of the file - Add the provider to `providerOption` at the top of the file
- Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`. - Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
- Exclude your provider from generic config questions (e.g. `region` and `endpoint`). - Exclude your provider from generic config questions (e.g. `region` and `endpoint`).
- Add the provider to the `setQuirks` function - see the documentation there. - Add the provider to the `setQuirks` function - see the documentation there.
- `docs/content/s3.md` - `docs/content/s3.md`
- Add the provider at the top of the page. - Add the provider at the top of the page.
- Add a section about the provider linked from there. - Add a section about the provider linked from there.
- Add a transcript of a trial `rclone config` session - Add a transcript of a trial `rclone config` session
- Edit the transcript to remove things which might change in subsequent versions - Edit the transcript to remove things which might change in subsequent versions
- **Do not** alter or add to the autogenerated parts of `s3.md` - **Do not** alter or add to the autogenerated parts of `s3.md`
- **Do not** run `make backenddocs` or `bin/make_backend_docs.py s3` - **Do not** run `make backenddocs` or `bin/make_backend_docs.py s3`
- `README.md` - this is the home page in GitHub - `README.md` - this is the home page in GitHub
- Add the provider and a link to the section you wrote in `docs/content/s3.md` - Add the provider and a link to the section you wrote in `docs/content/s3.md`
- `docs/content/_index.md` - this is the home page of rclone.org - `docs/content/_index.md` - this is the home page of rclone.org
- Add the provider and a link to the section you wrote in `docs/content/s3.md` - Add the provider and a link to the section you wrote in `docs/content/s3.md`
When adding the provider, endpoints, quirks, docs etc keep them in When adding the provider, endpoints, quirks, docs etc keep them in
alphabetical order by `Provider` name, but with `AWS` first and alphabetical order by `Provider` name, but with `AWS` first and
@@ -661,34 +541,31 @@ For an example of adding an s3 provider see [eb3082a1](https://github.com/rclone
## Writing a plugin ## Writing a plugin
New features (backends, commands) can also be added "out-of-tree", through Go New features (backends, commands) can also be added "out-of-tree", through Go plugins.
plugins. Changes will be kept in a dynamically loaded file instead of being Changes will be kept in a dynamically loaded file instead of being compiled into the main binary.
compiled into the main binary. This is useful if you can't merge your changes This is useful if you can't merge your changes upstream or don't want to maintain a fork of rclone.
upstream or don't want to maintain a fork of rclone.
### Usage ### Usage
- Naming - Naming
- Plugins names must have the pattern `librcloneplugin_KIND_NAME.so`. - Plugins names must have the pattern `librcloneplugin_KIND_NAME.so`.
- `KIND` should be one of `backend`, `command` or `bundle`. - `KIND` should be one of `backend`, `command` or `bundle`.
- Example: A plugin with backend support for PiFS would be called - Example: A plugin with backend support for PiFS would be called
`librcloneplugin_backend_pifs.so`. `librcloneplugin_backend_pifs.so`.
- Loading - Loading
- Supported on macOS & Linux as of now. ([Go issue for Windows support](https://github.com/golang/go/issues/19282)) - Supported on macOS & Linux as of now. ([Go issue for Windows support](https://github.com/golang/go/issues/19282))
- Supported on rclone v1.50 or greater. - Supported on rclone v1.50 or greater.
- All plugins in the folder specified by variable `$RCLONE_PLUGIN_PATH` are loaded. - All plugins in the folder specified by variable `$RCLONE_PLUGIN_PATH` are loaded.
- If this variable doesn't exist, plugin support is disabled. - If this variable doesn't exist, plugin support is disabled.
- Plugins must be compiled against the exact version of rclone to work. - Plugins must be compiled against the exact version of rclone to work.
(The rclone used during building the plugin must be the same as the source (The rclone used during building the plugin must be the same as the source of rclone)
of rclone)
### Building ### Building
To turn your existing additions into a Go plugin, move them to an external repository To turn your existing additions into a Go plugin, move them to an external repository
and change the top-level package name to `main`. and change the top-level package name to `main`.
Check `rclone --version` and make sure that the plugin's rclone dependency and Check `rclone --version` and make sure that the plugin's rclone dependency and host Go version match.
host Go version match.
Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin. Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.
@@ -706,6 +583,6 @@ add them out of tree.
This may be easier than using a plugin and is supported on all This may be easier than using a plugin and is supported on all
platforms not just macOS and Linux. platforms not just macOS and Linux.
This is explained further in <https://github.com/rclone/rclone_out_of_tree_example> This is explained further in https://github.com/rclone/rclone_out_of_tree_example
which has an example of an out of tree backend `ram` (which is a which has an example of an out of tree backend `ram` (which is a
renamed version of the `memory` backend). renamed version of the `memory` backend).

View File

@@ -1,4 +1,4 @@
# Maintainers guide for rclone # Maintainers guide for rclone #
Current active maintainers of rclone are: Current active maintainers of rclone are:
@@ -24,108 +24,80 @@ Current active maintainers of rclone are:
| Dan McArdle | @dmcardle | gitannex | | Dan McArdle | @dmcardle | gitannex |
| Sam Harrison | @childish-sambino | filescom | | Sam Harrison | @childish-sambino | filescom |
## This is a work in progress draft **This is a work in progress Draft**
This is a guide for how to be an rclone maintainer. This is mostly a write-up This is a guide for how to be an rclone maintainer. This is mostly a write-up of what I (@ncw) attempt to do.
of what I (@ncw) attempt to do.
## Triaging Tickets ## Triaging Tickets ##
When a ticket comes in it should be triaged. This means it should be classified When a ticket comes in it should be triaged. This means it should be classified by adding labels and placed into a milestone. Quite a lot of tickets need a bit of back and forth to determine whether it is a valid ticket so tickets may remain without labels or milestone for a while.
by adding labels and placed into a milestone. Quite a lot of tickets need a bit
of back and forth to determine whether it is a valid ticket so tickets may
remain without labels or milestone for a while.
Rclone uses the labels like this: Rclone uses the labels like this:
- `bug` - a definitely verified bug * `bug` - a definitely verified bug
- `can't reproduce` - a problem which we can't reproduce * `can't reproduce` - a problem which we can't reproduce
- `doc fix` - a bug in the documentation - if users need help understanding the * `doc fix` - a bug in the documentation - if users need help understanding the docs add this label
docs add this label * `duplicate` - normally close these and ask the user to subscribe to the original
- `duplicate` - normally close these and ask the user to subscribe to the original * `enhancement: new remote` - a new rclone backend
- `enhancement: new remote` - a new rclone backend * `enhancement` - a new feature
- `enhancement` - a new feature * `FUSE` - to do with `rclone mount` command
- `FUSE` - to do with `rclone mount` command * `good first issue` - mark these if you find a small self-contained issue - these get shown to new visitors to the project
- `good first issue` - mark these if you find a small self-contained issue - * `help` wanted - mark these if you find a self-contained issue - these get shown to new visitors to the project
these get shown to new visitors to the project * `IMPORTANT` - note to maintainers not to forget to fix this for the release
- `help` wanted - mark these if you find a self-contained issue - these get * `maintenance` - internal enhancement, code re-organisation, etc.
shown to new visitors to the project * `Needs Go 1.XX` - waiting for that version of Go to be released
- `IMPORTANT` - note to maintainers not to forget to fix this for the release * `question` - not a `bug` or `enhancement` - direct to the forum for next time
- `maintenance` - internal enhancement, code re-organisation, etc. * `Remote: XXX` - which rclone backend this affects
- `Needs Go 1.XX` - waiting for that version of Go to be released * `thinking` - not decided on the course of action yet
- `question` - not a `bug` or `enhancement` - direct to the forum for next time
- `Remote: XXX` - which rclone backend this affects
- `thinking` - not decided on the course of action yet
If it turns out to be a bug or an enhancement it should be tagged as such, with If it turns out to be a bug or an enhancement it should be tagged as such, with the appropriate other tags. Don't forget the "good first issue" tag to give new contributors something easy to do to get going.
the appropriate other tags. Don't forget the "good first issue" tag to give new
contributors something easy to do to get going.
When a ticket is tagged it should be added to a milestone, either the next When a ticket is tagged it should be added to a milestone, either the next release, the one after, Soon or Help Wanted. Bugs can be added to the "Known Bugs" milestone if they aren't planned to be fixed or need to wait for something (e.g. the next go release).
release, the one after, Soon or Help Wanted. Bugs can be added to the
"Known Bugs" milestone if they aren't planned to be fixed or need to wait for
something (e.g. the next go release).
The milestones have these meanings: The milestones have these meanings:
- v1.XX - stuff we would like to fit into this release * v1.XX - stuff we would like to fit into this release
- v1.XX+1 - stuff we are leaving until the next release * v1.XX+1 - stuff we are leaving until the next release
- Soon - stuff we think is a good idea - waiting to be scheduled for a release * Soon - stuff we think is a good idea - waiting to be scheduled for a release
- Help wanted - blue sky stuff that might get moved up, or someone could help with * Help wanted - blue sky stuff that might get moved up, or someone could help with
- Known bugs - bugs waiting on external factors or we aren't going to fix for * Known bugs - bugs waiting on external factors or we aren't going to fix for the moment
the moment
Tickets [with no milestone](https://github.com/rclone/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) Tickets [with no milestone](https://github.com/rclone/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.
are good candidates for ones that have slipped between the gaps and need
following up.
## Closing Tickets ## Closing Tickets ##
Close tickets as soon as you can - make sure they are tagged with a release. Close tickets as soon as you can - make sure they are tagged with a release. Post a link to a beta in the ticket with the fix in, asking for feedback.
Post a link to a beta in the ticket with the fix in, asking for feedback.
## Pull requests ## Pull requests ##
Try to process pull requests promptly! Try to process pull requests promptly!
Merging pull requests on GitHub itself works quite well nowadays so you can Merging pull requests on GitHub itself works quite well nowadays so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
squash and rebase or rebase pull requests. rclone doesn't use merge commits.
Use the squash and rebase option if you need to edit the commit message.
After merging the commit, in your local master branch, do `git pull` then run After merging the commit, in your local master branch, do `git pull` then run `bin/update-authors.py` to update the authors file then `git push`.
`bin/update-authors.py` to update the authors file then `git push`.
Sometimes pull requests need to be left open for a while - this especially true Sometimes pull requests need to be left open for a while - this especially true of contributions of new backends which take a long time to get right.
of contributions of new backends which take a long time to get right.
## Merges ## Merges ##
If you are merging a branch locally then do `git merge --ff-only branch-name` to If you are merging a branch locally then do `git merge --ff-only branch-name` to avoid a merge commit. You'll need to rebase the branch if it doesn't merge cleanly.
avoid a merge commit. You'll need to rebase the branch if it doesn't merge cleanly.
## Release cycle ## Release cycle ##
Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer if there is something big to merge that didn't stabilize properly or for personal reasons.
if there is something big to merge that didn't stabilize properly or for personal
reasons.
High impact regressions should be fixed before the next release. High impact regressions should be fixed before the next release.
Near the start of the release cycle, the dependencies should be updated with Near the start of the release cycle, the dependencies should be updated with `make update` to give time for bugs to surface.
`make update` to give time for bugs to surface.
Towards the end of the release cycle try not to merge anything too big so let Towards the end of the release cycle try not to merge anything too big so let things settle down.
things settle down.
Follow the instructions in RELEASE.md for making the release. Note that the Follow the instructions in RELEASE.md for making the release. Note that the testing part is the most time-consuming often needing several rounds of test and fix depending on exactly how many new features rclone has gained.
testing part is the most time-consuming often needing several rounds of test
and fix depending on exactly how many new features rclone has gained.
## Mailing list ## Mailing list ##
There is now an invite-only mailing list for rclone developers `rclone-dev` on There is now an invite-only mailing list for rclone developers `rclone-dev` on google groups.
google groups.
## TODO ## TODO ##
I should probably make a <dev@rclone.org> to register with cloud providers. I should probably make a dev@rclone.org to register with cloud providers.

View File

@@ -88,13 +88,13 @@ test: rclone test_all
# Quick test # Quick test
quicktest: quicktest:
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) ./... RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) ./...
racequicktest: racequicktest:
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -cpu=2 -race ./... RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...
compiletest: compiletest:
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -run XXX ./... RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -run XXX ./...
# Do source code quality checks # Do source code quality checks
check: rclone check: rclone
@@ -243,7 +243,7 @@ fetch_binaries:
rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/ rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/
serve: website serve: website
cd docs && hugo server --logLevel info -w --disableFastRender --ignoreCache cd docs && hugo server --logLevel info -w --disableFastRender
tag: retag doc tag: retag doc
bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new

260
README.md
View File

@@ -1,6 +1,6 @@
<!-- markdownlint-disable-next-line first-line-heading no-inline-html -->
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only) [<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
<!-- markdownlint-disable-next-line no-inline-html -->
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only) [<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
[Website](https://rclone.org) | [Website](https://rclone.org) |
@@ -18,104 +18,102 @@
# Rclone # Rclone
Rclone *("rsync for cloud storage")* is a command-line program to sync files and Rclone *("rsync for cloud storage")* is a command-line program to sync files and directories to and from different cloud storage providers.
directories to and from different cloud storage providers.
## Storage providers ## Storage providers
- 1Fichier [:page_facing_up:](https://rclone.org/fichier/) * 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
- Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/) * Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
- Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss) * Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
- Amazon S3 [:page_facing_up:](https://rclone.org/s3/) * Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
- ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos) * ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
- Backblaze B2 [:page_facing_up:](https://rclone.org/b2/) * Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
- Box [:page_facing_up:](https://rclone.org/box/) * Box [:page_facing_up:](https://rclone.org/box/)
- Ceph [:page_facing_up:](https://rclone.org/s3/#ceph) * Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
- China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos) * China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
- Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2) * Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
- Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/) * Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
- DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces) * DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
- Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage) * Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
- Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost) * Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
- Dropbox [:page_facing_up:](https://rclone.org/dropbox/) * Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
- Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/) * Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
- Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files) * Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
- FileLu [:page_facing_up:](https://rclone.org/filelu/) * FileLu [:page_facing_up:](https://rclone.org/filelu/)
- Files.com [:page_facing_up:](https://rclone.org/filescom/) * Files.com [:page_facing_up:](https://rclone.org/filescom/)
- FlashBlade [:page_facing_up:](https://rclone.org/s3/#pure-storage-flashblade) * FlashBlade [:page_facing_up:](https://rclone.org/s3/#pure-storage-flashblade)
- FTP [:page_facing_up:](https://rclone.org/ftp/) * FTP [:page_facing_up:](https://rclone.org/ftp/)
- GoFile [:page_facing_up:](https://rclone.org/gofile/) * GoFile [:page_facing_up:](https://rclone.org/gofile/)
- Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/) * Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
- Google Drive [:page_facing_up:](https://rclone.org/drive/) * Google Drive [:page_facing_up:](https://rclone.org/drive/)
- Google Photos [:page_facing_up:](https://rclone.org/googlephotos/) * Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
- HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/) * HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
- Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box) * Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
- HiDrive [:page_facing_up:](https://rclone.org/hidrive/) * HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
- HTTP [:page_facing_up:](https://rclone.org/http/) * HTTP [:page_facing_up:](https://rclone.org/http/)
- Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs) * Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
- iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/) * iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/)
- ImageKit [:page_facing_up:](https://rclone.org/imagekit/) * ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
- Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/) * Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
- Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/) * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
- IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3) * IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
- IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos) * IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
- Koofr [:page_facing_up:](https://rclone.org/koofr/) * Koofr [:page_facing_up:](https://rclone.org/koofr/)
- Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia) * Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
- Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage) * Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
- Linkbox [:page_facing_up:](https://rclone.org/linkbox) * Linkbox [:page_facing_up:](https://rclone.org/linkbox)
- Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode) * Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode)
- Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu) * Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu)
- Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/) * Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
- Memset Memstore [:page_facing_up:](https://rclone.org/swift/) * Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
- MEGA [:page_facing_up:](https://rclone.org/mega/) * MEGA [:page_facing_up:](https://rclone.org/mega/)
- MEGA S4 Object Storage [:page_facing_up:](https://rclone.org/s3/#mega) * MEGA S4 Object Storage [:page_facing_up:](https://rclone.org/s3/#mega)
- Memory [:page_facing_up:](https://rclone.org/memory/) * Memory [:page_facing_up:](https://rclone.org/memory/)
- Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/) * Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
- Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/) * Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/)
- Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/) * Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
- Minio [:page_facing_up:](https://rclone.org/s3/#minio) * Minio [:page_facing_up:](https://rclone.org/s3/#minio)
- Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud) * Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
- Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/) * OVH [:page_facing_up:](https://rclone.org/swift/)
- OpenDrive [:page_facing_up:](https://rclone.org/opendrive/) * Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
- OpenStack Swift [:page_facing_up:](https://rclone.org/swift/) * OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
- Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/) * OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
- Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/) * Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
- Outscale [:page_facing_up:](https://rclone.org/s3/#outscale) * Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
- OVHcloud Object Storage (Swift) [:page_facing_up:](https://rclone.org/swift/) * Outscale [:page_facing_up:](https://rclone.org/s3/#outscale)
- OVHcloud Object Storage (S3-compatible) [:page_facing_up:](https://rclone.org/s3/#ovhcloud) * ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
- ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud) * pCloud [:page_facing_up:](https://rclone.org/pcloud/)
- pCloud [:page_facing_up:](https://rclone.org/pcloud/) * Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
- Petabox [:page_facing_up:](https://rclone.org/s3/#petabox) * PikPak [:page_facing_up:](https://rclone.org/pikpak/)
- PikPak [:page_facing_up:](https://rclone.org/pikpak/) * Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/)
- Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/) * premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
- premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/) * put.io [:page_facing_up:](https://rclone.org/putio/)
- put.io [:page_facing_up:](https://rclone.org/putio/) * Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
- Proton Drive [:page_facing_up:](https://rclone.org/protondrive/) * QingStor [:page_facing_up:](https://rclone.org/qingstor/)
- QingStor [:page_facing_up:](https://rclone.org/qingstor/) * Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
- Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu) * Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
- Quatrix [:page_facing_up:](https://rclone.org/quatrix/) * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
- Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/) * RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
- RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp) * rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
- rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net) * Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
- Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway) * Seafile [:page_facing_up:](https://rclone.org/seafile/)
- Seafile [:page_facing_up:](https://rclone.org/seafile/) * Seagate Lyve Cloud [:page_facing_up:](https://rclone.org/s3/#lyve)
- Seagate Lyve Cloud [:page_facing_up:](https://rclone.org/s3/#lyve) * SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
- SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs) * Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
- Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel) * SFTP [:page_facing_up:](https://rclone.org/sftp/)
- SFTP [:page_facing_up:](https://rclone.org/sftp/) * SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
- SMB / CIFS [:page_facing_up:](https://rclone.org/smb/) * StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
- StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath) * Storj [:page_facing_up:](https://rclone.org/storj/)
- Storj [:page_facing_up:](https://rclone.org/storj/) * SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
- SugarSync [:page_facing_up:](https://rclone.org/sugarsync/) * Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
- Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2) * Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
- Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos) * Uloz.to [:page_facing_up:](https://rclone.org/ulozto/)
- Uloz.to [:page_facing_up:](https://rclone.org/ulozto/) * Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
- Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi) * WebDAV [:page_facing_up:](https://rclone.org/webdav/)
- WebDAV [:page_facing_up:](https://rclone.org/webdav/) * Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
- Yandex Disk [:page_facing_up:](https://rclone.org/yandex/) * Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
- Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/) * Zata.ai [:page_facing_up:](https://rclone.org/s3/#Zata)
- Zata.ai [:page_facing_up:](https://rclone.org/s3/#Zata) * The local filesystem [:page_facing_up:](https://rclone.org/local/)
- The local filesystem [:page_facing_up:](https://rclone.org/local/)
Please see [the full list of all storage providers and their features](https://rclone.org/overview/) Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
@@ -123,54 +121,50 @@ Please see [the full list of all storage providers and their features](https://r
These backends adapt or modify other storage providers These backends adapt or modify other storage providers
- Alias: rename existing remotes [:page_facing_up:](https://rclone.org/alias/) * Alias: rename existing remotes [:page_facing_up:](https://rclone.org/alias/)
- Cache: cache remotes (DEPRECATED) [:page_facing_up:](https://rclone.org/cache/) * Cache: cache remotes (DEPRECATED) [:page_facing_up:](https://rclone.org/cache/)
- Chunker: split large files [:page_facing_up:](https://rclone.org/chunker/) * Chunker: split large files [:page_facing_up:](https://rclone.org/chunker/)
- Combine: combine multiple remotes into a directory tree [:page_facing_up:](https://rclone.org/combine/) * Combine: combine multiple remotes into a directory tree [:page_facing_up:](https://rclone.org/combine/)
- Compress: compress files [:page_facing_up:](https://rclone.org/compress/) * Compress: compress files [:page_facing_up:](https://rclone.org/compress/)
- Crypt: encrypt files [:page_facing_up:](https://rclone.org/crypt/) * Crypt: encrypt files [:page_facing_up:](https://rclone.org/crypt/)
- Hasher: hash files [:page_facing_up:](https://rclone.org/hasher/) * Hasher: hash files [:page_facing_up:](https://rclone.org/hasher/)
- Union: join multiple remotes to work together [:page_facing_up:](https://rclone.org/union/) * Union: join multiple remotes to work together [:page_facing_up:](https://rclone.org/union/)
## Features ## Features
- MD5/SHA-1 hashes checked at all times for file integrity * MD5/SHA-1 hashes checked at all times for file integrity
- Timestamps preserved on files * Timestamps preserved on files
- Partial syncs supported on a whole file basis * Partial syncs supported on a whole file basis
- [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed * [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
files * [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
- [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory * [Bisync](https://rclone.org/bisync/) (two way) to keep two directories in sync bidirectionally
identical * [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
- [Bisync](https://rclone.org/bisync/) (two way) to keep two directories in sync * Can sync to and from network, e.g. two different cloud accounts
bidirectionally * Optional large file chunking ([Chunker](https://rclone.org/chunker/))
- [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash * Optional transparent compression ([Compress](https://rclone.org/compress/))
equality * Optional encryption ([Crypt](https://rclone.org/crypt/))
- Can sync to and from network, e.g. two different cloud accounts * Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
- Optional large file chunking ([Chunker](https://rclone.org/chunker/)) * Multi-threaded downloads to local disk
- Optional transparent compression ([Compress](https://rclone.org/compress/)) * Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDAV/FTP/SFTP/DLNA
- Optional encryption ([Crypt](https://rclone.org/crypt/))
- Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
- Multi-threaded downloads to local disk
- Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files
over HTTP/WebDAV/FTP/SFTP/DLNA
## Installation & documentation ## Installation & documentation
Please see the [rclone website](https://rclone.org/) for: Please see the [rclone website](https://rclone.org/) for:
- [Installation](https://rclone.org/install/) * [Installation](https://rclone.org/install/)
- [Documentation & configuration](https://rclone.org/docs/) * [Documentation & configuration](https://rclone.org/docs/)
- [Changelog](https://rclone.org/changelog/) * [Changelog](https://rclone.org/changelog/)
- [FAQ](https://rclone.org/faq/) * [FAQ](https://rclone.org/faq/)
- [Storage providers](https://rclone.org/overview/) * [Storage providers](https://rclone.org/overview/)
- [Forum](https://forum.rclone.org/) * [Forum](https://forum.rclone.org/)
- ...and more * ...and more
## Downloads ## Downloads
- <https://rclone.org/downloads/> * https://rclone.org/downloads/
## License License
-------
This is free software under the terms of the MIT license (check the This is free software under the terms of the MIT license (check the
[COPYING file](/COPYING) included in this package). [COPYING file](/COPYING) included in this package).

View File

@@ -4,55 +4,52 @@ This file describes how to make the various kinds of releases
## Extra required software for making a release ## Extra required software for making a release
- [gh the github cli](https://github.com/cli/cli) for uploading packages * [gh the github cli](https://github.com/cli/cli) for uploading packages
- pandoc for making the html and man pages * pandoc for making the html and man pages
## Making a release ## Making a release
- git checkout master # see below for stable branch * git checkout master # see below for stable branch
- git pull # IMPORTANT * git pull # IMPORTANT
- git status - make sure everything is checked in * git status - make sure everything is checked in
- Check GitHub actions build for master is Green * Check GitHub actions build for master is Green
- make test # see integration test server or run locally * make test # see integration test server or run locally
- make tag * make tag
- edit docs/content/changelog.md # make sure to remove duplicate logs from point * edit docs/content/changelog.md # make sure to remove duplicate logs from point releases
releases * make tidy
- make tidy * make doc
- make doc * git status - to check for new man pages - git add them
- git status - to check for new man pages - git add them * git commit -a -v -m "Version v1.XX.0"
- git commit -a -v -m "Version v1.XX.0" * make retag
- make retag * git push origin # without --follow-tags so it doesn't push the tag if it fails
- git push origin # without --follow-tags so it doesn't push the tag if it fails * git push --follow-tags origin
- git push --follow-tags origin * # Wait for the GitHub builds to complete then...
- \# Wait for the GitHub builds to complete then... * make fetch_binaries
- make fetch_binaries * make tarball
- make tarball * make vendorball
- make vendorball * make sign_upload
- make sign_upload * make check_sign
- make check_sign * make upload
- make upload * make upload_website
- make upload_website * make upload_github
- make upload_github * make startdev # make startstable for stable branch
- make startdev # make startstable for stable branch * # announce with forum post, twitter post, patreon post
- \# announce with forum post, twitter post, patreon post
## Update dependencies ## Update dependencies
Early in the next release cycle update the dependencies. Early in the next release cycle update the dependencies.
- Review any pinned packages in go.mod and remove if possible * Review any pinned packages in go.mod and remove if possible
- `make updatedirect` * `make updatedirect`
- `make GOTAGS=cmount` * `make GOTAGS=cmount`
- `make compiletest` * `make compiletest`
- Fix anything which doesn't compile at this point and commit changes here * Fix anything which doesn't compile at this point and commit changes here
- `git commit -a -v -m "build: update all dependencies"` * `git commit -a -v -m "build: update all dependencies"`
If the `make updatedirect` upgrades the version of go in the `go.mod` If the `make updatedirect` upgrades the version of go in the `go.mod`
```text go 1.22.0
go 1.22.0
```
then go to manual mode. `go1.22` here is the lowest supported version then go to manual mode. `go1.22` here is the lowest supported version
in the `go.mod`. in the `go.mod`.
@@ -60,7 +57,7 @@ If `make updatedirect` added a `toolchain` directive then remove it.
We don't want to force a toolchain on our users. Linux packagers are We don't want to force a toolchain on our users. Linux packagers are
often using a version of Go that is a few versions out of date. often using a version of Go that is a few versions out of date.
```sh ```
go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
go get -d $(cat /tmp/potential-upgrades) go get -d $(cat /tmp/potential-upgrades)
go mod tidy -go=1.22 -compat=1.22 go mod tidy -go=1.22 -compat=1.22
@@ -70,7 +67,7 @@ If the `go mod tidy` fails use the output from it to remove the
package which can't be upgraded from `/tmp/potential-upgrades` when package which can't be upgraded from `/tmp/potential-upgrades` when
done done
```sh ```
git co go.mod go.sum git co go.mod go.sum
``` ```
@@ -80,12 +77,12 @@ Optionally upgrade the direct and indirect dependencies. This is very
likely to fail if the manual method was used abve - in that case likely to fail if the manual method was used abve - in that case
ignore it as it is too time consuming to fix. ignore it as it is too time consuming to fix.
- `make update` * `make update`
- `make GOTAGS=cmount` * `make GOTAGS=cmount`
- `make compiletest` * `make compiletest`
- roll back any updates which didn't compile * roll back any updates which didn't compile
- `git commit -a -v --amend` * `git commit -a -v --amend`
- **NB** watch out for this changing the default go version in `go.mod` * **NB** watch out for this changing the default go version in `go.mod`
Note that `make update` updates all direct and indirect dependencies Note that `make update` updates all direct and indirect dependencies
and there can occasionally be forwards compatibility problems with and there can occasionally be forwards compatibility problems with
@@ -102,9 +99,7 @@ The above procedure will not upgrade major versions, so v2 to v3.
However this tool can show which major versions might need to be However this tool can show which major versions might need to be
upgraded: upgraded:
```sh go run github.com/icholy/gomajor@latest list -major
go run github.com/icholy/gomajor@latest list -major
```
Expect API breakage when updating major versions. Expect API breakage when updating major versions.
@@ -112,9 +107,7 @@ Expect API breakage when updating major versions.
At some point after the release run At some point after the release run
```sh bin/tidy-beta v1.55
bin/tidy-beta v1.55
```
where the version number is that of a couple ago to remove old beta binaries. where the version number is that of a couple ago to remove old beta binaries.
@@ -124,64 +117,54 @@ If rclone needs a point release due to some horrendous bug:
Set vars Set vars
- BASE_TAG=v1.XX # e.g. v1.52 * BASE_TAG=v1.XX # e.g. v1.52
- NEW_TAG=${BASE_TAG}.Y # e.g. v1.52.1 * NEW_TAG=${BASE_TAG}.Y # e.g. v1.52.1
- echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1 * echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1
First make the release branch. If this is a second point release then First make the release branch. If this is a second point release then
this will be done already. this will be done already.
- git co -b ${BASE_TAG}-stable ${BASE_TAG}.0 * git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
- make startstable * make startstable
Now Now
- git co ${BASE_TAG}-stable * git co ${BASE_TAG}-stable
- git cherry-pick any fixes * git cherry-pick any fixes
- make startstable * make startstable
- Do the steps as above * Do the steps as above
- git co master * git co master
- `#` cherry pick the changes to the changelog - check the diff to make sure it * `#` cherry pick the changes to the changelog - check the diff to make sure it is correct
is correct * git checkout ${BASE_TAG}-stable docs/content/changelog.md
- git checkout ${BASE_TAG}-stable docs/content/changelog.md * git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
- git commit -a -v -m "Changelog updates from Version ${NEW_TAG}" * git push
- git push
## Sponsor logos ## Sponsor logos
If updating the website note that the sponsor logos have been moved out of the If updating the website note that the sponsor logos have been moved out of the main repository.
main repository.
You will need to checkout `/docs/static/img/logos` from <https://github.com/rclone/third-party-logos> You will need to checkout `/docs/static/img/logos` from https://github.com/rclone/third-party-logos
which is a private repo containing artwork from sponsors. which is a private repo containing artwork from sponsors.
## Update the website between releases ## Update the website between releases
Create an update website branch based off the last release Create an update website branch based off the last release
```sh git co -b update-website
git co -b update-website
```
If the branch already exists, double check there are no commits that need saving. If the branch already exists, double check there are no commits that need saving.
Now reset the branch to the last release Now reset the branch to the last release
```sh git reset --hard v1.64.0
git reset --hard v1.64.0
```
Create the changes, check them in, test with `make serve` then Create the changes, check them in, test with `make serve` then
```sh make upload_test_website
make upload_test_website
```
Check out <https://test.rclone.org> and when happy Check out https://test.rclone.org and when happy
```sh make upload_website
make upload_website
```
Cherry pick any changes back to master and the stable branch if it is active. Cherry pick any changes back to master and the stable branch if it is active.
@@ -189,14 +172,14 @@ Cherry pick any changes back to master and the stable branch if it is active.
To do a basic build of rclone's docker image to debug builds locally: To do a basic build of rclone's docker image to debug builds locally:
```sh ```
docker buildx build --load -t rclone/rclone:testing --progress=plain . docker buildx build --load -t rclone/rclone:testing --progress=plain .
docker run --rm rclone/rclone:testing version docker run --rm rclone/rclone:testing version
``` ```
To test the multipatform build To test the multipatform build
```sh ```
docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 . docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 .
``` ```
@@ -204,6 +187,6 @@ To make a full build then set the tags correctly and add `--push`
Note that you can't only build one architecture - you need to build them all. Note that you can't only build one architecture - you need to build them all.
```sh ```
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push . docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
``` ```

View File

@@ -271,9 +271,9 @@ type User struct {
ModifiedAt time.Time `json:"modified_at"` ModifiedAt time.Time `json:"modified_at"`
Language string `json:"language"` Language string `json:"language"`
Timezone string `json:"timezone"` Timezone string `json:"timezone"`
SpaceAmount float64 `json:"space_amount"` SpaceAmount int64 `json:"space_amount"`
SpaceUsed float64 `json:"space_used"` SpaceUsed int64 `json:"space_used"`
MaxUploadSize float64 `json:"max_upload_size"` MaxUploadSize int64 `json:"max_upload_size"`
Status string `json:"status"` Status string `json:"status"`
JobTitle string `json:"job_title"` JobTitle string `json:"job_title"`
Phone string `json:"phone"` Phone string `json:"phone"`

View File

@@ -1446,9 +1446,9 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
} }
} }
usage = &fs.Usage{ usage = &fs.Usage{
Total: fs.NewUsageValue(total), // quota of bytes that can be used Total: fs.NewUsageValue(int64(total)), // quota of bytes that can be used
Used: fs.NewUsageValue(used), // bytes in use Used: fs.NewUsageValue(int64(used)), // bytes in use
Free: fs.NewUsageValue(total - used), // bytes which can be uploaded before reaching the quota Free: fs.NewUsageValue(int64(total - used)), // bytes which can be uploaded before reaching the quota
} }
return usage, nil return usage, nil
} }

View File

@@ -163,16 +163,6 @@ Enabled by default. Use 0 to disable.`,
Help: "Disable TLS 1.3 (workaround for FTP servers with buggy TLS)", Help: "Disable TLS 1.3 (workaround for FTP servers with buggy TLS)",
Default: false, Default: false,
Advanced: true, Advanced: true,
}, {
Name: "allow_insecure_tls_ciphers",
Help: `Allow insecure TLS ciphers
Setting this flag will allow the usage of the following TLS ciphers in addition to the secure defaults:
- TLS_RSA_WITH_AES_128_GCM_SHA256
`,
Default: false,
Advanced: true,
}, { }, {
Name: "shut_timeout", Name: "shut_timeout",
Help: "Maximum time to wait for data connection closing status.", Help: "Maximum time to wait for data connection closing status.",
@@ -246,30 +236,29 @@ a write only folder.
// Options defines the configuration for this backend // Options defines the configuration for this backend
type Options struct { type Options struct {
Host string `config:"host"` Host string `config:"host"`
User string `config:"user"` User string `config:"user"`
Pass string `config:"pass"` Pass string `config:"pass"`
Port string `config:"port"` Port string `config:"port"`
TLS bool `config:"tls"` TLS bool `config:"tls"`
ExplicitTLS bool `config:"explicit_tls"` ExplicitTLS bool `config:"explicit_tls"`
TLSCacheSize int `config:"tls_cache_size"` TLSCacheSize int `config:"tls_cache_size"`
DisableTLS13 bool `config:"disable_tls13"` DisableTLS13 bool `config:"disable_tls13"`
AllowInsecureTLSCiphers bool `config:"allow_insecure_tls_ciphers"` Concurrency int `config:"concurrency"`
Concurrency int `config:"concurrency"` SkipVerifyTLSCert bool `config:"no_check_certificate"`
SkipVerifyTLSCert bool `config:"no_check_certificate"` DisableEPSV bool `config:"disable_epsv"`
DisableEPSV bool `config:"disable_epsv"` DisableMLSD bool `config:"disable_mlsd"`
DisableMLSD bool `config:"disable_mlsd"` DisableUTF8 bool `config:"disable_utf8"`
DisableUTF8 bool `config:"disable_utf8"` WritingMDTM bool `config:"writing_mdtm"`
WritingMDTM bool `config:"writing_mdtm"` ForceListHidden bool `config:"force_list_hidden"`
ForceListHidden bool `config:"force_list_hidden"` IdleTimeout fs.Duration `config:"idle_timeout"`
IdleTimeout fs.Duration `config:"idle_timeout"` CloseTimeout fs.Duration `config:"close_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"` ShutTimeout fs.Duration `config:"shut_timeout"`
ShutTimeout fs.Duration `config:"shut_timeout"` AskPassword bool `config:"ask_password"`
AskPassword bool `config:"ask_password"` Enc encoder.MultiEncoder `config:"encoding"`
Enc encoder.MultiEncoder `config:"encoding"` SocksProxy string `config:"socks_proxy"`
SocksProxy string `config:"socks_proxy"` HTTPProxy string `config:"http_proxy"`
HTTPProxy string `config:"http_proxy"` NoCheckUpload bool `config:"no_check_upload"`
NoCheckUpload bool `config:"no_check_upload"`
} }
// Fs represents a remote FTP server // Fs represents a remote FTP server
@@ -418,14 +407,6 @@ func (f *Fs) tlsConfig() *tls.Config {
if f.opt.DisableTLS13 { if f.opt.DisableTLS13 {
tlsConfig.MaxVersion = tls.VersionTLS12 tlsConfig.MaxVersion = tls.VersionTLS12
} }
if f.opt.AllowInsecureTLSCiphers {
var ids []uint16
// Read default ciphers
for _, cs := range tls.CipherSuites() {
ids = append(ids, cs.ID)
}
tlsConfig.CipherSuites = append(ids, tls.TLS_RSA_WITH_AES_128_GCM_SHA256)
}
} }
return tlsConfig return tlsConfig
} }

View File

@@ -371,9 +371,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return nil, err return nil, err
} }
return &fs.Usage{ return &fs.Usage{
Total: fs.NewUsageValue(info.Capacity), Total: fs.NewUsageValue(int64(info.Capacity)),
Used: fs.NewUsageValue(info.Used), Used: fs.NewUsageValue(int64(info.Used)),
Free: fs.NewUsageValue(info.Remaining), Free: fs.NewUsageValue(int64(info.Remaining)),
}, nil }, nil
} }

View File

@@ -946,9 +946,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return nil, fmt.Errorf("failed to get Mega Quota: %w", err) return nil, fmt.Errorf("failed to get Mega Quota: %w", err)
} }
usage := &fs.Usage{ usage := &fs.Usage{
Total: fs.NewUsageValue(q.Mstrg), // quota of bytes that can be used Total: fs.NewUsageValue(int64(q.Mstrg)), // quota of bytes that can be used
Used: fs.NewUsageValue(q.Cstrg), // bytes in use Used: fs.NewUsageValue(int64(q.Cstrg)), // bytes in use
Free: fs.NewUsageValue(q.Mstrg - q.Cstrg), // bytes which can be uploaded before reaching the quota Free: fs.NewUsageValue(int64(q.Mstrg - q.Cstrg)), // bytes which can be uploaded before reaching the quota
} }
return usage, nil return usage, nil
} }

View File

@@ -979,24 +979,6 @@ func (f *Fs) deleteObjects(ctx context.Context, IDs []string, useTrash bool) (er
return nil return nil
} }
// untrash a file or directory by ID
//
// If a name collision occurs in the destination folder, PikPak might automatically
// rename the restored item(s) by appending a numbered suffix. For example,
// foo.txt -> foo(1).txt or foo(2).txt if foo(1).txt already exists
func (f *Fs) untrashObjects(ctx context.Context, IDs []string) (err error) {
if len(IDs) == 0 {
return nil
}
req := api.RequestBatch{
IDs: IDs,
}
if err := f.requestBatchAction(ctx, "batchUntrash", &req); err != nil {
return fmt.Errorf("untrash object failed: %w", err)
}
return nil
}
// purgeCheck removes the root directory, if check is set then it // purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in // refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
@@ -1081,14 +1063,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
return f.waitTask(ctx, info.TaskID) return f.waitTask(ctx, info.TaskID)
} }
// Move the object to a new parent folder // Move the object
//
// Objects cannot be moved to their current folder.
// "file_move_or_copy_to_cur" (9): Please don't move or copy to current folder or sub folder
//
// If a name collision occurs in the destination folder, PikPak might automatically
// rename the moved item(s) by appending a numbered suffix. For example,
// foo.txt -> foo(1).txt or foo(2).txt if foo(1).txt already exists
func (f *Fs) moveObjects(ctx context.Context, IDs []string, dirID string) (err error) { func (f *Fs) moveObjects(ctx context.Context, IDs []string, dirID string) (err error) {
if len(IDs) == 0 { if len(IDs) == 0 {
return nil return nil
@@ -1104,12 +1079,6 @@ func (f *Fs) moveObjects(ctx context.Context, IDs []string, dirID string) (err e
} }
// renames the object // renames the object
//
// The new name must be different from the current name.
// "file_rename_to_same_name" (3): Name of file or folder is not changed
//
// Within the same folder, object names must be unique.
// "file_duplicated_name" (3): File name cannot be repeated
func (f *Fs) renameObject(ctx context.Context, ID, newName string) (info *api.File, err error) { func (f *Fs) renameObject(ctx context.Context, ID, newName string) (info *api.File, err error) {
req := api.File{ req := api.File{
Name: f.opt.Enc.FromStandardName(newName), Name: f.opt.Enc.FromStandardName(newName),
@@ -1194,13 +1163,18 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
// If it isn't possible then return fs.ErrorCantMove // If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) { func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object) srcObj, ok := src.(*Object)
if !ok { if !ok {
fs.Debugf(src, "Can't move - not same remote type") fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove return nil, fs.ErrorCantMove
} }
err = srcObj.readMetaData(ctx) err := srcObj.readMetaData(ctx)
if err != nil {
return nil, err
}
srcLeaf, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -1211,74 +1185,31 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
return nil, err return nil, err
} }
if srcObj.parent != dstParentID { if srcParentID != dstParentID {
// Perform the move. A numbered copy might be generated upon name collision. // Do the move
if err = f.moveObjects(ctx, []string{srcObj.id}, dstParentID); err != nil { if err = f.moveObjects(ctx, []string{srcObj.id}, dstParentID); err != nil {
return nil, fmt.Errorf("move: failed to move object %s to new parent %s: %w", srcObj.id, dstParentID, err) return nil, err
} }
defer func() {
if err != nil {
// FIXME: Restored file might have a numbered name if a conflict occurs
if mvErr := f.moveObjects(ctx, []string{srcObj.id}, srcObj.parent); mvErr != nil {
fs.Logf(f, "move: couldn't restore original object %q to %q after move failure: %v", dstObj.id, src.Remote(), mvErr)
}
}
}()
} }
// Manually update info of moved object to save API calls
dstObj.id = srcObj.id
dstObj.mimeType = srcObj.mimeType
dstObj.gcid = srcObj.gcid
dstObj.md5sum = srcObj.md5sum
dstObj.hasMetaData = true
// Find the moved object and any conflict object with the same name. if srcLeaf != dstLeaf {
var moved, conflict *api.File // Rename
_, err = f.listAll(ctx, dstParentID, api.KindOfFile, "false", func(item *api.File) bool { info, err := f.renameObject(ctx, srcObj.id, dstLeaf)
if item.ID == srcObj.id { if err != nil {
moved = item return nil, fmt.Errorf("move: couldn't rename moved file: %w", err)
if item.Name == dstLeaf {
return true
}
} else if item.Name == dstLeaf {
conflict = item
} }
// Stop early if both found return dstObj, dstObj.setMetaData(info)
return moved != nil && conflict != nil
})
if err != nil {
return nil, fmt.Errorf("move: couldn't locate moved file %q in destination directory %q: %w", srcObj.id, dstParentID, err)
} }
if moved == nil { return dstObj, nil
return nil, fmt.Errorf("move: moved file %q not found in destination", srcObj.id)
}
// If moved object already has the correct name, return
if moved.Name == dstLeaf {
return dstObj, dstObj.setMetaData(moved)
}
// If name collision, delete conflicting file first
if conflict != nil {
if err = f.deleteObjects(ctx, []string{conflict.ID}, true); err != nil {
return nil, fmt.Errorf("move: couldn't delete conflicting file: %w", err)
}
defer func() {
if err != nil {
if restoreErr := f.untrashObjects(ctx, []string{conflict.ID}); restoreErr != nil {
fs.Logf(f, "move: couldn't restore conflicting file: %v", restoreErr)
}
}
}()
}
info, err := f.renameObject(ctx, srcObj.id, dstLeaf)
if err != nil {
return nil, fmt.Errorf("move: couldn't rename moved file %q to %q: %w", dstObj.id, dstLeaf, err)
}
return dstObj, dstObj.setMetaData(info)
} }
// copy objects // copy objects
//
// Objects cannot be copied to their current folder.
// "file_move_or_copy_to_cur" (9): Please don't move or copy to current folder or sub folder
//
// If a name collision occurs in the destination folder, PikPak might automatically
// rename the copied item(s) by appending a numbered suffix. For example,
// foo.txt -> foo(1).txt or foo(2).txt if foo(1).txt already exists
func (f *Fs) copyObjects(ctx context.Context, IDs []string, dirID string) (err error) { func (f *Fs) copyObjects(ctx context.Context, IDs []string, dirID string) (err error) {
if len(IDs) == 0 { if len(IDs) == 0 {
return nil return nil
@@ -1302,13 +1233,13 @@ func (f *Fs) copyObjects(ctx context.Context, IDs []string, dirID string) (err e
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
// If it isn't possible then return fs.ErrorCantCopy // If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) { func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object) srcObj, ok := src.(*Object)
if !ok { if !ok {
fs.Debugf(src, "Can't copy - not same remote type") fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy return nil, fs.ErrorCantCopy
} }
err = srcObj.readMetaData(ctx) err := srcObj.readMetaData(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -1323,55 +1254,31 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
fs.Debugf(src, "Can't copy - same parent") fs.Debugf(src, "Can't copy - same parent")
return nil, fs.ErrorCantCopy return nil, fs.ErrorCantCopy
} }
// Check for possible conflicts: Pikpak creates numbered copies on name collision.
var conflict *api.File
_, srcLeaf := dircache.SplitPath(srcObj.remote)
if srcLeaf == dstLeaf {
if conflict, err = f.readMetaDataForPath(ctx, remote); err == nil {
// delete conflicting file
if err = f.deleteObjects(ctx, []string{conflict.ID}, true); err != nil {
return nil, fmt.Errorf("copy: couldn't delete conflicting file: %w", err)
}
defer func() {
if err != nil {
if restoreErr := f.untrashObjects(ctx, []string{conflict.ID}); restoreErr != nil {
fs.Logf(f, "copy: couldn't restore conflicting file: %v", restoreErr)
}
}
}()
} else if err != fs.ErrorObjectNotFound {
return nil, err
}
} else {
dstDir, _ := dircache.SplitPath(remote)
dstObj.remote = path.Join(dstDir, srcLeaf)
if conflict, err = f.readMetaDataForPath(ctx, dstObj.remote); err == nil {
tmpName := conflict.Name + "-rclone-copy-" + random.String(8)
if _, err = f.renameObject(ctx, conflict.ID, tmpName); err != nil {
return nil, fmt.Errorf("copy: couldn't rename conflicting file: %w", err)
}
defer func() {
if _, renameErr := f.renameObject(ctx, conflict.ID, conflict.Name); renameErr != nil {
fs.Logf(f, "copy: couldn't rename conflicting file back to original: %v", renameErr)
}
}()
} else if err != fs.ErrorObjectNotFound {
return nil, err
}
}
// Copy the object // Copy the object
if err := f.copyObjects(ctx, []string{srcObj.id}, dstParentID); err != nil { if err := f.copyObjects(ctx, []string{srcObj.id}, dstParentID); err != nil {
return nil, fmt.Errorf("couldn't copy file: %w", err) return nil, fmt.Errorf("couldn't copy file: %w", err)
} }
err = dstObj.readMetaData(ctx) // Update info of the copied object with new parent but source name
if err != nil { if info, err := dstObj.fs.readMetaDataForPath(ctx, srcObj.remote); err != nil {
return nil, fmt.Errorf("copy: couldn't locate copied file: %w", err) return nil, fmt.Errorf("copy: couldn't locate copied file: %w", err)
} else if err = dstObj.setMetaData(info); err != nil {
return nil, err
}
// Can't copy and change name in one step so we have to check if we have
// the correct name after copy
srcLeaf, _, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
if err != nil {
return nil, err
} }
if srcLeaf != dstLeaf { if srcLeaf != dstLeaf {
return f.Move(ctx, dstObj, remote) // Rename
info, err := f.renameObject(ctx, dstObj.id, dstLeaf)
if err != nil {
return nil, fmt.Errorf("copy: couldn't rename copied file: %w", err)
}
return dstObj, dstObj.setMetaData(info)
} }
return dstObj, nil return dstObj, nil
} }

View File

@@ -793,7 +793,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return nil, err return nil, err
} }
usage = &fs.Usage{ usage = &fs.Usage{
Used: fs.NewUsageValue(info.SpaceUsed), Used: fs.NewUsageValue(int64(info.SpaceUsed)),
} }
return usage, nil return usage, nil
} }

View File

@@ -149,9 +149,6 @@ var providerOption = fs.Option{
}, { }, {
Value: "Outscale", Value: "Outscale",
Help: "OUTSCALE Object Storage (OOS)", Help: "OUTSCALE Object Storage (OOS)",
}, {
Value: "OVHcloud",
Help: "OVHcloud Object Storage",
}, { }, {
Value: "Petabox", Value: "Petabox",
Help: "Petabox Object Storage", Help: "Petabox Object Storage",
@@ -538,59 +535,6 @@ func init() {
Value: "ap-northeast-1", Value: "ap-northeast-1",
Help: "Tokyo, Japan", Help: "Tokyo, Japan",
}}, }},
}, {
// References:
// https://help.ovhcloud.com/csm/en-public-cloud-storage-s3-location?id=kb_article_view&sysparm_article=KB0047384
// https://support.us.ovhcloud.com/hc/en-us/articles/10667991081107-Endpoints-and-Object-Storage-Geoavailability
Name: "region",
Help: "Region where your bucket will be created and your data stored.\n",
Provider: "OVHcloud",
Examples: []fs.OptionExample{{
Value: "gra",
Help: "Gravelines, France",
}, {
Value: "rbx",
Help: "Roubaix, France",
}, {
Value: "sbg",
Help: "Strasbourg, France",
}, {
Value: "eu-west-par",
Help: "Paris, France (3AZ)",
}, {
Value: "de",
Help: "Frankfurt, Germany",
}, {
Value: "uk",
Help: "London, United Kingdom",
}, {
Value: "waw",
Help: "Warsaw, Poland",
}, {
Value: "bhs",
Help: "Beauharnois, Canada",
}, {
Value: "ca-east-tor",
Help: "Toronto, Canada",
}, {
Value: "sgp",
Help: "Singapore",
}, {
Value: "ap-southeast-syd",
Help: "Sydney, Australia",
}, {
Value: "ap-south-mum",
Help: "Mumbai, India",
}, {
Value: "us-east-va",
Help: "Vint Hill, Virginia, USA",
}, {
Value: "us-west-or",
Help: "Hillsboro, Oregon, USA",
}, {
Value: "rbx-archive",
Help: "Roubaix, France (Cold Archive)",
}},
}, { }, {
Name: "region", Name: "region",
Help: "Region where your bucket will be created and your data stored.\n", Help: "Region where your bucket will be created and your data stored.\n",
@@ -643,7 +587,7 @@ func init() {
}, { }, {
Name: "region", Name: "region",
Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.", Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,FlashBlade,IONOS,Petabox,Liara,Linode,Magalu,OVHcloud,Qiniu,RackCorp,Scaleway,Selectel,Storj,Synology,TencentCOS,HuaweiOBS,IDrive,Mega,Zata", Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,FlashBlade,IONOS,Petabox,Liara,Linode,Magalu,Qiniu,RackCorp,Scaleway,Selectel,Storj,Synology,TencentCOS,HuaweiOBS,IDrive,Mega,Zata",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "", Value: "",
Help: "Use this if unsure.\nWill use v4 signatures and an empty region.", Help: "Use this if unsure.\nWill use v4 signatures and an empty region.",
@@ -1230,71 +1174,6 @@ func init() {
Value: "obs.ru-northwest-2.myhuaweicloud.com", Value: "obs.ru-northwest-2.myhuaweicloud.com",
Help: "RU-Moscow2", Help: "RU-Moscow2",
}}, }},
}, {
Name: "endpoint",
Help: "Endpoint for OVHcloud Object Storage.",
Provider: "OVHcloud",
Examples: []fs.OptionExample{{
Value: "s3.gra.io.cloud.ovh.net",
Help: "OVHcloud Gravelines, France",
Provider: "OVHcloud",
}, {
Value: "s3.rbx.io.cloud.ovh.net",
Help: "OVHcloud Roubaix, France",
Provider: "OVHcloud",
}, {
Value: "s3.sbg.io.cloud.ovh.net",
Help: "OVHcloud Strasbourg, France",
Provider: "OVHcloud",
}, {
Value: "s3.eu-west-par.io.cloud.ovh.net",
Help: "OVHcloud Paris, France (3AZ)",
Provider: "OVHcloud",
}, {
Value: "s3.de.io.cloud.ovh.net",
Help: "OVHcloud Frankfurt, Germany",
Provider: "OVHcloud",
}, {
Value: "s3.uk.io.cloud.ovh.net",
Help: "OVHcloud London, United Kingdom",
Provider: "OVHcloud",
}, {
Value: "s3.waw.io.cloud.ovh.net",
Help: "OVHcloud Warsaw, Poland",
Provider: "OVHcloud",
}, {
Value: "s3.bhs.io.cloud.ovh.net",
Help: "OVHcloud Beauharnois, Canada",
Provider: "OVHcloud",
}, {
Value: "s3.ca-east-tor.io.cloud.ovh.net",
Help: "OVHcloud Toronto, Canada",
Provider: "OVHcloud",
}, {
Value: "s3.sgp.io.cloud.ovh.net",
Help: "OVHcloud Singapore",
Provider: "OVHcloud",
}, {
Value: "s3.ap-southeast-syd.io.cloud.ovh.net",
Help: "OVHcloud Sydney, Australia",
Provider: "OVHcloud",
}, {
Value: "s3.ap-south-mum.io.cloud.ovh.net",
Help: "OVHcloud Mumbai, India",
Provider: "OVHcloud",
}, {
Value: "s3.us-east-va.io.cloud.ovh.us",
Help: "OVHcloud Vint Hill, Virginia, USA",
Provider: "OVHcloud",
}, {
Value: "s3.us-west-or.io.cloud.ovh.us",
Help: "OVHcloud Hillsboro, Oregon, USA",
Provider: "OVHcloud",
}, {
Value: "s3.rbx-archive.io.cloud.ovh.net",
Help: "OVHcloud Roubaix, France (Cold Archive)",
Provider: "OVHcloud",
}},
}, { }, {
Name: "endpoint", Name: "endpoint",
Help: "Endpoint for Scaleway Object Storage.", Help: "Endpoint for Scaleway Object Storage.",
@@ -1532,7 +1411,7 @@ func init() {
}, { }, {
Name: "endpoint", Name: "endpoint",
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.", Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,LyveCloud,Magalu,OVHcloud,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox,Zata", Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,LyveCloud,Magalu,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox,Zata",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "objects-us-east-1.dream.io", Value: "objects-us-east-1.dream.io",
Help: "Dream Objects endpoint", Help: "Dream Objects endpoint",
@@ -2067,7 +1946,7 @@ func init() {
}, { }, {
Name: "location_constraint", Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.", Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,FlashBlade,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Outscale,OVHcloud,Qiniu,RackCorp,Scaleway,Selectel,StackPath,Storj,TencentCOS,Petabox,Mega", Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,FlashBlade,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Outscale,Qiniu,RackCorp,Scaleway,Selectel,StackPath,Storj,TencentCOS,Petabox,Mega",
}, { }, {
Name: "acl", Name: "acl",
Help: `Canned ACL used when creating buckets and storing or copying objects. Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -3628,7 +3507,7 @@ func setQuirks(opt *Options) {
useUnsignedPayload = false // AWS has trailer support which means it adds checksums in the trailer without seeking useUnsignedPayload = false // AWS has trailer support which means it adds checksums in the trailer without seeking
case "Alibaba": case "Alibaba":
useMultipartEtag = false // Alibaba seems to calculate multipart Etags differently from AWS useMultipartEtag = false // Alibaba seems to calculate multipart Etags differently from AWS
useAlreadyExists = false // returns BucketAlreadyExists useAlreadyExists = true // returns 200 OK
case "HuaweiOBS": case "HuaweiOBS":
// Huawei OBS PFS is not support listObjectV2, and if turn on the urlEncodeListing, marker will not work and keep list same page forever. // Huawei OBS PFS is not support listObjectV2, and if turn on the urlEncodeListing, marker will not work and keep list same page forever.
urlEncodeListings = false urlEncodeListings = false
@@ -3710,8 +3589,6 @@ func setQuirks(opt *Options) {
useAlreadyExists = false // untested useAlreadyExists = false // untested
case "Outscale": case "Outscale":
virtualHostStyle = false virtualHostStyle = false
case "OVHcloud":
// No quirks
case "RackCorp": case "RackCorp":
// No quirks // No quirks
useMultipartEtag = false // untested useMultipartEtag = false // untested

View File

@@ -1863,9 +1863,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
free := vfsStats.FreeSpace() free := vfsStats.FreeSpace()
used := total - free used := total - free
return &fs.Usage{ return &fs.Usage{
Total: fs.NewUsageValue(total), Total: fs.NewUsageValue(int64(total)),
Used: fs.NewUsageValue(used), Used: fs.NewUsageValue(int64(used)),
Free: fs.NewUsageValue(free), Free: fs.NewUsageValue(int64(free)),
}, nil }, nil
} else if err != nil { } else if err != nil {
if errors.Is(err, os.ErrNotExist) { if errors.Is(err, os.ErrNotExist) {

View File

@@ -494,11 +494,11 @@ func (f *Fs) About(ctx context.Context) (_ *fs.Usage, err error) {
return nil, err return nil, err
} }
bs := stat.BlockSize() bs := int64(stat.BlockSize())
usage := &fs.Usage{ usage := &fs.Usage{
Total: fs.NewUsageValue(bs * stat.TotalBlockCount()), Total: fs.NewUsageValue(bs * int64(stat.TotalBlockCount())),
Used: fs.NewUsageValue(bs * (stat.TotalBlockCount() - stat.FreeBlockCount())), Used: fs.NewUsageValue(bs * int64(stat.TotalBlockCount()-stat.FreeBlockCount())),
Free: fs.NewUsageValue(bs * stat.AvailableBlockCount()), Free: fs.NewUsageValue(bs * int64(stat.AvailableBlockCount())),
} }
return usage, nil return usage, nil
} }

3
bin/go-test-cache/go.mod Normal file
View File

@@ -0,0 +1,3 @@
module go-test-cache
go 1.24

123
bin/go-test-cache/main.go Normal file
View File

@@ -0,0 +1,123 @@
// This code was copied from:
// https://github.com/fastly/cli/blob/main/scripts/go-test-cache/main.go
// which in turn is based on the following script and was generated using AI.
// https://github.com/airplanedev/blog-examples/blob/main/go-test-caching/update_file_timestamps.py?ref=airplane.ghost.io
//
// REFERENCE ARTICLE:
// https://web.archive.org/web/20240308061717/https://www.airplane.dev/blog/caching-golang-tests-in-ci
//
// It updates the mtime of the files to a mtime dervived from the sha1 hash of their contents.
package main
import (
"crypto/sha1"
"io"
"log"
"os"
"path/filepath"
"sort"
"strings"
"time"
)
const (
bufSize = 65536
baseDate = 1684178360
timeFormat = "2006-01-02 15:04:05"
)
func main() {
repoRoot := "."
allDirs := make([]string, 0)
err := filepath.Walk(repoRoot, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
dirPath := filepath.Join(repoRoot, path)
relPath, _ := filepath.Rel(repoRoot, dirPath)
if strings.HasPrefix(relPath, ".") {
return nil
}
allDirs = append(allDirs, dirPath)
} else {
filePath := filepath.Join(repoRoot, path)
relPath, _ := filepath.Rel(repoRoot, filePath)
if strings.HasPrefix(relPath, ".") {
return nil
}
sha1Hash, err := getFileSHA1(filePath)
if err != nil {
return err
}
modTime := getModifiedTime(sha1Hash)
log.Printf("Setting modified time of file %s to %s\n", relPath, modTime.Format(timeFormat))
err = os.Chtimes(filePath, modTime, modTime)
if err != nil {
return err
}
}
return nil
})
if err != nil {
log.Fatal("Error:", err)
}
sort.Slice(allDirs, func(i, j int) bool {
return len(allDirs[i]) > len(allDirs[j]) || (len(allDirs[i]) == len(allDirs[j]) && allDirs[i] < allDirs[j])
})
for _, dirPath := range allDirs {
relPath, _ := filepath.Rel(repoRoot, dirPath)
log.Printf("Setting modified time of directory %s to %s\n", relPath, time.Unix(baseDate, 0).Format(timeFormat))
err := os.Chtimes(dirPath, time.Unix(baseDate, 0), time.Unix(baseDate, 0))
if err != nil {
log.Fatal("Error:", err)
}
}
log.Println("Done")
}
func getFileSHA1(filePath string) (string, error) {
file, err := os.Open(filePath)
if err != nil {
return "", err
}
defer file.Close()
// G401: Use of weak cryptographic primitive
// Disabling as the hash is used not for security reasons.
// The hash is used as a cache key to improve test run times.
// #nosec
// nosemgrep: go.lang.security.audit.crypto.use_of_weak_crypto.use-of-sha1
hash := sha1.New()
if _, err := io.CopyBuffer(hash, file, make([]byte, bufSize)); err != nil {
return "", err
}
return string(hash.Sum(nil)), nil
}
func getModifiedTime(sha1Hash string) time.Time {
hashBytes := []byte(sha1Hash)
lastFiveBytes := hashBytes[:5]
lastFiveValue := int64(0)
for _, b := range lastFiveBytes {
lastFiveValue = (lastFiveValue << 8) + int64(b)
}
modTime := baseDate - (lastFiveValue % 10000)
return time.Unix(modTime, 0)
}

View File

@@ -57,11 +57,11 @@ def make_out(data, indent=""):
return return
del(data[category]) del(data[category])
if indent != "" and len(lines) == 1: if indent != "" and len(lines) == 1:
out_lines.append(indent+"- " + title+": " + lines[0]) out_lines.append(indent+"* " + title+": " + lines[0])
return return
out_lines.append(indent+"- " + title) out_lines.append(indent+"* " + title)
for line in lines: for line in lines:
out_lines.append(indent+" - " + line) out_lines.append(indent+" * " + line)
return out, out_lines return out, out_lines
@@ -129,12 +129,12 @@ def main():
new_features[name].append(message) new_features[name].append(message)
# Output new features # Output new features
out, new_features_lines = make_out(new_features, indent=" ") out, new_features_lines = make_out(new_features, indent=" ")
for name in sorted(new_features.keys()): for name in sorted(new_features.keys()):
out(name) out(name)
# Output bugfixes # Output bugfixes
out, bugfix_lines = make_out(bugfixes, indent=" ") out, bugfix_lines = make_out(bugfixes, indent=" ")
for name in sorted(bugfixes.keys()): for name in sorted(bugfixes.keys()):
out(name) out(name)
@@ -163,15 +163,15 @@ def main():
[See commits](https://github.com/rclone/rclone/compare/%(version)s...%(next_version)s) [See commits](https://github.com/rclone/rclone/compare/%(version)s...%(next_version)s)
- New backends * New backends
- New commands * New commands
- New Features * New Features
%(new_features)s %(new_features)s
- Bug Fixes * Bug Fixes
%(bugfixes)s %(bugfixes)s
%(backend_changes)s""" % locals()) %(backend_changes)s""" % locals())
sys.stdout.write(old_tail) sys.stdout.write(old_tail)
if __name__ == "__main__": if __name__ == "__main__":
main() main()

View File

@@ -23,7 +23,7 @@ def add_email(name, email):
""" """
print("Adding %s <%s>" % (name, email)) print("Adding %s <%s>" % (name, email))
with open(AUTHORS, "a+") as fd: with open(AUTHORS, "a+") as fd:
print("- %s <%s>" % (name, email), file=fd) print(" * %s <%s>" % (name, email), file=fd)
subprocess.check_call(["git", "commit", "-m", "Add %s to contributors" % name, AUTHORS]) subprocess.check_call(["git", "commit", "-m", "Add %s to contributors" % name, AUTHORS])
def main(): def main():

View File

@@ -316,10 +316,10 @@ See the [VFS File Caching](#vfs-file-caching) section for more info.
When using NFS mount on macOS, if you don't specify |--vfs-cache-mode| When using NFS mount on macOS, if you don't specify |--vfs-cache-mode|
the mount point will be read-only. the mount point will be read-only.
Bucket-based remotes - Azure Blob, Swift, S3, Google Cloud Storage and B2 - The bucket-based remotes (e.g. Swift, S3, Google Compute Storage, B2)
can't store empty directories. Of these, only Azure Blob, Google Cloud Storage do not support the concept of empty directories, so empty
and S3 can preserve them when you add `--xxx-directory_markers`; otherwise, directories will have a tendency to disappear once they fall out of
empty directories will vanish once they drop out of the directory cache. the directory cache.
When `rclone mount` is invoked on Unix with `--daemon` flag, the main rclone When `rclone mount` is invoked on Unix with `--daemon` flag, the main rclone
program will wait for the background mount to become ready or until the timeout program will wait for the background mount to become ready or until the timeout

View File

@@ -158,14 +158,13 @@ WebDAV or S3, that work out of the box.)
{{< provider name="Microsoft OneDrive" home="https://onedrive.live.com/" config="/onedrive/" >}} {{< provider name="Microsoft OneDrive" home="https://onedrive.live.com/" config="/onedrive/" >}}
{{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}} {{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}}
{{< provider name="Nextcloud" home="https://nextcloud.com/" config="/webdav/#nextcloud" >}} {{< provider name="Nextcloud" home="https://nextcloud.com/" config="/webdav/#nextcloud" >}}
{{< provider name="OVH" home="https://www.ovh.co.uk/public-cloud/storage/object-storage/" config="/swift/" >}}
{{< provider name="Blomp Cloud Storage" home="https://rclone.org/swift/" config="/swift/" >}} {{< provider name="Blomp Cloud Storage" home="https://rclone.org/swift/" config="/swift/" >}}
{{< provider name="OpenDrive" home="https://www.opendrive.com/" config="/opendrive/" >}} {{< provider name="OpenDrive" home="https://www.opendrive.com/" config="/opendrive/" >}}
{{< provider name="OpenStack Swift" home="https://docs.openstack.org/swift/latest/" config="/swift/" >}} {{< provider name="OpenStack Swift" home="https://docs.openstack.org/swift/latest/" config="/swift/" >}}
{{< provider name="Oracle Cloud Storage Swift" home="https://docs.oracle.com/en-us/iaas/integration/doc/configure-object-storage.html" config="/swift/" >}} {{< provider name="Oracle Cloud Storage Swift" home="https://docs.oracle.com/en-us/iaas/integration/doc/configure-object-storage.html" config="/swift/" >}}
{{< provider name="Oracle Object Storage" home="https://www.oracle.com/cloud/storage/object-storage" config="/oracleobjectstorage/" >}} {{< provider name="Oracle Object Storage" home="https://www.oracle.com/cloud/storage/object-storage" config="/oracleobjectstorage/" >}}
{{< provider name="Outscale" home="https://en.outscale.com/storage/outscale-object-storage/" config="/s3/#outscale" >}} {{< provider name="Outscale" home="https://en.outscale.com/storage/outscale-object-storage/" config="/s3/#outscale" >}}
{{< provider name="OVHcloud Object Storage (Swift)" home="https://www.ovhcloud.com/en/public-cloud/object-storage/" config="/swift/" >}}
{{< provider name="OVHcloud Object Storage (S3-compatible)" home="https://www.ovhcloud.com/en/public-cloud/object-storage/" config="/s3/#ovhcloud" >}}
{{< provider name="ownCloud" home="https://owncloud.org/" config="/webdav/#owncloud" >}} {{< provider name="ownCloud" home="https://owncloud.org/" config="/webdav/#owncloud" >}}
{{< provider name="pCloud" home="https://www.pcloud.com/" config="/pcloud/" >}} {{< provider name="pCloud" home="https://www.pcloud.com/" config="/pcloud/" >}}
{{< provider name="Petabox" home="https://petabox.io/" config="/s3/#petabox" >}} {{< provider name="Petabox" home="https://petabox.io/" config="/s3/#petabox" >}}

File diff suppressed because it is too large Load Diff

View File

@@ -41,5 +41,6 @@ flag/option).
Bugs are stored in rclone's GitHub project: Bugs are stored in rclone's GitHub project:
- [Reported bugs](https://github.com/rclone/rclone/issues?q=is%3Aopen+is%3Aissue+label%3Abug) * [Reported bugs](https://github.com/rclone/rclone/issues?q=is%3Aopen+is%3Aissue+label%3Abug)
- [Known issues](https://github.com/rclone/rclone/issues?q=is%3Aopen+is%3Aissue+milestone%3A%22Known+Problem%22) * [Known issues](https://github.com/rclone/rclone/issues?q=is%3Aopen+is%3Aissue+milestone%3A%22Known+Problem%22)

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -15,7 +15,7 @@ image](https://securebuild.com/images/rclone) through our partner
## Release {{% version %}} OS requirements {#osrequirements} ## Release {{% version %}} OS requirements {#osrequirements}
| OS | Minimum Version | | OS | Minimum Version |
|:-------:|:-------:| |:-------:|:-------:|
| Linux | Kernel 3.2 | | Linux | Kernel 3.2 |
| macOS | 11 (Big Sur) | | macOS | 11 (Big Sur) |
@@ -23,10 +23,7 @@ image](https://securebuild.com/images/rclone) through our partner
| FreeBSD | 12.2 | | FreeBSD | 12.2 |
| OpenBSD | 6.9 | | OpenBSD | 6.9 |
These requirements come from the Go version that rclone is compiled with and are These requirements come from the Go version that rclone is compiled with and are simplified from [minimum requirements](https://go.dev/wiki/MinimumRequirements) and other [platform specific information](https://go.dev/wiki/#platform-specific-information) in the Go Wiki.
simplified from [minimum requirements](https://go.dev/wiki/MinimumRequirements)
and other [platform specific information](https://go.dev/wiki/#platform-specific-information)
in the Go Wiki.
## Release {{% version %}} {#release} ## Release {{% version %}} {#release}
@@ -41,10 +38,8 @@ in the Go Wiki.
| MIPS - Big Endian | - | - | {{< download linux mips >}} | {{< download linux mips deb >}} | {{< download linux mips rpm >}} | - | - | - | - | - | | MIPS - Big Endian | - | - | {{< download linux mips >}} | {{< download linux mips deb >}} | {{< download linux mips rpm >}} | - | - | - | - | - |
| MIPS - Little Endian | - | - | {{< download linux mipsle >}} | {{< download linux mipsle deb >}} | {{< download linux mipsle rpm >}} | - | - | - | - | - | | MIPS - Little Endian | - | - | {{< download linux mipsle >}} | {{< download linux mipsle deb >}} | {{< download linux mipsle rpm >}} | - | - | - | - | - |
<!-- markdownlint-disable-next-line no-bare-urls line-length -->
You can also find a [mirror of the downloads on GitHub](https://github.com/rclone/rclone/releases/tag/{{< version >}}). You can also find a [mirror of the downloads on GitHub](https://github.com/rclone/rclone/releases/tag/{{< version >}}).
<!-- markdownlint-disable-next-line no-bare-urls -->
See also [Android builds](https://beta.rclone.org/{{% version %}}/testbuilds/). See also [Android builds](https://beta.rclone.org/{{% version %}}/testbuilds/).
These are built as part of the official release, but haven't been These are built as part of the official release, but haven't been
adopted as first class builds yet. adopted as first class builds yet.
@@ -52,19 +47,15 @@ adopted as first class builds yet.
See [the release signing docs](/release_signing/) for how to verify See [the release signing docs](/release_signing/) for how to verify
signatures on the release. signatures on the release.
## Script download and install ## Script download and install ##
To install rclone on Linux/macOS/BSD systems, run: To install rclone on Linux/macOS/BSD systems, run:
```sh sudo -v ; curl https://rclone.org/install.sh | sudo bash
sudo -v ; curl https://rclone.org/install.sh | sudo bash
```
For beta installation, run: For beta installation, run:
```sh sudo -v ; curl https://rclone.org/install.sh | sudo bash -s beta
sudo -v ; curl https://rclone.org/install.sh | sudo bash -s beta
```
Note that this script checks the version of rclone installed first and Note that this script checks the version of rclone installed first and
won't re-download if not needed. won't re-download if not needed.
@@ -74,15 +65,11 @@ won't re-download if not needed.
[Beta releases](https://beta.rclone.org) are generated from each commit [Beta releases](https://beta.rclone.org) are generated from each commit
to master. Note these are named like to master. Note these are named like
```text {Version Tag}.beta.{Commit Number}.{Git Commit Hash}
{Version Tag}.beta.{Commit Number}.{Git Commit Hash}
```
e.g. e.g.
```text v1.53.0-beta.4677.b657a2204
v1.53.0-beta.4677.b657a2204
```
The `Version Tag` is the version that the beta release will become The `Version Tag` is the version that the beta release will become
when it is released. You can match the `Git Commit Hash` up with the when it is released. You can match the `Git Commit Hash` up with the
@@ -92,15 +79,11 @@ and will normally be at the end of the list.
Some beta releases may have a branch name also: Some beta releases may have a branch name also:
```text {Version Tag}-beta.{Commit Number}.{Git Commit Hash}.{Branch Name}
{Version Tag}-beta.{Commit Number}.{Git Commit Hash}.{Branch Name}
```
e.g. e.g.
```text v1.53.0-beta.4677.b657a2204.semver
v1.53.0-beta.4677.b657a2204.semver
```
The presence of `Branch Name` indicates that this is a feature under The presence of `Branch Name` indicates that this is a feature under
development which will at some point be merged into the normal betas development which will at some point be merged into the normal betas
@@ -132,11 +115,10 @@ script) from a URL which doesn't change then you can use these links.
## Older Downloads ## Older Downloads
Older downloads can be found at <https://downloads.rclone.org/> Older downloads can be found [here](https://downloads.rclone.org/).
The latest `rclone` version working for: The latest `rclone` version working for:
| OS | Maximum rclone version |
| OS | Maximum rclone version |
|:-------:|:-------:| |:-------:|:-------:|
| Windows 7 | v1.63.1 | | Windows 7 | v1.63.1 |
| Windows Server 2008 | v1.63.1 | | Windows Server 2008 | v1.63.1 |

View File

@@ -2,16 +2,15 @@
title: "FAQ" title: "FAQ"
description: "Rclone Frequently Asked Questions" description: "Rclone Frequently Asked Questions"
--- ---
<!-- markdownlint-disable heading-increment -->
# Frequently Asked Questions # Frequently Asked Questions
### Do all cloud storage systems support all rclone commands ### Do all cloud storage systems support all rclone commands ###
Yes they do. All the rclone commands (e.g. `sync`, `copy`, etc.) will Yes they do. All the rclone commands (e.g. `sync`, `copy`, etc.) will
work on all the remote storage systems. work on all the remote storage systems.
### Can I copy the config from one machine to another ### Can I copy the config from one machine to another ###
Sure! Rclone stores all of its config in a single file. If you want Sure! Rclone stores all of its config in a single file. If you want
to find this file, run `rclone config file` which will tell you where to find this file, run `rclone config file` which will tell you where
@@ -19,7 +18,7 @@ it is.
See the [remote setup docs](/remote_setup/) for more info. See the [remote setup docs](/remote_setup/) for more info.
### How do I configure rclone on a remote / headless box with no browser? ### How do I configure rclone on a remote / headless box with no browser? ###
This has now been documented in its own [remote setup page](/remote_setup/). This has now been documented in its own [remote setup page](/remote_setup/).
@@ -33,11 +32,11 @@ If you need to configure a remote, see the [config help docs](/docs/#configure).
If you are using rclone entirely with [on the fly remotes](/docs/#backend-path-to-dir), If you are using rclone entirely with [on the fly remotes](/docs/#backend-path-to-dir),
you can create an empty config file to get rid of this notice, for example: you can create an empty config file to get rid of this notice, for example:
```sh ```
rclone config touch rclone config touch
``` ```
### Can rclone sync directly from drive to s3 ### Can rclone sync directly from drive to s3 ###
Rclone can sync between two remote cloud storage systems just fine. Rclone can sync between two remote cloud storage systems just fine.
@@ -48,16 +47,15 @@ The syncs would be incremental (on a file by file basis).
e.g. e.g.
```sh rclone sync --interactive drive:Folder s3:bucket
rclone sync --interactive drive:Folder s3:bucket
```
### Using rclone from multiple locations at the same time
### Using rclone from multiple locations at the same time ###
You can use rclone from multiple places at the same time if you choose You can use rclone from multiple places at the same time if you choose
different subdirectory for the output, e.g. different subdirectory for the output, e.g.
```sh ```
Server A> rclone sync --interactive /tmp/whatever remote:ServerA Server A> rclone sync --interactive /tmp/whatever remote:ServerA
Server B> rclone sync --interactive /tmp/whatever remote:ServerB Server B> rclone sync --interactive /tmp/whatever remote:ServerB
``` ```
@@ -65,7 +63,7 @@ Server B> rclone sync --interactive /tmp/whatever remote:ServerB
If you sync to the same directory then you should use rclone copy If you sync to the same directory then you should use rclone copy
otherwise the two instances of rclone may delete each other's files, e.g. otherwise the two instances of rclone may delete each other's files, e.g.
```sh ```
Server A> rclone copy /tmp/whatever remote:Backup Server A> rclone copy /tmp/whatever remote:Backup
Server B> rclone copy /tmp/whatever remote:Backup Server B> rclone copy /tmp/whatever remote:Backup
``` ```
@@ -74,7 +72,7 @@ The file names you upload from Server A and Server B should be
different in this case, otherwise some file systems (e.g. Drive) may different in this case, otherwise some file systems (e.g. Drive) may
make duplicates. make duplicates.
### Why doesn't rclone support partial transfers / binary diffs like rsync? ### Why doesn't rclone support partial transfers / binary diffs like rsync? ###
Rclone stores each file you transfer as a native object on the remote Rclone stores each file you transfer as a native object on the remote
cloud storage system. This means that you can see the files you cloud storage system. This means that you can see the files you
@@ -96,12 +94,12 @@ it would be possible to make partial downloads work. However to make
this work efficiently this would require storing a significant amount this work efficiently this would require storing a significant amount
of metadata, which breaks the desired 1:1 mapping of files to objects. of metadata, which breaks the desired 1:1 mapping of files to objects.
### Can rclone do bi-directional sync? ### Can rclone do bi-directional sync? ###
Yes, since rclone v1.58.0, [bidirectional cloud sync](/bisync/) is Yes, since rclone v1.58.0, [bidirectional cloud sync](/bisync/) is
available. available.
### Can I use rclone with an HTTP proxy? ### Can I use rclone with an HTTP proxy? ###
Yes. rclone will follow the standard environment variables for Yes. rclone will follow the standard environment variables for
proxies, similar to cURL and other programs. proxies, similar to cURL and other programs.
@@ -114,26 +112,23 @@ The content of the variable is `protocol://server:port`. The protocol
value is the one used to talk to the proxy server, itself, and is commonly value is the one used to talk to the proxy server, itself, and is commonly
either `http` or `socks5`. either `http` or `socks5`.
Slightly annoyingly, there is no *standard* for the name; some applications Slightly annoyingly, there is no _standard_ for the name; some applications
may use `http_proxy` but another one `HTTP_PROXY`. The `Go` libraries may use `http_proxy` but another one `HTTP_PROXY`. The `Go` libraries
used by `rclone` will try both variations, but you may wish to set all used by `rclone` will try both variations, but you may wish to set all
possibilities. So, on Linux, you may end up with code similar to possibilities. So, on Linux, you may end up with code similar to
```sh export http_proxy=http://proxyserver:12345
export http_proxy=http://proxyserver:12345 export https_proxy=$http_proxy
export https_proxy=$http_proxy export HTTP_PROXY=$http_proxy
export HTTP_PROXY=$http_proxy export HTTPS_PROXY=$http_proxy
export HTTPS_PROXY=$http_proxy
```
Note: If the proxy server requires a username and password, then use Note: If the proxy server requires a username and password, then use
```sh export http_proxy=http://username:password@proxyserver:12345
export http_proxy=http://username:password@proxyserver:12345 export https_proxy=$http_proxy
export https_proxy=$http_proxy export HTTP_PROXY=$http_proxy
export HTTP_PROXY=$http_proxy export HTTPS_PROXY=$http_proxy
export HTTPS_PROXY=$http_proxy
```
The `NO_PROXY` allows you to disable the proxy for specific hosts. The `NO_PROXY` allows you to disable the proxy for specific hosts.
Hosts must be comma separated, and can contain domains or parts. Hosts must be comma separated, and can contain domains or parts.
@@ -141,24 +136,12 @@ For instance "foo.com" also matches "bar.foo.com".
e.g. e.g.
```sh export no_proxy=localhost,127.0.0.0/8,my.host.name
export no_proxy=localhost,127.0.0.0/8,my.host.name export NO_PROXY=$no_proxy
export NO_PROXY=$no_proxy
```
Note that the FTP backend does not support `ftp_proxy` yet. Note that the FTP backend does not support `ftp_proxy` yet.
You can use the command line argument `--http-proxy` to set the proxy, ### Rclone gives x509: failed to load system roots and no roots provided error ###
and in turn use an override in the config file if you want it set for
a single backend, eg `override.http_proxy = http://...` in the config
file.
The FTP and SFTP backends have their own `http_proxy` settings to
support an HTTP CONNECT proxy (
[--ftp-http-proxy](https://rclone.org/ftp/#ftp-http-proxy) and
[--sftp-http-proxy](https://rclone.org/ftp/#sftp-http-proxy) )
### Rclone gives x509: failed to load system roots and no roots provided error
This means that `rclone` can't find the SSL root certificates. Likely This means that `rclone` can't find the SSL root certificates. Likely
you are running `rclone` on a NAS with a cut-down Linux OS, or you are running `rclone` on a NAS with a cut-down Linux OS, or
@@ -167,34 +150,30 @@ possibly on Solaris.
Rclone (via the Go runtime) tries to load the root certificates from Rclone (via the Go runtime) tries to load the root certificates from
these places on Linux. these places on Linux.
```sh "/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Gentoo etc.
"/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Gentoo etc. "/etc/pki/tls/certs/ca-bundle.crt", // Fedora/RHEL
"/etc/pki/tls/certs/ca-bundle.crt", // Fedora/RHEL "/etc/ssl/ca-bundle.pem", // OpenSUSE
"/etc/ssl/ca-bundle.pem", // OpenSUSE "/etc/pki/tls/cacert.pem", // OpenELEC
"/etc/pki/tls/cacert.pem", // OpenELEC
```
So doing something like this should fix the problem. It also sets the So doing something like this should fix the problem. It also sets the
time which is important for SSL to work properly. time which is important for SSL to work properly.
```sh ```
mkdir -p /etc/ssl/certs/ mkdir -p /etc/ssl/certs/
curl -o /etc/ssl/certs/ca-certificates.crt https://raw.githubusercontent.com/bagder/ca-bundle/master/ca-bundle.crt curl -o /etc/ssl/certs/ca-certificates.crt https://raw.githubusercontent.com/bagder/ca-bundle/master/ca-bundle.crt
ntpclient -s -h pool.ntp.org ntpclient -s -h pool.ntp.org
``` ```
The two environment variables `SSL_CERT_FILE` and `SSL_CERT_DIR`, mentioned in The two environment variables `SSL_CERT_FILE` and `SSL_CERT_DIR`, mentioned in the [x509 package](https://godoc.org/crypto/x509),
the [x509 package](https://godoc.org/crypto/x509), provide an additional way to provide an additional way to provide the SSL root certificates.
provide the SSL root certificates.
Note that you may need to add the `--insecure` option to the `curl` command line Note that you may need to add the `--insecure` option to the `curl` command line if it doesn't work without.
if it doesn't work without.
```sh ```
curl --insecure -o /etc/ssl/certs/ca-certificates.crt https://raw.githubusercontent.com/bagder/ca-bundle/master/ca-bundle.crt curl --insecure -o /etc/ssl/certs/ca-certificates.crt https://raw.githubusercontent.com/bagder/ca-bundle/master/ca-bundle.crt
``` ```
### Rclone gives Failed to load config file: function not implemented error ### Rclone gives Failed to load config file: function not implemented error ###
Likely this means that you are running rclone on Linux version not Likely this means that you are running rclone on Linux version not
supported by the go runtime, ie earlier than version 2.6.23. supported by the go runtime, ie earlier than version 2.6.23.
@@ -202,7 +181,7 @@ supported by the go runtime, ie earlier than version 2.6.23.
See the [system requirements section in the go install See the [system requirements section in the go install
docs](https://golang.org/doc/install) for full details. docs](https://golang.org/doc/install) for full details.
### All my uploaded docx/xlsx/pptx files appear as archive/zip ### All my uploaded docx/xlsx/pptx files appear as archive/zip ###
This is caused by uploading these files from a Windows computer which This is caused by uploading these files from a Windows computer which
hasn't got the Microsoft Office suite installed. The easiest way to hasn't got the Microsoft Office suite installed. The easiest way to
@@ -210,12 +189,12 @@ fix is to install the Word viewer and the Microsoft Office
Compatibility Pack for Word, Excel, and PowerPoint 2007 and later Compatibility Pack for Word, Excel, and PowerPoint 2007 and later
versions' file formats versions' file formats
### tcp lookup some.domain.com no such host ### tcp lookup some.domain.com no such host ###
This happens when rclone cannot resolve a domain. Please check that This happens when rclone cannot resolve a domain. Please check that
your DNS setup is generally working, e.g. your DNS setup is generally working, e.g.
```sh ```
# both should print a long list of possible IP addresses # both should print a long list of possible IP addresses
dig www.googleapis.com # resolve using your default DNS dig www.googleapis.com # resolve using your default DNS
dig www.googleapis.com @8.8.8.8 # resolve with Google's DNS server dig www.googleapis.com @8.8.8.8 # resolve with Google's DNS server
@@ -225,6 +204,7 @@ If you are using `systemd-resolved` (default on Arch Linux), ensure it
is at version 233 or higher. Previous releases contain a bug which is at version 233 or higher. Previous releases contain a bug which
causes not all domains to be resolved properly. causes not all domains to be resolved properly.
The Go resolver decision can be influenced with the `GODEBUG=netdns=...` The Go resolver decision can be influenced with the `GODEBUG=netdns=...`
environment variable. This also allows to resolve certain issues with environment variable. This also allows to resolve certain issues with
DNS resolution. On Windows or MacOS systems, try forcing use of the DNS resolution. On Windows or MacOS systems, try forcing use of the
@@ -234,20 +214,17 @@ name resolver by setting `GODEBUG=netdns=cgo` (and recompile rclone
from source with CGO enabled if necessary). See the from source with CGO enabled if necessary). See the
[name resolution section in the go docs](https://golang.org/pkg/net/#hdr-Name_Resolution). [name resolution section in the go docs](https://golang.org/pkg/net/#hdr-Name_Resolution).
### Failed to start auth webserver on Windows ### Failed to start auth webserver on Windows ###
```
```text
Error: config failed to refresh token: failed to start auth webserver: listen tcp 127.0.0.1:53682: bind: An attempt was made to access a socket in a way forbidden by its access permissions. Error: config failed to refresh token: failed to start auth webserver: listen tcp 127.0.0.1:53682: bind: An attempt was made to access a socket in a way forbidden by its access permissions.
... ...
yyyy/mm/dd hh:mm:ss Fatal error: config failed to refresh token: failed to start auth webserver: listen tcp 127.0.0.1:53682: bind: An attempt was made to access a socket in a way forbidden by its access permissions. yyyy/mm/dd hh:mm:ss Fatal error: config failed to refresh token: failed to start auth webserver: listen tcp 127.0.0.1:53682: bind: An attempt was made to access a socket in a way forbidden by its access permissions.
``` ```
This is sometimes caused by the Host Network Service causing issues with opening This is sometimes caused by the Host Network Service causing issues with opening the port on the host.
the port on the host.
A simple solution may be restarting the Host Network Service with eg. Powershell A simple solution may be restarting the Host Network Service with eg. Powershell
```
```pwsh
Restart-Service hns Restart-Service hns
``` ```
@@ -270,7 +247,7 @@ value, say `export GOGC=20`. This will make the garbage collector
work harder, reducing memory size at the expense of CPU usage. work harder, reducing memory size at the expense of CPU usage.
The most common cause of rclone using lots of memory is a single The most common cause of rclone using lots of memory is a single
directory with millions of files in. directory with millions of files in.
Before rclone v1.70 has to load this entirely into memory as rclone Before rclone v1.70 has to load this entirely into memory as rclone
objects. Each rclone object takes 0.5k-1k of memory. There is objects. Each rclone object takes 0.5k-1k of memory. There is
@@ -302,4 +279,4 @@ Unicode characters when transferring to one storage system, and replacing
back again when transferring to a different storage system where the back again when transferring to a different storage system where the
original characters are supported. When the same Unicode characters original characters are supported. When the same Unicode characters
are intentionally used in file names, this replacement strategy leads are intentionally used in file names, this replacement strategy leads
to unwanted renames. Read more under section [caveats](/overview/#restricted-filenames-caveats). to unwanted renames. Read more [here](/overview/#restricted-filenames-caveats).

View File

@@ -39,50 +39,38 @@ Here is a formal definition of the pattern syntax,
Rclone matching rules follow a glob style: Rclone matching rules follow a glob style:
```text * matches any sequence of non-separator (/) characters
* matches any sequence of non-separator (/) characters ** matches any sequence of characters including / separators
** matches any sequence of characters including / separators ? matches any single non-separator (/) character
? matches any single non-separator (/) character [ [ ! ] { character-range } ]
[ [ ! ] { character-range } ] character class (must be non-empty)
character class (must be non-empty) { pattern-list }
{ pattern-list } pattern alternatives
pattern alternatives {{ regexp }}
{{ regexp }} regular expression to match
regular expression to match c matches character c (c != *, **, ?, \, [, {, })
c matches character c (c != *, **, ?, \, [, {, }) \c matches reserved character c (c = *, **, ?, \, [, {, }) or character class
\c matches reserved character c (c = *, **, ?, \, [, {, }) or character class
```
character-range: character-range:
```text c matches character c (c != \, -, ])
c matches character c (c != \, -, ]) \c matches reserved character c (c = \, -, ])
\c matches reserved character c (c = \, -, ]) lo - hi matches character c for lo <= c <= hi
lo - hi matches character c for lo <= c <= hi
```
pattern-list: pattern-list:
```text pattern { , pattern }
pattern { , pattern } comma-separated (without spaces) patterns
comma-separated (without spaces) patterns
```
character classes (see [Go regular expression reference](https://golang.org/pkg/regexp/syntax/)) character classes (see [Go regular expression reference](https://golang.org/pkg/regexp/syntax/)) include:
include:
```text Named character classes (e.g. [\d], [^\d], [\D], [^\D])
Named character classes (e.g. [\d], [^\d], [\D], [^\D]) Perl character classes (e.g. \s, \S, \w, \W)
Perl character classes (e.g. \s, \S, \w, \W) ASCII character classes (e.g. [[:alnum:]], [[:alpha:]], [[:punct:]], [[:xdigit:]])
ASCII character classes (e.g. [[:alnum:]], [[:alpha:]], [[:punct:]], [[:xdigit:]])
```
regexp for advanced users to insert a regular expression - see [below](#regexp) regexp for advanced users to insert a regular expression - see [below](#regexp) for more info:
for more info:
```text Any re2 regular expression not containing `}}`
Any re2 regular expression not containing `}}`
```
If the filter pattern starts with a `/` then it only matches If the filter pattern starts with a `/` then it only matches
at the top level of the directory tree, at the top level of the directory tree,
@@ -92,34 +80,29 @@ starting at the **end of the path/file name** but it only matches
a complete path element - it must match from a `/` a complete path element - it must match from a `/`
separator or the beginning of the path/file. separator or the beginning of the path/file.
```text file.jpg - matches "file.jpg"
file.jpg - matches "file.jpg" - matches "directory/file.jpg"
- matches "directory/file.jpg" - doesn't match "afile.jpg"
- doesn't match "afile.jpg" - doesn't match "directory/afile.jpg"
- doesn't match "directory/afile.jpg" /file.jpg - matches "file.jpg" in the root directory of the remote
/file.jpg - matches "file.jpg" in the root directory of the remote - doesn't match "afile.jpg"
- doesn't match "afile.jpg" - doesn't match "directory/file.jpg"
- doesn't match "directory/file.jpg"
```
The top level of the remote might not be the top level of the drive. The top level of the remote might not be the top level of the drive.
E.g. for a Microsoft Windows local directory structure E.g. for a Microsoft Windows local directory structure
```text F:
F: ├── bkp
├── bkp ├── data
├── data │ ├── excl
│ ├── excl │ ├── 123.jpg
│ │ ── 123.jpg │ │ ── 456.jpg
── 456.jpg ── incl
│ ├── incl │ │ └── document.pdf
│ │ └── document.pdf
```
To copy the contents of folder `data` into folder `bkp` excluding the contents To copy the contents of folder `data` into folder `bkp` excluding the contents of subfolder
of subfolder `excl`the following command treats `F:\data` and `F:\bkp` as top `excl`the following command treats `F:\data` and `F:\bkp` as top level for filtering.
level for filtering.
`rclone copy F:\data\ F:\bkp\ --exclude=/excl/**` `rclone copy F:\data\ F:\bkp\ --exclude=/excl/**`
@@ -130,17 +113,13 @@ Simple patterns are case sensitive unless the `--ignore-case` flag is used.
Without `--ignore-case` (default) Without `--ignore-case` (default)
```text potato - matches "potato"
potato - matches "potato" - doesn't match "POTATO"
- doesn't match "POTATO"
```
With `--ignore-case` With `--ignore-case`
```text potato - matches "potato"
potato - matches "potato" - matches "POTATO"
- matches "POTATO"
```
## Using regular expressions in filter patterns {#regexp} ## Using regular expressions in filter patterns {#regexp}
@@ -162,36 +141,26 @@ the supplied regular expression(s).
Here is how the `{{regexp}}` is transformed into an full regular Here is how the `{{regexp}}` is transformed into an full regular
expression to match the entire path: expression to match the entire path:
```text {{regexp}} becomes (^|/)(regexp)$
{{regexp}} becomes (^|/)(regexp)$ /{{regexp}} becomes ^(regexp)$
/{{regexp}} becomes ^(regexp)$
```
Regexp syntax can be mixed with glob syntax, for example Regexp syntax can be mixed with glob syntax, for example
```text *.{{jpe?g}} to match file.jpg, file.jpeg but not file.png
*.{{jpe?g}} to match file.jpg, file.jpeg but not file.png
```
You can also use regexp flags - to set case insensitive, for example You can also use regexp flags - to set case insensitive, for example
```text *.{{(?i)jpg}} to match file.jpg, file.JPG but not file.png
*.{{(?i)jpg}} to match file.jpg, file.JPG but not file.png
```
Be careful with wildcards in regular expressions - you don't want them Be careful with wildcards in regular expressions - you don't want them
to match path separators normally. To match any file name starting to match path separators normally. To match any file name starting
with `start` and ending with `end` write with `start` and ending with `end` write
```text {{start[^/]*end\.jpg}}
{{start[^/]*end\.jpg}}
```
Not Not
```text {{start.*end\.jpg}}
{{start.*end\.jpg}}
```
Which will match a directory called `start` with a file called Which will match a directory called `start` with a file called
`end.jpg` in it as the `.*` will match `/` characters. `end.jpg` in it as the `.*` will match `/` characters.
@@ -229,12 +198,12 @@ them into regular expressions.
Rclone path/file name filters are made up of one or more of the following flags: Rclone path/file name filters are made up of one or more of the following flags:
- `--include` * `--include`
- `--include-from` * `--include-from`
- `--exclude` * `--exclude`
- `--exclude-from` * `--exclude-from`
- `--filter` * `--filter`
- `--filter-from` * `--filter-from`
There can be more than one instance of individual flags. There can be more than one instance of individual flags.
@@ -305,16 +274,15 @@ every path against the supplied regular expression(s).
Directory recursion optimisation occurs if either: Directory recursion optimisation occurs if either:
- A source remote does not support the rclone `ListR` primitive. local, * A source remote does not support the rclone `ListR` primitive. local,
sftp, Microsoft OneDrive and WebDAV do not support `ListR`. Google sftp, Microsoft OneDrive and WebDAV do not support `ListR`. Google
Drive and most bucket type storage do. [Full list](https://rclone.org/overview/#optional-features) Drive and most bucket type storage do. [Full list](https://rclone.org/overview/#optional-features)
- On other remotes (those that support `ListR`), if the rclone command is not * On other remotes (those that support `ListR`), if the rclone command is not naturally recursive, and
naturally recursive, and provided it is not run with the `--fast-list` flag. provided it is not run with the `--fast-list` flag. `ls`, `lsf -R` and
`ls`, `lsf -R` and `size` are naturally recursive but `sync`, `copy` and `move` `size` are naturally recursive but `sync`, `copy` and `move` are not.
are not.
- Whenever the `--disable ListR` flag is applied to an rclone command. * Whenever the `--disable ListR` flag is applied to an rclone command.
Rclone commands imply directory filter rules from path/file filter Rclone commands imply directory filter rules from path/file filter
rules. To view the directory filter rules rclone has implied for a rules. To view the directory filter rules rclone has implied for a
@@ -322,15 +290,11 @@ command specify the `--dump filters` flag.
E.g. for an include rule E.g. for an include rule
```text /a/*.jpg
/a/*.jpg
```
Rclone implies the directory include rule Rclone implies the directory include rule
```text /a/
/a/
```
Directory filter rules specified in an rclone command can limit Directory filter rules specified in an rclone command can limit
the scope of an rclone command but path/file filters still have the scope of an rclone command but path/file filters still have
@@ -344,12 +308,10 @@ access to the remote by ignoring everything outside of that directory.
E.g. `rclone ls remote: --filter-from filter-list.txt` with a file E.g. `rclone ls remote: --filter-from filter-list.txt` with a file
`filter-list.txt`: `filter-list.txt`:
```text - /dir1/
- /dir1/ - /dir2/
- /dir2/ + *.pdf
+ *.pdf - **
- **
```
All files in directories `dir1` or `dir2` or their subdirectories All files in directories `dir1` or `dir2` or their subdirectories
are completely excluded from the listing. Only files of suffix are completely excluded from the listing. Only files of suffix
@@ -367,9 +329,7 @@ from this pattern list.
E.g. for an include rule E.g. for an include rule
```text {dir1/**,dir2/**}
{dir1/**,dir2/**}
```
Rclone will match files below directories `dir1` or `dir2` only, Rclone will match files below directories `dir1` or `dir2` only,
but will not be able to use this filter to exclude a directory `dir3` but will not be able to use this filter to exclude a directory `dir3`
@@ -421,11 +381,9 @@ named file. The file contains a list of remarks and pattern rules.
For an example `exclude-file.txt`: For an example `exclude-file.txt`:
```text # a sample exclude rule file
# a sample exclude rule file *.bak
*.bak file2.jpg
file2.jpg
```
`rclone ls remote: --exclude-from exclude-file.txt` lists the files on `rclone ls remote: --exclude-from exclude-file.txt` lists the files on
`remote:` except those named `file2.jpg` or with a suffix `.bak`. That is `remote:` except those named `file2.jpg` or with a suffix `.bak`. That is
@@ -468,16 +426,12 @@ E.g. `rclone ls remote: --include "*.{png,jpg}"` lists the files on
E.g. multiple rclone copy commands can be combined with `--include` and a E.g. multiple rclone copy commands can be combined with `--include` and a
pattern-list. pattern-list.
```sh rclone copy /vol1/A remote:A
rclone copy /vol1/A remote:A rclone copy /vol1/B remote:B
rclone copy /vol1/B remote:B
```
is equivalent to: is equivalent to:
```sh rclone copy /vol1 remote: --include "{A,B}/**"
rclone copy /vol1 remote: --include "{A,B}/**"
```
E.g. `rclone ls remote:/wheat --include "??[^[:punct:]]*"` lists the E.g. `rclone ls remote:/wheat --include "??[^[:punct:]]*"` lists the
files `remote:` directory `wheat` (and subdirectories) whose third files `remote:` directory `wheat` (and subdirectories) whose third
@@ -491,11 +445,9 @@ named file. The file contains a list of remarks and pattern rules.
For an example `include-file.txt`: For an example `include-file.txt`:
```text # a sample include rule file
# a sample include rule file *.jpg
*.jpg file2.avi
file2.avi
```
`rclone ls remote: --include-from include-file.txt` lists the files on `rclone ls remote: --include-from include-file.txt` lists the files on
`remote:` with name `file2.avi` or suffix `.jpg`. That is equivalent to `remote:` with name `file2.avi` or suffix `.jpg`. That is equivalent to
@@ -544,7 +496,6 @@ from a list of `remote:`.
Adds path/file names to an rclone command based on rules in a Adds path/file names to an rclone command based on rules in a
named file. The file contains a list of remarks and pattern rules. Include named file. The file contains a list of remarks and pattern rules. Include
<!-- markdownlint-disable-next-line no-space-in-code -->
rules start with `+ ` and exclude rules with `- `. `!` clears existing rules start with `+ ` and exclude rules with `- `. `!` clears existing
rules. Rules are processed in the order they are defined. rules. Rules are processed in the order they are defined.
@@ -554,24 +505,20 @@ processed in.
Arrange the order of filter rules with the most restrictive first and Arrange the order of filter rules with the most restrictive first and
work down. work down.
Lines starting with # or ; are ignored, and can be used to write comments. Lines starting with # or ; are ignored, and can be used to write comments. Inline comments are not supported. _Use `-vv --dump filters` to see how they appear in the final regexp._
Inline comments are not supported. *Use `-vv --dump filters` to see how they
appear in the final regexp.*
E.g. for `filter-file.txt`: E.g. for `filter-file.txt`:
```text # a sample filter rule file
# a sample filter rule file - secret*.jpg
- secret*.jpg + *.jpg
+ *.jpg + *.png
+ *.png + file2.avi
+ file2.avi - /dir/tmp/** # WARNING! This text will be treated as part of the path.
- /dir/tmp/** # WARNING! This text will be treated as part of the path. - /dir/Trash/**
- /dir/Trash/** + /dir/**
+ /dir/** # exclude everything else
# exclude everything else - *
- *
```
`rclone ls remote: --filter-from filter-file.txt` lists the path/files on `rclone ls remote: --filter-from filter-file.txt` lists the path/files on
`remote:` including all `jpg` and `png` files, excluding any `remote:` including all `jpg` and `png` files, excluding any
@@ -579,28 +526,25 @@ matching `secret*.jpg` and including `file2.avi`. It also includes
everything in the directory `dir` at the root of `remote`, except everything in the directory `dir` at the root of `remote`, except
`remote:dir/Trash` which it excludes. Everything else is excluded. `remote:dir/Trash` which it excludes. Everything else is excluded.
E.g. for an alternative `filter-file.txt`: E.g. for an alternative `filter-file.txt`:
```text - secret*.jpg
- secret*.jpg + *.jpg
+ *.jpg + *.png
+ *.png + file2.avi
+ file2.avi - *
- *
```
Files `file1.jpg`, `file3.png` and `file2.avi` are listed whilst Files `file1.jpg`, `file3.png` and `file2.avi` are listed whilst
`secret17.jpg` and files without the suffix `.jpg` or `.png` are excluded. `secret17.jpg` and files without the suffix `.jpg` or `.png` are excluded.
E.g. for an alternative `filter-file.txt`: E.g. for an alternative `filter-file.txt`:
```text + *.jpg
+ *.jpg + *.gif
+ *.gif !
! + 42.doc
+ 42.doc - *
- *
```
Only file 42.doc is listed. Prior rules are cleared by the `!`. Only file 42.doc is listed. Prior rules are cleared by the `!`.
@@ -638,85 +582,67 @@ to right along the command line.
Paths within the `--files-from` file are interpreted as starting Paths within the `--files-from` file are interpreted as starting
with the root specified in the rclone command. Leading `/` separators are with the root specified in the rclone command. Leading `/` separators are
ignored. See [--files-from-raw](#files-from-raw-read-list-of-source-file-names-without-any-processing) ignored. See [--files-from-raw](#files-from-raw-read-list-of-source-file-names-without-any-processing) if
if you need the input to be processed in a raw manner. you need the input to be processed in a raw manner.
E.g. for a file `files-from.txt`: E.g. for a file `files-from.txt`:
```text # comment
# comment file1.jpg
file1.jpg subdir/file2.jpg
subdir/file2.jpg
```
`rclone copy --files-from files-from.txt /home/me/pics remote:pics` `rclone copy --files-from files-from.txt /home/me/pics remote:pics`
copies the following, if they exist, and only those files. copies the following, if they exist, and only those files.
```text /home/me/pics/file1.jpg → remote:pics/file1.jpg
/home/me/pics/file1.jpg → remote:pics/file1.jpg /home/me/pics/subdir/file2.jpg → remote:pics/subdir/file2.jpg
/home/me/pics/subdir/file2.jpg → remote:pics/subdir/file2.jpg
```
E.g. to copy the following files referenced by their absolute paths: E.g. to copy the following files referenced by their absolute paths:
```text /home/user1/42
/home/user1/42 /home/user1/dir/ford
/home/user1/dir/ford /home/user2/prefect
/home/user2/prefect
```
First find a common subdirectory - in this case `/home` First find a common subdirectory - in this case `/home`
and put the remaining files in `files-from.txt` with or without and put the remaining files in `files-from.txt` with or without
leading `/`, e.g. leading `/`, e.g.
```text user1/42
user1/42 user1/dir/ford
user1/dir/ford user2/prefect
user2/prefect
```
Then copy these to a remote: Then copy these to a remote:
```sh rclone copy --files-from files-from.txt /home remote:backup
rclone copy --files-from files-from.txt /home remote:backup
```
The three files are transferred as follows: The three files are transferred as follows:
```text /home/user1/42 → remote:backup/user1/important
/home/user1/42 → remote:backup/user1/important /home/user1/dir/ford → remote:backup/user1/dir/file
/home/user1/dir/ford → remote:backup/user1/dir/file /home/user2/prefect → remote:backup/user2/stuff
/home/user2/prefect → remote:backup/user2/stuff
```
Alternatively if `/` is chosen as root `files-from.txt` will be: Alternatively if `/` is chosen as root `files-from.txt` will be:
```text /home/user1/42
/home/user1/42 /home/user1/dir/ford
/home/user1/dir/ford /home/user2/prefect
/home/user2/prefect
```
The copy command will be: The copy command will be:
```sh rclone copy --files-from files-from.txt / remote:backup
rclone copy --files-from files-from.txt / remote:backup
```
Then there will be an extra `home` directory on the remote: Then there will be an extra `home` directory on the remote:
```text /home/user1/42 → remote:backup/home/user1/42
/home/user1/42 → remote:backup/home/user1/42 /home/user1/dir/ford → remote:backup/home/user1/dir/ford
/home/user1/dir/ford → remote:backup/home/user1/dir/ford /home/user2/prefect → remote:backup/home/user2/prefect
/home/user2/prefect → remote:backup/home/user2/prefect
```
### `--files-from-raw` - Read list of source-file names without any processing ### `--files-from-raw` - Read list of source-file names without any processing
This flag is the same as `--files-from` except that input is read in a This flag is the same as `--files-from` except that input is read in a
raw manner. Lines with leading / trailing whitespace, and lines starting raw manner. Lines with leading / trailing whitespace, and lines starting
with `;` or `#` are read without any processing. [rclone lsf](/commands/rclone_lsf/) with `;` or `#` are read without any processing. [rclone lsf](/commands/rclone_lsf/) has
has a compatible format that can be used to export file lists from remotes for a compatible format that can be used to export file lists from remotes for
input to `--files-from-raw`. input to `--files-from-raw`.
### `--ignore-case` - make searches case insensitive ### `--ignore-case` - make searches case insensitive
@@ -735,9 +661,9 @@ not as work as expected in your shell and may require quoting.
E.g. linux, OSX (`*` metacharacter) E.g. linux, OSX (`*` metacharacter)
- `--include \*.jpg` * `--include \*.jpg`
- `--include '*.jpg'` * `--include '*.jpg'`
- `--include='*.jpg'` * `--include='*.jpg'`
Microsoft Windows expansion is done by the command, not shell, so Microsoft Windows expansion is done by the command, not shell, so
`--include *.jpg` does not require quoting. `--include *.jpg` does not require quoting.
@@ -794,8 +720,7 @@ See [the time option docs](/docs/#time-options) for valid formats.
### `--hash-filter` - Deterministically select a subset of files {#hash-filter} ### `--hash-filter` - Deterministically select a subset of files {#hash-filter}
The `--hash-filter` flag enables selecting a deterministic subset of files, The `--hash-filter` flag enables selecting a deterministic subset of files, useful for:
useful for:
1. Running large sync operations across multiple machines. 1. Running large sync operations across multiple machines.
2. Checking a subset of files for bitrot. 2. Checking a subset of files for bitrot.
@@ -805,7 +730,7 @@ useful for:
The flag takes two parameters expressed as a fraction: The flag takes two parameters expressed as a fraction:
```sh ```
--hash-filter K/N --hash-filter K/N
``` ```
@@ -813,10 +738,8 @@ The flag takes two parameters expressed as a fraction:
- `K`: The specific partition to select (an integer from `0` to `N`). - `K`: The specific partition to select (an integer from `0` to `N`).
For example: For example:
- `--hash-filter 1/3`: Selects the first third of the files. - `--hash-filter 1/3`: Selects the first third of the files.
- `--hash-filter 2/3` and `--hash-filter 3/3`: Select the second and third - `--hash-filter 2/3` and `--hash-filter 3/3`: Select the second and third partitions, respectively.
partitions, respectively.
Each partition is non-overlapping, ensuring all files are covered without duplication. Each partition is non-overlapping, ensuring all files are covered without duplication.
@@ -824,17 +747,15 @@ Each partition is non-overlapping, ensuring all files are covered without duplic
Use `@` as `K` to randomly select a partition: Use `@` as `K` to randomly select a partition:
```sh ```
--hash-filter @/M --hash-filter @/M
``` ```
For example, `--hash-filter @/3` will randomly select a number between 0 and 2. For example, `--hash-filter @/3` will randomly select a number between 0 and 2. This will stay constant across retries.
This will stay constant across retries.
#### How It Works #### How It Works
- Rclone takes each file's full path, normalizes it to lowercase, and applies - Rclone takes each file's full path, normalizes it to lowercase, and applies Unicode normalization.
Unicode normalization.
- It then hashes the normalized path into a 64 bit number. - It then hashes the normalized path into a 64 bit number.
- The hash result is reduced modulo `N` to assign the file to a partition. - The hash result is reduced modulo `N` to assign the file to a partition.
- If the calculated partition does not match `K` the file is excluded. - If the calculated partition does not match `K` the file is excluded.
@@ -854,7 +775,7 @@ This will stay constant across retries.
Assuming the current directory contains `file1.jpg` through `file9.jpg`: Assuming the current directory contains `file1.jpg` through `file9.jpg`:
```sh ```
$ rclone lsf --hash-filter 0/4 . $ rclone lsf --hash-filter 0/4 .
file1.jpg file1.jpg
file5.jpg file5.jpg
@@ -879,13 +800,13 @@ file5.jpg
##### Syncing the first quarter of files ##### Syncing the first quarter of files
```sh ```
rclone sync --hash-filter 1/4 source:path destination:path rclone sync --hash-filter 1/4 source:path destination:path
``` ```
##### Checking a random 1% of files for integrity ##### Checking a random 1% of files for integrity
```sh ```
rclone check --download --hash-filter @/100 source:path destination:path rclone check --download --hash-filter @/100 source:path destination:path
``` ```
@@ -901,9 +822,7 @@ on the destination which are excluded from the command.
E.g. the scope of `rclone sync --interactive A: B:` can be restricted: E.g. the scope of `rclone sync --interactive A: B:` can be restricted:
```sh rclone --min-size 50k --delete-excluded sync A: B:
rclone --min-size 50k --delete-excluded sync A: B:
```
All files on `B:` which are less than 50 KiB are deleted All files on `B:` which are less than 50 KiB are deleted
because they are excluded from the rclone sync command. because they are excluded from the rclone sync command.
@@ -927,12 +846,10 @@ This flag has a priority over other filter flags.
E.g. for the following directory structure: E.g. for the following directory structure:
```text dir1/file1
dir1/file1 dir1/dir2/file2
dir1/dir2/file2 dir1/dir2/dir3/file3
dir1/dir2/dir3/file3 dir1/dir2/dir3/.ignore
dir1/dir2/dir3/.ignore
```
The command `rclone ls --exclude-if-present .ignore dir1` does The command `rclone ls --exclude-if-present .ignore dir1` does
not list `dir3`, `file3` or `.ignore`. not list `dir3`, `file3` or `.ignore`.
@@ -950,15 +867,11 @@ expressions](#regexp).
For example if you wished to list only local files with a mode of For example if you wished to list only local files with a mode of
`100664` you could do that with: `100664` you could do that with:
```sh rclone lsf -M --files-only --metadata-include "mode=100664" .
rclone lsf -M --files-only --metadata-include "mode=100664" .
```
Or if you wished to show files with an `atime`, `mtime` or `btime` at a given date: Or if you wished to show files with an `atime`, `mtime` or `btime` at a given date:
```sh rclone lsf -M --files-only --metadata-include "[abm]time=2022-12-16*" .
rclone lsf -M --files-only --metadata-include "[abm]time=2022-12-16*" .
```
Like file filtering, metadata filtering only applies to files not to Like file filtering, metadata filtering only applies to files not to
directories. directories.
@@ -966,25 +879,24 @@ directories.
The filters can be applied using these flags. The filters can be applied using these flags.
- `--metadata-include` - Include metadatas matching pattern - `--metadata-include` - Include metadatas matching pattern
- `--metadata-include-from` - Read metadata include patterns from file - `--metadata-include-from` - Read metadata include patterns from file (use - to read from stdin)
(use - to read from stdin)
- `--metadata-exclude` - Exclude metadatas matching pattern - `--metadata-exclude` - Exclude metadatas matching pattern
- `--metadata-exclude-from` - Read metadata exclude patterns from file - `--metadata-exclude-from` - Read metadata exclude patterns from file (use - to read from stdin)
(use - to read from stdin)
- `--metadata-filter` - Add a metadata filtering rule - `--metadata-filter` - Add a metadata filtering rule
- `--metadata-filter-from` - Read metadata filtering patterns from a file - `--metadata-filter-from` - Read metadata filtering patterns from a file (use - to read from stdin)
(use - to read from stdin)
Each flag can be repeated. See the section on [how filter rules are Each flag can be repeated. See the section on [how filter rules are
applied](#how-filter-rules-work) for more details - these flags work applied](#how-filter-rules-work) for more details - these flags work
in an identical way to the file name filtering flags, but instead of in an identical way to the file name filtering flags, but instead of
file name patterns have metadata patterns. file name patterns have metadata patterns.
## Common pitfalls ## Common pitfalls
The most frequent filter support issues on The most frequent filter support issues on
the [rclone forum](https://forum.rclone.org/) are: the [rclone forum](https://forum.rclone.org/) are:
- Not using paths relative to the root of the remote * Not using paths relative to the root of the remote
- Not using `/` to match from the root of a remote * Not using `/` to match from the root of a remote
- Not using `**` to match the contents of a directory * Not using `**` to match the contents of a directory

View File

@@ -13,14 +13,13 @@ change.
Run this command in a terminal and rclone will download and then Run this command in a terminal and rclone will download and then
display the GUI in a web browser. display the GUI in a web browser.
```sh ```
rclone rcd --rc-web-gui rclone rcd --rc-web-gui
``` ```
This will produce logs like this and rclone needs to continue to run to serve This will produce logs like this and rclone needs to continue to run to serve the GUI:
the GUI:
```text ```
2019/08/25 11:40:14 NOTICE: A new release for gui is present at https://github.com/rclone/rclone-webui-react/releases/download/v0.0.6/currentbuild.zip 2019/08/25 11:40:14 NOTICE: A new release for gui is present at https://github.com/rclone/rclone-webui-react/releases/download/v0.0.6/currentbuild.zip
2019/08/25 11:40:14 NOTICE: Downloading webgui binary. Please wait. [Size: 3813937, Path : /home/USER/.cache/rclone/webgui/v0.0.6.zip] 2019/08/25 11:40:14 NOTICE: Downloading webgui binary. Please wait. [Size: 3813937, Path : /home/USER/.cache/rclone/webgui/v0.0.6.zip]
2019/08/25 11:40:16 NOTICE: Unzipping 2019/08/25 11:40:16 NOTICE: Unzipping
@@ -59,8 +58,7 @@ When you run the `rclone rcd --rc-web-gui` this is what happens
- Rclone starts but only runs the remote control API ("rc"). - Rclone starts but only runs the remote control API ("rc").
- The API is bound to localhost with an auto-generated username and password. - The API is bound to localhost with an auto-generated username and password.
- If the API bundle is missing then rclone will download it. - If the API bundle is missing then rclone will download it.
- rclone will start serving the files from the API bundle over the same port as - rclone will start serving the files from the API bundle over the same port as the API
the API
- rclone will open the browser with a `login_token` so it can log straight in. - rclone will open the browser with a `login_token` so it can log straight in.
## Advanced use ## Advanced use
@@ -81,8 +79,7 @@ See also the [rclone rcd documentation](https://rclone.org/commands/rclone_rcd/)
### Example: Running a public GUI ### Example: Running a public GUI
For example the GUI could be served on a public port over SSL using an htpasswd For example the GUI could be served on a public port over SSL using an htpasswd file using the following flags:
file using the following flags:
- `--rc-web-gui` - `--rc-web-gui`
- `--rc-addr :443` - `--rc-addr :443`
@@ -110,3 +107,5 @@ The GUI is being developed in the: [rclone/rclone-webui-react repository](https:
Bug reports and contributions are very welcome :-) Bug reports and contributions are very welcome :-)
If you have questions then please ask them on the [rclone forum](https://forum.rclone.org/). If you have questions then please ask them on the [rclone forum](https://forum.rclone.org/).

View File

@@ -9,10 +9,10 @@ Rclone is a Go program and comes as a single binary file.
## Quickstart ## Quickstart
- [Download](/downloads/) the relevant binary. * [Download](/downloads/) the relevant binary.
- Extract the `rclone` executable, `rclone.exe` on Windows, from the archive. * Extract the `rclone` executable, `rclone.exe` on Windows, from the archive.
- Run `rclone config` to setup. See [rclone config docs](/docs/) for more details. * Run `rclone config` to setup. See [rclone config docs](/docs/) for more details.
- Optionally configure [automatic execution](#autostart). * Optionally configure [automatic execution](#autostart).
See below for some expanded Linux / macOS / Windows instructions. See below for some expanded Linux / macOS / Windows instructions.
@@ -29,15 +29,11 @@ signatures on the release.
To install rclone on Linux/macOS/BSD systems, run: To install rclone on Linux/macOS/BSD systems, run:
```sh sudo -v ; curl https://rclone.org/install.sh | sudo bash
sudo -v ; curl https://rclone.org/install.sh | sudo bash
```
For beta installation, run: For beta installation, run:
```sh sudo -v ; curl https://rclone.org/install.sh | sudo bash -s beta
sudo -v ; curl https://rclone.org/install.sh | sudo bash -s beta
```
Note that this script checks the version of rclone installed first and Note that this script checks the version of rclone installed first and
won't re-download if not needed. won't re-download if not needed.
@@ -48,41 +44,31 @@ won't re-download if not needed.
Fetch and unpack Fetch and unpack
```sh curl -O https://downloads.rclone.org/rclone-current-linux-amd64.zip
curl -O https://downloads.rclone.org/rclone-current-linux-amd64.zip unzip rclone-current-linux-amd64.zip
unzip rclone-current-linux-amd64.zip cd rclone-*-linux-amd64
cd rclone-*-linux-amd64
```
Copy binary file Copy binary file
```sh sudo cp rclone /usr/bin/
sudo cp rclone /usr/bin/ sudo chown root:root /usr/bin/rclone
sudo chown root:root /usr/bin/rclone sudo chmod 755 /usr/bin/rclone
sudo chmod 755 /usr/bin/rclone
```
Install manpage Install manpage
```sh sudo mkdir -p /usr/local/share/man/man1
sudo mkdir -p /usr/local/share/man/man1 sudo cp rclone.1 /usr/local/share/man/man1/
sudo cp rclone.1 /usr/local/share/man/man1/ sudo mandb
sudo mandb
```
Run `rclone config` to setup. See [rclone config docs](/docs/) for more details. Run `rclone config` to setup. See [rclone config docs](/docs/) for more details.
```sh rclone config
rclone config
```
## macOS installation {#macos} ## macOS installation {#macos}
### Installation with brew {#macos-brew} ### Installation with brew {#macos-brew}
```sh brew install rclone
brew install rclone
```
NOTE: This version of rclone will not support `mount` any more (see NOTE: This version of rclone will not support `mount` any more (see
[#5373](https://github.com/rclone/rclone/issues/5373)). If mounting is wanted [#5373](https://github.com/rclone/rclone/issues/5373)). If mounting is wanted
@@ -98,16 +84,14 @@ developers so it may be out of date. Its current version is as below.
On macOS, rclone can also be installed via [MacPorts](https://www.macports.org): On macOS, rclone can also be installed via [MacPorts](https://www.macports.org):
```sh sudo port install rclone
sudo port install rclone
```
Note that this is a third party installer not controlled by the rclone Note that this is a third party installer not controlled by the rclone
developers so it may be out of date. Its current version is as below. developers so it may be out of date. Its current version is as below.
[![MacPorts port](https://repology.org/badge/version-for-repo/macports/rclone.svg)](https://repology.org/project/rclone/versions) [![MacPorts port](https://repology.org/badge/version-for-repo/macports/rclone.svg)](https://repology.org/project/rclone/versions)
More information on [macports.org](https://ports.macports.org/port/rclone/). More information [here](https://ports.macports.org/port/rclone/).
### Precompiled binary, using curl {#macos-precompiled} ### Precompiled binary, using curl {#macos-precompiled}
@@ -116,36 +100,26 @@ notarized it is enough to download with `curl`.
Download the latest version of rclone. Download the latest version of rclone.
```sh cd && curl -O https://downloads.rclone.org/rclone-current-osx-amd64.zip
cd && curl -O https://downloads.rclone.org/rclone-current-osx-amd64.zip
```
Unzip the download and cd to the extracted folder. Unzip the download and cd to the extracted folder.
```sh unzip -a rclone-current-osx-amd64.zip && cd rclone-*-osx-amd64
unzip -a rclone-current-osx-amd64.zip && cd rclone-*-osx-amd64
```
Move rclone to your $PATH. You will be prompted for your password. Move rclone to your $PATH. You will be prompted for your password.
```sh sudo mkdir -p /usr/local/bin
sudo mkdir -p /usr/local/bin sudo mv rclone /usr/local/bin/
sudo mv rclone /usr/local/bin/
```
(the `mkdir` command is safe to run, even if the directory already exists). (the `mkdir` command is safe to run, even if the directory already exists).
Remove the leftover files. Remove the leftover files.
```sh cd .. && rm -rf rclone-*-osx-amd64 rclone-current-osx-amd64.zip
cd .. && rm -rf rclone-*-osx-amd64 rclone-current-osx-amd64.zip
```
Run `rclone config` to setup. See [rclone config docs](/docs/) for more details. Run `rclone config` to setup. See [rclone config docs](/docs/) for more details.
```sh rclone config
rclone config
```
### Precompiled binary, using a web browser {#macos-precompiled-web} ### Precompiled binary, using a web browser {#macos-precompiled-web}
@@ -153,16 +127,12 @@ When downloading a binary with a web browser, the browser will set the macOS
gatekeeper quarantine attribute. Starting from Catalina, when attempting to run gatekeeper quarantine attribute. Starting from Catalina, when attempting to run
`rclone`, a pop-up will appear saying: `rclone`, a pop-up will appear saying:
```sh "rclone" cannot be opened because the developer cannot be verified.
"rclone" cannot be opened because the developer cannot be verified. macOS cannot verify that this app is free from malware.
macOS cannot verify that this app is free from malware.
```
The simplest fix is to run The simplest fix is to run
```sh xattr -d com.apple.quarantine rclone
xattr -d com.apple.quarantine rclone
```
## Windows installation {#windows} ## Windows installation {#windows}
@@ -190,20 +160,14 @@ feature then you will need to install the third party utility
### Windows package manager (Winget) {#windows-chocolatey} ### Windows package manager (Winget) {#windows-chocolatey}
[Winget](https://learn.microsoft.com/en-us/windows/package-manager/) comes [Winget](https://learn.microsoft.com/en-us/windows/package-manager/) comes pre-installed with the latest versions of Windows. If not, update the [App Installer](https://www.microsoft.com/p/app-installer/9nblggh4nns1) package from the Microsoft store.
pre-installed with the latest versions of Windows. If not, update the
[App Installer](https://www.microsoft.com/p/app-installer/9nblggh4nns1) package
from the Microsoft store.
To install rclone To install rclone
```
```bat
winget install Rclone.Rclone winget install Rclone.Rclone
``` ```
To uninstall rclone To uninstall rclone
```
```bat
winget uninstall Rclone.Rclone --force winget uninstall Rclone.Rclone --force
``` ```
@@ -211,7 +175,7 @@ winget uninstall Rclone.Rclone --force
Make sure you have [Choco](https://chocolatey.org/) installed Make sure you have [Choco](https://chocolatey.org/) installed
```bat ```
choco search rclone choco search rclone
choco install rclone choco install rclone
``` ```
@@ -219,7 +183,7 @@ choco install rclone
This will install rclone on your Windows machine. If you are planning This will install rclone on your Windows machine. If you are planning
to use [rclone mount](/commands/rclone_mount/) then to use [rclone mount](/commands/rclone_mount/) then
```bat ```
choco install winfsp choco install winfsp
``` ```
@@ -234,7 +198,7 @@ developers so it may be out of date. Its current version is as below.
Make sure you have [Scoop](https://scoop.sh/) installed Make sure you have [Scoop](https://scoop.sh/) installed
```bat ```
scoop install rclone scoop install rclone
``` ```
@@ -274,7 +238,7 @@ The `:latest` tag will always point to the latest stable release. You
can use the `:beta` tag to get the latest build from master. You can can use the `:beta` tag to get the latest build from master. You can
also use version tags, e.g. `:1.49.1`, `:1.49` or `:1`. also use version tags, e.g. `:1.49.1`, `:1.49` or `:1`.
```sh ```
$ docker pull rclone/rclone:latest $ docker pull rclone/rclone:latest
latest: Pulling from rclone/rclone latest: Pulling from rclone/rclone
Digest: sha256:0e0ced72671989bb837fea8e88578b3fc48371aa45d209663683e24cfdaa0e11 Digest: sha256:0e0ced72671989bb837fea8e88578b3fc48371aa45d209663683e24cfdaa0e11
@@ -289,37 +253,35 @@ There are a few command line options to consider when starting an rclone Docker
from the rclone image. from the rclone image.
- You need to mount the host rclone config dir at `/config/rclone` into the Docker - You need to mount the host rclone config dir at `/config/rclone` into the Docker
container. Due to the fact that rclone updates tokens inside its config file, container. Due to the fact that rclone updates tokens inside its config file, and that
and that the update process involves a file rename, you need to mount the whole the update process involves a file rename, you need to mount the whole host rclone
host rclone config dir, not just the single host rclone config file. config dir, not just the single host rclone config file.
- You need to mount a host data dir at `/data` into the Docker container. - You need to mount a host data dir at `/data` into the Docker container.
- By default, the rclone binary inside a Docker container runs with UID=0 (root). - By default, the rclone binary inside a Docker container runs with UID=0 (root).
As a result, all files created in a run will have UID=0. If your config and As a result, all files created in a run will have UID=0. If your config and data files
data files reside on the host with a non-root UID:GID, you need to pass these reside on the host with a non-root UID:GID, you need to pass these on the container
on the container start command line. start command line.
- If you want to access the RC interface (either via the API or the Web UI), it is - If you want to access the RC interface (either via the API or the Web UI), it is
required to set the `--rc-addr` to `:5572` in order to connect to it from outside required to set the `--rc-addr` to `:5572` in order to connect to it from outside
the container. An explanation about why this is necessary can be found in an old the container. An explanation about why this is necessary is present [here](https://web.archive.org/web/20200808071950/https://pythonspeed.com/articles/docker-connection-refused/).
[pythonspeed.com](https://web.archive.org/web/20200808071950/https://pythonspeed.com/articles/docker-connection-refused/) * NOTE: Users running this container with the docker network set to `host` should
article. probably set it to listen to localhost only, with `127.0.0.1:5572` as the value for
- NOTE: Users running this container with the docker network set to `host` should `--rc-addr`
probably set it to listen to localhost only, with `127.0.0.1:5572` as the
value for `--rc-addr`
- It is possible to use `rclone mount` inside a userspace Docker container, and expose - It is possible to use `rclone mount` inside a userspace Docker container, and expose
the resulting fuse mount to the host. The exact `docker run` options to do that the resulting fuse mount to the host. The exact `docker run` options to do that might
might vary slightly between hosts. See, e.g. the discussion in this vary slightly between hosts. See, e.g. the discussion in this
[thread](https://github.com/moby/moby/issues/9448). [thread](https://github.com/moby/moby/issues/9448).
You also need to mount the host `/etc/passwd` and `/etc/group` for fuse to work You also need to mount the host `/etc/passwd` and `/etc/group` for fuse to work inside
inside the container. the container.
Here are some commands tested on an Ubuntu 18.04.3 host: Here are some commands tested on an Ubuntu 18.04.3 host:
```sh ```
# config on host at ~/.config/rclone/rclone.conf # config on host at ~/.config/rclone/rclone.conf
# data on host at ~/data # data on host at ~/data
@@ -357,26 +319,23 @@ kill %1
Make sure you have [Snapd installed](https://snapcraft.io/docs/installing-snapd) Make sure you have [Snapd installed](https://snapcraft.io/docs/installing-snapd)
```sh ```bash
sudo snap install rclone $ sudo snap install rclone
``` ```
Due to the strict confinement of Snap, rclone snap cannot access real /home/$USER/.config/rclone directory, default config path is as below.
Due to the strict confinement of Snap, rclone snap cannot access real
`/home/$USER/.config/rclone` directory, default config path is as below.
- Default config directory: - Default config directory:
- /home/$USER/snap/rclone/current/.config/rclone - /home/$USER/snap/rclone/current/.config/rclone
Note: Due to the strict confinement of Snap, `rclone mount` feature is `not` supported. Note: Due to the strict confinement of Snap, `rclone mount` feature is `not` supported.
If mounting is wanted, either install a precompiled binary or enable the relevant If mounting is wanted, either install a precompiled binary or enable the relevant option when [installing from source](#source).
option when [installing from source](#source).
Note that this is controlled by [community maintainer](https://github.com/boukendesho/rclone-snap) Note that this is controlled by [community maintainer](https://github.com/boukendesho/rclone-snap) not the rclone developers so it may be out of date. Its current version is as below.
not the rclone developers so it may be out of date. Its current version is as below.
[![rclone](https://snapcraft.io/rclone/badge.svg)](https://snapcraft.io/rclone) [![rclone](https://snapcraft.io/rclone/badge.svg)](https://snapcraft.io/rclone)
## Source installation {#source} ## Source installation {#source}
Make sure you have git and [Go](https://golang.org/) installed. Make sure you have git and [Go](https://golang.org/) installed.
@@ -384,7 +343,7 @@ Go version 1.22 or newer is required, the latest release is recommended.
You can get it from your package manager, or download it from You can get it from your package manager, or download it from
[golang.org/dl](https://golang.org/dl/). Then you can run the following: [golang.org/dl](https://golang.org/dl/). Then you can run the following:
```sh ```
git clone https://github.com/rclone/rclone.git git clone https://github.com/rclone/rclone.git
cd rclone cd rclone
go build go build
@@ -398,7 +357,7 @@ in the same folder. As an initial check you can now run `./rclone version`
Note that on macOS and Windows the [mount](https://rclone.org/commands/rclone_mount/) Note that on macOS and Windows the [mount](https://rclone.org/commands/rclone_mount/)
command will not be available unless you specify an additional build tag `cmount`. command will not be available unless you specify an additional build tag `cmount`.
```sh ```
go build -tags cmount go build -tags cmount
``` ```
@@ -424,7 +383,7 @@ You may add arguments `-ldflags -s` to omit symbol table and debug information,
making the executable file smaller, and `-trimpath` to remove references to making the executable file smaller, and `-trimpath` to remove references to
local file system paths. The official rclone releases are built with both of these. local file system paths. The official rclone releases are built with both of these.
```sh ```
go build -trimpath -ldflags -s -tags cmount go build -trimpath -ldflags -s -tags cmount
``` ```
@@ -435,7 +394,7 @@ or `fs.VersionSuffix` (to keep default number but customize the suffix).
This can be done from the build command, by adding to the `-ldflags` This can be done from the build command, by adding to the `-ldflags`
argument value as shown below. argument value as shown below.
```sh ```
go build -trimpath -ldflags "-s -X github.com/rclone/rclone/fs.Version=v9.9.9-test" -tags cmount go build -trimpath -ldflags "-s -X github.com/rclone/rclone/fs.Version=v9.9.9-test" -tags cmount
``` ```
@@ -446,7 +405,7 @@ It generates a Windows resource system object file, with extension .syso, e.g.
`resource_windows_amd64.syso`, that will be automatically picked up by `resource_windows_amd64.syso`, that will be automatically picked up by
future build commands. future build commands.
```sh ```
go run bin/resource_windows.go go run bin/resource_windows.go
``` ```
@@ -458,7 +417,7 @@ override this version variable in the build command as described above, you
need to do that also when generating the resource file, or else it will still need to do that also when generating the resource file, or else it will still
use the value from the source. use the value from the source.
```sh ```
go run bin/resource_windows.go -version v9.9.9-test go run bin/resource_windows.go -version v9.9.9-test
``` ```
@@ -468,13 +427,13 @@ followed by additional commit details, embeds version information binary resourc
on Windows, and copies the resulting rclone executable into your GOPATH bin folder on Windows, and copies the resulting rclone executable into your GOPATH bin folder
(`$(go env GOPATH)/bin`, which corresponds to `~/go/bin/rclone` by default). (`$(go env GOPATH)/bin`, which corresponds to `~/go/bin/rclone` by default).
```sh ```
make make
``` ```
To include mount command on macOS and Windows with Makefile build: To include mount command on macOS and Windows with Makefile build:
```sh ```
make GOTAGS=cmount make GOTAGS=cmount
``` ```
@@ -491,7 +450,7 @@ The source will be stored it in the Go module cache, and the resulting
executable will be in your GOPATH bin folder (`$(go env GOPATH)/bin`, executable will be in your GOPATH bin folder (`$(go env GOPATH)/bin`,
which corresponds to `~/go/bin/rclone` by default). which corresponds to `~/go/bin/rclone` by default).
```sh ```
go install github.com/rclone/rclone@latest go install github.com/rclone/rclone@latest
``` ```
@@ -507,15 +466,14 @@ role](https://github.com/stefangweichinger/ansible-rclone).
Instructions Instructions
1. `git clone https://github.com/stefangweichinger/ansible-rclone.git` into 1. `git clone https://github.com/stefangweichinger/ansible-rclone.git` into your local roles-directory
your local roles-directory 2. add the role to the hosts you want rclone installed to:
2. add the role to the hosts you want rclone installed to:
```yml ```
- hosts: rclone-hosts - hosts: rclone-hosts
roles: roles:
- rclone - rclone
``` ```
## Portable installation {#portable} ## Portable installation {#portable}
@@ -533,31 +491,29 @@ the locations that rclone will use.
To override them set the corresponding options (as command-line arguments, or as To override them set the corresponding options (as command-line arguments, or as
[environment variables](https://rclone.org/docs/#environment-variables)): [environment variables](https://rclone.org/docs/#environment-variables)):
- [--config](https://rclone.org/docs/#config-string)
- [--config](https://rclone.org/docs/#config-string) - [--cache-dir](https://rclone.org/docs/#cache-dir-string)
- [--cache-dir](https://rclone.org/docs/#cache-dir-string) - [--temp-dir](https://rclone.org/docs/#temp-dir-string)
- [--temp-dir](https://rclone.org/docs/#temp-dir-string)
## Autostart ## Autostart
After installing and configuring rclone, as described above, you are ready to use After installing and configuring rclone, as described above, you are ready to use rclone
rclone as an interactive command line utility. If your goal is to perform *periodic* as an interactive command line utility. If your goal is to perform *periodic* operations,
operations, such as a regular [sync](https://rclone.org/commands/rclone_sync/), you such as a regular [sync](https://rclone.org/commands/rclone_sync/), you will probably want
will probably want to configure your rclone command in your operating system's to configure your rclone command in your operating system's scheduler. If you need to
scheduler. If you need to expose *service*-like features, such as expose *service*-like features, such as [remote control](https://rclone.org/rc/),
[remote control](https://rclone.org/rc/), [GUI](https://rclone.org/gui/), [GUI](https://rclone.org/gui/), [serve](https://rclone.org/commands/rclone_serve/)
[serve](https://rclone.org/commands/rclone_serve/) or [mount](https://rclone.org/commands/rclone_mount/), or [mount](https://rclone.org/commands/rclone_mount/), you will often want an rclone
you will often want an rclone command always running in the background, and command always running in the background, and configuring it to run in a service infrastructure
configuring it to run in a service infrastructure may be a better option. Below may be a better option. Below are some alternatives on how to achieve this on
are some alternatives on how to achieve this on different operating systems. different operating systems.
NOTE: Before setting up autorun it is highly recommended that you have tested NOTE: Before setting up autorun it is highly recommended that you have tested your command
your command manually from a Command Prompt first. manually from a Command Prompt first.
### Autostart on Windows ### Autostart on Windows
The most relevant alternatives for autostart on Windows are: The most relevant alternatives for autostart on Windows are:
- Run at user log on using the Startup folder - Run at user log on using the Startup folder
- Run at user log on, at system startup or at schedule using Task Scheduler - Run at user log on, at system startup or at schedule using Task Scheduler
- Run at system startup using Windows service - Run at system startup using Windows service
@@ -567,23 +523,22 @@ The most relevant alternatives for autostart on Windows are:
Rclone is a console application, so if not starting from an existing Command Prompt, Rclone is a console application, so if not starting from an existing Command Prompt,
e.g. when starting rclone.exe from a shortcut, it will open a Command Prompt window. e.g. when starting rclone.exe from a shortcut, it will open a Command Prompt window.
When configuring rclone to run from task scheduler and windows service you are able When configuring rclone to run from task scheduler and windows service you are able
to set it to run hidden in background. From rclone version 1.54 you can also make to set it to run hidden in background. From rclone version 1.54 you can also make it
it run hidden from anywhere by adding option `--no-console` (it may still flash run hidden from anywhere by adding option `--no-console` (it may still flash briefly
briefly when the program starts). Since rclone normally writes information and any when the program starts). Since rclone normally writes information and any error
error messages to the console, you must redirect this to a file to be able to see messages to the console, you must redirect this to a file to be able to see it.
it. Rclone has a built-in option `--log-file` for that. Rclone has a built-in option `--log-file` for that.
Example command to run a sync in background: Example command to run a sync in background:
```
```bat
c:\rclone\rclone.exe sync c:\files remote:/files --no-console --log-file c:\rclone\logs\sync_files.txt c:\rclone\rclone.exe sync c:\files remote:/files --no-console --log-file c:\rclone\logs\sync_files.txt
``` ```
#### User account #### User account
As mentioned in the [mount](https://rclone.org/commands/rclone_mount/) documentation, As mentioned in the [mount](https://rclone.org/commands/rclone_mount/) documentation,
mounted drives created as Administrator are not visible to other accounts, not even mounted drives created as Administrator are not visible to other accounts, not even the
the account that was elevated as Administrator. By running the mount command as the account that was elevated as Administrator. By running the mount command as the
built-in `SYSTEM` user account, it will create drives accessible for everyone on built-in `SYSTEM` user account, it will create drives accessible for everyone on
the system. Both scheduled task and Windows service can be used to achieve this. the system. Both scheduled task and Windows service can be used to achieve this.
@@ -620,7 +575,8 @@ configure rclone to be started automatically in a highly configurable way, e.g.
periodically on a schedule, on user log on, or at system startup. It can run periodically on a schedule, on user log on, or at system startup. It can run
be configured to run as the current user, or for a mount command that needs to be configured to run as the current user, or for a mount command that needs to
be available to all users it can run as the `SYSTEM` user. be available to all users it can run as the `SYSTEM` user.
For technical information, see [Task Scheduler for developers](https://docs.microsoft.com/windows/win32/taskschd/task-scheduler-start-page). For technical information, see
https://docs.microsoft.com/windows/win32/taskschd/task-scheduler-start-page.
#### Run as service #### Run as service
@@ -629,16 +585,15 @@ your rclone command, as an alternative to scheduled task configured to run at st
##### Mount command built-in service integration ##### Mount command built-in service integration
For mount commands, rclone has a built-in Windows service integration via the For mount commands, rclone has a built-in Windows service integration via the third-party
third-party WinFsp library it uses. Registering as a regular Windows service WinFsp library it uses. Registering as a regular Windows service easy, as you just have to
easy, as you just have to execute the built-in PowerShell command `New-Service` execute the built-in PowerShell command `New-Service` (requires administrative privileges).
(requires administrative privileges).
Example of a PowerShell command that creates a Windows service for mounting Example of a PowerShell command that creates a Windows service for mounting
some `remote:/files` as drive letter `X:`, for *all* users (service will be some `remote:/files` as drive letter `X:`, for *all* users (service will be running as the
running as the local system account): local system account):
```pwsh ```
New-Service -Name Rclone -BinaryPathName 'c:\rclone\rclone.exe mount remote:/files X: --config c:\rclone\config\rclone.conf --log-file c:\rclone\logs\mount.txt' New-Service -Name Rclone -BinaryPathName 'c:\rclone\rclone.exe mount remote:/files X: --config c:\rclone\config\rclone.conf --log-file c:\rclone\logs\mount.txt'
``` ```
@@ -648,7 +603,7 @@ into its own launcher service, as kind of "child services". This has the additio
advantage that it also implements a network provider that integrates into advantage that it also implements a network provider that integrates into
Windows standard methods for managing network drives. This is currently not Windows standard methods for managing network drives. This is currently not
officially supported by Rclone, but with WinFsp version 2019.3 B2 / v1.5B2 or later officially supported by Rclone, but with WinFsp version 2019.3 B2 / v1.5B2 or later
it should be possible through path rewriting as described in [#3340](https://github.com/rclone/rclone/issues/3340). it should be possible through path rewriting as described [here](https://github.com/rclone/rclone/issues/3340).
##### Third-party service integration ##### Third-party service integration
@@ -660,15 +615,15 @@ customized response to different exit codes, with a GUI to configure everything
(although it can also be used from command line ). (although it can also be used from command line ).
There are also several other alternatives. To mention one more, There are also several other alternatives. To mention one more,
[WinSW](https://github.com/winsw/winsw), "Windows Service Wrapper", is worth checking [WinSW](https://github.com/winsw/winsw), "Windows Service Wrapper", is worth checking out.
out. It requires .NET Framework, but it is preinstalled on newer versions of Windows, It requires .NET Framework, but it is preinstalled on newer versions of Windows, and it
and it also provides alternative standalone distributions which includes necessary also provides alternative standalone distributions which includes necessary runtime (.NET 5).
runtime (.NET 5). WinSW is a command-line only utility, where you have to manually WinSW is a command-line only utility, where you have to manually create an XML file with
create an XML file with service configuration. This may be a drawback for some, but service configuration. This may be a drawback for some, but it can also be an advantage
it can also be an advantage as it is easy to back up and reuse the configuration as it is easy to back up and reuse the configuration
settings, without having go through manual steps in a GUI. One thing to note is that settings, without having go through manual steps in a GUI. One thing to note is that
by default it does not restart the service on error, one have to explicit enable by default it does not restart the service on error, one have to explicit enable this
this in the configuration file (via the "onfailure" parameter). in the configuration file (via the "onfailure" parameter).
### Autostart on Linux ### Autostart on Linux

View File

@@ -8,7 +8,7 @@ description: "Rclone Licence"
This is free software under the terms of the MIT license (check the This is free software under the terms of the MIT license (check the
COPYING file included with the source code). COPYING file included with the source code).
```text ```
Copyright (C) 2019 by Nick Craig-Wood https://www.craig-wood.com/nick/ Copyright (C) 2019 by Nick Craig-Wood https://www.craig-wood.com/nick/
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -29,3 +29,4 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE. THE SOFTWARE.
``` ```

View File

@@ -4,13 +4,13 @@ description: "Overview of cloud storage systems"
type: page type: page
--- ---
# Overview of cloud storage systems # Overview of cloud storage systems #
Each cloud storage system is slightly different. Rclone attempts to Each cloud storage system is slightly different. Rclone attempts to
provide a unified interface to them, but some underlying differences provide a unified interface to them, but some underlying differences
show through. show through.
## Features ## Features ##
Here is an overview of the major features of each cloud storage system. Here is an overview of the major features of each cloud storage system.
@@ -79,11 +79,9 @@ This is an SHA256 sum of all the 4 MiB block SHA256s.
³ WebDAV supports hashes when used with Fastmail Files, Owncloud and Nextcloud only. ³ WebDAV supports hashes when used with Fastmail Files, Owncloud and Nextcloud only.
⁴ WebDAV supports modtimes when used with Fastmail Files, Owncloud and Nextcloud ⁴ WebDAV supports modtimes when used with Fastmail Files, Owncloud and Nextcloud only.
only.
⁵ [QuickXorHash](https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash) ⁵ [QuickXorHash](https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash) is Microsoft's own hash.
is Microsoft's own hash.
⁶ Mail.ru uses its own modified SHA1 hash ⁶ Mail.ru uses its own modified SHA1 hash
@@ -112,7 +110,7 @@ top-level sum.
¹³ Uloz.to provides server-calculated MD5 hash upon file upload. MD5 and SHA256 ¹³ Uloz.to provides server-calculated MD5 hash upon file upload. MD5 and SHA256
hashes are client-calculated and stored as metadata fields. hashes are client-calculated and stored as metadata fields.
### Hash ### Hash ###
The cloud storage system supports various hash types of the objects. The cloud storage system supports various hash types of the objects.
The hashes are used when transferring data as an integrity check and The hashes are used when transferring data as an integrity check and
@@ -122,7 +120,7 @@ the `check` command.
To use the verify checksums when transferring between cloud storage To use the verify checksums when transferring between cloud storage
systems they must support a common hash type. systems they must support a common hash type.
### ModTime ### ModTime ###
Almost all cloud storage systems store some sort of timestamp Almost all cloud storage systems store some sort of timestamp
on objects, but several of them not something that is appropriate on objects, but several of them not something that is appropriate
@@ -166,7 +164,7 @@ means they do also support modtime-only operations.
Storage systems with `D` in the ModTime column means that the Storage systems with `D` in the ModTime column means that the
following symbols apply to directories as well as files. following symbols apply to directories as well as files.
### Case Insensitive ### Case Insensitive ###
If a cloud storage systems is case sensitive then it is possible to If a cloud storage systems is case sensitive then it is possible to
have two files which differ only in case, e.g. `file.txt` and have two files which differ only in case, e.g. `file.txt` and
@@ -180,16 +178,15 @@ matter how many times you run the sync it never completes fully.
The local filesystem and SFTP may or may not be case sensitive The local filesystem and SFTP may or may not be case sensitive
depending on OS. depending on OS.
- Windows - usually case insensitive, though case is preserved * Windows - usually case insensitive, though case is preserved
- OSX - usually case insensitive, though it is possible to format case sensitive * OSX - usually case insensitive, though it is possible to format case sensitive
- Linux - usually case sensitive, but there are case insensitive file systems * Linux - usually case sensitive, but there are case insensitive file systems (e.g. FAT formatted USB keys)
(e.g. FAT formatted USB keys)
Most of the time this doesn't cause any problems as people tend to Most of the time this doesn't cause any problems as people tend to
avoid files whose name differs only by case even on case sensitive avoid files whose name differs only by case even on case sensitive
systems. systems.
### Duplicate files ### Duplicate files ###
If a cloud storage system allows duplicate files then it can have two If a cloud storage system allows duplicate files then it can have two
objects with the same name. objects with the same name.
@@ -197,7 +194,7 @@ objects with the same name.
This confuses rclone greatly when syncing - use the `rclone dedupe` This confuses rclone greatly when syncing - use the `rclone dedupe`
command to rename or remove duplicates. command to rename or remove duplicates.
### Restricted filenames ### Restricted filenames ###
Some cloud storage systems might have restrictions on the characters Some cloud storage systems might have restrictions on the characters
that are usable in file or directory names. that are usable in file or directory names.
@@ -405,27 +402,20 @@ and to maintain backward compatibility, its behavior has not been changed.
To take a specific example, the FTP backend's default encoding is To take a specific example, the FTP backend's default encoding is
```sh --ftp-encoding "Slash,Del,Ctl,RightSpace,Dot"
--ftp-encoding "Slash,Del,Ctl,RightSpace,Dot"
```
However, let's say the FTP server is running on Windows and can't have However, let's say the FTP server is running on Windows and can't have
any of the invalid Windows characters in file names. You are backing any of the invalid Windows characters in file names. You are backing
up Linux servers to this FTP server which do have those characters in up Linux servers to this FTP server which do have those characters in
file names. So you would add the Windows set which are file names. So you would add the Windows set which are
```text Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot
Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot
```
to the existing ones, giving: to the existing ones, giving:
```text Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot,Del,RightSpace
Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot,Del,RightSpace
```
This can be specified using the `--ftp-encoding` flag or using an `encoding` This can be specified using the `--ftp-encoding` flag or using an `encoding` parameter in the config file.
parameter in the config file.
##### Encoding example: Windows ##### Encoding example: Windows
@@ -439,7 +429,7 @@ To avoid this you can change the set of characters rclone should convert
for the local filesystem, using command-line argument `--local-encoding`. for the local filesystem, using command-line argument `--local-encoding`.
Rclone's default behavior on Windows corresponds to Rclone's default behavior on Windows corresponds to
```sh ```
--local-encoding "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot" --local-encoding "Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot"
``` ```
@@ -447,12 +437,11 @@ If you want to use fullwidth characters ``, `` and `` in your filenames
without rclone changing them when uploading to a remote, then set the same as without rclone changing them when uploading to a remote, then set the same as
the default value but without `Colon,Question,Asterisk`: the default value but without `Colon,Question,Asterisk`:
```sh ```
--local-encoding "Slash,LtGt,DoubleQuote,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot" --local-encoding "Slash,LtGt,DoubleQuote,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot"
``` ```
Alternatively, you can disable the conversion of any characters with Alternatively, you can disable the conversion of any characters with `--local-encoding Raw`.
`--local-encoding Raw`.
Instead of using command-line argument `--local-encoding`, you may also set it Instead of using command-line argument `--local-encoding`, you may also set it
as [environment variable](/docs/#environment-variables) `RCLONE_LOCAL_ENCODING`, as [environment variable](/docs/#environment-variables) `RCLONE_LOCAL_ENCODING`,
@@ -465,7 +454,7 @@ it to your Windows filesystem, this will fail. These characters are not
valid in filenames on Windows, and you have told rclone not to work around valid in filenames on Windows, and you have told rclone not to work around
this by converting them to valid fullwidth variants. this by converting them to valid fullwidth variants.
### MIME Type ### MIME Type ###
MIME types (also known as media types) classify types of documents MIME types (also known as media types) classify types of documents
using a simple text classification, e.g. `text/html` or using a simple text classification, e.g. `text/html` or
@@ -501,7 +490,7 @@ The levels of metadata support are
See [the metadata docs](/docs/#metadata) for more info. See [the metadata docs](/docs/#metadata) for more info.
## Optional Features ## Optional Features ##
All rclone remotes support a base command set. Other features depend All rclone remotes support a base command set. Other features depend
upon backend-specific capabilities. upon backend-specific capabilities.
@@ -574,12 +563,12 @@ purging a directory inside a bucket, files are deleted individually.
⁵ Use the `--onedrive-delta` flag to enable. ⁵ Use the `--onedrive-delta` flag to enable.
### Purge ### Purge ###
This deletes a directory quicker than just deleting all the files in This deletes a directory quicker than just deleting all the files in
the directory. the directory.
### Copy ### Copy ###
Used when copying an object to and from the same remote. This known Used when copying an object to and from the same remote. This known
as a server-side copy so you can copy a file without downloading it as a server-side copy so you can copy a file without downloading it
@@ -589,7 +578,7 @@ and uploading it again. It is used if you use `rclone copy` or
If the server doesn't support `Copy` directly then for copy operations If the server doesn't support `Copy` directly then for copy operations
the file is downloaded then re-uploaded. the file is downloaded then re-uploaded.
### Move ### Move ###
Used when moving/renaming an object on the same remote. This is known Used when moving/renaming an object on the same remote. This is known
as a server-side move of a file. This is used in `rclone move` if the as a server-side move of a file. This is used in `rclone move` if the
@@ -599,13 +588,13 @@ If the server isn't capable of `Move` then rclone simulates it with
`Copy` then delete. If the server doesn't support `Copy` then rclone `Copy` then delete. If the server doesn't support `Copy` then rclone
will download the file and re-upload it. will download the file and re-upload it.
### DirMove ### DirMove ###
This is used to implement `rclone move` to move a directory if This is used to implement `rclone move` to move a directory if
possible. If it isn't then it will use `Move` on each file (which possible. If it isn't then it will use `Move` on each file (which
falls back to `Copy` then download and upload - see `Move` section). falls back to `Copy` then download and upload - see `Move` section).
### CleanUp ### CleanUp ###
This is used for emptying the trash for a remote by `rclone cleanup`. This is used for emptying the trash for a remote by `rclone cleanup`.
@@ -615,31 +604,31 @@ error.
‡‡ Note that while Box implements this it has to delete every file ‡‡ Note that while Box implements this it has to delete every file
individually so it will be slower than emptying the trash via the WebUI individually so it will be slower than emptying the trash via the WebUI
### ListR ### ListR ###
The remote supports a recursive list to list all the contents beneath The remote supports a recursive list to list all the contents beneath
a directory quickly. This enables the `--fast-list` flag to work. a directory quickly. This enables the `--fast-list` flag to work.
See the [rclone docs](/docs/#fast-list) for more details. See the [rclone docs](/docs/#fast-list) for more details.
### StreamUpload ### StreamUpload ###
Some remotes allow files to be uploaded without knowing the file size Some remotes allow files to be uploaded without knowing the file size
in advance. This allows certain operations to work without spooling the in advance. This allows certain operations to work without spooling the
file to local disk first, e.g. `rclone rcat`. file to local disk first, e.g. `rclone rcat`.
### MultithreadUpload ### MultithreadUpload ###
Some remotes allow transfers to the remote to be sent as chunks in Some remotes allow transfers to the remote to be sent as chunks in
parallel. If this is supported then rclone will use multi-thread parallel. If this is supported then rclone will use multi-thread
copying to transfer files much faster. copying to transfer files much faster.
### LinkSharing ### LinkSharing ###
Sets the necessary permissions on a file or folder and prints a link Sets the necessary permissions on a file or folder and prints a link
that allows others to access them, even if they don't have an account that allows others to access them, even if they don't have an account
on the particular cloud provider. on the particular cloud provider.
### About ### About ###
Rclone `about` prints quota information for a remote. Typical output Rclone `about` prints quota information for a remote. Typical output
includes bytes used, free, quota and in trash. includes bytes used, free, quota and in trash.
@@ -653,7 +642,7 @@ rclone union remote.
See [rclone about command](https://rclone.org/commands/rclone_about/) See [rclone about command](https://rclone.org/commands/rclone_about/)
### EmptyDir ### EmptyDir ###
The remote supports empty directories. See [Limitations](/bugs/#limitations) The remote supports empty directories. See [Limitations](/bugs/#limitations)
for details. Most Object/Bucket-based remotes do not support this. for details. Most Object/Bucket-based remotes do not support this.

View File

@@ -3,141 +3,71 @@ title: "Privacy Policy"
description: "Rclone Privacy Policy" description: "Rclone Privacy Policy"
--- ---
# Rclone Privacy Policy # Rclone Privacy Policy #
## What is this Privacy Policy for? ## What is this Privacy Policy for? ##
This privacy policy is for this website <https://rclone.org> and governs the This privacy policy is for this website https://rclone.org and governs the privacy of its users who choose to use it.
privacy of its users who choose to use it.
The policy sets out the different areas where user privacy is concerned and The policy sets out the different areas where user privacy is concerned and outlines the obligations & requirements of the users, the website and website owners. Furthermore the way this website processes, stores and protects user data and information will also be detailed within this policy.
outlines the obligations & requirements of the users, the website and website
owners. Furthermore the way this website processes, stores and protects user
data and information will also be detailed within this policy.
## The Website ## The Website ##
This website and its owners take a proactive approach to user privacy and This website and its owners take a proactive approach to user privacy and ensure the necessary steps are taken to protect the privacy of its users throughout their visiting experience. This website complies to all UK national laws and requirements for user privacy.
ensure the necessary steps are taken to protect the privacy of its users
throughout their visiting experience. This website complies to all UK national
laws and requirements for user privacy.
## Use of Cookies ## Use of Cookies ##
This website uses cookies to better the user's experience while visiting the This website uses cookies to better the user's experience while visiting the website. Where applicable this website uses a cookie control system allowing the user on their first visit to the website to allow or disallow the use of cookies on their computer / device. This complies with recent legislation requirements for websites to obtain explicit consent from users before leaving behind or reading files such as cookies on a user's computer / device.
website. Where applicable this website uses a cookie control system allowing
the user on their first visit to the website to allow or disallow the use of
cookies on their computer / device. This complies with recent legislation
requirements for websites to obtain explicit consent from users before leaving
behind or reading files such as cookies on a user's computer / device.
Cookies are small files saved to the user's computer's hard drive that track, Cookies are small files saved to the user's computer's hard drive that track, save and store information about the user's interactions and usage of the website. This allows the website, through its server to provide the users with a tailored experience within this website.
save and store information about the user's interactions and usage of the
website. This allows the website, through its server to provide the users with
a tailored experience within this website.
Users are advised that if they wish to deny the use and saving of cookies from Users are advised that if they wish to deny the use and saving of cookies from this website on to their computers hard drive they should take necessary steps within their web browsers security settings to block all cookies from this website and its external serving vendors.
this website on to their computers hard drive they should take necessary steps
within their web browsers security settings to block all cookies from this
website and its external serving vendors.
This website uses tracking software to monitor its visitors to better This website uses tracking software to monitor its visitors to better understand how they use it. This software is provided by Google Analytics which uses cookies to track visitor usage. The software will save a cookie to your computers hard drive in order to track and monitor your engagement and usage of the website, but will not store, save or collect personal information. You can read [Google's privacy policy here](https://www.google.com/privacy.html) for further information.
understand how they use it. This software is provided by Google Analytics which
uses cookies to track visitor usage. The software will save a cookie to your
computers hard drive in order to track and monitor your engagement and usage of
the website, but will not store, save or collect personal information. You can
read [Google's privacy policy here](https://www.google.com/privacy.html) for
further information.
Other cookies may be stored to your computers hard drive by external vendors Other cookies may be stored to your computers hard drive by external vendors when this website uses referral programs, sponsored links or adverts. Such cookies are used for conversion and referral tracking and typically expire after 30 days, though some may take longer. No personal information is stored, saved or collected.
when this website uses referral programs, sponsored links or adverts. Such
cookies are used for conversion and referral tracking and typically expire
after 30 days, though some may take longer. No personal information is stored,
saved or collected.
## Contact & Communication ## Contact & Communication ##
Users contacting this website and/or its owners do so at their own discretion Users contacting this website and/or its owners do so at their own discretion and provide any such personal details requested at their own risk. Your personal information is kept private and stored securely until a time it is no longer required or has no use, as detailed in the Data Protection Act 1998.
and provide any such personal details requested at their own risk. Your
personal information is kept private and stored securely until a time it is no
longer required or has no use, as detailed in the Data Protection Act 1998.
This website and its owners use any information submitted to provide you with This website and its owners use any information submitted to provide you with further information about the products / services they offer or to assist you in answering any questions or queries you may have submitted.
further information about the products / services they offer or to assist you
in answering any questions or queries you may have submitted.
## External Links ## External Links ##
Although this website only looks to include quality, safe and relevant external Although this website only looks to include quality, safe and relevant external links, users are advised to adopt a policy of caution before clicking any external web links mentioned throughout this website.
links, users are advised to adopt a policy of caution before clicking any external
web links mentioned throughout this website.
The owners of this website cannot guarantee or verify the contents of any The owners of this website cannot guarantee or verify the contents of any externally linked website despite their best efforts. Users should therefore note they click on external links at their own risk and this website and its owners cannot be held liable for any damages or implications caused by visiting any external links mentioned.
externally linked website despite their best efforts. Users should therefore
note they click on external links at their own risk and this website and its
owners cannot be held liable for any damages or implications caused by visiting
any external links mentioned.
## Adverts and Sponsored Links ## Adverts and Sponsored Links ##
This website may contain sponsored links and adverts. These will typically be This website may contain sponsored links and adverts. These will typically be served through our advertising partners, who may have detailed privacy policies relating directly to the adverts they serve.
served through our advertising partners, who may have detailed privacy
policies relating directly to the adverts they serve.
Clicking on any such adverts will send you to the advertisers website through a Clicking on any such adverts will send you to the advertisers website through a referral program which may use cookies and will track the number of referrals sent from this website. This may include the use of cookies which may in turn be saved on your computers hard drive. Users should therefore note they click on sponsored external links at their own risk and this website and its owners cannot be held liable for any damages or implications caused by visiting any external links mentioned.
referral program which may use cookies and will track the number of referrals
sent from this website. This may include the use of cookies which may in turn
be saved on your computers hard drive. Users should therefore note they click
on sponsored external links at their own risk and this website and its owners
cannot be held liable for any damages or implications caused by visiting any
external links mentioned.
### Social Media Platforms ### Social Media Platforms ##
Communication, engagement and actions taken through external social media Communication, engagement and actions taken through external social media platforms that this website and its owners participate on are subject to the terms and conditions as well as the privacy policies held with each social media platform respectively.
platforms that this website and its owners participate on are subject to the
terms and conditions as well as the privacy policies held with each social media
platform respectively.
Users are advised to use social media platforms wisely and communicate / engage Users are advised to use social media platforms wisely and communicate / engage upon them with due care and caution in regard to their own privacy and personal details. Neither this website nor its owners will ever ask for personal or sensitive information through social media platforms and encourage users wishing to discuss sensitive details to contact them through primary communication channels such as email.
upon them with due care and caution in regard to their own privacy and personal
details. Neither this website nor its owners will ever ask for personal or sensitive
information through social media platforms and encourage users wishing to
discuss sensitive details to contact them through primary communication channels
such as email.
This website may use social sharing buttons which help share web content This website may use social sharing buttons which help share web content directly from web pages to the social media platform in question. Users are advised before using such social sharing buttons that they do so at their own discretion and note that the social media platform may track and save your request to share a web page respectively through your social media platform account.
directly from web pages to the social media platform in question. Users are
advised before using such social sharing buttons that they do so at their own
discretion and note that the social media platform may track and save your
request to share a web page respectively through your social media platform
account.
## Use of Cloud API User Data ## Use of Cloud API User Data ##
Rclone is a command-line program to manage files on cloud storage. Its sole Rclone is a command-line program to manage files on cloud storage. Its sole purpose is to access and manipulate user content in the [supported](/overview/) cloud storage systems from a local machine of the end user. For accessing the user content via the cloud provider API, Rclone uses authentication mechanisms, such as OAuth or HTTP Cookies, depending on the particular cloud provider offerings. Use of these authentication mechanisms and user data is governed by the privacy policies mentioned in the [Resources & Further Information](/privacy/#resources-further-information) section and followed by the privacy policy of Rclone.
purpose is to access and manipulate user content in the [supported](/overview/)
cloud storage systems from a local machine of the end user. For accessing the
user content via the cloud provider API, Rclone uses authentication mechanisms,
such as OAuth or HTTP Cookies, depending on the particular cloud provider
offerings. Use of these authentication mechanisms and user data is governed by
the privacy policies mentioned in the [Resources & Further Information](/privacy/#resources-further-information)
section and followed by the privacy policy of Rclone.
- Rclone provides the end user with access to their files available in a storage * Rclone provides the end user with access to their files available in a storage system associated by the authentication credentials via the publicly exposed API of the storage system.
system associated by the authentication credentials via the publicly exposed API * Rclone allows storing the authentication credentials on the user machine in the local configuration file.
of the storage system. * Rclone does not share any user data with third parties.
- Rclone allows storing the authentication credentials on the user machine in the
local configuration file.
- Rclone does not share any user data with third parties.
## Resources & Further Information ## Resources & Further Information ##
- [Data Protection Act 1998](http://www.legislation.gov.uk/ukpga/1998/29/contents) * [Data Protection Act 1998](http://www.legislation.gov.uk/ukpga/1998/29/contents)
- [Privacy and Electronic Communications Regulations 2003](http://www.legislation.gov.uk/uksi/2003/2426/contents/made) * [Privacy and Electronic Communications Regulations 2003](http://www.legislation.gov.uk/uksi/2003/2426/contents/made)
- [Privacy and Electronic Communications Regulations 2003 - The Guide](https://ico.org.uk/for-organisations/guide-to-pecr/) * [Privacy and Electronic Communications Regulations 2003 - The Guide](https://ico.org.uk/for-organisations/guide-to-pecr/)
- [Twitter Privacy Policy](https://twitter.com/privacy) * [Twitter Privacy Policy](https://twitter.com/privacy)
- [Facebook Privacy Policy](https://www.facebook.com/about/privacy/) * [Facebook Privacy Policy](https://www.facebook.com/about/privacy/)
- [Google Privacy Policy](https://www.google.com/privacy.html) * [Google Privacy Policy](https://www.google.com/privacy.html)
- [Google API Services User Data Policy](https://developers.google.com/terms/api-services-user-data-policy) * [Google API Services User Data Policy](https://developers.google.com/terms/api-services-user-data-policy)
- [Sample Website Privacy Policy](http://www.jamieking.co.uk/resources/free_sample_privacy_policy.html) * [Sample Website Privacy Policy](http://www.jamieking.co.uk/resources/free_sample_privacy_policy.html)

View File

@@ -12,15 +12,14 @@ which can be used to remote control rclone using its API.
You can either use the [rc](#api-rc) command to access the API You can either use the [rc](#api-rc) command to access the API
or [use HTTP directly](#api-http). or [use HTTP directly](#api-http).
If you just want to run a remote control then see the [rcd](/commands/rclone_rcd/) If you just want to run a remote control then see the [rcd](/commands/rclone_rcd/) command.
command.
## Supported parameters ## Supported parameters
### --rc ### --rc
Flag to start the HTTP server to listen for remote requests. Flag to start the HTTP server to listen for remote requests.
### --rc-addr=IP ### --rc-addr=IP
IPaddress:Port or :Port to bind server to. (default "localhost:5572"). IPaddress:Port or :Port to bind server to. (default "localhost:5572").
@@ -72,11 +71,11 @@ Timeout for server writing data (default 1h0m0s).
### --rc-serve ### --rc-serve
Enable the serving of remote objects via the HTTP interface. This Enable the serving of remote objects via the HTTP interface. This
means objects will be accessible at `http://127.0.0.1:5572/` by default, means objects will be accessible at http://127.0.0.1:5572/ by default,
so you can browse to `http://127.0.0.1:5572/` or `http://127.0.0.1:5572/*` so you can browse to http://127.0.0.1:5572/ or http://127.0.0.1:5572/*
to see a listing of the remotes. Objects may be requested from to see a listing of the remotes. Objects may be requested from
remotes using this syntax `http://127.0.0.1:5572/[remote:path]/path/to/object` remotes using this syntax http://127.0.0.1:5572/[remote:path]/path/to/object
Default Off. Default Off.
@@ -103,9 +102,7 @@ Default Off.
### --rc-enable-metrics ### --rc-enable-metrics
Enable OpenMetrics/Prometheus compatible endpoint at `/metrics`. Enable OpenMetrics/Prometheus compatible endpoint at `/metrics`.
If more control over the metrics is desired (for example running it on a If more control over the metrics is desired (for example running it on a different port or with different auth) then endpoint can be enabled with the `--metrics-*` flags instead.
different port or with different auth) then endpoint can be enabled with
the `--metrics-*` flags instead.
Default Off. Default Off.
@@ -127,7 +124,7 @@ Default is IP address on which rc is running.
Set the URL to fetch the rclone-web-gui files from. Set the URL to fetch the rclone-web-gui files from.
Default <https://api.github.com/repos/rclone/rclone-webui-react/releases/latest>. Default https://api.github.com/repos/rclone/rclone-webui-react/releases/latest.
### --rc-web-gui-update ### --rc-web-gui-update
@@ -185,26 +182,26 @@ rc` command.
You can use it like this: You can use it like this:
```sh ```
$ rclone rc rc/noop param1=one param2=two $ rclone rc rc/noop param1=one param2=two
{ {
"param1": "one", "param1": "one",
"param2": "two" "param2": "two"
} }
``` ```
If the remote is running on a different URL than the default If the remote is running on a different URL than the default
`http://localhost:5572/`, use the `--url` option to specify it: `http://localhost:5572/`, use the `--url` option to specify it:
```sh ```
rclone rc --url http://some.remote:1234/ rc/noop $ rclone rc --url http://some.remote:1234/ rc/noop
``` ```
Or, if the remote is listening on a Unix socket, use the `--unix-socket` option Or, if the remote is listening on a Unix socket, use the `--unix-socket` option
instead: instead:
```sh ```
rclone rc --unix-socket /tmp/rclone.sock rc/noop $ rclone rc --unix-socket /tmp/rclone.sock rc/noop
``` ```
Run `rclone rc` on its own, without any commands, to see the help for the Run `rclone rc` on its own, without any commands, to see the help for the
@@ -216,19 +213,19 @@ remote server.
`rclone rc` also supports a `--json` flag which can be used to send `rclone rc` also supports a `--json` flag which can be used to send
more complicated input parameters. more complicated input parameters.
```sh ```
$ rclone rc --json '{ "p1": [1,"2",null,4], "p2": { "a":1, "b":2 } }' rc/noop $ rclone rc --json '{ "p1": [1,"2",null,4], "p2": { "a":1, "b":2 } }' rc/noop
{ {
"p1": [ "p1": [
1, 1,
"2", "2",
null, null,
4 4
], ],
"p2": { "p2": {
"a": 1, "a": 1,
"b": 2 "b": 2
} }
} }
``` ```
@@ -236,13 +233,13 @@ If the parameter being passed is an object then it can be passed as a
JSON string rather than using the `--json` flag which simplifies the JSON string rather than using the `--json` flag which simplifies the
command line. command line.
```sh ```
rclone rc operations/list fs=/tmp remote=test opt='{"showHash": true}' rclone rc operations/list fs=/tmp remote=test opt='{"showHash": true}'
``` ```
Rather than Rather than
```sh ```
rclone rc operations/list --json '{"fs": "/tmp", "remote": "test", "opt": {"showHash": true}}' rclone rc operations/list --json '{"fs": "/tmp", "remote": "test", "opt": {"showHash": true}}'
``` ```
@@ -269,50 +266,50 @@ response timing out.
Starting a job with the `_async` flag: Starting a job with the `_async` flag:
```sh ```
$ rclone rc --json '{ "p1": [1,"2",null,4], "p2": { "a":1, "b":2 }, "_async": true }' rc/noop $ rclone rc --json '{ "p1": [1,"2",null,4], "p2": { "a":1, "b":2 }, "_async": true }' rc/noop
{ {
"jobid": 2 "jobid": 2
} }
``` ```
Query the status to see if the job has finished. For more information Query the status to see if the job has finished. For more information
on the meaning of these return parameters see the `job/status` call. on the meaning of these return parameters see the `job/status` call.
```sh ```
$ rclone rc --json '{ "jobid":2 }' job/status $ rclone rc --json '{ "jobid":2 }' job/status
{ {
"duration": 0.000124163, "duration": 0.000124163,
"endTime": "2018-10-27T11:38:07.911245881+01:00", "endTime": "2018-10-27T11:38:07.911245881+01:00",
"error": "", "error": "",
"finished": true, "finished": true,
"id": 2, "id": 2,
"output": { "output": {
"_async": true, "_async": true,
"p1": [ "p1": [
1, 1,
"2", "2",
null, null,
4 4
], ],
"p2": { "p2": {
"a": 1, "a": 1,
"b": 2 "b": 2
} }
}, },
"startTime": "2018-10-27T11:38:07.911121728+01:00", "startTime": "2018-10-27T11:38:07.911121728+01:00",
"success": true "success": true
} }
``` ```
`job/list` can be used to show the running or recently completed jobs `job/list` can be used to show the running or recently completed jobs
```sh ```
$ rclone rc job/list $ rclone rc job/list
{ {
"jobids": [ "jobids": [
2 2
] ]
} }
``` ```
@@ -324,29 +321,21 @@ duration of an rc call only then pass in the `_config` parameter.
This should be in the same format as the `main` key returned by This should be in the same format as the `main` key returned by
[options/get](#options-get). [options/get](#options-get).
```sh rclone rc --loopback options/get blocks=main
rclone rc --loopback options/get blocks=main
```
You can see more help on these options with this command (see [the You can see more help on these options with this command (see [the
options blocks section](#option-blocks) for more info). options blocks section](#option-blocks) for more info).
```sh rclone rc --loopback options/info blocks=main
rclone rc --loopback options/info blocks=main
```
For example, if you wished to run a sync with the `--checksum` For example, if you wished to run a sync with the `--checksum`
parameter, you would pass this parameter in your JSON blob. parameter, you would pass this parameter in your JSON blob.
```json "_config":{"CheckSum": true}
"_config":{"CheckSum": true}
```
If using `rclone rc` this could be passed as If using `rclone rc` this could be passed as
```sh rclone rc sync/sync ... _config='{"CheckSum": true}'
rclone rc sync/sync ... _config='{"CheckSum": true}'
```
Any config parameters you don't set will inherit the global defaults Any config parameters you don't set will inherit the global defaults
which were set with command line flags or environment variables. which were set with command line flags or environment variables.
@@ -355,10 +344,8 @@ Note that it is possible to set some values as strings or integers -
see [data types](#data-types) for more info. Here is an example see [data types](#data-types) for more info. Here is an example
setting the equivalent of `--buffer-size` in string or integer format. setting the equivalent of `--buffer-size` in string or integer format.
```json "_config":{"BufferSize": "42M"}
"_config":{"BufferSize": "42M"} "_config":{"BufferSize": 44040192}
"_config":{"BufferSize": 44040192}
```
If you wish to check the `_config` assignment has worked properly then If you wish to check the `_config` assignment has worked properly then
calling `options/local` will show what the value got set to. calling `options/local` will show what the value got set to.
@@ -371,34 +358,24 @@ pass in the `_filter` parameter.
This should be in the same format as the `filter` key returned by This should be in the same format as the `filter` key returned by
[options/get](#options-get). [options/get](#options-get).
```sh rclone rc --loopback options/get blocks=filter
rclone rc --loopback options/get blocks=filter
```
You can see more help on these options with this command (see [the You can see more help on these options with this command (see [the
options blocks section](#option-blocks) for more info). options blocks section](#option-blocks) for more info).
```sh rclone rc --loopback options/info blocks=filter
rclone rc --loopback options/info blocks=filter
```
For example, if you wished to run a sync with these flags For example, if you wished to run a sync with these flags
```sh --max-size 1M --max-age 42s --include "a" --include "b"
--max-size 1M --max-age 42s --include "a" --include "b"
```
you would pass this parameter in your JSON blob. you would pass this parameter in your JSON blob.
```json "_filter":{"MaxSize":"1M", "IncludeRule":["a","b"], "MaxAge":"42s"}
"_filter":{"MaxSize":"1M", "IncludeRule":["a","b"], "MaxAge":"42s"}
```
If using `rclone rc` this could be passed as If using `rclone rc` this could be passed as
```sh rclone rc ... _filter='{"MaxSize":"1M", "IncludeRule":["a","b"], "MaxAge":"42s"}'
rclone rc ... _filter='{"MaxSize":"1M", "IncludeRule":["a","b"], "MaxAge":"42s"}'
```
Any filter parameters you don't set will inherit the global defaults Any filter parameters you don't set will inherit the global defaults
which were set with command line flags or environment variables. which were set with command line flags or environment variables.
@@ -407,10 +384,8 @@ Note that it is possible to set some values as strings or integers -
see [data types](#data-types) for more info. Here is an example see [data types](#data-types) for more info. Here is an example
setting the equivalent of `--buffer-size` in string or integer format. setting the equivalent of `--buffer-size` in string or integer format.
```json "_filter":{"MinSize": "42M"}
"_filter":{"MinSize": "42M"} "_filter":{"MinSize": 44040192}
"_filter":{"MinSize": 44040192}
```
If you wish to check the `_filter` assignment has worked properly then If you wish to check the `_filter` assignment has worked properly then
calling `options/local` will show what the value got set to. calling `options/local` will show what the value got set to.
@@ -426,11 +401,11 @@ value. This allows caller to group stats under their own name.
Stats for specific group can be accessed by passing `group` to `core/stats`: Stats for specific group can be accessed by passing `group` to `core/stats`:
```sh ```
$ rclone rc --json '{ "group": "job/1" }' core/stats $ rclone rc --json '{ "group": "job/1" }' core/stats
{ {
"speed": 12345 "speed": 12345
... ...
} }
``` ```
@@ -491,7 +466,7 @@ An example of this might be the `--log-level` flag. Note that the
`Name` of the option becomes the command line flag with `_` replaced `Name` of the option becomes the command line flag with `_` replaced
with `-`. with `-`.
```json ```
{ {
"Advanced": false, "Advanced": false,
"Default": 5, "Default": 5,
@@ -550,7 +525,7 @@ isn't specified then it defaults to the root of the remote.
For example this JSON is equivalent to `remote:/tmp` For example this JSON is equivalent to `remote:/tmp`
```json ```
{ {
"_name": "remote", "_name": "remote",
"_root": "/tmp" "_root": "/tmp"
@@ -559,7 +534,7 @@ For example this JSON is equivalent to `remote:/tmp`
And this is equivalent to `:sftp,host='example.com':/tmp` And this is equivalent to `:sftp,host='example.com':/tmp`
```json ```
{ {
"type": "sftp", "type": "sftp",
"host": "example.com", "host": "example.com",
@@ -569,7 +544,7 @@ And this is equivalent to `:sftp,host='example.com':/tmp`
And this is equivalent to `/tmp/dir` And this is equivalent to `/tmp/dir`
```json ```
{ {
"type": "local", "type": "local",
"_root": "/tmp/dir" "_root": "/tmp/dir"
@@ -2377,7 +2352,7 @@ If an error occurs then there will be an HTTP error status (e.g. 500)
and the body of the response will contain a JSON encoded error object, and the body of the response will contain a JSON encoded error object,
e.g. e.g.
```json ```
{ {
"error": "Expecting string value for key \"remote\" (was float64)", "error": "Expecting string value for key \"remote\" (was float64)",
"input": { "input": {
@@ -2389,8 +2364,7 @@ e.g.
} }
``` ```
The keys in the error response are: The keys in the error response are
- error - error string - error - error string
- input - the input parameters to the call - input - the input parameters to the call
- status - the HTTP status code - status - the HTTP status code
@@ -2399,43 +2373,42 @@ The keys in the error response are:
### CORS ### CORS
The sever implements basic CORS support and allows all origins for that. The sever implements basic CORS support and allows all origins for that.
The response to a preflight OPTIONS request will echo the requested The response to a preflight OPTIONS request will echo the requested "Access-Control-Request-Headers" back.
"Access-Control-Request-Headers" back.
### Using POST with URL parameters only ### Using POST with URL parameters only
```sh ```
curl -X POST 'http://localhost:5572/rc/noop?potato=1&sausage=2' curl -X POST 'http://localhost:5572/rc/noop?potato=1&sausage=2'
``` ```
Response Response
```json ```
{ {
"potato": "1", "potato": "1",
"sausage": "2" "sausage": "2"
} }
``` ```
Here is what an error response looks like: Here is what an error response looks like:
```sh ```
curl -X POST 'http://localhost:5572/rc/error?potato=1&sausage=2' curl -X POST 'http://localhost:5572/rc/error?potato=1&sausage=2'
``` ```
```json ```
{ {
"error": "arbitrary error on input map[potato:1 sausage:2]", "error": "arbitrary error on input map[potato:1 sausage:2]",
"input": { "input": {
"potato": "1", "potato": "1",
"sausage": "2" "sausage": "2"
} }
} }
``` ```
Note that curl doesn't return errors to the shell unless you use the `-f` option Note that curl doesn't return errors to the shell unless you use the `-f` option
```sh ```
$ curl -f -X POST 'http://localhost:5572/rc/error?potato=1&sausage=2' $ curl -f -X POST 'http://localhost:5572/rc/error?potato=1&sausage=2'
curl: (22) The requested URL returned error: 400 Bad Request curl: (22) The requested URL returned error: 400 Bad Request
$ echo $? $ echo $?
@@ -2444,68 +2417,68 @@ $ echo $?
### Using POST with a form ### Using POST with a form
```sh ```
curl --data "potato=1" --data "sausage=2" http://localhost:5572/rc/noop curl --data "potato=1" --data "sausage=2" http://localhost:5572/rc/noop
``` ```
Response Response
```json ```
{ {
"potato": "1", "potato": "1",
"sausage": "2" "sausage": "2"
} }
``` ```
Note that you can combine these with URL parameters too with the POST Note that you can combine these with URL parameters too with the POST
parameters taking precedence. parameters taking precedence.
```sh ```
curl --data "potato=1" --data "sausage=2" "http://localhost:5572/rc/noop?rutabaga=3&sausage=4" curl --data "potato=1" --data "sausage=2" "http://localhost:5572/rc/noop?rutabaga=3&sausage=4"
``` ```
Response Response
```json ```
{ {
"potato": "1", "potato": "1",
"rutabaga": "3", "rutabaga": "3",
"sausage": "4" "sausage": "4"
} }
``` ```
### Using POST with a JSON blob ### Using POST with a JSON blob
```sh ```
curl -H "Content-Type: application/json" -X POST -d '{"potato":2,"sausage":1}' http://localhost:5572/rc/noop curl -H "Content-Type: application/json" -X POST -d '{"potato":2,"sausage":1}' http://localhost:5572/rc/noop
``` ```
response response
```json ```
{ {
"password": "xyz", "password": "xyz",
"username": "xyz" "username": "xyz"
} }
``` ```
This can be combined with URL parameters too if required. The JSON This can be combined with URL parameters too if required. The JSON
blob takes precedence. blob takes precedence.
```sh ```
curl -H "Content-Type: application/json" -X POST -d '{"potato":2,"sausage":1}' 'http://localhost:5572/rc/noop?rutabaga=3&potato=4' curl -H "Content-Type: application/json" -X POST -d '{"potato":2,"sausage":1}' 'http://localhost:5572/rc/noop?rutabaga=3&potato=4'
``` ```
```json ```
{ {
"potato": 2, "potato": 2,
"rutabaga": "3", "rutabaga": "3",
"sausage": 1 "sausage": 1
} }
``` ```
## Debugging rclone with pprof ## Debugging rclone with pprof ##
If you use the `--rc` flag this will also enable the use of the go If you use the `--rc` flag this will also enable the use of the go
profiling tools on the same port. profiling tools on the same port.
@@ -2516,16 +2489,14 @@ To use these, first [install go](https://golang.org/doc/install).
To profile rclone's memory use you can run: To profile rclone's memory use you can run:
```sh go tool pprof -web http://localhost:5572/debug/pprof/heap
go tool pprof -web http://localhost:5572/debug/pprof/heap
```
This should open a page in your browser showing what is using what This should open a page in your browser showing what is using what
memory. memory.
You can also use the `-text` flag to produce a textual summary You can also use the `-text` flag to produce a textual summary
```sh ```
$ go tool pprof -text http://localhost:5572/debug/pprof/heap $ go tool pprof -text http://localhost:5572/debug/pprof/heap
Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
flat flat% sum% cum cum% flat flat% sum% cum cum%
@@ -2550,15 +2521,13 @@ alive which should have been garbage collected.
See all active go routines using See all active go routines using
```sh curl http://localhost:5572/debug/pprof/goroutine?debug=1
curl http://localhost:5572/debug/pprof/goroutine?debug=1
```
Or go to <http://localhost:5572/debug/pprof/goroutine?debug=1> in your browser. Or go to http://localhost:5572/debug/pprof/goroutine?debug=1 in your browser.
### Other profiles to look at ### Other profiles to look at
You can see a summary of profiles available at <http://localhost:5572/debug/pprof/> You can see a summary of profiles available at http://localhost:5572/debug/pprof/
Here is how to use some of them: Here is how to use some of them:
@@ -2567,14 +2536,15 @@ Here is how to use some of them:
- 30-second CPU profile: `go tool pprof http://localhost:5572/debug/pprof/profile` - 30-second CPU profile: `go tool pprof http://localhost:5572/debug/pprof/profile`
- 5-second execution trace: `wget http://localhost:5572/debug/pprof/trace?seconds=5` - 5-second execution trace: `wget http://localhost:5572/debug/pprof/trace?seconds=5`
- Goroutine blocking profile - Goroutine blocking profile
- Enable first with: `rclone rc debug/set-block-profile-rate rate=1` ([docs](#debug-set-block-profile-rate)) - Enable first with: `rclone rc debug/set-block-profile-rate rate=1` ([docs](#debug-set-block-profile-rate))
- `go tool pprof http://localhost:5572/debug/pprof/block` - `go tool pprof http://localhost:5572/debug/pprof/block`
- Contended mutexes: - Contended mutexes:
- Enable first with: `rclone rc debug/set-mutex-profile-fraction rate=1` ([docs](#debug-set-mutex-profile-fraction)) - Enable first with: `rclone rc debug/set-mutex-profile-fraction rate=1` ([docs](#debug-set-mutex-profile-fraction))
- `go tool pprof http://localhost:5572/debug/pprof/mutex` - `go tool pprof http://localhost:5572/debug/pprof/mutex`
See the [net/http/pprof docs](https://golang.org/pkg/net/http/pprof/) See the [net/http/pprof docs](https://golang.org/pkg/net/http/pprof/)
for more info on how to use the profiling and for a general overview for more info on how to use the profiling and for a general overview
see [the Go team's blog post on profiling go programs](https://blog.golang.org/profiling-go-programs). see [the Go team's blog post on profiling go programs](https://blog.golang.org/profiling-go-programs).
The profiling hook is [zero overhead unless it is used](https://stackoverflow.com/q/26545159/164234). The profiling hook is [zero overhead unless it is used](https://stackoverflow.com/q/26545159/164234).

View File

@@ -29,7 +29,6 @@ The S3 backend can be used with a number of different providers:
{{< provider name="MEGA S4 Object Storage" home="https://mega.io/objectstorage" config="/s3/#mega" >}} {{< provider name="MEGA S4 Object Storage" home="https://mega.io/objectstorage" config="/s3/#mega" >}}
{{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}} {{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}}
{{< provider name="Outscale" home="https://en.outscale.com/storage/outscale-object-storage/" config="/s3/#outscale" >}} {{< provider name="Outscale" home="https://en.outscale.com/storage/outscale-object-storage/" config="/s3/#outscale" >}}
{{< provider name="OVHcloud" home="https://www.ovhcloud.com/en/public-cloud/object-storage/" config="/s3/#ovhcloud" >}}
{{< provider name="Petabox" home="https://petabox.io/" config="/s3/#petabox" >}} {{< provider name="Petabox" home="https://petabox.io/" config="/s3/#petabox" >}}
{{< provider name="Pure Storage FlashBlade" home="https://www.purestorage.com/products/unstructured-data-storage.html" config="/s3/#pure-storage-flashblade" >}} {{< provider name="Pure Storage FlashBlade" home="https://www.purestorage.com/products/unstructured-data-storage.html" config="/s3/#pure-storage-flashblade" >}}
{{< provider name="Qiniu Cloud Object Storage (Kodo)" home="https://www.qiniu.com/en/products/kodo" config="/s3/#qiniu" >}} {{< provider name="Qiniu Cloud Object Storage (Kodo)" home="https://www.qiniu.com/en/products/kodo" config="/s3/#qiniu" >}}
@@ -3612,206 +3611,6 @@ d) Delete this remote
y/e/d> y y/e/d> y
``` ```
### OVHcloud {#ovhcloud}
[OVHcloud Object Storage](https://www.ovhcloud.com/en-ie/public-cloud/object-storage/)
is an S3-compatible general-purpose object storage platform available in all OVHcloud regions.
To use the platform, you will need an access key and secret key. To know more about it and how
to interact with the platform, take a look at the [documentation](https://ovh.to/8stqhuo).
Here is an example of making an OVHcloud Object Storage configuration with `rclone config`:
```
No remotes found, make a new one?
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
Enter name for new remote.
name> ovhcloud-rbx
Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
[...]
XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, ChinaMobile, Cloudflare, DigitalOcean, Dreamhost, GCS, HuaweiOBS, IBMCOS, IDrive, IONOS, LyveCloud, Leviia, Liara, Linode, Magalu, Minio, Netease, Outscale, OVHcloud, Petabox, RackCorp, Rclone, Scaleway, SeaweedFS, Selectel, StackPath, Storj, Synology, TencentCOS, Wasabi, Qiniu and others
\ (s3)
[...]
Storage> s3
Option provider.
Choose your S3 provider.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
[...]
XX / OVHcloud Object Storage
\ (OVHcloud)
[...]
provider> OVHcloud
Option env_auth.
Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
Only applies if access_key_id and secret_access_key is blank.
Choose a number from below, or type in your own boolean value (true or false).
Press Enter for the default (false).
1 / Enter AWS credentials in the next step.
\ (false)
2 / Get AWS credentials from the environment (env vars or IAM).
\ (true)
env_auth> 1
Option access_key_id.
AWS Access Key ID.
Leave blank for anonymous access or runtime credentials.
Enter a value. Press Enter to leave empty.
access_key_id> my_access
Option secret_access_key.
AWS Secret Access Key (password).
Leave blank for anonymous access or runtime credentials.
Enter a value. Press Enter to leave empty.
secret_access_key> my_secret
Option region.
Region where your bucket will be created and your data stored.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
1 / Gravelines, France
\ (gra)
2 / Roubaix, France
\ (rbx)
3 / Strasbourg, France
\ (sbg)
4 / Paris, France (3AZ)
\ (eu-west-par)
5 / Frankfurt, Germany
\ (de)
6 / London, United Kingdom
\ (uk)
7 / Warsaw, Poland
\ (waw)
8 / Beauharnois, Canada
\ (bhs)
9 / Toronto, Canada
\ (ca-east-tor)
10 / Singapore
\ (sgp)
11 / Sydney, Australia
\ (ap-southeast-syd)
12 / Mumbai, India
\ (ap-south-mum)
13 / Vint Hill, Virginia, USA
\ (us-east-va)
14 / Hillsboro, Oregon, USA
\ (us-west-or)
15 / Roubaix, France (Cold Archive)
\ (rbx-archive)
region> 2
Option endpoint.
Endpoint for OVHcloud Object Storage.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
1 / OVHcloud Gravelines, France
\ (s3.gra.io.cloud.ovh.net)
2 / OVHcloud Roubaix, France
\ (s3.rbx.io.cloud.ovh.net)
3 / OVHcloud Strasbourg, France
\ (s3.sbg.io.cloud.ovh.net)
4 / OVHcloud Paris, France (3AZ)
\ (s3.eu-west-par.io.cloud.ovh.net)
5 / OVHcloud Frankfurt, Germany
\ (s3.de.io.cloud.ovh.net)
6 / OVHcloud London, United Kingdom
\ (s3.uk.io.cloud.ovh.net)
7 / OVHcloud Warsaw, Poland
\ (s3.waw.io.cloud.ovh.net)
8 / OVHcloud Beauharnois, Canada
\ (s3.bhs.io.cloud.ovh.net)
9 / OVHcloud Toronto, Canada
\ (s3.ca-east-tor.io.cloud.ovh.net)
10 / OVHcloud Singapore
\ (s3.sgp.io.cloud.ovh.net)
11 / OVHcloud Sydney, Australia
\ (s3.ap-southeast-syd.io.cloud.ovh.net)
12 / OVHcloud Mumbai, India
\ (s3.ap-south-mum.io.cloud.ovh.net)
13 / OVHcloud Vint Hill, Virginia, USA
\ (s3.us-east-va.io.cloud.ovh.us)
14 / OVHcloud Hillsboro, Oregon, USA
\ (s3.us-west-or.io.cloud.ovh.us)
15 / OVHcloud Roubaix, France (Cold Archive)
\ (s3.rbx-archive.io.cloud.ovh.net)
endpoint> 2
Option acl.
Canned ACL used when creating buckets and storing or copying objects.
This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too.
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
Note that this ACL is applied when server-side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.
If the acl is an empty string then no X-Amz-Acl: header is added and
the default (private) will be used.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
/ Owner gets FULL_CONTROL.
1 | No one else has access rights (default).
\ (private)
/ Owner gets FULL_CONTROL.
2 | The AllUsers group gets READ access.
\ (public-read)
/ Owner gets FULL_CONTROL.
3 | The AllUsers group gets READ and WRITE access.
| Granting this on a bucket is generally not recommended.
\ (public-read-write)
/ Owner gets FULL_CONTROL.
4 | The AuthenticatedUsers group gets READ access.
\ (authenticated-read)
/ Object owner gets FULL_CONTROL.
5 | Bucket owner gets READ access.
| If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
\ (bucket-owner-read)
/ Both the object owner and the bucket owner get FULL_CONTROL over the object.
6 | If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
\ (bucket-owner-full-control)
acl> 1
Edit advanced config?
y) Yes
n) No (default)
y/n> n
Configuration complete.
Options:
- type: s3
- provider: OVHcloud
- access_key_id: my_access
- secret_access_key: my_secret
- region: rbx
- endpoint: s3.rbx.io.cloud.ovh.net
- acl: private
Keep this "ovhcloud-rbx" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
```
Your configuration file should now look like this:
```
[ovhcloud-rbx]
type = s3
provider = OVHcloud
access_key_id = my_access
secret_access_key = my_secret
region = rbx
endpoint = s3.rbx.io.cloud.ovh.net
acl = private
```
### Qiniu Cloud Object Storage (Kodo) {#qiniu} ### Qiniu Cloud Object Storage (Kodo) {#qiniu}
[Qiniu Cloud Object Storage (Kodo)](https://www.qiniu.com/en/products/kodo), a completely independent-researched core technology which is proven by repeated customer experience has occupied absolute leading market leader position. Kodo can be widely applied to mass data management. [Qiniu Cloud Object Storage (Kodo)](https://www.qiniu.com/en/products/kodo), a completely independent-researched core technology which is proven by repeated customer experience has occupied absolute leading market leader position. Kodo can be widely applied to mass data management.

View File

@@ -62,6 +62,4 @@ Thank you very much to our sponsors:
{{< sponsor src="/img/logos/sia.svg" width="200" height="200" title="Visit our sponsor sia" link="https://sia.tech">}} {{< sponsor src="/img/logos/sia.svg" width="200" height="200" title="Visit our sponsor sia" link="https://sia.tech">}}
{{< sponsor src="/img/logos/route4me.svg" width="400" height="200" title="Visit our sponsor Route4Me" link="https://route4me.com/">}} {{< sponsor src="/img/logos/route4me.svg" width="400" height="200" title="Visit our sponsor Route4Me" link="https://route4me.com/">}}
{{< sponsor src="/img/logos/rcloneview.svg" width="300" height="200" title="Visit our sponsor RcloneView" link="https://rcloneview.com/">}} {{< sponsor src="/img/logos/rcloneview.svg" width="300" height="200" title="Visit our sponsor RcloneView" link="https://rcloneview.com/">}}
{{< sponsor src="/img/logos/rcloneui.svg" width="300" height="200" title="Visit our sponsor RcloneUI" link="https://rcloneui.com">}}
{{< sponsor src="/img/logos/filelu-rclone.svg" width="330" height="200" title="Visit our sponsor FileLu" link="https://filelu.com/">}} {{< sponsor src="/img/logos/filelu-rclone.svg" width="330" height="200" title="Visit our sponsor FileLu" link="https://filelu.com/">}}
{{< sponsor src="/img/logos/torbox.png" width="200" height="200" title="Visit our sponsor TORBOX" link="https://www.torbox.app/">}}

View File

@@ -11,7 +11,7 @@ Commercial implementations of that being:
* [Rackspace Cloud Files](https://www.rackspace.com/cloud/files/) * [Rackspace Cloud Files](https://www.rackspace.com/cloud/files/)
* [Memset Memstore](https://www.memset.com/cloud/storage/) * [Memset Memstore](https://www.memset.com/cloud/storage/)
* [OVH Object Storage](https://www.ovhcloud.com/en/public-cloud/object-storage/) * [OVH Object Storage](https://www.ovh.co.uk/public-cloud/storage/object-storage/)
* [Oracle Cloud Storage](https://docs.oracle.com/en-us/iaas/integration/doc/configure-object-storage.html) * [Oracle Cloud Storage](https://docs.oracle.com/en-us/iaas/integration/doc/configure-object-storage.html)
* [Blomp Cloud Storage](https://www.blomp.com/cloud-storage/) * [Blomp Cloud Storage](https://www.blomp.com/cloud-storage/)
* [IBM Bluemix Cloud ObjectStorage Swift](https://console.bluemix.net/docs/infrastructure/objectstorage-swift/index.html) * [IBM Bluemix Cloud ObjectStorage Swift](https://console.bluemix.net/docs/infrastructure/objectstorage-swift/index.html)

View File

@@ -6,14 +6,6 @@
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no"> <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<meta name="description" content="{{ .Description }}"> <meta name="description" content="{{ .Description }}">
<meta name="author" content="Nick Craig-Wood"> <meta name="author" content="Nick Craig-Wood">
<meta property="og:site_name" content="Rclone" />
<meta property="og:type" content="website" />
<meta property="og:image" content="{{ "/img/rclone-1200x630.png" | absURL }}">
<meta property="og:image:width" content="1200">
<meta property="og:image:height" content="630">
<meta property="og:url" content="{{ .Permalink }}" />
<meta property="og:title" content="{{ .Title }}" />
<meta property="og:description" content="{{ .Description }}" />
<link rel="shortcut icon" type="image/png" href="/img/rclone-32x32.png"/> <link rel="shortcut icon" type="image/png" href="/img/rclone-32x32.png"/>
<script defer data-domain="rclone.org" src="https://weblog.rclone.org/js/script.js"></script> <script defer data-domain="rclone.org" src="https://weblog.rclone.org/js/script.js"></script>
<title>{{ block "title" . }}{{ .Title }}{{ end }}</title> <title>{{ block "title" . }}{{ .Title }}{{ end }}</title>

View File

@@ -1,16 +1,17 @@
{{ if and (gt .WordCount 200 ) (not (.Params.notoc)) }} {{ if and (gt .WordCount 200 ) (not (.Params.notoc)) }}
<div class="card"> <div class="card">
<div class="card-header"> <div class="card-header" style="padding: 5px 10px;">
Contents Contents
</div> </div>
<div class="card-body card-body-padded"> <div class="card-body">
{{ .TableOfContents }} {{ .TableOfContents }}
<p></p>
</div> </div>
</div> </div>
{{end}} {{end}}
<div class="card"> <div class="card">
<div class="card-header"> <div class="card-header" style="padding: 5px 15px;">
Platinum Sponsor Platinum Sponsor
</div> </div>
<div class="card-body"> <div class="card-body">
@@ -19,7 +20,7 @@
</div> </div>
<div class="card"> <div class="card">
<div class="card-header"> <div class="card-header" style="padding: 5px 15px;">
Gold Sponsor Gold Sponsor
</div> </div>
<div class="card-body"> <div class="card-body">
@@ -28,7 +29,7 @@
</div> </div>
<div class="card"> <div class="card">
<div class="card-header"> <div class="card-header" style="padding: 5px 15px;">
Gold Sponsor Gold Sponsor
</div> </div>
<div class="card-body"> <div class="card-body">
@@ -36,18 +37,9 @@
</div> </div>
</div> </div>
<div class="card">
<div class="card-header">
Gold Sponsor
</div>
<div class="card-body">
<a href="https://mega.io/objectstorage?utm_source=rclone&utm_medium=referral&utm_campaign=rclone-mega-s4&mct=rclonepromo" target="_blank" rel="noopener" title="MEGA S4: New S3 compatible object storage. High scale. Low cost. Free egress."><img style="max-width: 100%; height: auto;" src="/img/logos/mega-s4.svg"></a><br />
</div>
</div>
{{if .IsHome}} {{if .IsHome}}
<div class="card"> <div class="card">
<div class="card-header"> <div class="card-header" style="padding: 5px 15px;">
Silver Sponsor Silver Sponsor
</div> </div>
<div class="card-body"> <div class="card-body">
@@ -55,7 +47,7 @@
</div> </div>
</div> </div>
<div class="card"> <div class="card">
<div class="card-header"> <div class="card-header" style="padding: 5px 15px;">
Silver Sponsor Silver Sponsor
</div> </div>
<div class="card-body"> <div class="card-body">
@@ -65,31 +57,31 @@
{{end}} {{end}}
<div class="card"> <div class="card">
<div class="card-header"> <div class="card-header" style="padding: 5px 10px;">
Share and Enjoy Share and Enjoy
</div> </div>
<div class="card-body card-body-padded"> <div class="card-body">
<div class="menu"> <p class="menu">
<!-- Non tracking sharing links from: https://sharingbuttons.io/ --> <!-- Non tracking sharing links from: https://sharingbuttons.io/ -->
<i class="fab fa-twitter fa-fw" aria-hidden="true"></i> <a href="https://twitter.com/intent/tweet/?text=rclone%20-%20rsync%20for%20cloud%20storage%20from%20%40njcw&amp;url=https%3A%2F%2Frclone.org" target="_blank" rel="noopener" aria-label="Share on Twitter">Twitter</a><br /> <i class="fab fa-twitter fa-fw" aria-hidden="true"></i> <a href="https://twitter.com/intent/tweet/?text=rclone%20-%20rsync%20for%20cloud%20storage%20from%20%40njcw&amp;url=https%3A%2F%2Frclone.org" target="_blank" rel="noopener" aria-label="Share on Twitter">Twitter</a><br />
<i class="fab fa-facebook fa-fw" aria-hidden="true"></i> <a href="https://facebook.com/sharer/sharer.php?u=https%3A%2F%2Frclone.org" target="_blank" rel="noopener" aria-label="Share on Facebook">Facebook</a><br /> <i class="fab fa-facebook fa-fw" aria-hidden="true"></i> <a href="https://facebook.com/sharer/sharer.php?u=https%3A%2F%2Frclone.org" target="_blank" rel="noopener" aria-label="Share on Facebook">Facebook</a><br />
<i class="fab fa-reddit fa-fw" aria-hidden="true"></i> <a href="https://reddit.com/submit/?url=https%3A%2F%2Frclone.org&amp;resubmit=true&amp;title=rclone%20-%20rsync%20for%20cloud%20storage" target="_blank" rel="noopener" aria-label="Share on Reddit">Reddit</a><br /> <i class="fab fa-reddit fa-fw" aria-hidden="true"></i> <a href="https://reddit.com/submit/?url=https%3A%2F%2Frclone.org&amp;resubmit=true&amp;title=rclone%20-%20rsync%20for%20cloud%20storage" target="_blank" rel="noopener" aria-label="Share on Reddit">Reddit</a><br />
<iframe src="//ghbtns.com/github-btn.html?user=rclone&amp;repo=rclone&amp;type=star&amp;count=true" allowtransparency="true" frameborder="0" scrolling="no" width="120" height="20"></iframe> <iframe src="//ghbtns.com/github-btn.html?user=rclone&amp;repo=rclone&amp;type=star&amp;count=true" allowtransparency="true" frameborder="0" scrolling="no" width="120" height="20"></iframe>
</div> </p>
</div> </div>
</div> </div>
<div class="card"> <div class="card">
<div class="card-header"> <div class="card-header" style="padding: 5px 15px;">
Links Links
</div> </div>
<div class="card-body card-body-padded"> <div class="card-body">
<div class="menu"> <p class="menu">
<i class="fa fa-comments fa-fw" aria-hidden="true"></i> <a href="https://forum.rclone.org">Rclone forum</a><br /> <i class="fa fa-comments fa-fw" aria-hidden="true"></i> <a href="https://forum.rclone.org">Rclone forum</a><br />
<i class="fab fa-github fa-fw" aria-hidden="true"></i> <a href="https://github.com/rclone/rclone">GitHub project</a><br /> <i class="fab fa-github fa-fw" aria-hidden="true"></i> <a href="https://github.com/rclone/rclone">GitHub project</a><br />
<i class="fa fa-book fa-fw" aria-hidden="true"></i> <a href="https://github.com/rclone/rclone/wiki">Rclone Wiki</a><br /> <i class="fa fa-book fa-fw" aria-hidden="true"></i> <a href="https://github.com/rclone/rclone/wiki">Rclone Wiki</a><br />
<i class="fa fa-heart heart fa-fw" aria-hidden="true"></i> <a href="/sponsor/">Sponsor</a><br /> <i class="fa fa-heart heart fa-fw" aria-hidden="true"></i> <a href="/sponsor/">Sponsor</a><br />
<i class="fab fa-twitter fa-fw" aria-hidden="true"></i> <a href="https://twitter.com/njcw">@njcw</a> <i class="fab fa-twitter fa-fw" aria-hidden="true"></i> <a href="https://twitter.com/njcw">@njcw</a>
</div> </p>
</div> </div>
</div> </div>

View File

@@ -85,7 +85,6 @@
<a class="dropdown-item" href="/linkbox/"><i class="fa fa-infinity fa-fw"></i> Linkbox</a> <a class="dropdown-item" href="/linkbox/"><i class="fa fa-infinity fa-fw"></i> Linkbox</a>
<a class="dropdown-item" href="/mailru/"><i class="fa fa-at fa-fw"></i> Mail.ru Cloud</a> <a class="dropdown-item" href="/mailru/"><i class="fa fa-at fa-fw"></i> Mail.ru Cloud</a>
<a class="dropdown-item" href="/mega/"><i class="fa fa-archive fa-fw"></i> Mega</a> <a class="dropdown-item" href="/mega/"><i class="fa fa-archive fa-fw"></i> Mega</a>
<a class="dropdown-item" href="/s3/#mega"><i class="fa fa-archive fa-fw"></i> Mega S4</a>
<a class="dropdown-item" href="/memory/"><i class="fas fa-memory fa-fw"></i> Memory</a> <a class="dropdown-item" href="/memory/"><i class="fas fa-memory fa-fw"></i> Memory</a>
<a class="dropdown-item" href="/azureblob/"><i class="fab fa-windows fa-fw"></i> Microsoft Azure Blob Storage</a> <a class="dropdown-item" href="/azureblob/"><i class="fab fa-windows fa-fw"></i> Microsoft Azure Blob Storage</a>
<a class="dropdown-item" href="/azurefiles/"><i class="fab fa-windows fa-fw"></i> Microsoft Azure Files Storage</a> <a class="dropdown-item" href="/azurefiles/"><i class="fab fa-windows fa-fw"></i> Microsoft Azure Files Storage</a>

View File

@@ -63,19 +63,11 @@ h1, h2, h3, h4, h5, h6 {
/* Fix spacing of info boxes */ /* Fix spacing of info boxes */
.card { .card {
margin-top: 0.5rem; margin-top: 0.75rem;
}
/* less padding on titles */
.card-header {
padding: 5px 15px;
} }
/* less padding around info box items */ /* less padding around info box items */
.card-body { .card-body {
padding: 0px; padding: 0.5rem;
}
/* more padding around info box items */
.card-body-padded {
padding: 10px 10px 10px 10px;
} }
/* make menus longer */ /* make menus longer */

Binary file not shown.

Before

Width:  |  Height:  |  Size: 36 KiB

View File

@@ -391,7 +391,6 @@ func (s *StatsInfo) _stopAverageLoop() {
if s.average.started { if s.average.started {
s.average.cancel() s.average.cancel()
s.average.stopped.Wait() s.average.stopped.Wait()
s.average.started = false
} }
} }

View File

@@ -555,11 +555,6 @@ var ConfigOptionsInfo = Options{{
Default: []string{}, Default: []string{},
Help: "Transform paths during the copy process.", Help: "Transform paths during the copy process.",
Groups: "Copy", Groups: "Copy",
}, {
Name: "http_proxy",
Default: "",
Help: "HTTP proxy URL.",
Groups: "Networking",
}} }}
// ConfigInfo is filesystem config options // ConfigInfo is filesystem config options
@@ -672,7 +667,6 @@ type ConfigInfo struct {
MetadataMapper SpaceSepList `config:"metadata_mapper"` MetadataMapper SpaceSepList `config:"metadata_mapper"`
MaxConnections int `config:"max_connections"` MaxConnections int `config:"max_connections"`
NameTransform []string `config:"name_transform"` NameTransform []string `config:"name_transform"`
HTTPProxy string `config:"http_proxy"`
} }
func init() { func init() {

View File

@@ -51,7 +51,6 @@ func Decrypt(b io.ReadSeeker) (io.Reader, error) {
ctx := context.Background() ctx := context.Background()
ci := fs.GetConfig(ctx) ci := fs.GetConfig(ctx)
var usingPasswordCommand bool var usingPasswordCommand bool
var usingEnvPassword bool
// Find first non-empty line // Find first non-empty line
r := bufio.NewReader(b) r := bufio.NewReader(b)
@@ -100,18 +99,15 @@ func Decrypt(b io.ReadSeeker) (io.Reader, error) {
} else { } else {
usingPasswordCommand = false usingPasswordCommand = false
envPassword := os.Getenv("RCLONE_CONFIG_PASS") envpw := os.Getenv("RCLONE_CONFIG_PASS")
if envPassword != "" { if envpw != "" {
usingEnvPassword = true err := SetConfigPassword(envpw)
err := SetConfigPassword(envPassword)
if err != nil { if err != nil {
fs.Errorf(nil, "Using RCLONE_CONFIG_PASS returned: %v", err) fs.Errorf(nil, "Using RCLONE_CONFIG_PASS returned: %v", err)
} else { } else {
fs.Debugf(nil, "Using RCLONE_CONFIG_PASS password.") fs.Debugf(nil, "Using RCLONE_CONFIG_PASS password.")
} }
} else {
usingEnvPassword = false
} }
} }
} }
@@ -148,9 +144,6 @@ func Decrypt(b io.ReadSeeker) (io.Reader, error) {
if usingPasswordCommand { if usingPasswordCommand {
return nil, errors.New("using --password-command derived password, unable to decrypt configuration") return nil, errors.New("using --password-command derived password, unable to decrypt configuration")
} }
if usingEnvPassword {
return nil, errors.New("using RCLONE_CONFIG_PASS env password, unable to decrypt configuration")
}
if !ci.AskPassword { if !ci.AskPassword {
return nil, errors.New("unable to decrypt configuration and not allowed to ask for password - set RCLONE_CONFIG_PASS to your configuration password") return nil, errors.New("unable to decrypt configuration and not allowed to ask for password - set RCLONE_CONFIG_PASS to your configuration password")
} }

View File

@@ -9,37 +9,6 @@ import (
"github.com/rclone/rclone/fs/rc" "github.com/rclone/rclone/fs/rc"
) )
func init() {
rc.Add(rc.Call{
Path: "config/unlock",
Fn: rcConfigPassword,
Title: "Unlock the config file.",
AuthRequired: true,
Help: `
Unlocks the config file if it is locked.
Parameters:
- 'config_password' - password to unlock the config file
A good idea is to disable AskPassword before making this call
`,
})
}
// Unlock the config file
// A good idea is to disable AskPassword before making this call
func rcConfigPassword(ctx context.Context, in rc.Params) (out rc.Params, err error) {
configPass, err := in.GetString("config_password")
if err != nil {
return nil, err
}
if SetConfigPassword(configPass) != nil {
return nil, errors.New("failed to set config password")
}
return nil, nil
}
func init() { func init() {
rc.Add(rc.Call{ rc.Add(rc.Call{
Path: "config/dump", Path: "config/dump",
@@ -106,9 +75,6 @@ See the [listremotes](/commands/rclone_listremotes/) command for more informatio
// including any defined by environment variables. // including any defined by environment variables.
func rcListRemotes(ctx context.Context, in rc.Params) (out rc.Params, err error) { func rcListRemotes(ctx context.Context, in rc.Params) (out rc.Params, err error) {
remoteNames := GetRemoteNames() remoteNames := GetRemoteNames()
if remoteNames == nil {
remoteNames = []string{}
}
out = rc.Params{ out = rc.Params{
"remotes": remoteNames, "remotes": remoteNames,
} }

View File

@@ -138,22 +138,6 @@ func TestRc(t *testing.T) {
assert.Nil(t, out) assert.Nil(t, out)
assert.Equal(t, "", config.GetValue(testName, "type")) assert.Equal(t, "", config.GetValue(testName, "type"))
assert.Equal(t, "", config.GetValue(testName, "test_key")) assert.Equal(t, "", config.GetValue(testName, "test_key"))
t.Run("ListRemotes empty not nil", func(t *testing.T) {
call := rc.Calls.Get("config/listremotes")
assert.NotNil(t, call)
in := rc.Params{}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
require.NotNil(t, out)
var remotes []string
err = out.GetStruct("remotes", &remotes)
require.NoError(t, err)
assert.NotNil(t, remotes)
assert.Empty(t, remotes)
})
} }
func TestRcProviders(t *testing.T) { func TestRcProviders(t *testing.T) {
@@ -204,17 +188,3 @@ func TestRcPaths(t *testing.T) {
assert.Equal(t, config.GetCacheDir(), out["cache"]) assert.Equal(t, config.GetCacheDir(), out["cache"])
assert.Equal(t, os.TempDir(), out["temp"]) assert.Equal(t, os.TempDir(), out["temp"])
} }
func TestRcConfigUnlock(t *testing.T) {
call := rc.Calls.Get("config/unlock")
assert.NotNil(t, call)
in := rc.Params{
"config_password": "test",
}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
assert.Nil(t, err)
assert.Nil(t, out)
}

View File

@@ -6,12 +6,10 @@ import (
"context" "context"
"crypto/tls" "crypto/tls"
"crypto/x509" "crypto/x509"
"fmt"
"net" "net"
"net/http" "net/http"
"net/http/cookiejar" "net/http/cookiejar"
"net/http/httputil" "net/http/httputil"
"net/url"
"os" "os"
"sync" "sync"
"time" "time"
@@ -57,18 +55,7 @@ func NewTransportCustom(ctx context.Context, customize func(*http.Transport)) ht
// This also means we get new stuff when it gets added to go // This also means we get new stuff when it gets added to go
t := new(http.Transport) t := new(http.Transport)
structs.SetDefaults(t, http.DefaultTransport.(*http.Transport)) structs.SetDefaults(t, http.DefaultTransport.(*http.Transport))
if ci.HTTPProxy != "" { t.Proxy = http.ProxyFromEnvironment
proxyURL, err := url.Parse(ci.HTTPProxy)
if err != nil {
t.Proxy = func(*http.Request) (*url.URL, error) {
return nil, fmt.Errorf("failed to set --http-proxy from %q: %w", ci.HTTPProxy, err)
}
} else {
t.Proxy = http.ProxyURL(proxyURL)
}
} else {
t.Proxy = http.ProxyFromEnvironment
}
t.MaxIdleConnsPerHost = 2 * (ci.Checkers + ci.Transfers + 1) t.MaxIdleConnsPerHost = 2 * (ci.Checkers + ci.Transfers + 1)
t.MaxIdleConns = 2 * t.MaxIdleConnsPerHost t.MaxIdleConns = 2 * t.MaxIdleConnsPerHost
t.TLSHandshakeTimeout = time.Duration(ci.ConnectTimeout) t.TLSHandshakeTimeout = time.Duration(ci.ConnectTimeout)

View File

@@ -20,7 +20,7 @@ const (
var ( var (
errInvalidCharacters = errors.New("config name contains invalid characters - may only contain numbers, letters, `_`, `-`, `.`, `+`, `@` and space, while not start with `-` or space, and not end with space") errInvalidCharacters = errors.New("config name contains invalid characters - may only contain numbers, letters, `_`, `-`, `.`, `+`, `@` and space, while not start with `-` or space, and not end with space")
errCantBeEmpty = errors.New("can't use empty string as a path") errCantBeEmpty = errors.New("can't use empty string as a path")
errBadConfigParam = errors.New("config parameters may only contain `0-9`, `A-Z`, `a-z`, `_` and `.`") errBadConfigParam = errors.New("config parameters may only contain `0-9`, `A-Z`, `a-z` and `_`")
errEmptyConfigParam = errors.New("config parameters can't be empty") errEmptyConfigParam = errors.New("config parameters can't be empty")
errConfigNameEmpty = errors.New("config name can't be empty") errConfigNameEmpty = errors.New("config name can't be empty")
errConfigName = errors.New("config name needs a trailing `:`") errConfigName = errors.New("config name needs a trailing `:`")
@@ -79,8 +79,7 @@ func isConfigParam(c rune) bool {
return ((c >= 'a' && c <= 'z') || return ((c >= 'a' && c <= 'z') ||
(c >= 'A' && c <= 'Z') || (c >= 'A' && c <= 'Z') ||
(c >= '0' && c <= '9') || (c >= '0' && c <= '9') ||
c == '_' || c == '_')
c == '.')
} }
// Parsed is returned from Parse with the results of the connection string decomposition // Parsed is returned from Parse with the results of the connection string decomposition

View File

@@ -7,15 +7,12 @@ import (
"crypto/md5" "crypto/md5"
"encoding/base64" "encoding/base64"
"fmt" "fmt"
"maps"
"os" "os"
"path/filepath" "path/filepath"
"slices"
"strings" "strings"
"sync" "sync"
"github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/fspath"
) )
@@ -68,10 +65,6 @@ func NewFs(ctx context.Context, path string) (Fs, error) {
overriddenConfig[suffix] = extraConfig overriddenConfig[suffix] = extraConfig
overriddenConfigMu.Unlock() overriddenConfigMu.Unlock()
} }
ctx, err = addConfigToContext(ctx, configName, config)
if err != nil {
return nil, err
}
f, err := fsInfo.NewFs(ctx, configName, fsPath, config) f, err := fsInfo.NewFs(ctx, configName, fsPath, config)
if f != nil && (err == nil || err == ErrorIsFile) { if f != nil && (err == nil || err == ErrorIsFile) {
addReverse(f, fsInfo) addReverse(f, fsInfo)
@@ -79,54 +72,6 @@ func NewFs(ctx context.Context, path string) (Fs, error) {
return f, err return f, err
} }
// Add "global" config or "override" to ctx and the global config if required.
//
// This looks through keys prefixed with "global." or "override." in
// config and sets ctx and optionally the global context if "global.".
func addConfigToContext(ctx context.Context, configName string, config configmap.Getter) (newCtx context.Context, err error) {
overrideConfig := make(configmap.Simple)
globalConfig := make(configmap.Simple)
for i := range ConfigOptionsInfo {
opt := &ConfigOptionsInfo[i]
globalName := "global." + opt.Name
value, isSet := config.Get(globalName)
if isSet {
// Set both override and global if global
overrideConfig[opt.Name] = value
globalConfig[opt.Name] = value
}
overrideName := "override." + opt.Name
value, isSet = config.Get(overrideName)
if isSet {
overrideConfig[opt.Name] = value
}
}
if len(overrideConfig) == 0 && len(globalConfig) == 0 {
return ctx, nil
}
newCtx, ci := AddConfig(ctx)
overrideKeys := slices.Collect(maps.Keys(overrideConfig))
slices.Sort(overrideKeys)
globalKeys := slices.Collect(maps.Keys(globalConfig))
slices.Sort(globalKeys)
// Set the config in the newCtx
err = configstruct.Set(overrideConfig, ci)
if err != nil {
return ctx, fmt.Errorf("failed to set override config variables %q: %w", overrideKeys, err)
}
Debugf(configName, "Set overridden config %q for backend startup", overrideKeys)
// Set the global context only
if len(globalConfig) != 0 {
globalCI := GetConfig(context.Background())
err = configstruct.Set(globalConfig, globalCI)
if err != nil {
return ctx, fmt.Errorf("failed to set global config variables %q: %w", globalKeys, err)
}
Debugf(configName, "Set global config %q at backend startup", overrideKeys)
}
return newCtx, nil
}
// ConfigFs makes the config for calling NewFs with. // ConfigFs makes the config for calling NewFs with.
// //
// It parses the path which is of the form remote:path // It parses the path which is of the form remote:path

View File

@@ -1,55 +0,0 @@
package fs
import (
"context"
"testing"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// When no override/global keys exist, ctx must be returned unchanged.
func TestAddConfigToContext_NoChanges(t *testing.T) {
ctx := context.Background()
newCtx, err := addConfigToContext(ctx, "unit-test", configmap.Simple{})
require.NoError(t, err)
assert.Equal(t, newCtx, ctx)
}
// A single override.key must create a new ctx, but leave the
// background ctx untouched.
func TestAddConfigToContext_OverrideOnly(t *testing.T) {
override := configmap.Simple{
"override.user_agent": "potato",
}
ctx := context.Background()
globalCI := GetConfig(ctx)
original := globalCI.UserAgent
newCtx, err := addConfigToContext(ctx, "unit-test", override)
require.NoError(t, err)
assert.NotEqual(t, newCtx, ctx)
assert.Equal(t, original, globalCI.UserAgent)
ci := GetConfig(newCtx)
assert.Equal(t, "potato", ci.UserAgent)
}
// A single global.key must create a new ctx and update the
// background/global config.
func TestAddConfigToContext_GlobalOnly(t *testing.T) {
global := configmap.Simple{
"global.user_agent": "potato2",
}
ctx := context.Background()
globalCI := GetConfig(ctx)
original := globalCI.UserAgent
defer func() {
globalCI.UserAgent = original
}()
newCtx, err := addConfigToContext(ctx, "unit-test", global)
require.NoError(t, err)
assert.NotEqual(t, newCtx, ctx)
assert.Equal(t, "potato2", globalCI.UserAgent)
ci := GetConfig(newCtx)
assert.Equal(t, "potato2", ci.UserAgent)
}

View File

@@ -42,21 +42,4 @@ func TestNewFs(t *testing.T) {
assert.Equal(t, ":mockfs{S_NHG}:/tmp", fs.ConfigString(f3)) assert.Equal(t, ":mockfs{S_NHG}:/tmp", fs.ConfigString(f3))
assert.Equal(t, ":mockfs,potato='true':/tmp", fs.ConfigStringFull(f3)) assert.Equal(t, ":mockfs,potato='true':/tmp", fs.ConfigStringFull(f3))
// Check that the overrides work
globalCI := fs.GetConfig(ctx)
original := globalCI.UserAgent
defer func() {
globalCI.UserAgent = original
}()
f4, err := fs.NewFs(ctx, ":mockfs,global.user_agent='julian':/tmp")
require.NoError(t, err)
assert.Equal(t, ":mockfs", f4.Name())
assert.Equal(t, "/tmp", f4.Root())
assert.Equal(t, ":mockfs:/tmp", fs.ConfigString(f4))
assert.Equal(t, ":mockfs:/tmp", fs.ConfigStringFull(f4))
assert.Equal(t, "julian", globalCI.UserAgent)
} }

View File

@@ -820,7 +820,7 @@ func rcCheck(ctx context.Context, in rc.Params) (out rc.Params, err error) {
return nil, rc.NewErrParamInvalid(errors.New("need srcFs parameter when not using checkFileHash")) return nil, rc.NewErrParamInvalid(errors.New("need srcFs parameter when not using checkFileHash"))
} }
oneway, _ := in.GetBool("oneWay") oneway, _ := in.GetBool("oneway")
download, _ := in.GetBool("download") download, _ := in.GetBool("download")
opt := &CheckOpt{ opt := &CheckOpt{

View File

@@ -49,7 +49,7 @@ Parameters:
Note that these are the global options which are unaffected by use of Note that these are the global options which are unaffected by use of
the _config and _filter parameters. If you wish to read the parameters the _config and _filter parameters. If you wish to read the parameters
set in _config or _filter use options/local. set in _config then use options/config and for _filter use options/filter.
This shows the internal names of the option within rclone which should This shows the internal names of the option within rclone which should
map to the external options very easily with a few exceptions. map to the external options very easily with a few exceptions.

View File

@@ -658,7 +658,7 @@ func TestServerSideCopyOverSelf(t *testing.T) {
ctx = predictDstFromLogger(ctx) ctx = predictDstFromLogger(ctx)
err = CopyDir(ctx, FremoteCopy, r.Fremote, false) err = CopyDir(ctx, FremoteCopy, r.Fremote, false)
require.NoError(t, err) require.NoError(t, err)
testLoggerVsLsf(ctx, FremoteCopy, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) testLoggerVsLsf(ctx, r.Fremote, r.Flocal, operations.GetLoggerOpt(ctx).JSON, t)
fstest.CheckItems(t, FremoteCopy, file1) fstest.CheckItems(t, FremoteCopy, file1)
file2 := r.WriteObject(ctx, "sub dir/hello world", "hello world again", t2) file2 := r.WriteObject(ctx, "sub dir/hello world", "hello world again", t2)
@@ -667,7 +667,7 @@ func TestServerSideCopyOverSelf(t *testing.T) {
ctx = predictDstFromLogger(ctx) ctx = predictDstFromLogger(ctx)
err = CopyDir(ctx, FremoteCopy, r.Fremote, false) err = CopyDir(ctx, FremoteCopy, r.Fremote, false)
require.NoError(t, err) require.NoError(t, err)
testLoggerVsLsf(ctx, FremoteCopy, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) testLoggerVsLsf(ctx, r.Fremote, r.Flocal, operations.GetLoggerOpt(ctx).JSON, t)
fstest.CheckItems(t, FremoteCopy, file2) fstest.CheckItems(t, FremoteCopy, file2)
} }
@@ -703,7 +703,7 @@ func TestServerSideMoveOverSelf(t *testing.T) {
ctx = predictDstFromLogger(ctx) ctx = predictDstFromLogger(ctx)
err = CopyDir(ctx, FremoteCopy, r.Fremote, false) err = CopyDir(ctx, FremoteCopy, r.Fremote, false)
require.NoError(t, err) require.NoError(t, err)
testLoggerVsLsf(ctx, FremoteCopy, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) testLoggerVsLsf(ctx, r.Fremote, r.Flocal, operations.GetLoggerOpt(ctx).JSON, t)
fstest.CheckItems(t, FremoteCopy, file1) fstest.CheckItems(t, FremoteCopy, file1)
file2 := r.WriteObject(ctx, "sub dir/hello world", "hello world again", t2) file2 := r.WriteObject(ctx, "sub dir/hello world", "hello world again", t2)
@@ -3031,9 +3031,6 @@ func DstLsf(ctx context.Context, Fremote fs.Fs) *bytes.Buffer {
list.SetSeparator(";") list.SetSeparator(";")
timeFormat := operations.FormatForLSFPrecision(Fremote.Precision()) timeFormat := operations.FormatForLSFPrecision(Fremote.Precision())
if Fremote.Precision() == fs.ModTimeNotSupported {
timeFormat = "none"
}
list.AddModTime(timeFormat) list.AddModTime(timeFormat)
list.AddHash(hash.MD5) list.AddHash(hash.MD5)
list.AddSize() list.AddSize()
@@ -3085,7 +3082,7 @@ func testLoggerVsLsf(ctx context.Context, fdst, fsrc fs.Fs, logger *bytes.Buffer
elements := bytes.Split(line, []byte(";")) elements := bytes.Split(line, []byte(";"))
if len(elements) >= 2 { if len(elements) >= 2 {
if !canTestModtime { if !canTestModtime {
elements[0] = []byte("none") elements[0] = []byte("")
} }
if !canTestHash { if !canTestHash {
elements[1] = []byte("") elements[1] = []byte("")

View File

@@ -7,7 +7,6 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"io" "io"
"math"
"time" "time"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
@@ -336,15 +335,9 @@ type FlaggerNP interface {
} }
// NewUsageValue makes a valid value // NewUsageValue makes a valid value
func NewUsageValue[T interface { func NewUsageValue(value int64) *int64 {
int64 | uint64 | float64
}](value T) *int64 {
p := new(int64) p := new(int64)
if value > T(int64(math.MaxInt64)) { *p = value
*p = math.MaxInt64
} else {
*p = int64(value)
}
return p return p
} }

View File

@@ -120,6 +120,8 @@ backends:
- TestCopyURL - TestCopyURL
- TestMoveFileWithIgnoreExisting - TestMoveFileWithIgnoreExisting
- TestCopyFileCompareDest - TestCopyFileCompareDest
# fs/sync
- TestServerSideMoveOverSelf
#vfs #vfs
- TestFileSetModTime/cache=off,open=false,write=false - TestFileSetModTime/cache=off,open=false,write=false
- TestFileSetModTime/cache=off,open=true,write=false - TestFileSetModTime/cache=off,open=true,write=false

View File

@@ -692,10 +692,6 @@ version recommended):
newFormat := true newFormat := true
err := outM.Decode(code) err := outM.Decode(code)
if err != nil { if err != nil {
if len(code) > 0 && code[0] != '{' {
fs.Errorf(nil, "Couldn't decode rclone authorize output as base64, trying JSON: %v", err)
fs.Errorf(nil, "Check the code is complete and didn't get truncated >>>%s<<<", code)
}
newFormat = false newFormat = false
err = json.Unmarshal([]byte(code), &token) err = json.Unmarshal([]byte(code), &token)
} }

View File

@@ -88,9 +88,7 @@ func (r *Renew) Shutdown() {
} }
// closing a channel can only be done once // closing a channel can only be done once
r.shutdown.Do(func() { r.shutdown.Do(func() {
if r.ts != nil { r.ts.expiryTimer.Stop()
r.ts.expiryTimer.Stop()
}
close(r.done) close(r.done)
}) })
} }