mirror of https://github.com/rclone/rclone.git
synced 2025-12-06 00:03:32 +00:00

Compare commits: copilot/fi...fix-8569-s
77 Commits
| SHA1 |
|---|
| 2dfbd0b47c |
| c6a099ac14 |
| 80727496fc |
| a5bd052fb4 |
| 538edfeee0 |
| 20253fb4df |
| 13875b4500 |
| 3d3ea9ee30 |
| 84f11ae448 |
| ef975129a8 |
| c9f3456341 |
| 0121dc11d3 |
| 32ea8f6dc8 |
| 775f3e49f0 |
| b0310c7273 |
| 07bb122d47 |
| aedcd56531 |
| 4a10491c2a |
| bbceb84cad |
| 4c3737014a |
| a49ccddb81 |
| 7fd3d0d1ab |
| 77c1031dcd |
| d8c7031dc3 |
| ab5844df77 |
| a3c4e2fd20 |
| b12d2f32db |
| 85c1563701 |
| f52d753363 |
| f76c72b5cc |
| 46dcc0eaf6 |
| f46787dcc3 |
| e7c46912a3 |
| 296c26b671 |
| ef99b9ad18 |
| 2e853b6e75 |
| d725f4f4de |
| 6a9c2350de |
| 659aca8711 |
| 1ac7e32302 |
| 286ae6a1b4 |
| 861456d970 |
| 5fa9c0209e |
| b00dcc37bd |
| 92efc5ff43 |
| 57bbb4be9f |
| c62fbc5269 |
| 6eb8919719 |
| ff4d7c8dd0 |
| 4f8dfd14fc |
| 4e77a4ff73 |
| b63c42f39b |
| 30c9bab35d |
| 68bbd8017d |
| 259dbbab55 |
| 5fa85f66fe |
| fb648e4774 |
| 9978750a8c |
| d953c0c51b |
| 9dfce11c9b |
| 504f2fb571 |
| f79f929e57 |
| 83e04ead37 |
| 1a95a23fdc |
| c4b592e549 |
| 642d1415d1 |
| 64556d4ca2 |
| de69448565 |
| ad941655c5 |
| 6cbb9fd7cb |
| 7988300f50 |
| 1b47b7a6bb |
| 626bdacd59 |
| 1ef2da31a7 |
| 376a5b1a83 |
| ddaeb07019 |
| c72f71bd02 |
30  .github/workflows/build.yml  vendored

@@ -26,12 +26,12 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.21', 'go1.22']
+        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os']

         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '>=1.23.0-rc.1'
+            go: '1.24.0'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -42,14 +42,14 @@ jobs:

           - job_name: linux_386
             os: ubuntu-latest
-            go: '>=1.23.0-rc.1'
+            go: '1.24.0'
             goarch: 386
             gotags: cmount
             quicktest: true

           - job_name: mac_amd64
             os: macos-latest
-            go: '>=1.23.0-rc.1'
+            go: '1.24.0'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -58,14 +58,14 @@ jobs:

           - job_name: mac_arm64
             os: macos-latest
-            go: '>=1.23.0-rc.1'
+            go: '1.24.0'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true

           - job_name: windows
             os: windows-latest
-            go: '>=1.23.0-rc.1'
+            go: '1.24.0'
             gotags: cmount
             cgo: '0'
             build_flags: '-include "^windows/"'
@@ -75,23 +75,11 @@ jobs:

           - job_name: other_os
             os: ubuntu-latest
-            go: '>=1.23.0-rc.1'
+            go: '1.24.0'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true

-          - job_name: go1.21
-            os: ubuntu-latest
-            go: '1.21'
-            quicktest: true
-            racequicktest: true
-
-          - job_name: go1.22
-            os: ubuntu-latest
-            go: '1.22'
-            quicktest: true
-            racequicktest: true
-
     name: ${{ matrix.job_name }}

     runs-on: ${{ matrix.os }}
@@ -237,7 +225,7 @@ jobs:
         id: setup-go
         uses: actions/setup-go@v5
         with:
-          go-version: '>=1.23.0-rc.1'
+          go-version: '1.24.0'
           check-latest: true
           cache: false

@@ -311,7 +299,7 @@ jobs:
       - name: Set up Go
        uses: actions/setup-go@v5
        with:
-          go-version: '>=1.23.0-rc.1'
+          go-version: '1.24.0'

       - name: Set global environment variables
         shell: bash

@@ -1,77 +0,0 @@
-name: Docker beta build
-
-on:
-  push:
-    branches:
-      - master
-jobs:
-  build:
-    if: github.repository == 'rclone/rclone'
-    runs-on: ubuntu-latest
-    name: Build image job
-    steps:
-      - name: Free some space
-        shell: bash
-        run: |
-          df -h .
-          # Remove android SDK
-          sudo rm -rf /usr/local/lib/android || true
-          # Remove .net runtime
-          sudo rm -rf /usr/share/dotnet || true
-          df -h .
-      - name: Checkout master
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-      - name: Login to Docker Hub
-        uses: docker/login-action@v3
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - name: Extract metadata (tags, labels) for Docker
-        id: meta
-        uses: docker/metadata-action@v5
-        with:
-          images: ghcr.io/${{ github.repository }}
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v3
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
-      - name: Login to GitHub Container Registry
-        uses: docker/login-action@v3
-        with:
-          registry: ghcr.io
-          # This is the user that triggered the Workflow. In this case, it will
-          # either be the user whom created the Release or manually triggered
-          # the workflow_dispatch.
-          username: ${{ github.actor }}
-          # `secrets.GITHUB_TOKEN` is a secret that's automatically generated by
-          # GitHub Actions at the start of a workflow run to identify the job.
-          # This is used to authenticate against GitHub Container Registry.
-          # See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
-          # for more detailed information.
-          password: ${{ secrets.GITHUB_TOKEN }}
-      - name: Show disk usage
-        shell: bash
-        run: |
-          df -h .
-      - name: Build and publish image
-        uses: docker/build-push-action@v6
-        with:
-          file: Dockerfile
-          context: .
-          push: true # push the image to ghcr
-          tags: |
-            ghcr.io/rclone/rclone:beta
-            rclone/rclone:beta
-          labels: ${{ steps.meta.outputs.labels }}
-          platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
-          cache-from: type=gha, scope=${{ github.workflow }}
-          cache-to: type=gha, mode=max, scope=${{ github.workflow }}
-          provenance: false
-          # Eventually cache will need to be cleared if builds more frequent than once a week
-          # https://github.com/docker/build-push-action/issues/252
-      - name: Show disk usage
-        shell: bash
-        run: |
-          df -h .

294  .github/workflows/build_publish_docker_image.yml  vendored  Normal file

@@ -0,0 +1,294 @@
+---
+# Github Actions release for rclone
+# -*- compile-command: "yamllint -f parsable build_publish_docker_image.yml" -*-
+
+name: Build & Push Docker Images
+
+# Trigger the workflow on push or pull request
+on:
+  push:
+    branches:
+      - '**'
+    tags:
+      - '**'
+  workflow_dispatch:
+    inputs:
+      manual:
+        description: Manual run (bypass default conditions)
+        type: boolean
+        default: true
+
+jobs:
+  build-image:
+    if: inputs.manual || (github.repository == 'rclone/rclone' && github.event_name != 'pull_request')
+    timeout-minutes: 60
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          - platform: linux/amd64
+            runs-on: ubuntu-24.04
+          - platform: linux/386
+            runs-on: ubuntu-24.04
+          - platform: linux/arm64
+            runs-on: ubuntu-24.04-arm
+          - platform: linux/arm/v7
+            runs-on: ubuntu-24.04-arm
+          - platform: linux/arm/v6
+            runs-on: ubuntu-24.04-arm
+
+    name: Build Docker Image for ${{ matrix.platform }}
+    runs-on: ${{ matrix.runs-on }}
+
+    steps:
+      - name: Free Space
+        shell: bash
+        run: |
+          df -h .
+          # Remove android SDK
+          sudo rm -rf /usr/local/lib/android || true
+          # Remove .net runtime
+          sudo rm -rf /usr/share/dotnet || true
+          df -h .
+
+      - name: Checkout Repository
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - name: Set REPO_NAME Variable
+        run: |
+          echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}
+
+      - name: Set PLATFORM Variable
+        run: |
+          platform=${{ matrix.platform }}
+          echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV
+
+      - name: Set CACHE_NAME Variable
+        shell: python
+        run: |
+          import os, re
+
+          def slugify(input_string, max_length=63):
+              slug = input_string.lower()
+              slug = re.sub(r'[^a-z0-9 -]', ' ', slug)
+              slug = slug.strip()
+              slug = re.sub(r'\s+', '-', slug)
+              slug = re.sub(r'-+', '-', slug)
+              slug = slug[:max_length]
+              slug = re.sub(r'[-]+$', '', slug)
+              return slug
+
+          ref_name_slug = "cache"
+
+          if os.environ.get("GITHUB_REF_NAME") and os.environ['GITHUB_EVENT_NAME'] == "pull_request":
+              ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME'])
+
+          with open(os.environ['GITHUB_ENV'], 'a') as env:
+              env.write(f"CACHE_NAME={ref_name_slug}\n")
+
+      - name: Get ImageOS
+        # There's no way around this, because "ImageOS" is only available to
+        # processes, but the setup-go action uses it in its key.
+        id: imageos
+        uses: actions/github-script@v7
+        with:
+          result-encoding: string
+          script: |
+            return process.env.ImageOS
+
+      - name: Extract Metadata (tags, labels) for Docker
+        id: meta
+        uses: docker/metadata-action@v5
+        env:
+          DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,manifest-descriptor  # Important for digest annotation (used by Github packages)
+        with:
+          images: |
+            ghcr.io/${{ env.REPO_NAME }}
+          labels: |
+            org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
+            org.opencontainers.image.vendor=${{ github.repository_owner }}
+            org.opencontainers.image.authors=rclone <https://github.com/rclone>
+            org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
+            org.opencontainers.image.revision=${{ github.sha }}
+          tags: |
+            type=sha
+            type=ref,event=pr
+            type=ref,event=branch
+            type=semver,pattern={{version}}
+            type=semver,pattern={{major}}
+            type=semver,pattern={{major}}.{{minor}}
+            type=raw,value=beta,enable={{is_default_branch}}
+
+      - name: Setup QEMU
+        uses: docker/setup-qemu-action@v3
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
+      - name: Load Go Build Cache for Docker
+        id: go-cache
+        uses: actions/cache@v4
+        with:
+          key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
+          # Cache only the go builds, the module download is cached via the docker layer caching
+          path: |
+            go-build-cache
+
+      - name: Inject Go Build Cache into Docker
+        uses: reproducible-containers/buildkit-cache-dance@v3
+        with:
+          cache-map: |
+            {
+              "go-build-cache": "/root/.cache/go-build"
+            }
+          skip-extraction: ${{ steps.go-cache.outputs.cache-hit }}
+
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@v3
+        with:
+          registry: ghcr.io
+          # This is the user that triggered the Workflow. In this case, it will
+          # either be the user whom created the Release or manually triggered
+          # the workflow_dispatch.
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Build and Publish Image Digest
+        id: build
+        uses: docker/build-push-action@v6
+        with:
+          file: Dockerfile
+          context: .
+          provenance: false
+          # don't specify 'tags' here (error "get can't push tagged ref by digest")
+          # tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
+          annotations: ${{ steps.meta.outputs.annotations }}
+          platforms: ${{ matrix.platform }}
+          outputs: |
+            type=image,name=ghcr.io/${{ env.REPO_NAME }},push-by-digest=true,name-canonical=true,push=true
+          cache-from: |
+            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
+          cache-to: |
+            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }},image-manifest=true,mode=max,compression=zstd
+
+      - name: Export Image Digest
+        run: |
+          mkdir -p /tmp/digests
+          digest="${{ steps.build.outputs.digest }}"
+          touch "/tmp/digests/${digest#sha256:}"
+
+      - name: Upload Image Digest
+        uses: actions/upload-artifact@v4
+        with:
+          name: digests-${{ env.PLATFORM }}
+          path: /tmp/digests/*
+          retention-days: 1
+          if-no-files-found: error
+
+  merge-image:
+    name: Merge & Push Final Docker Image
+    runs-on: ubuntu-24.04
+    needs:
+      - build-image
+
+    steps:
+      - name: Download Image Digests
+        uses: actions/download-artifact@v4
+        with:
+          path: /tmp/digests
+          pattern: digests-*
+          merge-multiple: true
+
+      - name: Set REPO_NAME Variable
+        run: |
+          echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}
+
+      - name: Extract Metadata (tags, labels) for Docker
+        id: meta
+        uses: docker/metadata-action@v5
+        env:
+          DOCKER_METADATA_ANNOTATIONS_LEVELS: index
+        with:
+          images: |
+            ${{ env.REPO_NAME }}
+            ghcr.io/${{ env.REPO_NAME }}
+          labels: |
+            org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
+            org.opencontainers.image.vendor=${{ github.repository_owner }}
+            org.opencontainers.image.authors=rclone <https://github.com/rclone>
+            org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
+            org.opencontainers.image.revision=${{ github.sha }}
+          tags: |
+            type=sha
+            type=ref,event=pr
+            type=ref,event=branch
+            type=semver,pattern={{version}}
+            type=semver,pattern={{major}}
+            type=semver,pattern={{major}}.{{minor}}
+            type=raw,value=beta,enable={{is_default_branch}}
+
+      - name: Extract Tags
+        shell: python
+        run: |
+          import json, os
+
+          metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
+          metadata = json.loads(metadata_json)
+
+          tags = [f"--tag '{tag}'" for tag in metadata["tags"]]
+          tags_string = " ".join(tags)
+
+          with open(os.environ['GITHUB_ENV'], 'a') as env:
+              env.write(f"TAGS={tags_string}\n")
+
+      - name: Extract Annotations
+        shell: python
+        run: |
+          import json, os
+
+          metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
+          metadata = json.loads(metadata_json)
+
+          annotations = [f"--annotation '{annotation}'" for annotation in metadata["annotations"]]
+          annotations_string = " ".join(annotations)
+
+          with open(os.environ['GITHUB_ENV'], 'a') as env:
+              env.write(f"ANNOTATIONS={annotations_string}\n")
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
+      - name: Login to Docker Hub
+        uses: docker/login-action@v3
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@v3
+        with:
+          registry: ghcr.io
+          # This is the user that triggered the Workflow. In this case, it will
+          # either be the user whom created the Release or manually triggered
+          # the workflow_dispatch.
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Create & Push Manifest List
+        working-directory: /tmp/digests
+        run: |
+          docker buildx imagetools create \
+            ${{ env.TAGS }} \
+            ${{ env.ANNOTATIONS }} \
+            $(printf 'ghcr.io/${{ env.REPO_NAME }}@sha256:%s ' *)
+
+      - name: Inspect and Run Multi-Platform Image
+        run: |
+          docker buildx imagetools inspect --raw ${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
+          docker buildx imagetools inspect --raw ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
+          docker run --rm ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }} version
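
The `workflow_dispatch` trigger above takes a boolean `manual` input, so a fork can start this pipeline by hand. A sketch with the GitHub CLI (assumes `gh` is installed and authenticated against the fork; not part of this diff):

```
gh workflow run "Build & Push Docker Images" -f manual=true
```
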
49  .github/workflows/build_publish_docker_plugin.yml  vendored  Normal file

@@ -0,0 +1,49 @@
+---
+# Github Actions release for rclone
+# -*- compile-command: "yamllint -f parsable build_publish_docker_plugin.yml" -*-
+
+name: Release Build for Docker Plugin
+
+on:
+  release:
+    types: [published]
+  workflow_dispatch:
+    inputs:
+      manual:
+        description: Manual run (bypass default conditions)
+        type: boolean
+        default: true
+
+jobs:
+  build_docker_volume_plugin:
+    if: inputs.manual || github.repository == 'rclone/rclone'
+    name: Build docker plugin job
+    runs-on: ubuntu-latest
+    steps:
+      - name: Free some space
+        shell: bash
+        run: |
+          df -h .
+          # Remove android SDK
+          sudo rm -rf /usr/local/lib/android || true
+          # Remove .net runtime
+          sudo rm -rf /usr/share/dotnet || true
+          df -h .
+      - name: Checkout master
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+      - name: Build and publish docker plugin
+        shell: bash
+        run: |
+          VER=${GITHUB_REF#refs/tags/}
+          PLUGIN_USER=rclone
+          docker login --username ${{ secrets.DOCKER_HUB_USER }} \
+            --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
+          for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
+            export PLUGIN_USER PLUGIN_ARCH
+            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
+            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
+          done
+          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
+          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}
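
For consumers, the per-arch tags built above are installed as a managed Docker plugin. A sketch assuming the plugin is published as `rclone/docker-volume-rclone` (the published name is not shown in this diff):

```
docker plugin install rclone/docker-volume-rclone:amd64 \
  --alias rclone --grant-all-permissions args="-v"
```
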
@@ -1,89 +0,0 @@
-name: Docker release build
-
-on:
-  release:
-    types: [published]
-
-jobs:
-  build:
-    if: github.repository == 'rclone/rclone'
-    runs-on: ubuntu-latest
-    name: Build image job
-    steps:
-      - name: Free some space
-        shell: bash
-        run: |
-          df -h .
-          # Remove android SDK
-          sudo rm -rf /usr/local/lib/android || true
-          # Remove .net runtime
-          sudo rm -rf /usr/share/dotnet || true
-          df -h .
-      - name: Checkout master
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-      - name: Get actual patch version
-        id: actual_patch_version
-        run: echo ::set-output name=ACTUAL_PATCH_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g')
-      - name: Get actual minor version
-        id: actual_minor_version
-        run: echo ::set-output name=ACTUAL_MINOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1,2)
-      - name: Get actual major version
-        id: actual_major_version
-        run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v3
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
-      - name: Login to Docker Hub
-        uses: docker/login-action@v3
-        with:
-          username: ${{ secrets.DOCKER_HUB_USER }}
-          password: ${{ secrets.DOCKER_HUB_PASSWORD }}
-      - name: Build and publish image
-        uses: docker/build-push-action@v6
-        with:
-          file: Dockerfile
-          context: .
-          platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
-          push: true
-          tags: |
-            rclone/rclone:latest
-            rclone/rclone:${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }}
-            rclone/rclone:${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }}
-            rclone/rclone:${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
-
-  build_docker_volume_plugin:
-    if: github.repository == 'rclone/rclone'
-    needs: build
-    runs-on: ubuntu-latest
-    name: Build docker plugin job
-    steps:
-      - name: Free some space
-        shell: bash
-        run: |
-          df -h .
-          # Remove android SDK
-          sudo rm -rf /usr/local/lib/android || true
-          # Remove .net runtime
-          sudo rm -rf /usr/share/dotnet || true
-          df -h .
-      - name: Checkout master
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-      - name: Build and publish docker plugin
-        shell: bash
-        run: |
-          VER=${GITHUB_REF#refs/tags/}
-          PLUGIN_USER=rclone
-          docker login --username ${{ secrets.DOCKER_HUB_USER }} \
-            --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
-          for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
-            export PLUGIN_USER PLUGIN_ARCH
-            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
-            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
-          done
-          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
-          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}

44  Dockerfile

@@ -1,19 +1,47 @@
 FROM golang:alpine AS builder

-COPY . /go/src/github.com/rclone/rclone/
+ARG CGO_ENABLED=0

 WORKDIR /go/src/github.com/rclone/rclone/

-RUN apk add --no-cache make bash gawk git
-RUN \
-  CGO_ENABLED=0 \
-  make
-RUN ./rclone version
+RUN echo "**** Set Go Environment Variables ****" && \
+  go env -w GOCACHE=/root/.cache/go-build
+
+RUN echo "**** Install Dependencies ****" && \
+  apk add --no-cache \
+    make \
+    bash \
+    gawk \
+    git
+
+COPY go.mod .
+COPY go.sum .
+
+RUN echo "**** Download Go Dependencies ****" && \
+  go mod download -x
+
+RUN echo "**** Verify Go Dependencies ****" && \
+  go mod verify
+
+COPY . .
+
+RUN --mount=type=cache,target=/root/.cache/go-build,sharing=locked \
+  echo "**** Build Binary ****" && \
+  make
+
+RUN echo "**** Print Version Binary ****" && \
+  ./rclone version

 # Begin final image
 FROM alpine:latest

-RUN apk --no-cache add ca-certificates fuse3 tzdata && \
-    echo "user_allow_other" >> /etc/fuse.conf
+RUN echo "**** Install Dependencies ****" && \
+  apk add --no-cache \
+    ca-certificates \
+    fuse3 \
+    tzdata && \
+  echo "Enable user_allow_other in fuse" && \
+  echo "user_allow_other" >> /etc/fuse.conf

 COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/

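The rewritten builder stage relies on `RUN --mount=type=cache`, which requires BuildKit. A local build sketch (the tag name is illustrative, not from this diff):

```
DOCKER_BUILDKIT=1 docker build -t rclone:local .
```
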
762  MANUAL.html  generated

File diff suppressed because it is too large
354  MANUAL.md  generated
@@ -1,7 +1,78 @@
 % rclone(1) User Manual
 % Nick Craig-Wood
-% Jan 12, 2025
+% May 21, 2025
+
+# NAME
+
+rclone - manage files on cloud storage
+
+# SYNOPSIS
+
+```
+Usage:
+  rclone [flags]
+  rclone [command]
+
+Available commands:
+  about          Get quota information from the remote.
+  authorize      Remote authorization.
+  backend        Run a backend-specific command.
+  bisync         Perform bidirectional synchronization between two paths.
+  cat            Concatenates any files and sends them to stdout.
+  check          Checks the files in the source and destination match.
+  checksum       Checks the files in the destination against a SUM file.
+  cleanup        Clean up the remote if possible.
+  completion     Output completion script for a given shell.
+  config         Enter an interactive configuration session.
+  copy           Copy files from source to dest, skipping identical files.
+  copyto         Copy files from source to dest, skipping identical files.
+  copyurl        Copy the contents of the URL supplied content to dest:path.
+  cryptcheck     Cryptcheck checks the integrity of an encrypted remote.
+  cryptdecode    Cryptdecode returns unencrypted file names.
+  dedupe         Interactively find duplicate filenames and delete/rename them.
+  delete         Remove the files in path.
+  deletefile     Remove a single file from remote.
+  gendocs        Output markdown docs for rclone to the directory supplied.
+  gitannex       Speaks with git-annex over stdin/stdout.
+  hashsum        Produces a hashsum file for all the objects in the path.
+  help           Show help for rclone commands, flags and backends.
+  link           Generate public link to file/folder.
+  listremotes    List all the remotes in the config file and defined in environment variables.
+  ls             List the objects in the path with size and path.
+  lsd            List all directories/containers/buckets in the path.
+  lsf            List directories and objects in remote:path formatted for parsing.
+  lsjson         List directories and objects in the path in JSON format.
+  lsl            List the objects in path with modification time, size and path.
+  md5sum         Produces an md5sum file for all the objects in the path.
+  mkdir          Make the path if it doesn't already exist.
+  mount          Mount the remote as file system on a mountpoint.
+  move           Move files from source to dest.
+  moveto         Move file or directory from source to dest.
+  ncdu           Explore a remote with a text based user interface.
+  nfsmount       Mount the remote as file system on a mountpoint.
+  obscure        Obscure password for use in the rclone config file.
+  purge          Remove the path and all of its contents.
+  rc             Run a command against a running rclone.
+  rcat           Copies standard input to file on remote.
+  rcd            Run rclone listening to remote control commands only.
+  rmdir          Remove the empty directory at path.
+  rmdirs         Remove empty directories under the path.
+  selfupdate     Update the rclone binary.
+  serve          Serve a remote over a protocol.
+  settier        Changes storage class/tier of objects in remote.
+  sha1sum        Produces an sha1sum file for all the objects in the path.
+  size           Prints the total size and number of objects in remote:path.
+  sync           Make source and dest identical, modifying destination only.
+  test           Run a test command
+  touch          Create new file or change file modification time.
+  tree           List the contents of the remote in a tree like fashion.
+  version        Show the version number.
+
+Use "rclone [command] --help" for more information about a command.
+Use "rclone help flags" for to see the global flags.
+Use "rclone help backends" for a list of supported services.
+
+```
 # Rclone syncs your files to cloud storage

 <img width="50%" src="https://rclone.org/img/logo_on_light__horizontal_color.svg" alt="rclone logo" style="float:right; padding: 5px;" >
@@ -1690,6 +1761,9 @@ include/exclude filters - everything will be removed. Use the
 delete files. To delete empty directories only, use command
 [rmdir](https://rclone.org/commands/rclone_rmdir/) or [rmdirs](https://rclone.org/commands/rclone_rmdirs/).

+The concurrency of this operation is controlled by the `--checkers` global flag. However, some backends will
+implement this command directly, in which case `--checkers` will be ignored.
+
 **Important**: Since this can cause data loss, test first with the
 `--dry-run` or the `--interactive`/`-i` flag.
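
The `--checkers` note added above implies invocations along these lines (the value 16 is illustrative; `--dry-run` per the warning that follows):

```
rclone delete remote:dir --checkers 16 --dry-run
```
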
@@ -2784,13 +2858,18 @@ Remote authorization. Used to authorize a remote or headless
 rclone from a machine with a browser - use as instructed by
 rclone config.

+The command requires 1-3 arguments:
+  - fs name (e.g., "drive", "s3", etc.)
+  - Either a base64 encoded JSON blob obtained from a previous rclone config session
+  - Or a client_id and client_secret pair obtained from the remote service
+
 Use --auth-no-open-browser to prevent rclone to open auth
 link in default browser automatically.

 Use --template to generate HTML output via a custom Go template. If a blank string is provided as an argument to this flag, the default template is used.

 ```
-rclone authorize [flags]
+rclone authorize <fs name> [base64_json_blob | client_id client_secret] [flags]
 ```

 ## Options
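
Under the new synopsis both argument forms look roughly like this (`CLIENT_ID`/`CLIENT_SECRET` are placeholders, not values from this diff):

```
rclone authorize "drive"
rclone authorize "drive" CLIENT_ID CLIENT_SECRET --auth-no-open-browser
```
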
@@ -3745,12 +3824,12 @@ password to re-encrypt the config.

 When `--password-command` is called to change the password then the
 environment variable `RCLONE_PASSWORD_CHANGE=1` will be set. So if
-changing passwords programatically you can use the environment
+changing passwords programmatically you can use the environment
 variable to distinguish which password you must supply.

 Alternatively you can remove the password first (with `rclone config
 encryption remove`), then set it again with this command which may be
-easier if you don't mind the unecrypted config file being on the disk
+easier if you don't mind the unencrypted config file being on the disk
 briefly.

@@ -4158,6 +4237,8 @@ This doesn't transfer files that are identical on src and dst, testing
 by size and modification time or MD5SUM. It doesn't delete files from
 the destination.

+*If you are looking to copy just a byte range of a file, please see 'rclone cat --offset X --count Y'*
+
 **Note**: Use the `-P`/`--progress` flag to view real-time transfer statistics
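
A sketch of the byte-range tip added above (offset and count are illustrative):

```
rclone cat remote:path/to/file --offset 1024 --count 4096 > slice.bin
```
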
@@ -4279,7 +4360,7 @@ Setting `--auto-filename` will attempt to automatically determine the
 filename from the URL (after any redirections) and used in the
 destination path.

-With `--auto-filename-header` in addition, if a specific filename is
+With `--header-filename` in addition, if a specific filename is
 set in HTTP headers, it will be used instead of the name from the URL.
 With `--print-filename` in addition, the resulting file name will be
 printed.
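
Combining the renamed `--header-filename` flag with its companions might look like this (URL and remote are placeholders):

```
rclone copyurl https://example.com/download remote:dir/ --auto-filename --header-filename --print-filename
```
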
@@ -4290,7 +4371,7 @@ destination if there is one with the same name.
 Setting `--stdout` or making the output file name `-`
 will cause the output to be written to standard output.

-## Troublshooting
+## Troubleshooting

 If you can't get `rclone copyurl` to work then here are some things you can try:

@@ -5787,11 +5868,11 @@ seconds. If rclone is quit or dies with files that haven't been
 uploaded, these will be uploaded next time rclone is run with the same
 flags.

-If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
+If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
 that the cache may exceed these quotas for two reasons. Firstly
 because it is only checked every `--vfs-cache-poll-interval`. Secondly
 because open files cannot be evicted from the cache. When
-`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
+`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
 rclone will attempt to evict the least accessed files from the cache
 first. rclone will start with files that haven't been accessed for the
 longest. This cache flushing strategy is efficient and more relevant
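
For reference, a mount that combines the corrected `--vfs-cache-min-free-space` spelling with the related quota flags might look like this (sizes and interval are illustrative):

```
rclone mount remote: /mnt/remote --vfs-cache-mode full \
  --vfs-cache-max-size 10G --vfs-cache-min-free-space 5G \
  --vfs-cache-poll-interval 1m
```
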
@@ -7049,11 +7130,11 @@ seconds. If rclone is quit or dies with files that haven't been
 uploaded, these will be uploaded next time rclone is run with the same
 flags.

-If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
+If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
 that the cache may exceed these quotas for two reasons. Firstly
 because it is only checked every `--vfs-cache-poll-interval`. Secondly
 because open files cannot be evicted from the cache. When
-`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
+`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
 rclone will attempt to evict the least accessed files from the cache
 first. rclone will start with files that haven't been accessed for the
 longest. This cache flushing strategy is efficient and more relevant
@@ -8177,11 +8258,11 @@ seconds. If rclone is quit or dies with files that haven't been
 uploaded, these will be uploaded next time rclone is run with the same
 flags.

-If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
+If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
 that the cache may exceed these quotas for two reasons. Firstly
 because it is only checked every `--vfs-cache-poll-interval`. Secondly
 because open files cannot be evicted from the cache. When
-`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
+`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
 rclone will attempt to evict the least accessed files from the cache
 first. rclone will start with files that haven't been accessed for the
 longest. This cache flushing strategy is efficient and more relevant
@@ -8732,11 +8813,11 @@ seconds. If rclone is quit or dies with files that haven't been
 uploaded, these will be uploaded next time rclone is run with the same
 flags.

-If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
+If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
 that the cache may exceed these quotas for two reasons. Firstly
 because it is only checked every `--vfs-cache-poll-interval`. Secondly
 because open files cannot be evicted from the cache. When
-`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
+`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
 rclone will attempt to evict the least accessed files from the cache
 first. rclone will start with files that haven't been accessed for the
 longest. This cache flushing strategy is efficient and more relevant
@@ -9289,11 +9370,11 @@ seconds. If rclone is quit or dies with files that haven't been
 uploaded, these will be uploaded next time rclone is run with the same
 flags.

-If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
+If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
 that the cache may exceed these quotas for two reasons. Firstly
 because it is only checked every `--vfs-cache-poll-interval`. Secondly
 because open files cannot be evicted from the cache. When
-`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
+`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
 rclone will attempt to evict the least accessed files from the cache
 first. rclone will start with files that haven't been accessed for the
 longest. This cache flushing strategy is efficient and more relevant
@@ -10027,11 +10108,11 @@ seconds. If rclone is quit or dies with files that haven't been
 uploaded, these will be uploaded next time rclone is run with the same
 flags.

-If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
+If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
 that the cache may exceed these quotas for two reasons. Firstly
 because it is only checked every `--vfs-cache-poll-interval`. Secondly
 because open files cannot be evicted from the cache. When
-`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
+`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
 rclone will attempt to evict the least accessed files from the cache
 first. rclone will start with files that haven't been accessed for the
 longest. This cache flushing strategy is efficient and more relevant
@@ -10581,7 +10662,7 @@ that it uses an on disk cache, but the cache entries are held as
 symlinks. Rclone will use the handle of the underlying file as the NFS
 handle which improves performance. This sort of cache can't be backed
 up and restored as the underlying handles will change. This is Linux
-only. It requres running rclone as root or with `CAP_DAC_READ_SEARCH`.
+only. It requires running rclone as root or with `CAP_DAC_READ_SEARCH`.
 You can run rclone with this extra permission by doing this to the
 rclone binary `sudo setcap cap_dac_read_search+ep /path/to/rclone`.

@@ -10704,11 +10785,11 @@ seconds. If rclone is quit or dies with files that haven't been
 uploaded, these will be uploaded next time rclone is run with the same
 flags.

-If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
+If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
 that the cache may exceed these quotas for two reasons. Firstly
 because it is only checked every `--vfs-cache-poll-interval`. Secondly
 because open files cannot be evicted from the cache. When
-`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
+`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
 rclone will attempt to evict the least accessed files from the cache
 first. rclone will start with files that haven't been accessed for the
 longest. This cache flushing strategy is efficient and more relevant
@@ -11353,7 +11434,7 @@ docs](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html)).
 access.

 Please note that some clients may require HTTPS endpoints. See [the
-SSL docs](#ssl-tls) for more information.
+SSL docs](#tls-ssl) for more information.

 This command uses the [VFS directory cache](#vfs-virtual-file-system).
 All the functionality will work with `--vfs-cache-mode off`. Using
@@ -11408,7 +11489,7 @@ secret_access_key = SECRET_ACCESS_KEY
 use_multipart_uploads = false
 ```

-Note that setting `disable_multipart_uploads = true` is to work around
+Note that setting `use_multipart_uploads = false` is to work around
 [a bug](#bugs) which will be fixed in due course.

 ## Bugs
@@ -11660,11 +11741,11 @@ seconds. If rclone is quit or dies with files that haven't been
 uploaded, these will be uploaded next time rclone is run with the same
 flags.

-If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
+If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
 that the cache may exceed these quotas for two reasons. Firstly
 because it is only checked every `--vfs-cache-poll-interval`. Secondly
 because open files cannot be evicted from the cache. When
-`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
+`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
 rclone will attempt to evict the least accessed files from the cache
 first. rclone will start with files that haven't been accessed for the
 longest. This cache flushing strategy is efficient and more relevant
@@ -12254,11 +12335,11 @@ seconds. If rclone is quit or dies with files that haven't been
 uploaded, these will be uploaded next time rclone is run with the same
 flags.

-If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
+If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
 that the cache may exceed these quotas for two reasons. Firstly
 because it is only checked every `--vfs-cache-poll-interval`. Secondly
 because open files cannot be evicted from the cache. When
-`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
+`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
 rclone will attempt to evict the least accessed files from the cache
 first. rclone will start with files that haven't been accessed for the
 longest. This cache flushing strategy is efficient and more relevant
@@ -13035,11 +13116,11 @@ seconds. If rclone is quit or dies with files that haven't been
 uploaded, these will be uploaded next time rclone is run with the same
 flags.

-If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
+If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
 that the cache may exceed these quotas for two reasons. Firstly
 because it is only checked every `--vfs-cache-poll-interval`. Secondly
 because open files cannot be evicted from the cache. When
-`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
+`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
 rclone will attempt to evict the least accessed files from the cache
 first. rclone will start with files that haven't been accessed for the
 longest. This cache flushing strategy is efficient and more relevant
@@ -14444,6 +14525,11 @@ it to `false`. It is also possible to specify `--boolean=false` or
 parsed as `--boolean` and the `false` is parsed as an extra command
 line argument for rclone.

+Options documented to take a `stringArray` parameter accept multiple
+values. To pass more than one value, repeat the option; for example:
+`--include value1 --include value2`.
+

 ### Time or duration options {#time-option}

 TIME or DURATION options can be specified as a duration string or a
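
Concretely, the repeated-flag form described above:

```
rclone ls remote: --include "*.jpg" --include "*.png"
```
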
@@ -16755,7 +16841,7 @@ so they take exactly the same form.
 The options set by environment variables can be seen with the `-vv` flag, e.g. `rclone version -vv`.

 Options that can appear multiple times (type `stringArray`) are
-treated slighly differently as environment variables can only be
+treated slightly differently as environment variables can only be
 defined once. In order to allow a simple mechanism for adding one or
 many items, the input is treated as a [CSV encoded](https://godoc.org/encoding/csv)
 string. For example
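
Assuming the usual `RCLONE_*` mapping of flag names, the CSV form is roughly equivalent to repeating the flag:

```
RCLONE_EXCLUDE="*.jpg,*.png" rclone ls remote:
# behaves like: rclone ls remote: --exclude "*.jpg" --exclude "*.png"
```
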
@@ -19937,7 +20023,7 @@ the `--vfs-cache-mode` is off, it will return an empty result.
     ],
 }

-The `expiry` time is the time until the file is elegible for being
+The `expiry` time is the time until the file is eligible for being
 uploaded in floating point seconds. This may go negative. As rclone
 only transfers `--transfers` files at once, only the lowest
 `--transfers` expiry times will have `uploading` as `true`. So there
@@ -21018,7 +21104,7 @@ Flags for general networking and HTTP stuff.
       --tpslimit float      Limit HTTP transactions per second to this
       --tpslimit-burst int  Max burst of transactions for --tpslimit (default 1)
       --use-cookies         Enable session cookiejar
-      --user-agent string   Set the user-agent to a specified string (default "rclone/v1.69.0")
+      --user-agent string   Set the user-agent to a specified string (default "rclone/v1.69.3")
 ```

@@ -22062,7 +22148,7 @@ on the host.
 The _FUSE_ driver is a prerequisite for rclone mounting and should be
 installed on host:
 ```
-   sudo apt-get -y install fuse
+   sudo apt-get -y install fuse3
 ```

 Create two directories required by rclone docker plugin:
@@ -23066,7 +23152,7 @@ See the [bisync filters](#filtering) section and generic
 [--filter-from](https://rclone.org/filtering/#filter-from-read-filtering-patterns-from-a-file)
 documentation.
 An [example filters file](#example-filters-file) contains filters for
-non-allowed files for synching with Dropbox.
+non-allowed files for syncing with Dropbox.

 If you make changes to your filters file then bisync requires a run
 with `--resync`. This is a safety feature, which prevents existing files
@@ -23243,7 +23329,7 @@ Using `--check-sync=false` will disable it and may significantly reduce the
 sync run times for very large numbers of files.

 The check may be run manually with `--check-sync=only`. It runs only the
-integrity check and terminates without actually synching.
+integrity check and terminates without actually syncing.

 Note that currently, `--check-sync` **only checks listing snapshots and NOT the
 actual files on the remotes.** Note also that the listing snapshots will not
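
A standalone integrity check per the text above (paths are placeholders):

```
rclone bisync path1 remote:path2 --check-sync=only
```
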
@@ -23720,7 +23806,7 @@ The `--include*`, `--exclude*`, and `--filter` flags are also supported.

 ### How to filter directories

-Filtering portions of the directory tree is a critical feature for synching.
+Filtering portions of the directory tree is a critical feature for syncing.

 Examples of directory trees (always beneath the Path1/Path2 root level)
 you may want to exclude from your sync:
@@ -23829,7 +23915,7 @@ quashed by adding `--quiet` to the bisync command line.

 ## Example exclude-style filters files for use with Dropbox {#exclude-filters}

-- Dropbox disallows synching the listed temporary and configuration/data files.
+- Dropbox disallows syncing the listed temporary and configuration/data files.
   The `- <filename>` filters exclude these files where ever they may occur
   in the sync tree. Consider adding similar exclusions for file types
   you don't need to sync, such as core dump and software build files.
@@ -24163,7 +24249,7 @@ test command flags can be equally prefixed by a single `-` or double dash.

 - `go test . -case basic -remote local -remote2 local`
   runs the `test_basic` test case using only the local filesystem,
-  synching one local directory with another local directory.
+  syncing one local directory with another local directory.
   Test script output is to the console, while commands within scenario.txt
   have their output sent to the `.../workdir/test.log` file,
   which is finally compared to the golden copy.
@@ -24394,6 +24480,9 @@ about _Unison_ and synchronization in general.

 ## Changelog

+### `v1.69.1`
+* Fixed an issue causing listings to not capture concurrent modifications under certain conditions
+
 ### `v1.68`
 * Fixed an issue affecting backends that round modtimes to a lower precision.

@@ -25680,7 +25769,7 @@ Notes on above:
    that `USER_NAME` has been created.
 2. The Resource entry must include both resource ARNs, as one implies
    the bucket and the other implies the bucket's objects.
-3. When using [s3-no-check-bucket](#s3-no-check-bucket) and the bucket already exsits, the `"arn:aws:s3:::BUCKET_NAME"` doesn't have to be included.
+3. When using [s3-no-check-bucket](#s3-no-check-bucket) and the bucket already exists, the `"arn:aws:s3:::BUCKET_NAME"` doesn't have to be included.

 For reference, [here's an Ansible script](https://gist.github.com/ebridges/ebfc9042dd7c756cd101cfa807b7ae2b)
 that will generate one or more buckets that will work with `rclone sync`.
@@ -25701,7 +25790,8 @@ tries to access data from the glacier storage class you will see an error like b
     2017/09/11 19:07:43 Failed to sync: failed to open source object: Object in GLACIER, restore first: path/to/file

 In this case you need to [restore](http://docs.aws.amazon.com/AmazonS3/latest/user-guide/restore-archived-objects.html)
-the object(s) in question before using rclone.
+the object(s) in question before accessing object contents.
+The [restore](#restore) section below shows how to do this with rclone.

 Note that rclone only speaks the S3 API it does not speak the Glacier
 Vault API, so rclone cannot directly access Glacier Vaults.
@@ -27104,7 +27194,7 @@ or from INTELLIGENT-TIERING Archive Access / Deep Archive Access tier to the Fre

 Usage Examples:

-    rclone backend restore s3:bucket/path/to/object -o priority=PRIORITY -o lifetime=DAYS
+    rclone backend restore s3:bucket/path/to/ --include /object -o priority=PRIORITY -o lifetime=DAYS
     rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY -o lifetime=DAYS
     rclone backend restore s3:bucket -o priority=PRIORITY -o lifetime=DAYS
     rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY
@@ -28658,7 +28748,7 @@ location_constraint = au-nsw
 ### Rclone Serve S3 {#rclone}

 Rclone can serve any remote over the S3 protocol. For details see the
-[rclone serve s3](https://rclone.org/commands/rclone_serve_http/) documentation.
+[rclone serve s3](https://rclone.org/commands/rclone_serve_s3/) documentation.

 For example, to serve `remote:path` over s3, run the server like this:

@@ -28678,8 +28768,8 @@ secret_access_key = SECRET_ACCESS_KEY
 use_multipart_uploads = false
 ```

-Note that setting `disable_multipart_uploads = true` is to work around
-[a bug](https://rclone.org/commands/rclone_serve_http/#bugs) which will be fixed in due course.
+Note that setting `use_multipart_uploads = false` is to work around
+[a bug](https://rclone.org/commands/rclone_serve_s3/#bugs) which will be fixed in due course.

 ### Scaleway

@@ -29775,27 +29865,49 @@ Option endpoint.
 Endpoint for Linode Object Storage API.
 Choose a number from below, or type in your own value.
 Press Enter to leave empty.
- 1 / Atlanta, GA (USA), us-southeast-1
+ 1 / Amsterdam (Netherlands), nl-ams-1
+   \ (nl-ams-1.linodeobjects.com)
+ 2 / Atlanta, GA (USA), us-southeast-1
    \ (us-southeast-1.linodeobjects.com)
- 2 / Chicago, IL (USA), us-ord-1
+ 3 / Chennai (India), in-maa-1
+   \ (in-maa-1.linodeobjects.com)
+ 4 / Chicago, IL (USA), us-ord-1
    \ (us-ord-1.linodeobjects.com)
- 3 / Frankfurt (Germany), eu-central-1
+ 5 / Frankfurt (Germany), eu-central-1
    \ (eu-central-1.linodeobjects.com)
- 4 / Milan (Italy), it-mil-1
+ 6 / Jakarta (Indonesia), id-cgk-1
+   \ (id-cgk-1.linodeobjects.com)
+ 7 / London 2 (Great Britain), gb-lon-1
+   \ (gb-lon-1.linodeobjects.com)
+ 8 / Los Angeles, CA (USA), us-lax-1
+   \ (us-lax-1.linodeobjects.com)
+ 9 / Madrid (Spain), es-mad-1
+   \ (es-mad-1.linodeobjects.com)
+10 / Melbourne (Australia), au-mel-1
+   \ (au-mel-1.linodeobjects.com)
+11 / Miami, FL (USA), us-mia-1
+   \ (us-mia-1.linodeobjects.com)
+12 / Milan (Italy), it-mil-1
    \ (it-mil-1.linodeobjects.com)
- 5 / Newark, NJ (USA), us-east-1
+13 / Newark, NJ (USA), us-east-1
    \ (us-east-1.linodeobjects.com)
- 6 / Paris (France), fr-par-1
+14 / Osaka (Japan), jp-osa-1
+   \ (jp-osa-1.linodeobjects.com)
+15 / Paris (France), fr-par-1
    \ (fr-par-1.linodeobjects.com)
- 7 / Seattle, WA (USA), us-sea-1
+16 / São Paulo (Brazil), br-gru-1
+   \ (br-gru-1.linodeobjects.com)
+17 / Seattle, WA (USA), us-sea-1
    \ (us-sea-1.linodeobjects.com)
- 8 / Singapore ap-south-1
+18 / Singapore, ap-south-1
    \ (ap-south-1.linodeobjects.com)
- 9 / Stockholm (Sweden), se-sto-1
+19 / Singapore 2, sg-sin-1
+   \ (sg-sin-1.linodeobjects.com)
+20 / Stockholm (Sweden), se-sto-1
    \ (se-sto-1.linodeobjects.com)
-10 / Washington, DC, (USA), us-iad-1
+21 / Washington, DC, (USA), us-iad-1
    \ (us-iad-1.linodeobjects.com)
-endpoint> 3
+endpoint> 5

 Option acl.
 Canned ACL used when creating buckets and storing or copying objects.
@@ -31488,7 +31600,7 @@ machine with no Internet browser available.
 Note that rclone runs a webserver on your local machine to collect the
 token as returned from Box. This only runs from the moment it opens
 your browser to the moment you get back the verification code. This
-is on `http://127.0.0.1:53682/` and this it may require you to unblock
+is on `http://127.0.0.1:53682/` and this may require you to unblock
 it temporarily if you are running a host firewall.

 Once configured you can then use `rclone` like this,
@@ -34415,7 +34527,7 @@ strong random number generator. The nonce is incremented for each
 chunk read making sure each nonce is unique for each block written.
 The chance of a nonce being reused is minuscule. If you wrote an
 exabyte of data (10¹⁸ bytes) you would have a probability of
-approximately 2×10⁻³² of re-using a nonce.
+approximately 2×10⁻³² of reusing a nonce.

 #### Chunk

@@ -41561,7 +41673,7 @@ Enter a value.
 config_2fa> 2FACODE
 Remote config
 --------------------
-[koofr]
+[iclouddrive]
 - type: iclouddrive
 - apple_id: APPLEID
 - password: *** ENCRYPTED ***
@@ -41578,6 +41690,20 @@ y/e/d> y

 ADP is currently unsupported and need to be disabled

+On iPhone, Settings `>` Apple Account `>` iCloud `>` 'Access iCloud Data on the Web' must be ON, and 'Advanced Data Protection' OFF.
+
+## Troubleshooting
+
+### Missing PCS cookies from the request
+
+This means you have Advanced Data Protection (ADP) turned on. This is not supported at the moment. If you want to use rclone you will have to turn it off. See above for how to turn it off.
+
+You will need to clear the `cookies` and the `trust_token` fields in the config. Or you can delete the remote config and start again.
+
+You should then run `rclone reconnect remote:`.
+
+Note that changing the ADP setting may not take effect immediately - you may need to wait a few hours or a day before you can get rclone to work - keep clearing the config entry and running `rclone reconnect remote:` until rclone functions properly.
+

 ### Standard options

@@ -46035,7 +46161,7 @@ Properties:
         - "us"
             - Microsoft Cloud for US Government
         - "de"
-            - Microsoft Cloud Germany
+            - Microsoft Cloud Germany (deprecated - try global region first).
         - "cn"
             - Azure and Office 365 operated by Vnet Group in China

@@ -46652,6 +46778,28 @@ See the [metadata](https://rclone.org/docs/#metadata) docs for more info.



### Impersonate other users as Admin

Unlike Google Drive and impersonating any domain user via service accounts, OneDrive requires you to authenticate as an admin account, and manually set up a remote per user you wish to impersonate.

1. In [Microsoft 365 Admin Center](https://admin.microsoft.com), open each user you need to "impersonate" and go to the OneDrive section. There is a heading called "Get access to files"; you need to click to create the link. This creates a link of the format `https://{tenant}-my.sharepoint.com/personal/{user_name_domain_tld}/` but also changes the permissions so your admin user has access.
2. Then in PowerShell run the following commands:
```console
Install-Module Microsoft.Graph -Scope CurrentUser -Repository PSGallery -Force
Import-Module Microsoft.Graph.Files
Connect-MgGraph -Scopes "Files.ReadWrite.All"
# Follow the steps to allow access to your admin user
# Then run this for each user you want to impersonate to get the Drive ID
Get-MgUserDefaultDrive -UserId '{emailaddress}'
# This will give you output of the format:
# Name     Id       DriveType CreatedDateTime
# ----     --       --------- ---------------
# OneDrive b!XYZ123 business  14/10/2023 1:00:58 pm

```
3. Then in rclone add a onedrive remote type, and use the `Type in driveID` option with the DriveID you got in the previous step, one remote per user (see the sketch of the resulting remote after this list). It will then confirm the drive ID, and hopefully give you a message of `Found drive "root" of type "business"` and then include the URL of the format `https://{tenant}-my.sharepoint.com/personal/{user_name_domain_tld}/Documents`.

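A sketch of the per-user remote this produces in `rclone.conf`; the section name is arbitrary and the drive ID is the hypothetical one from the output above:

```ini
[onedrive-user1]
type = onedrive
drive_id = b!XYZ123
drive_type = business
```
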
## Limitations

If you don't use rclone for 90 days the refresh token will
@@ -52973,7 +53121,8 @@ Properties:

On some SFTP servers (e.g. Synology) the paths are different
for SSH and SFTP so the hashes can't be calculated properly.
For them using `disable_hashcheck` is a good idea.
You can either use [`--sftp-path-override`](#--sftp-path-override)
or [`disable_hashcheck`](#--sftp-disable-hashcheck).

The only ssh agent supported under Windows is Putty's pageant.

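A minimal sketch of the override, assuming the SFTP view of the files lives under `/volume1`:

```console
rclone sync /home/local/dir remote:dir --sftp-path-override /volume1/dir
```
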
@@ -56509,6 +56658,84 @@ Options:

# Changelog

## v1.69.3 - 2025-05-21

[See commits](https://github.com/rclone/rclone/compare/v1.69.2...v1.69.3)

* Bug Fixes
    * build: Reapply update github.com/golang-jwt/jwt/v5 from 5.2.1 to 5.2.2 to fix CVE-2025-30204 (dependabot[bot])
    * build: Update github.com/ebitengine/purego to work around bug in go1.24.3 (Nick Craig-Wood)

## v1.69.2 - 2025-05-01

[See commits](https://github.com/rclone/rclone/compare/v1.69.1...v1.69.2)

* Bug fixes
    * accounting: Fix percentDiff calculation (Anagh Kumar Baranwal)
    * build
        * Update github.com/golang-jwt/jwt/v4 from 4.5.1 to 4.5.2 to fix CVE-2025-30204 (dependabot[bot])
        * Update github.com/golang-jwt/jwt/v5 from 5.2.1 to 5.2.2 to fix CVE-2025-30204 (dependabot[bot])
        * Update golang.org/x/crypto to v0.35.0 to fix CVE-2025-22869 (Nick Craig-Wood)
        * Update golang.org/x/net from 0.36.0 to 0.38.0 to fix CVE-2025-22870 (dependabot[bot])
        * Update golang.org/x/net to 0.36.0 to fix CVE-2025-22869 (dependabot[bot])
        * Stop building with go < go1.23 as security updates forbade it (Nick Craig-Wood)
        * Fix docker plugin build (Anagh Kumar Baranwal)
    * cmd: Fix crash if rclone is invoked without any arguments (Janne Hellsten)
    * config: Read configuration passwords from stdin even when terminated with EOF (Samantha Bowen)
    * doc fixes (Andrew Kreimer, Danny Garside, eccoisle, Ed Craig-Wood, emyarod, jack, Jugal Kishore, Markus Gerstel, Michael Kebe, Nick Craig-Wood, simonmcnair, simwai, Zachary Vorhies)
    * fs: Fix corruption of SizeSuffix with "B" suffix in config (eg --min-size) (Nick Craig-Wood)
    * lib/http: Fix race between Serve() and Shutdown() (Nick Craig-Wood)
    * object: Fix memory object out of bounds Seek (Nick Craig-Wood)
    * operations: Fix call fmt.Errorf with wrong err (alingse)
    * rc
        * Disable the metrics server when running `rclone rc` (hiddenmarten)
        * Fix debug/* commands not being available over unix sockets (Nick Craig-Wood)
    * serve nfs: Fix unlikely crash (Nick Craig-Wood)
    * stats: Fix the speed not getting updated after a pause in the processing (Anagh Kumar Baranwal)
    * sync
        * Fix cpu spinning when empty directory finding with leading slashes (Nick Craig-Wood)
        * Copy dir modtimes even when copyEmptySrcDirs is false (ll3006)
* VFS
    * Fix directory cache serving stale data (Lorenz Brun)
    * Fix inefficient directory caching when directory reads are slow (huanghaojun)
    * Fix integration test failures (Nick Craig-Wood)
* Drive
    * Metadata: fix error when setting copy-requires-writer-permission on a folder (Nick Craig-Wood)
* Dropbox
    * Retry link without expiry (Dave Vasilevsky)
* HTTP
    * Correct root if definitely pointing to a file (nielash)
* Iclouddrive
    * Fix so created files are writable (Ben Alex)
* Onedrive
    * Fix metadata ordering in permissions (Nick Craig-Wood)

## v1.69.1 - 2025-02-14

[See commits](https://github.com/rclone/rclone/compare/v1.69.0...v1.69.1)

* Bug Fixes
    * lib/oauthutil: Fix redirect URL mismatch errors (Nick Craig-Wood)
    * bisync: Fix listings missing concurrent modifications (nielash)
    * serve s3: Fix list objects encoding-type (Nick Craig-Wood)
    * fs: Fix confusing "didn't find section in config file" error (Nick Craig-Wood)
    * doc fixes (Christoph Berger, Dimitri Papadopoulos, Matt Ickstadt, Nick Craig-Wood, Tim White, Zachary Vorhies)
    * build: Added parallel docker builds and caching for go build in the container (Anagh Kumar Baranwal)
* VFS
    * Fix the cache failing to upload symlinks when `--links` was specified (Nick Craig-Wood)
    * Fix race detected by race detector (Nick Craig-Wood)
    * Close the change notify channel on Shutdown (izouxv)
* B2
    * Fix "fatal error: concurrent map writes" (Nick Craig-Wood)
* Iclouddrive
    * Add notes on ADP and Missing PCS cookies (Nick Craig-Wood)
* Onedrive
    * Mark German (de) region as deprecated (Nick Craig-Wood)
* S3
    * Added new storage class to magalu provider (Bruno Fernandes)
    * Add DigitalOcean regions SFO2, LON1, TOR1, BLR1 (jkpe)
    * Add latest Linode Object Storage endpoints (jbagwell-akamai)

## v1.69.0 - 2025-01-12

[See commits](https://github.com/rclone/rclone/compare/v1.68.0...v1.69.0)
@@ -56538,7 +56765,7 @@ Options:
* fs: Make `--links` flag global and add new `--local-links` and `--vfs-links` flags (Nick Craig-Wood)
* http servers: Disable automatic authentication skipping for unix sockets in http servers (Moises Lima)
    * This was making it impossible to use unix sockets with a proxy
    * This might now cause rclone to need authenticaton where it didn't before
    * This might now cause rclone to need authentication where it didn't before
* oauthutil: add support for OAuth client credential flow (Martin Hassack, Nick Craig-Wood)
* operations: make log messages consistent for mkdir/rmdir at INFO level (Nick Craig-Wood)
* rc: Add `relative` to [vfs/queue-set-expiry](https://rclone.org/rc/#vfs-queue-set-expiry) (Nick Craig-Wood)
@@ -57216,7 +57443,7 @@ instead of of `--size-only`, when `check` is not available.
* Update all dependencies (Nick Craig-Wood)
* Refactor version info and icon resource handling on windows (albertony)
* doc updates (albertony, alfish2000, asdffdsazqqq, Dimitri Papadopoulos, Herby Gillot, Joda Stößer, Manoj Ghosh, Nick Craig-Wood)
* Implement `--metadata-mapper` to transform metatadata with a user supplied program (Nick Craig-Wood)
* Implement `--metadata-mapper` to transform metadata with a user supplied program (Nick Craig-Wood)
* Add `ChunkWriterDoesntSeek` feature flag and set it for b2 (Nick Craig-Wood)
* lib/http: Export basic go string functions for use in `--template` (Gabriel Espinoza)
* makefile: Use POSIX compatible install arguments (Mina Galić)
@@ -57331,7 +57558,7 @@ instead of of `--size-only`, when `check` is not available.
* Fix "fatal error: concurrent map writes" (Nick Craig-Wood)
* B2
    * Fix multipart upload: corrupted on transfer: sizes differ XXX vs 0 (Nick Craig-Wood)
    * Fix locking window when getting mutipart upload URL (Nick Craig-Wood)
    * Fix locking window when getting multipart upload URL (Nick Craig-Wood)
    * Fix server side copies greater than 4GB (Nick Craig-Wood)
    * Fix chunked streaming uploads (Nick Craig-Wood)
    * Reduce default `--b2-upload-concurrency` to 4 to reduce memory usage (Nick Craig-Wood)
@@ -63238,7 +63465,6 @@ put them back in again.` >}}
* ben-ba <benjamin.brauner@gmx.de>
* Eli Orzitzer <e_orz@yahoo.com>
* Anthony Metzidis <anthony.metzidis@gmail.com>
* emyarod <afw5059@gmail.com>
* keongalvin <keongalvin@gmail.com>
* rarspace01 <rarspace01@users.noreply.github.com>
* Paul Stern <paulstern45@gmail.com>

MANUAL.txt (generated): 424 changes
@@ -1,6 +1,75 @@
rclone(1) User Manual
Nick Craig-Wood
Jan 12, 2025
May 21, 2025

NAME

rclone - manage files on cloud storage

SYNOPSIS

Usage:
    rclone [flags]
    rclone [command]

Available commands:
    about        Get quota information from the remote.
    authorize    Remote authorization.
    backend      Run a backend-specific command.
    bisync       Perform bidirectional synchronization between two paths.
    cat          Concatenates any files and sends them to stdout.
    check        Checks the files in the source and destination match.
    checksum     Checks the files in the destination against a SUM file.
    cleanup      Clean up the remote if possible.
    completion   Output completion script for a given shell.
    config       Enter an interactive configuration session.
    copy         Copy files from source to dest, skipping identical files.
    copyto       Copy files from source to dest, skipping identical files.
    copyurl      Copy the contents of the URL supplied content to dest:path.
    cryptcheck   Cryptcheck checks the integrity of an encrypted remote.
    cryptdecode  Cryptdecode returns unencrypted file names.
    dedupe       Interactively find duplicate filenames and delete/rename them.
    delete       Remove the files in path.
    deletefile   Remove a single file from remote.
    gendocs      Output markdown docs for rclone to the directory supplied.
    gitannex     Speaks with git-annex over stdin/stdout.
    hashsum      Produces a hashsum file for all the objects in the path.
    help         Show help for rclone commands, flags and backends.
    link         Generate public link to file/folder.
    listremotes  List all the remotes in the config file and defined in environment variables.
    ls           List the objects in the path with size and path.
    lsd          List all directories/containers/buckets in the path.
    lsf          List directories and objects in remote:path formatted for parsing.
    lsjson       List directories and objects in the path in JSON format.
    lsl          List the objects in path with modification time, size and path.
    md5sum       Produces an md5sum file for all the objects in the path.
    mkdir        Make the path if it doesn't already exist.
    mount        Mount the remote as file system on a mountpoint.
    move         Move files from source to dest.
    moveto       Move file or directory from source to dest.
    ncdu         Explore a remote with a text based user interface.
    nfsmount     Mount the remote as file system on a mountpoint.
    obscure      Obscure password for use in the rclone config file.
    purge        Remove the path and all of its contents.
    rc           Run a command against a running rclone.
    rcat         Copies standard input to file on remote.
    rcd          Run rclone listening to remote control commands only.
    rmdir        Remove the empty directory at path.
    rmdirs       Remove empty directories under the path.
    selfupdate   Update the rclone binary.
    serve        Serve a remote over a protocol.
    settier      Changes storage class/tier of objects in remote.
    sha1sum      Produces an sha1sum file for all the objects in the path.
    size         Prints the total size and number of objects in remote:path.
    sync         Make source and dest identical, modifying destination only.
    test         Run a test command
    touch        Create new file or change file modification time.
    tree         List the contents of the remote in a tree like fashion.
    version      Show the version number.

Use "rclone [command] --help" for more information about a command.
Use "rclone help flags" to see the global flags.
Use "rclone help backends" for a list of supported services.

Rclone syncs your files to cloud storage

@@ -1600,6 +1669,10 @@ include/exclude filters - everything will be removed. Use the delete
command if you want to selectively delete files. To delete empty
directories only, use command rmdir or rmdirs.

The concurrency of this operation is controlled by the --checkers global
flag. However, some backends will implement this command directly, in
which case --checkers will be ignored.

Important: Since this can cause data loss, test first with the --dry-run
or the --interactive/-i flag.

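For instance, assuming a remote named remote:, a dry run first and then
the real delete with the concurrency raised:

    rclone purge remote:dir --dry-run
    rclone purge remote:dir --checkers 16
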
@@ -2595,6 +2668,11 @@ Synopsis

Remote authorization. Used to authorize a remote or headless rclone from
a machine with a browser - use as instructed by rclone config.

The command requires 1-3 arguments:
- fs name (e.g., "drive", "s3", etc.)
- Either a base64 encoded JSON blob obtained from a previous rclone
  config session
- Or a client_id and client_secret pair obtained from the remote service

Use --auth-no-open-browser to prevent rclone from opening the auth link
in the default browser automatically.

@@ -2602,7 +2680,7 @@ Use --template to generate HTML output via a custom Go template. If a
blank string is provided as an argument to this flag, the default
template is used.

    rclone authorize [flags]
    rclone authorize <fs name> [base64_json_blob | client_id client_secret] [flags]

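For example, either form works; the values are placeholders:

    rclone authorize "drive"
    rclone authorize "drive" "client_id" "client_secret"
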
Options

@@ -3467,12 +3545,12 @@ re-encrypt the config.

When --password-command is called to change the password then the
environment variable RCLONE_PASSWORD_CHANGE=1 will be set. So if
changing passwords programatically you can use the environment variable
changing passwords programmatically you can use the environment variable
to distinguish which password you must supply.

Alternatively you can remove the password first (with
rclone config encryption remove), then set it again with this command
which may be easier if you don't mind the unecrypted config file being
which may be easier if you don't mind the unencrypted config file being
on the disk briefly.

    rclone config encryption set [flags]
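
A minimal sketch of a --password-command script honouring this
variable; the password file locations are hypothetical:

    #!/bin/sh
    # print the new password during a change, the current one otherwise
    if [ "$RCLONE_PASSWORD_CHANGE" = "1" ]; then
        cat /secure/new-password
    else
        cat /secure/current-password
    fi
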
@@ -3831,6 +3909,9 @@ This doesn't transfer files that are identical on src and dst, testing
by size and modification time or MD5SUM. It doesn't delete files from
the destination.

If you are looking to copy just a byte range of a file, please see
'rclone cat --offset X --count Y'

Note: Use the -P/--progress flag to view real-time transfer statistics

    rclone copyto source:path dest:path [flags]
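
A sketch of that byte-range read; names and numbers are placeholders:

    rclone cat remote:path/file.bin --offset 1024 --count 4096 > slice.bin
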
@@ -3939,8 +4020,8 @@ Setting --auto-filename will attempt to automatically determine the
filename from the URL (after any redirections) and used in the
destination path.

With --auto-filename-header in addition, if a specific filename is set
in HTTP headers, it will be used instead of the name from the URL. With
With --header-filename in addition, if a specific filename is set in
HTTP headers, it will be used instead of the name from the URL. With
--print-filename in addition, the resulting file name will be printed.

Setting --no-clobber will prevent overwriting file on the destination if
@@ -3949,7 +4030,7 @@ there is one with the same name.
Setting --stdout or making the output file name - will cause the output
to be written to standard output.

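A sketch combining these flags; the URL and remote are placeholders:

    rclone copyurl https://example.com/file.zip remote:dir/ --auto-filename --print-filename -P
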
Troublshooting
Troubleshooting

If you can't get rclone copyurl to work then here are some things you
can try:
@@ -5368,11 +5449,11 @@ and if they haven't been accessed for --vfs-write-back seconds. If
rclone is quit or dies with files that haven't been uploaded, these will
be uploaded next time rclone is run with the same flags.

If using --vfs-cache-max-size or --vfs-cache-min-free-size note that the
cache may exceed these quotas for two reasons. Firstly because it is
If using --vfs-cache-max-size or --vfs-cache-min-free-space note that
the cache may exceed these quotas for two reasons. Firstly because it is
only checked every --vfs-cache-poll-interval. Secondly because open
files cannot be evicted from the cache. When --vfs-cache-max-size or
--vfs-cache-min-free-size is exceeded, rclone will attempt to evict the
--vfs-cache-min-free-space is exceeded, rclone will attempt to evict the
least accessed files from the cache first. rclone will start with files
that haven't been accessed for the longest. This cache flushing strategy
is efficient and more relevant files are likely to remain cached.
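
A sketch of a mount combining these quotas; the mountpoint and sizes
are illustrative:

    rclone mount remote: /mnt/remote \
        --vfs-cache-mode full \
        --vfs-cache-max-size 10G \
        --vfs-cache-min-free-space 5G \
        --vfs-cache-poll-interval 1m
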
@@ -6600,11 +6681,11 @@ and if they haven't been accessed for --vfs-write-back seconds. If
rclone is quit or dies with files that haven't been uploaded, these will
be uploaded next time rclone is run with the same flags.

If using --vfs-cache-max-size or --vfs-cache-min-free-size note that the
cache may exceed these quotas for two reasons. Firstly because it is
If using --vfs-cache-max-size or --vfs-cache-min-free-space note that
the cache may exceed these quotas for two reasons. Firstly because it is
only checked every --vfs-cache-poll-interval. Secondly because open
files cannot be evicted from the cache. When --vfs-cache-max-size or
--vfs-cache-min-free-size is exceeded, rclone will attempt to evict the
--vfs-cache-min-free-space is exceeded, rclone will attempt to evict the
least accessed files from the cache first. rclone will start with files
that haven't been accessed for the longest. This cache flushing strategy
is efficient and more relevant files are likely to remain cached.
@@ -7716,11 +7797,11 @@ and if they haven't been accessed for --vfs-write-back seconds. If
rclone is quit or dies with files that haven't been uploaded, these will
be uploaded next time rclone is run with the same flags.

If using --vfs-cache-max-size or --vfs-cache-min-free-size note that the
cache may exceed these quotas for two reasons. Firstly because it is
If using --vfs-cache-max-size or --vfs-cache-min-free-space note that
the cache may exceed these quotas for two reasons. Firstly because it is
only checked every --vfs-cache-poll-interval. Secondly because open
files cannot be evicted from the cache. When --vfs-cache-max-size or
--vfs-cache-min-free-size is exceeded, rclone will attempt to evict the
--vfs-cache-min-free-space is exceeded, rclone will attempt to evict the
least accessed files from the cache first. rclone will start with files
that haven't been accessed for the longest. This cache flushing strategy
is efficient and more relevant files are likely to remain cached.
@@ -8261,11 +8342,11 @@ and if they haven't been accessed for --vfs-write-back seconds. If
rclone is quit or dies with files that haven't been uploaded, these will
be uploaded next time rclone is run with the same flags.

If using --vfs-cache-max-size or --vfs-cache-min-free-size note that the
cache may exceed these quotas for two reasons. Firstly because it is
If using --vfs-cache-max-size or --vfs-cache-min-free-space note that
the cache may exceed these quotas for two reasons. Firstly because it is
only checked every --vfs-cache-poll-interval. Secondly because open
files cannot be evicted from the cache. When --vfs-cache-max-size or
--vfs-cache-min-free-size is exceeded, rclone will attempt to evict the
--vfs-cache-min-free-space is exceeded, rclone will attempt to evict the
least accessed files from the cache first. rclone will start with files
that haven't been accessed for the longest. This cache flushing strategy
is efficient and more relevant files are likely to remain cached.
@@ -8810,11 +8891,11 @@ and if they haven't been accessed for --vfs-write-back seconds. If
rclone is quit or dies with files that haven't been uploaded, these will
be uploaded next time rclone is run with the same flags.

If using --vfs-cache-max-size or --vfs-cache-min-free-size note that the
cache may exceed these quotas for two reasons. Firstly because it is
If using --vfs-cache-max-size or --vfs-cache-min-free-space note that
the cache may exceed these quotas for two reasons. Firstly because it is
only checked every --vfs-cache-poll-interval. Secondly because open
files cannot be evicted from the cache. When --vfs-cache-max-size or
--vfs-cache-min-free-size is exceeded, rclone will attempt to evict the
--vfs-cache-min-free-space is exceeded, rclone will attempt to evict the
least accessed files from the cache first. rclone will start with files
that haven't been accessed for the longest. This cache flushing strategy
is efficient and more relevant files are likely to remain cached.
@@ -9567,11 +9648,11 @@ and if they haven't been accessed for --vfs-write-back seconds. If
rclone is quit or dies with files that haven't been uploaded, these will
be uploaded next time rclone is run with the same flags.

If using --vfs-cache-max-size or --vfs-cache-min-free-size note that the
cache may exceed these quotas for two reasons. Firstly because it is
If using --vfs-cache-max-size or --vfs-cache-min-free-space note that
the cache may exceed these quotas for two reasons. Firstly because it is
only checked every --vfs-cache-poll-interval. Secondly because open
files cannot be evicted from the cache. When --vfs-cache-max-size or
--vfs-cache-min-free-size is exceeded, rclone will attempt to evict the
--vfs-cache-min-free-space is exceeded, rclone will attempt to evict the
least accessed files from the cache first. rclone will start with files
that haven't been accessed for the longest. This cache flushing strategy
is efficient and more relevant files are likely to remain cached.
@@ -10102,7 +10183,7 @@ uses an on disk cache, but the cache entries are held as symlinks.
Rclone will use the handle of the underlying file as the NFS handle
which improves performance. This sort of cache can't be backed up and
restored as the underlying handles will change. This is Linux only. It
requres running rclone as root or with CAP_DAC_READ_SEARCH. You can run
requires running rclone as root or with CAP_DAC_READ_SEARCH. You can run
rclone with this extra permission by doing this to the rclone binary
sudo setcap cap_dac_read_search+ep /path/to/rclone.

@@ -10223,11 +10304,11 @@ and if they haven't been accessed for --vfs-write-back seconds. If
rclone is quit or dies with files that haven't been uploaded, these will
be uploaded next time rclone is run with the same flags.

If using --vfs-cache-max-size or --vfs-cache-min-free-size note that the
cache may exceed these quotas for two reasons. Firstly because it is
If using --vfs-cache-max-size or --vfs-cache-min-free-space note that
the cache may exceed these quotas for two reasons. Firstly because it is
only checked every --vfs-cache-poll-interval. Secondly because open
files cannot be evicted from the cache. When --vfs-cache-max-size or
--vfs-cache-min-free-size is exceeded, rclone will attempt to evict the
--vfs-cache-min-free-space is exceeded, rclone will attempt to evict the
least accessed files from the cache first. rclone will start with files
that haven't been accessed for the longest. This cache flushing strategy
is efficient and more relevant files are likely to remain cached.
@@ -10903,8 +10984,8 @@ which is defined like this:
    secret_access_key = SECRET_ACCESS_KEY
    use_multipart_uploads = false

Note that setting disable_multipart_uploads = true is to work around a
bug which will be fixed in due course.
Note that setting use_multipart_uploads = false is to work around a bug
which will be fixed in due course.

Bugs

@@ -11151,11 +11232,11 @@ and if they haven't been accessed for --vfs-write-back seconds. If
rclone is quit or dies with files that haven't been uploaded, these will
be uploaded next time rclone is run with the same flags.

If using --vfs-cache-max-size or --vfs-cache-min-free-size note that the
cache may exceed these quotas for two reasons. Firstly because it is
If using --vfs-cache-max-size or --vfs-cache-min-free-space note that
the cache may exceed these quotas for two reasons. Firstly because it is
only checked every --vfs-cache-poll-interval. Secondly because open
files cannot be evicted from the cache. When --vfs-cache-max-size or
--vfs-cache-min-free-size is exceeded, rclone will attempt to evict the
--vfs-cache-min-free-space is exceeded, rclone will attempt to evict the
least accessed files from the cache first. rclone will start with files
that haven't been accessed for the longest. This cache flushing strategy
is efficient and more relevant files are likely to remain cached.
@@ -11738,11 +11819,11 @@ and if they haven't been accessed for --vfs-write-back seconds. If
rclone is quit or dies with files that haven't been uploaded, these will
be uploaded next time rclone is run with the same flags.

If using --vfs-cache-max-size or --vfs-cache-min-free-size note that the
cache may exceed these quotas for two reasons. Firstly because it is
If using --vfs-cache-max-size or --vfs-cache-min-free-space note that
the cache may exceed these quotas for two reasons. Firstly because it is
only checked every --vfs-cache-poll-interval. Secondly because open
files cannot be evicted from the cache. When --vfs-cache-max-size or
--vfs-cache-min-free-size is exceeded, rclone will attempt to evict the
--vfs-cache-min-free-space is exceeded, rclone will attempt to evict the
least accessed files from the cache first. rclone will start with files
that haven't been accessed for the longest. This cache flushing strategy
is efficient and more relevant files are likely to remain cached.
@@ -12538,11 +12619,11 @@ and if they haven't been accessed for --vfs-write-back seconds. If
rclone is quit or dies with files that haven't been uploaded, these will
be uploaded next time rclone is run with the same flags.

If using --vfs-cache-max-size or --vfs-cache-min-free-size note that the
cache may exceed these quotas for two reasons. Firstly because it is
If using --vfs-cache-max-size or --vfs-cache-min-free-space note that
the cache may exceed these quotas for two reasons. Firstly because it is
only checked every --vfs-cache-poll-interval. Secondly because open
files cannot be evicted from the cache. When --vfs-cache-max-size or
--vfs-cache-min-free-size is exceeded, rclone will attempt to evict the
--vfs-cache-min-free-space is exceeded, rclone will attempt to evict the
least accessed files from the cache first. rclone will start with files
that haven't been accessed for the longest. This cache flushing strategy
is efficient and more relevant files are likely to remain cached.
@@ -13895,6 +13976,10 @@ also possible to specify --boolean=false or --boolean=true. Note that
--boolean false is not valid - this is parsed as --boolean and the false
is parsed as an extra command line argument for rclone.

Options documented to take a stringArray parameter accept multiple
values. To pass more than one value, repeat the option; for example:
--include value1 --include value2.

Time or duration options

TIME or DURATION options can be specified as a duration string or a time
@@ -16177,7 +16262,7 @@ The options set by environment variables can be seen with the -vv flag,
e.g. rclone version -vv.

Options that can appear multiple times (type stringArray) are treated
slighly differently as environment variables can only be defined once.
slightly differently as environment variables can only be defined once.
In order to allow a simple mechanism for adding one or many items, the
input is treated as a CSV encoded string. For example

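For instance, assuming the usual RCLONE_<FLAG> mapping of flags to
environment variables, one variable can carry several values:

    export RCLONE_EXCLUDE="*.jpg,*.png"
    # equivalent to --exclude "*.jpg" --exclude "*.png"
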
@@ -19420,7 +19505,7 @@ This is only useful if --vfs-cache-mode > off. If you call it when the
        ],
    }

The expiry time is the time until the file is elegible for being
The expiry time is the time until the file is eligible for being
uploaded in floating point seconds. This may go negative. As rclone only
transfers --transfers files at once, only the lowest --transfers expiry
times will have uploading as true. So there may be files with negative
@@ -20569,7 +20654,7 @@ Flags for general networking and HTTP stuff.
      --tpslimit float          Limit HTTP transactions per second to this
      --tpslimit-burst int      Max burst of transactions for --tpslimit (default 1)
      --use-cookies             Enable session cookiejar
      --user-agent string       Set the user-agent to a specified string (default "rclone/v1.69.0")
      --user-agent string       Set the user-agent to a specified string (default "rclone/v1.69.3")

Performance

@@ -21574,7 +21659,7 @@ Start from installing Docker on the host.
The FUSE driver is a prerequisite for rclone mounting and should be
installed on host:

    sudo apt-get -y install fuse
    sudo apt-get -y install fuse3

Create two directories required by rclone docker plugin:

@@ -22531,7 +22616,7 @@ Also see the all files changed check.
By using rclone filter features you can exclude file types or directory
sub-trees from the sync. See the bisync filters section and generic
--filter-from documentation. An example filters file contains filters
for non-allowed files for synching with Dropbox.
for non-allowed files for syncing with Dropbox.

If you make changes to your filters file then bisync requires a run with
--resync. This is a safety feature, which prevents existing files on the
@@ -22704,7 +22789,7 @@ of a sync. Using --check-sync=false will disable it and may
significantly reduce the sync run times for very large numbers of files.

The check may be run manually with --check-sync=only. It runs only the
integrity check and terminates without actually synching.
integrity check and terminates without actually syncing.

Note that currently, --check-sync only checks listing snapshots and NOT
the actual files on the remotes. Note also that the listing snapshots
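
A sketch of the standalone check; the paths are placeholders:

    rclone bisync /path/to/local remote:path --check-sync=only
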
@@ -23237,7 +23322,7 @@ supported.
How to filter directories

Filtering portions of the directory tree is a critical feature for
synching.
syncing.

Examples of directory trees (always beneath the Path1/Path2 root level)
you may want to exclude from your sync: - Directory trees containing
@@ -23348,7 +23433,7 @@ This noise can be quashed by adding --quiet to the bisync command line.
Example exclude-style filters files for use with Dropbox

- Dropbox disallows synching the listed temporary and
- Dropbox disallows syncing the listed temporary and
  configuration/data files. The `- ` filters exclude these files where
  ever they may occur in the sync tree. Consider adding similar
  exclusions for file types you don't need to sync, such as core dump
@@ -23668,7 +23753,7 @@ dash.
Running tests

- go test . -case basic -remote local -remote2 local runs the
  test_basic test case using only the local filesystem, synching one
  test_basic test case using only the local filesystem, syncing one
  local directory with another local directory. Test script output is
  to the console, while commands within scenario.txt have their output
  sent to the .../workdir/test.log file, which is finally compared to
@@ -23901,6 +23986,11 @@ Unison and synchronization in general.

Changelog

v1.69.1

- Fixed an issue causing listings to not capture concurrent
  modifications under certain conditions

v1.68

- Fixed an issue affecting backends that round modtimes to a lower
@@ -25192,7 +25282,7 @@ Notes on above:
   that USER_NAME has been created.
2. The Resource entry must include both resource ARNs, as one implies
   the bucket and the other implies the bucket's objects.
3. When using s3-no-check-bucket and the bucket already exsits, the
3. When using s3-no-check-bucket and the bucket already exists, the
   "arn:aws:s3:::BUCKET_NAME" doesn't have to be included.

For reference, here's an Ansible script that will generate one or more
@@ -25214,8 +25304,9 @@ glacier storage class you will see an error like below.

    2017/09/11 19:07:43 Failed to sync: failed to open source object: Object in GLACIER, restore first: path/to/file

In this case you need to restore the object(s) in question before using
rclone.
In this case you need to restore the object(s) in question before
accessing object contents. The restore section below shows how to do
this with rclone.

Note that rclone only speaks the S3 API, it does not speak the Glacier
Vault API, so rclone cannot directly access Glacier Vaults.
@@ -26646,7 +26737,7 @@ Access tier to the Frequent Access tier.

Usage Examples:

    rclone backend restore s3:bucket/path/to/object -o priority=PRIORITY -o lifetime=DAYS
    rclone backend restore s3:bucket/path/to/ --include /object -o priority=PRIORITY -o lifetime=DAYS
    rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY -o lifetime=DAYS
    rclone backend restore s3:bucket -o priority=PRIORITY -o lifetime=DAYS
    rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY
@@ -28155,8 +28246,8 @@ this:
    secret_access_key = SECRET_ACCESS_KEY
    use_multipart_uploads = false

Note that setting disable_multipart_uploads = true is to work around a
bug which will be fixed in due course.
Note that setting use_multipart_uploads = false is to work around a bug
which will be fixed in due course.

Scaleway

@@ -29203,27 +29294,49 @@ This will guide you through an interactive setup process.
Endpoint for Linode Object Storage API.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
 1 / Atlanta, GA (USA), us-southeast-1
 1 / Amsterdam (Netherlands), nl-ams-1
   \ (nl-ams-1.linodeobjects.com)
 2 / Atlanta, GA (USA), us-southeast-1
   \ (us-southeast-1.linodeobjects.com)
 2 / Chicago, IL (USA), us-ord-1
 3 / Chennai (India), in-maa-1
   \ (in-maa-1.linodeobjects.com)
 4 / Chicago, IL (USA), us-ord-1
   \ (us-ord-1.linodeobjects.com)
 3 / Frankfurt (Germany), eu-central-1
 5 / Frankfurt (Germany), eu-central-1
   \ (eu-central-1.linodeobjects.com)
 4 / Milan (Italy), it-mil-1
 6 / Jakarta (Indonesia), id-cgk-1
   \ (id-cgk-1.linodeobjects.com)
 7 / London 2 (Great Britain), gb-lon-1
   \ (gb-lon-1.linodeobjects.com)
 8 / Los Angeles, CA (USA), us-lax-1
   \ (us-lax-1.linodeobjects.com)
 9 / Madrid (Spain), es-mad-1
   \ (es-mad-1.linodeobjects.com)
10 / Melbourne (Australia), au-mel-1
   \ (au-mel-1.linodeobjects.com)
11 / Miami, FL (USA), us-mia-1
   \ (us-mia-1.linodeobjects.com)
12 / Milan (Italy), it-mil-1
   \ (it-mil-1.linodeobjects.com)
 5 / Newark, NJ (USA), us-east-1
13 / Newark, NJ (USA), us-east-1
   \ (us-east-1.linodeobjects.com)
 6 / Paris (France), fr-par-1
14 / Osaka (Japan), jp-osa-1
   \ (jp-osa-1.linodeobjects.com)
15 / Paris (France), fr-par-1
   \ (fr-par-1.linodeobjects.com)
 7 / Seattle, WA (USA), us-sea-1
16 / São Paulo (Brazil), br-gru-1
   \ (br-gru-1.linodeobjects.com)
17 / Seattle, WA (USA), us-sea-1
   \ (us-sea-1.linodeobjects.com)
 8 / Singapore ap-south-1
18 / Singapore, ap-south-1
   \ (ap-south-1.linodeobjects.com)
 9 / Stockholm (Sweden), se-sto-1
19 / Singapore 2, sg-sin-1
   \ (sg-sin-1.linodeobjects.com)
20 / Stockholm (Sweden), se-sto-1
   \ (se-sto-1.linodeobjects.com)
10 / Washington, DC, (USA), us-iad-1
21 / Washington, DC, (USA), us-iad-1
   \ (us-iad-1.linodeobjects.com)
endpoint> 3
endpoint> 5

Option acl.
Canned ACL used when creating buckets and storing or copying objects.
@@ -30848,7 +30961,7 @@ Internet browser available.
Note that rclone runs a webserver on your local machine to collect the
token as returned from Box. This only runs from the moment it opens your
browser to the moment you get back the verification code. This is on
http://127.0.0.1:53682/ and this it may require you to unblock it
http://127.0.0.1:53682/ and this may require you to unblock it
temporarily if you are running a host firewall.

Once configured you can then use rclone like this,
@@ -33757,7 +33870,7 @@ The initial nonce is generated from the operating systems crypto strong
random number generator. The nonce is incremented for each chunk read
making sure each nonce is unique for each block written. The chance of a
nonce being reused is minuscule. If you wrote an exabyte of data (10¹⁸
bytes) you would have a probability of approximately 2×10⁻³² of re-using
bytes) you would have a probability of approximately 2×10⁻³² of reusing
a nonce.

Chunk
@@ -40978,7 +41091,7 @@ This will guide you through an interactive setup process:
config_2fa> 2FACODE
Remote config
--------------------
[koofr]
[iclouddrive]
- type: iclouddrive
- apple_id: APPLEID
- password: *** ENCRYPTED ***
@@ -40994,6 +41107,27 @@ Advanced Data Protection

ADP is currently unsupported and needs to be disabled

On iPhone, Settings > Apple Account > iCloud > 'Access iCloud Data on
the Web' must be ON, and 'Advanced Data Protection' OFF.

Troubleshooting

Missing PCS cookies from the request

This means you have Advanced Data Protection (ADP) turned on. This is
not supported at the moment. If you want to use rclone you will have to
turn it off. See above for how to turn it off.

You will need to clear the cookies and the trust_token fields in the
config. Or you can delete the remote config and start again.

You should then run rclone reconnect remote:.

Note that changing the ADP setting may not take effect immediately - you
may need to wait a few hours or a day before you can get rclone to work
- keep clearing the config entry and running rclone reconnect remote:
until rclone functions properly.

Standard options

Here are the Standard options specific to iclouddrive (iCloud Drive).
@@ -45589,7 +45723,8 @@ Properties:
    - "us"
        - Microsoft Cloud for US Government
    - "de"
        - Microsoft Cloud Germany
        - Microsoft Cloud Germany (deprecated - try global region
          first).
    - "cn"
        - Azure and Office 365 operated by Vnet Group in China

@@ -46248,6 +46383,38 @@ Here are the possible system metadata items for the onedrive backend.

See the metadata docs for more info.

Impersonate other users as Admin

Unlike Google Drive and impersonating any domain user via service
accounts, OneDrive requires you to authenticate as an admin account, and
manually set up a remote per user you wish to impersonate.

1.  In Microsoft 365 Admin Center, open each user you need to
    "impersonate" and go to the OneDrive section. There is a heading
    called "Get access to files"; you need to click to create the link.
    This creates a link of the format
    https://{tenant}-my.sharepoint.com/personal/{user_name_domain_tld}/
    but also changes the permissions so your admin user has access.
2.  Then in PowerShell run the following commands:

        Install-Module Microsoft.Graph -Scope CurrentUser -Repository PSGallery -Force
        Import-Module Microsoft.Graph.Files
        Connect-MgGraph -Scopes "Files.ReadWrite.All"
        # Follow the steps to allow access to your admin user
        # Then run this for each user you want to impersonate to get the Drive ID
        Get-MgUserDefaultDrive -UserId '{emailaddress}'
        # This will give you output of the format:
        # Name     Id       DriveType CreatedDateTime
        # ----     --       --------- ---------------
        # OneDrive b!XYZ123 business  14/10/2023 1:00:58 pm

3.  Then in rclone add a onedrive remote type, and use the
    Type in driveID option with the DriveID you got in the previous
    step, one remote per user. It will then confirm the drive ID, and
    hopefully give you a message of Found drive "root" of type
    "business" and then include the URL of the format
    https://{tenant}-my.sharepoint.com/personal/{user_name_domain_tld}/Documents

Limitations

If you don't use rclone for 90 days the refresh token will expire. This
@@ -52576,8 +52743,8 @@ Properties:
Limitations

On some SFTP servers (e.g. Synology) the paths are different for SSH and
SFTP so the hashes can't be calculated properly. For them using
disable_hashcheck is a good idea.
SFTP so the hashes can't be calculated properly. You can either use
--sftp-path-override or disable_hashcheck.

The only ssh agent supported under Windows is Putty's pageant.

@@ -56157,6 +56324,112 @@ Options:

Changelog

v1.69.3 - 2025-05-21

See commits

- Bug Fixes
  - build: Reapply update github.com/golang-jwt/jwt/v5 from 5.2.1 to
    5.2.2 to fix CVE-2025-30204 (dependabot[bot])
  - build: Update github.com/ebitengine/purego to work around bug in
    go1.24.3 (Nick Craig-Wood)

v1.69.2 - 2025-05-01

See commits

- Bug fixes
  - accounting: Fix percentDiff calculation (Anagh Kumar Baranwal)
  - build
    - Update github.com/golang-jwt/jwt/v4 from 4.5.1 to 4.5.2 to
      fix CVE-2025-30204 (dependabot[bot])
    - Update github.com/golang-jwt/jwt/v5 from 5.2.1 to 5.2.2 to
      fix CVE-2025-30204 (dependabot[bot])
    - Update golang.org/x/crypto to v0.35.0 to fix CVE-2025-22869
      (Nick Craig-Wood)
    - Update golang.org/x/net from 0.36.0 to 0.38.0 to fix
      CVE-2025-22870 (dependabot[bot])
    - Update golang.org/x/net to 0.36.0 to fix CVE-2025-22869
      (dependabot[bot])
    - Stop building with go < go1.23 as security updates forbade
      it (Nick Craig-Wood)
    - Fix docker plugin build (Anagh Kumar Baranwal)
  - cmd: Fix crash if rclone is invoked without any arguments (Janne
    Hellsten)
  - config: Read configuration passwords from stdin even when
    terminated with EOF (Samantha Bowen)
  - doc fixes (Andrew Kreimer, Danny Garside, eccoisle, Ed
    Craig-Wood, emyarod, jack, Jugal Kishore, Markus Gerstel,
    Michael Kebe, Nick Craig-Wood, simonmcnair, simwai, Zachary
    Vorhies)
  - fs: Fix corruption of SizeSuffix with "B" suffix in config (eg
    --min-size) (Nick Craig-Wood)
  - lib/http: Fix race between Serve() and Shutdown() (Nick
    Craig-Wood)
  - object: Fix memory object out of bounds Seek (Nick Craig-Wood)
  - operations: Fix call fmt.Errorf with wrong err (alingse)
  - rc
    - Disable the metrics server when running rclone rc
      (hiddenmarten)
    - Fix debug/* commands not being available over unix sockets
      (Nick Craig-Wood)
  - serve nfs: Fix unlikely crash (Nick Craig-Wood)
  - stats: Fix the speed not getting updated after a pause in the
    processing (Anagh Kumar Baranwal)
  - sync
    - Fix cpu spinning when empty directory finding with leading
      slashes (Nick Craig-Wood)
    - Copy dir modtimes even when copyEmptySrcDirs is false
      (ll3006)
- VFS
  - Fix directory cache serving stale data (Lorenz Brun)
  - Fix inefficient directory caching when directory reads are slow
    (huanghaojun)
  - Fix integration test failures (Nick Craig-Wood)
- Drive
  - Metadata: fix error when setting copy-requires-writer-permission
    on a folder (Nick Craig-Wood)
- Dropbox
  - Retry link without expiry (Dave Vasilevsky)
- HTTP
  - Correct root if definitely pointing to a file (nielash)
- Iclouddrive
  - Fix so created files are writable (Ben Alex)
- Onedrive
  - Fix metadata ordering in permissions (Nick Craig-Wood)

v1.69.1 - 2025-02-14

See commits

- Bug Fixes
  - lib/oauthutil: Fix redirect URL mismatch errors (Nick
    Craig-Wood)
  - bisync: Fix listings missing concurrent modifications (nielash)
  - serve s3: Fix list objects encoding-type (Nick Craig-Wood)
  - fs: Fix confusing "didn't find section in config file" error
    (Nick Craig-Wood)
  - doc fixes (Christoph Berger, Dimitri Papadopoulos, Matt
    Ickstadt, Nick Craig-Wood, Tim White, Zachary Vorhies)
  - build: Added parallel docker builds and caching for go build in
    the container (Anagh Kumar Baranwal)
- VFS
  - Fix the cache failing to upload symlinks when --links was
    specified (Nick Craig-Wood)
  - Fix race detected by race detector (Nick Craig-Wood)
  - Close the change notify channel on Shutdown (izouxv)
- B2
  - Fix "fatal error: concurrent map writes" (Nick Craig-Wood)
- Iclouddrive
  - Add notes on ADP and Missing PCS cookies (Nick Craig-Wood)
- Onedrive
  - Mark German (de) region as deprecated (Nick Craig-Wood)
- S3
  - Added new storage class to magalu provider (Bruno Fernandes)
  - Add DigitalOcean regions SFO2, LON1, TOR1, BLR1 (jkpe)
  - Add latest Linode Object Storage endpoints (jbagwell-akamai)

v1.69.0 - 2025-01-12

See commits
@@ -56202,7 +56475,7 @@ See commits
  sockets in http servers (Moises Lima)
  - This was making it impossible to use unix sockets with a
    proxy
  - This might now cause rclone to need authenticaton where it
  - This might now cause rclone to need authentication where it
    didn't before
- oauthutil: add support for OAuth client credential flow (Martin
  Hassack, Nick Craig-Wood)
@@ -57147,7 +57420,7 @@ See commits
- doc updates (albertony, alfish2000, asdffdsazqqq, Dimitri
  Papadopoulos, Herby Gillot, Joda Stößer, Manoj Ghosh, Nick
  Craig-Wood)
- Implement --metadata-mapper to transform metatadata with a user
- Implement --metadata-mapper to transform metadata with a user
  supplied program (Nick Craig-Wood)
- Add ChunkWriterDoesntSeek feature flag and set it for b2 (Nick
  Craig-Wood)
@@ -57309,7 +57582,7 @@ See commits
- B2
  - Fix multipart upload: corrupted on transfer: sizes differ XXX vs
    0 (Nick Craig-Wood)
  - Fix locking window when getting mutipart upload URL (Nick
  - Fix locking window when getting multipart upload URL (Nick
    Craig-Wood)
  - Fix server side copies greater than 4GB (Nick Craig-Wood)
  - Fix chunked streaming uploads (Nick Craig-Wood)
@@ -64699,7 +64972,6 @@ email addresses removed from here need to be added to bin/.ignore-emails to make
- ben-ba benjamin.brauner@gmx.de
- Eli Orzitzer e_orz@yahoo.com
- Anthony Metzidis anthony.metzidis@gmail.com
- emyarod afw5059@gmail.com
- keongalvin keongalvin@gmail.com
- rarspace01 rarspace01@users.noreply.github.com
- Paul Stern paulstern45@gmail.com

README.md: 18 changes
@@ -1,20 +1,4 @@
<div align="center">
<sup>Special thanks to our sponsor:</sup>
<br>
<br>
<a href="https://www.warp.dev/?utm_source=github&utm_medium=referral&utm_campaign=rclone_20231103">
<div>
<img src="https://rclone.org/img/logos/warp-github.svg" width="300" alt="Warp">
</div>
<b>Warp is a modern, Rust-based terminal with AI built in so you and your team can build great software, faster.</b>
<div>
<sup>Visit warp.dev to learn more.</sup>
</div>
</a>
<br>
<hr>
</div>
<br>


[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)

@@ -124,8 +124,8 @@ Now

* git co ${BASE_TAG}-stable
* git cherry-pick any fixes
* Do the steps as above
* make startstable
* Do the steps as above
* git co master
* `#` cherry pick the changes to the changelog - check the diff to make sure it is correct
* git checkout ${BASE_TAG}-stable docs/content/changelog.md

@@ -899,7 +899,7 @@ func (o *Object) getMetadata(ctx context.Context) error {

// Hash returns the MD5 of an object returning a lowercase hex string
//
// May make a network request becaue the [fs.List] method does not
// May make a network request because the [fs.List] method does not
// return MD5 hashes for DirEntry
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
    if ty != hash.MD5 {

@@ -299,14 +299,13 @@ type Fs struct {

// Object describes a b2 object
type Object struct {
    fs       *Fs               // what this object is part of
    remote   string            // The remote path
    id       string            // b2 id of the file
    modTime  time.Time         // The modified time of the object if known
    sha1     string            // SHA-1 hash if known
    size     int64             // Size of the object
    mimeType string            // Content-Type of the object
    meta     map[string]string // The object metadata if known - may be nil - with lower case keys
    fs       *Fs       // what this object is part of
    remote   string    // The remote path
    id       string    // b2 id of the file
    modTime  time.Time // The modified time of the object if known
    sha1     string    // SHA-1 hash if known
    size     int64     // Size of the object
    mimeType string    // Content-Type of the object
}

// ------------------------------------------------------------
@@ -1598,9 +1597,6 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
    if err != nil {
        return err
    }
    // For now, just set "mtime" in metadata
    o.meta = make(map[string]string, 1)
    o.meta["mtime"] = o.modTime.Format(time.RFC3339Nano)
    return nil
}

@@ -1880,13 +1876,6 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
        Info: Info,
    }

    // Embryonic metadata support - just mtime
    o.meta = make(map[string]string, 1)
    modTime, err := parseTimeStringHelper(info.Info[timeKey])
    if err == nil {
        o.meta["mtime"] = modTime.Format(time.RFC3339Nano)
    }

    // When reading files from B2 via cloudflare using
    // --b2-download-url cloudflare strips the Content-Length
    // headers (presumably so it can inject stuff) so use the old

@@ -256,12 +256,6 @@ func (f *Fs) internalTestMetadata(t *testing.T, size string, uploadCutoff string
        assert.Equal(t, v, got, k)
    }

    // mtime
    for k, v := range metadata {
        got := o.meta[k]
        assert.Equal(t, v, got, k)
    }

    assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")

    // Modification time from the x-bz-info-src_last_modified_millis header

@@ -2480,7 +2480,7 @@ func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte)
    if len(data) > maxMetadataSizeWritten {
        return nil, false, ErrMetaTooBig
    }
    if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
    if len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
        return nil, false, errors.New("invalid json")
    }
    var metadata metaSimpleJSON

@@ -203,7 +203,6 @@ func driveScopesContainsAppFolder(scopes []string) bool {
        if scope == scopePrefix+"drive.appfolder" {
            return true
        }

    }
    return false
}
@@ -1212,6 +1211,7 @@ func fixMimeType(mimeTypeIn string) string {
    }
    return mimeTypeOut
}

func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
    out = make(map[string][]string, len(in))
    for k, v := range in {
@@ -1222,9 +1222,11 @@ func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
    }
    return out
}

func isInternalMimeType(mimeType string) bool {
    return strings.HasPrefix(mimeType, "application/vnd.google-apps.")
}

func isLinkMimeType(mimeType string) bool {
    return strings.HasPrefix(mimeType, "application/x-link-")
}
@@ -1657,7 +1659,8 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.F
// When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithExportInfo(
    ctx context.Context, remote string, info *drive.File,
    extension, exportName, exportMimeType string, isDocument bool) (o fs.Object, err error) {
    extension, exportName, exportMimeType string, isDocument bool,
) (o fs.Object, err error) {
    // Note that resolveShortcut will have been called already if
    // we are being called from a listing. However the drive.Item
    // will have been resolved so this will do nothing.
@@ -1760,7 +1763,7 @@ func (f *Fs) createDir(ctx context.Context, pathID, leaf string, metadata fs.Met
    }
    var updateMetadata updateMetadataFn
    if len(metadata) > 0 {
        updateMetadata, err = f.updateMetadata(ctx, createInfo, metadata, true)
        updateMetadata, err = f.updateMetadata(ctx, createInfo, metadata, true, true)
        if err != nil {
            return nil, fmt.Errorf("create dir: failed to update metadata: %w", err)
        }
@@ -1791,7 +1794,7 @@ func (f *Fs) updateDir(ctx context.Context, dirID string, metadata fs.Metadata)
    }
    dirID = actualID(dirID)
    updateInfo := &drive.File{}
    updateMetadata, err := f.updateMetadata(ctx, updateInfo, metadata, true)
    updateMetadata, err := f.updateMetadata(ctx, updateInfo, metadata, true, true)
    if err != nil {
        return nil, fmt.Errorf("update dir: failed to update metadata from source object: %w", err)
    }
@@ -1848,6 +1851,7 @@ func linkTemplate(mt string) *template.Template {
|
||||
})
|
||||
return _linkTemplates[mt]
|
||||
}
|
||||
|
||||
func (f *Fs) fetchFormats(ctx context.Context) {
|
||||
fetchFormatsOnce.Do(func() {
|
||||
var about *drive.About
|
||||
@@ -1893,7 +1897,8 @@ func (f *Fs) importFormats(ctx context.Context) map[string][]string {
|
||||
// Look through the exportExtensions and find the first format that can be
|
||||
// converted. If none found then return ("", "", false)
|
||||
func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string) (
|
||||
extension, mimeType string, isDocument bool) {
|
||||
extension, mimeType string, isDocument bool,
|
||||
) {
|
||||
exportMimeTypes, isDocument := f.exportFormats(ctx)[itemMimeType]
|
||||
if isDocument {
|
||||
for _, _extension := range f.exportExtensions {
|
||||
@@ -2689,7 +2694,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
||||
if shortcutID != "" {
|
||||
return f.delete(ctx, shortcutID, f.opt.UseTrash)
|
||||
}
|
||||
var trashedFiles = false
|
||||
trashedFiles := false
|
||||
if check {
|
||||
found, err := f.list(ctx, []string{directoryID}, "", false, false, f.opt.TrashedOnly, true, func(item *drive.File) bool {
|
||||
if !item.Trashed {
|
||||
@@ -2926,7 +2931,6 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
err := f.svc.Files.EmptyTrash().Context(ctx).Do()
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -3187,6 +3191,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (f *Fs) changeNotifyStartPageToken(ctx context.Context) (pageToken string, err error) {
|
||||
var startPageToken *drive.StartPageToken
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
@@ -3990,14 +3995,13 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
|
||||
case "query":
|
||||
if len(arg) == 1 {
|
||||
query := arg[0]
|
||||
var results, err = f.query(ctx, query)
|
||||
results, err := f.query(ctx, query)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to execute query: %q, error: %w", query, err)
|
||||
}
|
||||
return results, nil
|
||||
} else {
|
||||
return nil, errors.New("need a query argument")
|
||||
}
|
||||
return nil, errors.New("need a query argument")
|
||||
case "rescue":
|
||||
dirID := ""
|
||||
_, delete := opt["delete"]
|
||||
@@ -4057,6 +4061,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
}
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
if t != hash.MD5 && t != hash.SHA1 && t != hash.SHA256 {
|
||||
return "", hash.ErrUnsupported
|
||||
@@ -4071,7 +4076,8 @@ func (o *baseObject) Size() int64 {
|
||||
|
||||
// getRemoteInfoWithExport returns a drive.File and the export settings for the remote
|
||||
func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
|
||||
info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) {
|
||||
info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error,
|
||||
) {
|
||||
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
|
||||
if err != nil {
|
||||
if err == fs.ErrorDirNotFound {
|
||||
@@ -4284,12 +4290,13 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
}
|
||||
return o.baseObject.open(ctx, o.url, options...)
|
||||
}
|
||||
|
||||
func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
// Update the size with what we are reading as it can change from
|
||||
// the HEAD in the listing to this GET. This stops rclone marking
|
||||
// the transfer as corrupted.
|
||||
var offset, end int64 = 0, -1
|
||||
var newOptions = options[:0]
|
||||
newOptions := options[:0]
|
||||
for _, o := range options {
|
||||
// Note that Range requests don't work on Google docs:
|
||||
// https://developers.google.com/drive/v3/web/manage-downloads#partial_download
|
||||
@@ -4316,9 +4323,10 @@ func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
var offset, limit int64 = 0, -1
|
||||
var data = o.content
|
||||
data := o.content
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.SeekOption:
|
||||
@@ -4343,7 +4351,8 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
|
||||
}
|
||||
|
||||
func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
|
||||
src fs.ObjectInfo) (info *drive.File, err error) {
|
||||
src fs.ObjectInfo,
|
||||
) (info *drive.File, err error) {
|
||||
// Make the API request to upload metadata and file data.
|
||||
size := src.Size()
|
||||
if size >= 0 && size < int64(o.fs.opt.UploadCutoff) {
|
||||
@@ -4421,6 +4430,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
srcMimeType := fs.MimeType(ctx, src)
|
||||
importMimeType := ""
|
||||
@@ -4516,6 +4526,7 @@ func (o *baseObject) Metadata(ctx context.Context) (metadata fs.Metadata, err er
|
||||
func (o *documentObject) ext() string {
|
||||
return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
|
||||
}
|
||||
|
||||
func (o *linkObject) ext() string {
|
||||
return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
|
||||
}
|
||||
|
||||
@@ -508,7 +508,7 @@ type updateMetadataFn func(context.Context, *drive.File) error
//
// It returns a callback which should be called to finish the updates
// after the data is uploaded.
func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update bool) (callback updateMetadataFn, err error) {
func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update, isFolder bool) (callback updateMetadataFn, err error) {
callbackFns := []updateMetadataFn{}
callback = func(ctx context.Context, info *drive.File) error {
for _, fn := range callbackFns {
@@ -533,7 +533,9 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
}
switch k {
case "copy-requires-writer-permission":
if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
if isFolder {
fs.Debugf(f, "Ignoring %s=%s as can't set on folders", k, v)
} else if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
return nil, err
}
case "writers-can-share":
@@ -630,7 +632,7 @@ func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, opti
if err != nil {
return nil, fmt.Errorf("failed to read metadata from source object: %w", err)
}
callback, err = f.updateMetadata(ctx, updateInfo, meta, update)
callback, err = f.updateMetadata(ctx, updateInfo, meta, update, false)
if err != nil {
return nil, fmt.Errorf("failed to update metadata from source object: %w", err)
}

@@ -1174,6 +1174,16 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return shouldRetry(ctx, err)
})

if err != nil && createArg.Settings.Expires != nil && strings.Contains(err.Error(), sharing.SharedLinkSettingsErrorNotAuthorized) {
// Some plans can't create links with expiry
fs.Debugf(absPath, "can't create link with expiry, trying without")
createArg.Settings.Expires = nil
err = f.pacer.Call(func() (bool, error) {
linkRes, err = f.sharing.CreateSharedLinkWithSettings(&createArg)
return shouldRetry(ctx, err)
})
}

if err != nil && strings.Contains(err.Error(),
sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
fs.Debugf(absPath, "has a public link already, attempting to retrieve it")
@@ -2,6 +2,7 @@ package googlephotos

import (
"context"
"errors"
"fmt"
"io"
"net/http"
@@ -35,7 +36,7 @@ func TestIntegration(t *testing.T) {
*fstest.RemoteName = "TestGooglePhotos:"
}
f, err := fs.NewFs(ctx, *fstest.RemoteName)
if err == fs.ErrorNotFoundInConfigFile {
if errors.Is(err, fs.ErrorNotFoundInConfigFile) {
t.Skipf("Couldn't create google photos backend - skipping tests: %v", err)
}
require.NoError(t, err)

@@ -180,7 +180,6 @@ func getFsEndpoint(ctx context.Context, client *http.Client, url string, opt *Op
}
addHeaders(req, opt)
res, err := noRedir.Do(req)

if err != nil {
fs.Debugf(nil, "Assuming path is a file as HEAD request could not be sent: %v", err)
return createFileResult()
@@ -249,6 +248,14 @@ func (f *Fs) httpConnection(ctx context.Context, opt *Options) (isFile bool, err
f.httpClient = client
f.endpoint = u
f.endpointURL = u.String()

if isFile {
// Correct root if definitely pointing to a file
f.root = path.Dir(f.root)
if f.root == "." || f.root == "/" {
f.root = ""
}
}
return isFile, nil
}
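The root correction above relies on the behaviour of Go's path.Dir: for a bare filename it returns "." and for a file at the root it returns "/", both of which should collapse to an empty rclone root. A small standalone sketch of that logic (not rclone code):

package main

import (
	"fmt"
	"path"
)

func main() {
	for _, p := range []string{"file.txt", "/file.txt", "dir/file.txt"} {
		root := path.Dir(p) // parent directory of the file
		if root == "." || root == "/" {
			root = "" // empty root means the remote's top level
		}
		fmt.Printf("%q -> %q\n", p, root)
	}
	// "file.txt" -> "", "/file.txt" -> "", "dir/file.txt" -> "dir"
}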
@@ -631,7 +631,7 @@ func NewUpdateFileInfo() UpdateFileInfo {
FileFlags: FileFlags{
IsExecutable: true,
IsHidden: false,
IsWritable: false,
IsWritable: true,
},
}
}

@@ -445,7 +445,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}

// build request
// cant use normal rename as file needs to be "activated" first
// can't use normal rename as file needs to be "activated" first

r := api.NewUpdateFileInfo()
r.DocumentID = doc.DocumentID

@@ -75,7 +75,7 @@ type MoveFolderParam struct {
DestinationPath string `validate:"nonzero" json:"destinationPath"`
}

// JobIDResponse respresents response struct with JobID for folder operations
// JobIDResponse represents response struct with JobID for folder operations
type JobIDResponse struct {
JobID string `json:"jobId"`
}

@@ -396,10 +396,57 @@ func (m *Metadata) WritePermissions(ctx context.Context) (err error) {
return nil
}

// Order the permissions so that any with users come first.
//
// This is to work around a quirk with Graph:
//
// 1. You are adding permissions for both a group and a user.
// 2. The user is a member of the group.
// 3. The permissions for the group and user are the same.
// 4. You are adding the group permission before the user permission.
//
// When all of the above are true, Graph indicates it has added the
// user permission, but it immediately drops it
//
// See: https://github.com/rclone/rclone/issues/8465
func (m *Metadata) orderPermissions(xs []*api.PermissionsType) {
// Return true if identity has any user permissions
hasUserIdentity := func(identity *api.IdentitySet) bool {
if identity == nil {
return false
}
return identity.User.ID != "" || identity.User.DisplayName != "" || identity.User.Email != "" || identity.User.LoginName != ""
}
// Return true if p has any user permissions
hasUser := func(p *api.PermissionsType) bool {
if hasUserIdentity(p.GetGrantedTo(m.fs.driveType)) {
return true
}
for _, identity := range p.GetGrantedToIdentities(m.fs.driveType) {
if hasUserIdentity(identity) {
return true
}
}
return false
}
// Put Permissions with a user first, leaving unsorted otherwise
slices.SortStableFunc(xs, func(a, b *api.PermissionsType) int {
aHasUser := hasUser(a)
bHasUser := hasUser(b)
if aHasUser && !bHasUser {
return -1
} else if !aHasUser && bHasUser {
return 1
}
return 0
})
}
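orderPermissions depends on slices.SortStableFunc keeping the original relative order of elements that compare equal, which is what leaves same-kind permissions unsorted. A standalone sketch of the same comparator shape over plain strings (illustrative only, not the onedrive types):

package main

import (
	"fmt"
	"slices"
	"strings"
)

func main() {
	perms := []string{"group:B", "user:A", "group:A", "user:B"}
	hasUser := func(s string) bool { return strings.HasPrefix(s, "user:") }
	// Stable sort: user entries move to the front, but equal-rank
	// entries keep their original relative order.
	slices.SortStableFunc(perms, func(a, b string) int {
		aHasUser, bHasUser := hasUser(a), hasUser(b)
		if aHasUser && !bHasUser {
			return -1
		} else if !aHasUser && bHasUser {
			return 1
		}
		return 0
	})
	fmt.Println(perms) // [user:A user:B group:B group:A]
}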
// sortPermissions sorts the permissions (to be written) into add, update, and remove queues
func (m *Metadata) sortPermissions() (add, update, remove []*api.PermissionsType) {
new, old := m.queuedPermissions, m.permissions
if len(old) == 0 || m.permsAddOnly {
m.orderPermissions(new)
return new, nil, nil // they must all be "add"
}

@@ -447,6 +494,9 @@ func (m *Metadata) sortPermissions() (add, update, remove []*api.PermissionsType
remove = append(remove, o)
}
}
m.orderPermissions(add)
m.orderPermissions(update)
m.orderPermissions(remove)
return add, update, remove
}

backend/onedrive/metadata_test.go (new file, 125 lines)
@@ -0,0 +1,125 @@
package onedrive

import (
"encoding/json"
"testing"

"github.com/rclone/rclone/backend/onedrive/api"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestOrderPermissions(t *testing.T) {
tests := []struct {
name string
input []*api.PermissionsType
expected []string
}{
{
name: "empty",
input: []*api.PermissionsType{},
expected: []string(nil),
},
{
name: "users first, then group, then none",
input: []*api.PermissionsType{
{ID: "1", GrantedTo: &api.IdentitySet{Group: api.Identity{DisplayName: "Group1"}}},
{ID: "2", GrantedToIdentities: []*api.IdentitySet{{User: api.Identity{DisplayName: "Alice"}}}},
{ID: "3", GrantedTo: &api.IdentitySet{User: api.Identity{DisplayName: "Alice"}}},
{ID: "4"},
},
expected: []string{"2", "3", "1", "4"},
},
{
name: "same type unsorted",
input: []*api.PermissionsType{
{ID: "b", GrantedTo: &api.IdentitySet{Group: api.Identity{DisplayName: "Group B"}}},
{ID: "a", GrantedTo: &api.IdentitySet{Group: api.Identity{DisplayName: "Group A"}}},
{ID: "c", GrantedToIdentities: []*api.IdentitySet{{Group: api.Identity{DisplayName: "Group A"}}, {User: api.Identity{DisplayName: "Alice"}}}},
},
expected: []string{"c", "b", "a"},
},
{
name: "all user identities",
input: []*api.PermissionsType{
{ID: "c", GrantedTo: &api.IdentitySet{User: api.Identity{DisplayName: "Bob"}}},
{ID: "a", GrantedTo: &api.IdentitySet{User: api.Identity{Email: "alice@example.com"}}},
{ID: "b", GrantedToIdentities: []*api.IdentitySet{{User: api.Identity{LoginName: "user3"}}}},
},
expected: []string{"c", "a", "b"},
},
{
name: "no user or group info",
input: []*api.PermissionsType{
{ID: "z"},
{ID: "x"},
{ID: "y"},
},
expected: []string{"z", "x", "y"},
},
}

for _, driveType := range []string{driveTypePersonal, driveTypeBusiness} {
t.Run(driveType, func(t *testing.T) {
for _, tt := range tests {
m := &Metadata{fs: &Fs{driveType: driveType}}
t.Run(tt.name, func(t *testing.T) {
if driveType == driveTypeBusiness {
for i := range tt.input {
tt.input[i].GrantedToV2 = tt.input[i].GrantedTo
tt.input[i].GrantedTo = nil
tt.input[i].GrantedToIdentitiesV2 = tt.input[i].GrantedToIdentities
tt.input[i].GrantedToIdentities = nil
}
}
m.orderPermissions(tt.input)
var gotIDs []string
for _, p := range tt.input {
gotIDs = append(gotIDs, p.ID)
}
assert.Equal(t, tt.expected, gotIDs)
})
}
})
}
}

func TestOrderPermissionsJSON(t *testing.T) {
testJSON := `[
{
"id": "1",
"grantedToV2": {
"group": {
"id": "group@example.com"
}
},
"roles": [
"write"
]
},
{
"id": "2",
"grantedToV2": {
"user": {
"id": "user@example.com"
}
},
"roles": [
"write"
]
}
]`

var testPerms []*api.PermissionsType
err := json.Unmarshal([]byte(testJSON), &testPerms)
require.NoError(t, err)

m := &Metadata{fs: &Fs{driveType: driveTypeBusiness}}
m.orderPermissions(testPerms)
var gotIDs []string
for _, p := range testPerms {
gotIDs = append(gotIDs, p.ID)
}
assert.Equal(t, []string{"2", "1"}, gotIDs)

}
@@ -131,7 +131,7 @@ func init() {
Help: "Microsoft Cloud for US Government",
}, {
Value: regionDE,
Help: "Microsoft Cloud Germany",
Help: "Microsoft Cloud Germany (deprecated - try " + regionGlobal + " region first).",
}, {
Value: regionCN,
Help: "Azure and Office 365 operated by Vnet Group in China",

@@ -424,7 +424,7 @@ func (f *Fs) newSingleConnClient(ctx context.Context) (*rest.Client, error) {
})
// Set our own http client in the context
ctx = oauthutil.Context(ctx, baseClient)
// create a new oauth client, re-use the token source
// create a new oauth client, reuse the token source
oAuthClient := oauth2.NewClient(ctx, f.ts)
return rest.NewClient(oAuthClient).SetRoot("https://" + f.opt.Hostname), nil
}
@@ -934,34 +934,67 @@ func init() {
Help: "The default endpoint\nIran",
}},
}, {
// Linode endpoints: https://www.linode.com/docs/products/storage/object-storage/guides/urls/#cluster-url-s3-endpoint
// Linode endpoints: https://techdocs.akamai.com/cloud-computing/docs/object-storage-product-limits#supported-endpoint-types-by-region
Name: "endpoint",
Help: "Endpoint for Linode Object Storage API.",
Provider: "Linode",
Examples: []fs.OptionExample{{
Value: "nl-ams-1.linodeobjects.com",
Help: "Amsterdam (Netherlands), nl-ams-1",
}, {
Value: "us-southeast-1.linodeobjects.com",
Help: "Atlanta, GA (USA), us-southeast-1",
}, {
Value: "in-maa-1.linodeobjects.com",
Help: "Chennai (India), in-maa-1",
}, {
Value: "us-ord-1.linodeobjects.com",
Help: "Chicago, IL (USA), us-ord-1",
}, {
Value: "eu-central-1.linodeobjects.com",
Help: "Frankfurt (Germany), eu-central-1",
}, {
Value: "id-cgk-1.linodeobjects.com",
Help: "Jakarta (Indonesia), id-cgk-1",
}, {
Value: "gb-lon-1.linodeobjects.com",
Help: "London 2 (Great Britain), gb-lon-1",
}, {
Value: "us-lax-1.linodeobjects.com",
Help: "Los Angeles, CA (USA), us-lax-1",
}, {
Value: "es-mad-1.linodeobjects.com",
Help: "Madrid (Spain), es-mad-1",
}, {
Value: "au-mel-1.linodeobjects.com",
Help: "Melbourne (Australia), au-mel-1",
}, {
Value: "us-mia-1.linodeobjects.com",
Help: "Miami, FL (USA), us-mia-1",
}, {
Value: "it-mil-1.linodeobjects.com",
Help: "Milan (Italy), it-mil-1",
}, {
Value: "us-east-1.linodeobjects.com",
Help: "Newark, NJ (USA), us-east-1",
}, {
Value: "jp-osa-1.linodeobjects.com",
Help: "Osaka (Japan), jp-osa-1",
}, {
Value: "fr-par-1.linodeobjects.com",
Help: "Paris (France), fr-par-1",
}, {
Value: "br-gru-1.linodeobjects.com",
Help: "São Paulo (Brazil), br-gru-1",
}, {
Value: "us-sea-1.linodeobjects.com",
Help: "Seattle, WA (USA), us-sea-1",
}, {
Value: "ap-south-1.linodeobjects.com",
Help: "Singapore ap-south-1",
Help: "Singapore, ap-south-1",
}, {
Value: "sg-sin-1.linodeobjects.com",
Help: "Singapore 2, sg-sin-1",
}, {
Value: "se-sto-1.linodeobjects.com",
Help: "Stockholm (Sweden), se-sto-1",
@@ -1343,7 +1376,7 @@ func init() {
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,MagaluCloud,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,Magalu,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
Examples: []fs.OptionExample{{
Value: "objects-us-east-1.dream.io",
Help: "Dream Objects endpoint",
@@ -1356,6 +1389,10 @@ func init() {
Value: "sfo3.digitaloceanspaces.com",
Help: "DigitalOcean Spaces San Francisco 3",
Provider: "DigitalOcean",
}, {
Value: "sfo2.digitaloceanspaces.com",
Help: "DigitalOcean Spaces San Francisco 2",
Provider: "DigitalOcean",
}, {
Value: "fra1.digitaloceanspaces.com",
Help: "DigitalOcean Spaces Frankfurt 1",
@@ -1372,6 +1409,18 @@ func init() {
Value: "sgp1.digitaloceanspaces.com",
Help: "DigitalOcean Spaces Singapore 1",
Provider: "DigitalOcean",
}, {
Value: "lon1.digitaloceanspaces.com",
Help: "DigitalOcean Spaces London 1",
Provider: "DigitalOcean",
}, {
Value: "tor1.digitaloceanspaces.com",
Help: "DigitalOcean Spaces Toronto 1",
Provider: "DigitalOcean",
}, {
Value: "blr1.digitaloceanspaces.com",
Help: "DigitalOcean Spaces Bangalore 1",
Provider: "DigitalOcean",
}, {
Value: "localhost:8333",
Help: "SeaweedFS S3 localhost",
@@ -1476,14 +1525,6 @@ func init() {
Value: "s3.ir-tbz-sh1.arvanstorage.ir",
Help: "ArvanCloud Tabriz Iran (Shahriar) endpoint",
Provider: "ArvanCloud",
}, {
Value: "br-se1.magaluobjects.com",
Help: "Magalu BR Southeast 1 endpoint",
Provider: "Magalu",
}, {
Value: "br-ne1.magaluobjects.com",
Help: "Magalu BR Northeast 1 endpoint",
Provider: "Magalu",
}},
}, {
Name: "location_constraint",
@@ -2122,13 +2163,16 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
Help: "Standard storage class",
}},
}, {
// Mapping from here: #todo
// Mapping from here: https://docs.magalu.cloud/docs/storage/object-storage/Classes-de-Armazenamento/standard
Name: "storage_class",
Help: "The storage class to use when storing new objects in Magalu.",
Provider: "Magalu",
Examples: []fs.OptionExample{{
Value: "STANDARD",
Help: "Standard storage class",
}, {
Value: "GLACIER_IR",
Help: "Glacier Instant Retrieval storage class",
}},
}, {
// Mapping from here: https://intl.cloud.tencent.com/document/product/436/30925
@@ -3344,7 +3388,7 @@ func setQuirks(opt *Options) {
listObjectsV2 = true // Always use ListObjectsV2 instead of ListObjects
virtualHostStyle = true // Use bucket.provider.com instead of putting the bucket in the URL
urlEncodeListings = true // URL encode the listings to help with control characters
useMultipartEtag = true // Set if Etags for multpart uploads are compatible with AWS
useMultipartEtag = true // Set if Etags for multipart uploads are compatible with AWS
useAcceptEncodingGzip = true // Set Accept-Encoding: gzip
mightGzip = true // assume all providers might use content encoding gzip until proven otherwise
useAlreadyExists = true // Set if provider returns AlreadyOwnedByYou or no error if you try to remake your own bucket
@@ -4932,7 +4976,7 @@ or from INTELLIGENT-TIERING Archive Access / Deep Archive Access tier to the Fre

Usage Examples:

rclone backend restore s3:bucket/path/to/object -o priority=PRIORITY -o lifetime=DAYS
rclone backend restore s3:bucket/path/to/ --include /object -o priority=PRIORITY -o lifetime=DAYS
rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY -o lifetime=DAYS
rclone backend restore s3:bucket -o priority=PRIORITY -o lifetime=DAYS
rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY
@@ -6057,7 +6101,7 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
if mOut == nil {
err = fserrors.RetryErrorf("internal error: no info from multipart upload")
} else if mOut.UploadId == nil {
err = fserrors.RetryErrorf("internal error: no UploadId in multpart upload: %#v", *mOut)
err = fserrors.RetryErrorf("internal error: no UploadId in multipart upload: %#v", *mOut)
}
}
return f.shouldRetry(ctx, err)
@@ -120,7 +120,7 @@ func init() {
srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(rootURL) // FIXME

// FIXME
//err = f.pacer.Call(func() (bool, error) {
// err = f.pacer.Call(func() (bool, error) {
resp, err = srv.CallXML(context.Background(), &opts, &authRequest, nil)
// return shouldRetry(ctx, resp, err)
//})
@@ -327,7 +327,7 @@ func (f *Fs) readMetaDataForID(ctx context.Context, ID string) (info *api.File,
func (f *Fs) getAuthToken(ctx context.Context) error {
fs.Debugf(f, "Renewing token")

var authRequest = api.TokenAuthRequest{
authRequest := api.TokenAuthRequest{
AccessKeyID: withDefault(f.opt.AccessKeyID, accessKeyID),
PrivateAccessKey: withDefault(f.opt.PrivateAccessKey, obscure.MustReveal(encryptedPrivateAccessKey)),
RefreshToken: f.opt.RefreshToken,
@@ -509,7 +509,7 @@ func errorHandler(resp *http.Response) (err error) {
return fmt.Errorf("error reading error out of body: %w", err)
}
match := findError.FindSubmatch(body)
if match == nil || len(match) < 2 || len(match[1]) == 0 {
if len(match) < 2 || len(match[1]) == 0 {
return fmt.Errorf("HTTP error %v (%v) returned body: %q", resp.StatusCode, resp.Status, body)
}
return fmt.Errorf("HTTP error %v (%v): %s", resp.StatusCode, resp.Status, match[1])
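The same nil-slice reasoning applies here: regexp's FindSubmatch returns nil when nothing matches, and len of a nil slice is 0, so the len(match) < 2 check alone is sufficient. A minimal demonstration (the pattern below is hypothetical, not the backend's actual regexp):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Hypothetical error pattern for illustration only.
	findError := regexp.MustCompile(`<message>(.*?)</message>`)
	match := findError.FindSubmatch([]byte("no error markup here"))
	fmt.Println(match == nil, len(match) < 2) // true true
}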
@@ -552,7 +552,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {

// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
//fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
// fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
// Find the leaf in pathID
found, err = f.listAll(ctx, pathID, nil, func(item *api.Collection) bool {
if strings.EqualFold(item.Name, leaf) {

@@ -11,4 +11,5 @@
<services+github@simjo.st>
<seb•ɑƬ•chezwam•ɖɵʈ•org>
<allllaboutyou@gmail.com>
<psycho@feltzv.fr>
<psycho@feltzv.fr>
<afw5059@gmail.com>
@@ -7,6 +7,7 @@ conversion into man pages etc.
import os
import re
import time
import subprocess
from datetime import datetime

docpath = "docs/content"
@@ -192,13 +193,23 @@ def main():
command_docs = read_commands(docpath).replace("\\", "\\\\") # escape \ so we can use command_docs in re.sub
build_date = datetime.utcfromtimestamp(
int(os.environ.get('SOURCE_DATE_EPOCH', time.time())))
help_output = subprocess.check_output(["rclone", "help"]).decode("utf-8")
with open(outfile, "w") as out:
out.write("""\
%% rclone(1) User Manual
%% Nick Craig-Wood
%% %s

""" % build_date.strftime("%b %d, %Y"))
# NAME

rclone - manage files on cloud storage

# SYNOPSIS

```
%s
```
""" % (build_date.strftime("%b %d, %Y"), help_output))
for doc in docs:
contents = read_doc(doc)
# Substitute the commands into doc.md

@@ -29,7 +29,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
cmd := exec.Command("git", "log", "--oneline", from+".."+to)
out, err := cmd.Output()
if err != nil {
log.Fatalf("failed to run git log %s: %v", from+".."+to, err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
log.Fatalf("failed to run git log %s: %v", from+".."+to, err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
}
logMap = map[string]string{}
logs = []string{}
@@ -39,7 +39,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
}
match := logRe.FindSubmatch(line)
if match == nil {
log.Fatalf("failed to parse line: %q", line) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
log.Fatalf("failed to parse line: %q", line) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
}
var hash, logMessage = string(match[1]), string(match[2])
logMap[logMessage] = hash
@@ -52,12 +52,12 @@ func main() {
flag.Parse()
args := flag.Args()
if len(args) != 0 {
log.Fatalf("Syntax: %s", os.Args[0]) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
log.Fatalf("Syntax: %s", os.Args[0]) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
}
// v1.54.0
versionBytes, err := os.ReadFile("VERSION")
if err != nil {
log.Fatalf("Failed to read version: %v", err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
log.Fatalf("Failed to read version: %v", err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
}
if versionBytes[0] == 'v' {
versionBytes = versionBytes[1:]
@@ -65,7 +65,7 @@ func main() {
versionBytes = bytes.TrimSpace(versionBytes)
semver := semver.New(string(versionBytes))
stable := fmt.Sprintf("v%d.%d", semver.Major, semver.Minor-1)
log.Printf("Finding commits in %v not in stable %s", semver, stable) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
log.Printf("Finding commits in %v not in stable %s", semver, stable) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
masterMap, masterLogs := readCommits(stable+".0", "master")
stableMap, _ := readCommits(stable+".0", stable+"-stable")
for _, logMessage := range masterLogs {

@@ -23,19 +23,23 @@ func init() {
}

var commandDefinition = &cobra.Command{
Use: "authorize",
Use: "authorize <fs name> [base64_json_blob | client_id client_secret]",
Short: `Remote authorization.`,
Long: `Remote authorization. Used to authorize a remote or headless
rclone from a machine with a browser - use as instructed by
rclone config.

The command requires 1-3 arguments:
- fs name (e.g., "drive", "s3", etc.)
- Either a base64 encoded JSON blob obtained from a previous rclone config session
- Or a client_id and client_secret pair obtained from the remote service

Use --auth-no-open-browser to prevent rclone to open auth
link in default browser automatically.

Use --template to generate HTML output via a custom Go template. If a blank string is provided as an argument to this flag, the default template is used.`,
Annotations: map[string]string{
"versionIntroduced": "v1.27",
// "groups": "",
},
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 3, command, args)
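Illustrative invocations of the three documented argument forms (the remote name and credentials below are placeholders, following the same usage-example style as the s3 restore examples above):

rclone authorize "drive"
rclone authorize "drive" "base64_json_blob"
rclone authorize "drive" "client_id" "client_secret"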
cmd/authorize/authorize_test.go (new file, 32 lines)
@@ -0,0 +1,32 @@
package authorize

import (
"bytes"
"strings"
"testing"

"github.com/spf13/cobra"
)

func TestAuthorizeCommand(t *testing.T) {
// Test that the Use string is correctly formatted
if commandDefinition.Use != "authorize <fs name> [base64_json_blob | client_id client_secret]" {
t.Errorf("Command Use string doesn't match expected format: %s", commandDefinition.Use)
}

// Test that help output contains the argument information
buf := &bytes.Buffer{}
cmd := &cobra.Command{}
cmd.AddCommand(commandDefinition)
cmd.SetOut(buf)
cmd.SetArgs([]string{"authorize", "--help"})
err := cmd.Execute()
if err != nil {
t.Fatalf("Failed to execute help command: %v", err)
}

helpOutput := buf.String()
if !strings.Contains(helpOutput, "authorize <fs name>") {
t.Errorf("Help output doesn't contain correct usage information")
}
}
@@ -746,6 +746,16 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
case "test-func":
b.TestFn = testFunc
return
case "concurrent-func":
b.TestFn = func() {
src := filepath.Join(b.dataDir, "file7.txt")
dst := "file1.txt"
err := b.copyFile(ctx, src, b.replaceHex(b.path2), dst)
if err != nil {
fs.Errorf(src, "error copying file: %v", err)
}
}
return
case "fix-names":
// in case the local os converted any filenames
ci.NoUnicodeNormalization = true
@@ -871,10 +881,9 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
if !ok || err != nil {
fs.Logf(remotePath, "Can't find expected file %s (was it renamed by the os?) %v", args[1], err)
return
} else {
// include hash of filename to make unicode form differences easier to see in logs
fs.Debugf(remotePath, "verified file exists at correct path. filename hash: %s", stringToHash(leaf))
}
// include hash of filename to make unicode form differences easier to see in logs
fs.Debugf(remotePath, "verified file exists at correct path. filename hash: %s", stringToHash(leaf))
return
default:
return fmt.Errorf("unknown command: %q", args[0])

@@ -218,7 +218,7 @@ func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error {
if b.opt.CompareFlag == "" {
return nil
}
var CompareFlag CompareOpt // for exlcusions
var CompareFlag CompareOpt // for exclusions
opts := strings.Split(b.opt.CompareFlag, ",")
for _, opt := range opts {
switch strings.ToLower(strings.TrimSpace(opt)) {

@@ -161,9 +161,7 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string,
return
}

if err == nil {
err = b.checkListing(now, newListing, "current "+msg)
}
err = b.checkListing(now, newListing, "current "+msg)
if err != nil {
return
}
@@ -286,7 +284,7 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string,
}

// applyDeltas
func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (changes1, changes2 bool, results2to1, results1to2 []Results, queues queues, err error) {
func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (results2to1, results1to2 []Results, queues queues, err error) {
path1 := bilib.FsPath(b.fs1)
path2 := bilib.FsPath(b.fs2)

@@ -367,7 +365,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
}
}

//if there are potential conflicts to check, check them all here (outside the loop) in one fell swoop
// if there are potential conflicts to check, check them all here (outside the loop) in one fell swoop
matches, err := b.checkconflicts(ctxCheck, filterCheck, b.fs1, b.fs2)

for _, file := range ds1.sort() {
@@ -392,7 +390,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
} else if d2.is(deltaOther) {
b.indent("!WARNING", file, "New or changed in both paths")

//if files are identical, leave them alone instead of renaming
// if files are identical, leave them alone instead of renaming
if (dirs1.has(file) || dirs1.has(alias)) && (dirs2.has(file) || dirs2.has(alias)) {
fs.Infof(nil, "This is a directory, not a file. Skipping equality check and will not rename: %s", file)
ls1.getPut(file, skippedDirs1)
@@ -486,7 +484,6 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change

// Do the batch operation
if copy2to1.NotEmpty() && !b.InGracefulShutdown {
changes1 = true
b.indent("Path2", "Path1", "Do queued copies to")
ctx = b.setBackupDir(ctx, 1)
results2to1, err = b.fastCopy(ctx, b.fs2, b.fs1, copy2to1, "copy2to1")
@@ -498,12 +495,11 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
return
}

//copy empty dirs from path2 to path1 (if --create-empty-src-dirs)
// copy empty dirs from path2 to path1 (if --create-empty-src-dirs)
b.syncEmptyDirs(ctx, b.fs1, copy2to1, dirs2, &results2to1, "make")
}

if copy1to2.NotEmpty() && !b.InGracefulShutdown {
changes2 = true
b.indent("Path1", "Path2", "Do queued copies to")
ctx = b.setBackupDir(ctx, 2)
results1to2, err = b.fastCopy(ctx, b.fs1, b.fs2, copy1to2, "copy1to2")
@@ -515,7 +511,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
return
}

//copy empty dirs from path1 to path2 (if --create-empty-src-dirs)
// copy empty dirs from path1 to path2 (if --create-empty-src-dirs)
b.syncEmptyDirs(ctx, b.fs2, copy1to2, dirs1, &results1to2, "make")
}

@@ -523,7 +519,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
if err = b.saveQueue(delete1, "delete1"); err != nil {
return
}
//propagate deletions of empty dirs from path2 to path1 (if --create-empty-src-dirs)
// propagate deletions of empty dirs from path2 to path1 (if --create-empty-src-dirs)
b.syncEmptyDirs(ctx, b.fs1, delete1, dirs1, &results2to1, "remove")
}

@@ -531,7 +527,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
if err = b.saveQueue(delete2, "delete2"); err != nil {
return
}
//propagate deletions of empty dirs from path1 to path2 (if --create-empty-src-dirs)
// propagate deletions of empty dirs from path1 to path2 (if --create-empty-src-dirs)
b.syncEmptyDirs(ctx, b.fs2, delete2, dirs2, &results1to2, "remove")
}

@@ -394,7 +394,7 @@ func parseHash(str string) (string, string, error) {
return "", "", fmt.Errorf("invalid hash %q", str)
}

// checkListing verifies that listing is not empty (unless resynching)
// checkListing verifies that listing is not empty (unless resyncing)
func (b *bisyncRun) checkListing(ls *fileList, listing, msg string) error {
if b.opt.Resync || !ls.empty() {
return nil

@@ -359,8 +359,6 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {

// Determine and apply changes to Path1 and Path2
noChanges := ds1.empty() && ds2.empty()
changes1 := false // 2to1
changes2 := false // 1to2
results2to1 := []Results{}
results1to2 := []Results{}

@@ -370,7 +368,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
fs.Infof(nil, "No changes found")
} else {
fs.Infof(nil, "Applying changes")
changes1, changes2, results2to1, results1to2, queues, err = b.applyDeltas(octx, ds1, ds2)
results2to1, results1to2, queues, err = b.applyDeltas(octx, ds1, ds2)
if err != nil {
if b.InGracefulShutdown && (err == context.Canceled || err == accounting.ErrorMaxTransferLimitReachedGraceful || strings.Contains(err.Error(), "context canceled")) {
fs.Infof(nil, "Ignoring sync error due to Graceful Shutdown: %v", err)
@@ -395,21 +393,11 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
}
b.saveOldListings()
// save new listings
// NOTE: "changes" in this case does not mean this run vs. last run, it means start of this run vs. end of this run.
// i.e. whether we can use the March lst-new as this side's lst without modifying it.
if noChanges {
b.replaceCurrentListings()
} else {
if changes1 || b.InGracefulShutdown { // 2to1
err1 = b.modifyListing(fctx, b.fs2, b.fs1, results2to1, queues, false)
} else {
err1 = bilib.CopyFileIfExists(b.newListing1, b.listing1)
}
if changes2 || b.InGracefulShutdown { // 1to2
err2 = b.modifyListing(fctx, b.fs1, b.fs2, results1to2, queues, true)
} else {
err2 = bilib.CopyFileIfExists(b.newListing2, b.listing2)
}
err1 = b.modifyListing(fctx, b.fs2, b.fs1, results2to1, queues, false) // 2to1
err2 = b.modifyListing(fctx, b.fs1, b.fs2, results1to2, queues, true) // 1to2
}
if b.DebugName != "" {
l1, _ := b.loadListing(b.listing1)
cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.copy2to1.que (new vendored file, 1 line)
@@ -0,0 +1 @@
"file1.txt"

cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path1.lst (new vendored file, 10 lines)
@@ -0,0 +1,10 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"

cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path1.lst-new (new vendored file, 10 lines)
@@ -0,0 +1,10 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"

cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path1.lst-old (new vendored file, 10 lines)
@@ -0,0 +1,10 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"

cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path2.lst (new vendored file, 10 lines)
@@ -0,0 +1,10 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"

cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path2.lst-new (new vendored file, 10 lines)
@@ -0,0 +1,10 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"

cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path2.lst-old (new vendored file, 10 lines)
@@ -0,0 +1,10 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"

cmd/bisync/testdata/test_concurrent/golden/test.log (new vendored file, 73 lines)
@@ -0,0 +1,73 @@
[36m(01) :[0m [34mtest concurrent[0m

[36m(02) :[0m [34mtest initial bisync[0m
[36m(03) :[0m [34mbisync resync[0m
INFO : [2mSetting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.[0m
INFO : Bisyncing with Comparison Settings:
{
"Modtime": true,
"Size": true,
"Checksum": false,
"NoSlowHash": false,
"SlowHashSyncOnly": false,
"DownloadHash": false
}
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

[36m(04) :[0m [34mtest changed on one path - file1[0m
[36m(05) :[0m [34mtouch-glob 2001-01-02 {datadir/} file5R.txt[0m
[36m(06) :[0m [34mtouch-glob 2023-08-26 {datadir/} file7.txt[0m
[36m(07) :[0m [34mcopy-as {datadir/}file5R.txt {path2/} file1.txt[0m

[36m(08) :[0m [34mtest bisync with file changed during[0m
[36m(09) :[0m [34mconcurrent-func[0m
[36m(10) :[0m [34mbisync[0m
INFO : [2mSetting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.[0m
INFO : Bisyncing with Comparison Settings:
{
"Modtime": true,
"Size": true,
"Checksum": false,
"NoSlowHash": false,
"SlowHashSyncOnly": false,
"DownloadHash": false
}
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Building Path1 and Path2 listings
INFO : Path1 checking for diffs
INFO : Path2 checking for diffs
INFO : - [34mPath2[0m [35m[33mFile changed: [35msize (larger)[0m, [35mtime (newer)[0m[0m[0m - [36mfile1.txt[0m
INFO : Path2: 1 changes: [32m 0 new[0m, [33m 1 modified[0m, [31m 0 deleted[0m
INFO : ([33mModified[0m: [36m 1 newer[0m, [34m 0 older[0m, [36m 1 larger[0m, [34m 0 smaller[0m)
INFO : Applying changes
INFO : - [34mPath2[0m [35m[32mQueue copy to[0m Path1[0m - [36m{path1/}file1.txt[0m
INFO : - [34mPath2[0m [35mDo queued copies to[0m - [36mPath1[0m
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

[36m(11) :[0m [34mbisync[0m
INFO : [2mSetting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.[0m
INFO : Bisyncing with Comparison Settings:
{
"Modtime": true,
"Size": true,
"Checksum": false,
"NoSlowHash": false,
"SlowHashSyncOnly": false,
"DownloadHash": false
}
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Building Path1 and Path2 listings
INFO : Path1 checking for diffs
INFO : Path2 checking for diffs
INFO : No changes found
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

cmd/bisync/testdata/test_concurrent/initial/RCLONE_TEST (new vendored file, 1 line)
@@ -0,0 +1 @@
This file is used for testing the health of rclone accesses to the local/remote file system. Do not delete.

cmd/bisync/testdata/test_concurrent/initial/file1.txt (new vendored file, empty)
cmd/bisync/testdata/test_concurrent/initial/file2.txt (new vendored file, empty)
cmd/bisync/testdata/test_concurrent/initial/file3.txt (new vendored file, empty)
cmd/bisync/testdata/test_concurrent/initial/file4.txt (new vendored file, empty)
cmd/bisync/testdata/test_concurrent/initial/file5.txt (new vendored file, empty)
cmd/bisync/testdata/test_concurrent/initial/file6.txt (new vendored file, empty)
cmd/bisync/testdata/test_concurrent/initial/file7.txt (new vendored file, empty)
cmd/bisync/testdata/test_concurrent/initial/file8.txt (new vendored file, empty)
cmd/bisync/testdata/test_concurrent/modfiles/dummy.txt (new vendored file, empty)

cmd/bisync/testdata/test_concurrent/modfiles/file1.txt (new vendored file, 1 line)
@@ -0,0 +1 @@
This file is newer

cmd/bisync/testdata/test_concurrent/modfiles/file10.txt (new vendored file, 1 line)
@@ -0,0 +1 @@
This file is newer

cmd/bisync/testdata/test_concurrent/modfiles/file11.txt (new vendored file, 1 line)
@@ -0,0 +1 @@
This file is newer

cmd/bisync/testdata/test_concurrent/modfiles/file2.txt (new vendored file, 1 line)
@@ -0,0 +1 @@
Newer version

cmd/bisync/testdata/test_concurrent/modfiles/file5L.txt (new vendored file, 1 line)
@@ -0,0 +1 @@
This file is newer and not equal to 5R

cmd/bisync/testdata/test_concurrent/modfiles/file5R.txt (new vendored file, 1 line)
@@ -0,0 +1 @@
This file is newer and not equal to 5L

cmd/bisync/testdata/test_concurrent/modfiles/file6.txt (new vendored file, 1 line)
@@ -0,0 +1 @@
This file is newer

cmd/bisync/testdata/test_concurrent/modfiles/file7.txt (new vendored file, 1 line)
@@ -0,0 +1 @@
This file is newer

cmd/bisync/testdata/test_concurrent/scenario.txt (new vendored file, 15 lines)
@@ -0,0 +1,15 @@
test concurrent

test initial bisync
bisync resync

test changed on one path - file1
touch-glob 2001-01-02 {datadir/} file5R.txt
touch-glob 2023-08-26 {datadir/} file7.txt
copy-as {datadir/}file5R.txt {path2/} file1.txt

test bisync with file changed during
concurrent-func
bisync

bisync
@@ -23,7 +23,7 @@ INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
[36m(05) :[0m [34mmove-listings empty-path1[0m

[36m(06) :[0m [34mtest 2. resync with empty path2, resulting in synching all content to path2.[0m
[36m(06) :[0m [34mtest 2. resync with empty path2, resulting in syncing all content to path2.[0m
[36m(07) :[0m [34mpurge-children {path2/}[0m
[36m(08) :[0m [34mbisync resync[0m
INFO : [2mSetting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.[0m

cmd/bisync/testdata/test_resync/scenario.txt (vendored, 4 changes)
@@ -1,6 +1,6 @@
test resync
# 1. Resync with empty Path1, resulting in copying all content FROM Path2
# 2. Resync with empty Path2, resulting in synching all content TO Path2
# 2. Resync with empty Path2, resulting in syncing all content TO Path2
# 3. Exercise all of the various file difference scenarios during a resync:
# File Path1 Path2 Expected action Who wins
# - file1.txt Exists Missing Sync Path1 >Path2 Path1
@@ -17,7 +17,7 @@ purge-children {path1/}
bisync resync
move-listings empty-path1

test 2. resync with empty path2, resulting in synching all content to path2.
test 2. resync with empty path2, resulting in syncing all content to path2.
purge-children {path2/}
bisync resync
move-listings empty-path2
11 cmd/cmd.go
@@ -429,11 +429,12 @@ func initConfig() {
 		fs.Fatalf(nil, "Failed to start remote control: %v", err)
 	}
 
-	// Start the metrics server if configured
-	_, err = rcserver.MetricsStart(ctx, &rc.Opt)
-	if err != nil {
-		fs.Fatalf(nil, "Failed to start metrics server: %v", err)
+	// Start the metrics server if configured and not running the "rc" command
+	if len(os.Args) >= 2 && os.Args[1] != "rc" {
+		_, err = rcserver.MetricsStart(ctx, &rc.Opt)
+		if err != nil {
+			fs.Fatalf(nil, "Failed to start metrics server: %v", err)
+		}
 	}
 
 	// Setup CPU profiling if desired
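This hunk is the fix described in the v1.69.2 changelog entry "Disable the metrics server when running `rclone rc`". A minimal sketch of the guard in isolation (the helper name is mine; rclone inlines the check in `initConfig`):

```go
// shouldStartMetrics mirrors the condition added above: the metrics server
// is started only when a subcommand is present and it is not "rc".
func shouldStartMetrics(args []string) bool {
	return len(args) >= 2 && args[1] != "rc"
}
```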
@@ -549,12 +549,12 @@ password to re-encrypt the config.
 
 When |--password-command| is called to change the password then the
 environment variable |RCLONE_PASSWORD_CHANGE=1| will be set. So if
-changing passwords programatically you can use the environment
+changing passwords programmatically you can use the environment
 variable to distinguish which password you must supply.
 
 Alternatively you can remove the password first (with |rclone config
 encryption remove|), then set it again with this command which may be
-easier if you don't mind the unecrypted config file being on the disk
+easier if you don't mind the unencrypted config file being on the disk
 briefly.
 `, "|", "`"),
 	RunE: func(command *cobra.Command, args []string) error {
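A minimal sketch of a program usable as `--password-command`, based only on the contract above (rclone sets `RCLONE_PASSWORD_CHANGE=1` when it is asking for the new password); the two source environment variables are hypothetical stand-ins for wherever the secrets actually live:

```go
// Hypothetical --password-command helper. rclone sets
// RCLONE_PASSWORD_CHANGE=1 when it wants the *new* password during a
// password change; otherwise it wants the current one.
package main

import (
	"fmt"
	"os"
)

func main() {
	if os.Getenv("RCLONE_PASSWORD_CHANGE") == "1" {
		fmt.Println(os.Getenv("RCLONE_NEW_PASS")) // hypothetical source of the new password
	} else {
		fmt.Println(os.Getenv("RCLONE_OLD_PASS")) // hypothetical source of the current password
	}
}
```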
@@ -43,6 +43,8 @@ This doesn't transfer files that are identical on src and dst, testing
 by size and modification time or MD5SUM. It doesn't delete files from
 the destination.
 
+*If you are looking to copy just a byte range of a file, please see 'rclone cat --offset X --count Y'*
+
 **Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics
 `,
 	Annotations: map[string]string{
@@ -43,7 +43,7 @@ Setting |--auto-filename| will attempt to automatically determine the
 filename from the URL (after any redirections) and used in the
 destination path.
 
-With |--auto-filename-header| in addition, if a specific filename is
+With |--header-filename| in addition, if a specific filename is
 set in HTTP headers, it will be used instead of the name from the URL.
 With |--print-filename| in addition, the resulting file name will be
 printed.
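For reference, the behaviour `--header-filename` describes boils down to preferring a `Content-Disposition` filename over the redirected URL path. A rough sketch of that rule (my own simplification, not rclone's code), assuming the standard `mime`, `net/http`, and `path` packages:

```go
import (
	"mime"
	"net/http"
	"path"
)

// pickName prefers a filename from the Content-Disposition header and
// falls back to the last segment of the final (post-redirect) URL.
func pickName(resp *http.Response) string {
	if _, params, err := mime.ParseMediaType(resp.Header.Get("Content-Disposition")); err == nil {
		if name := params["filename"]; name != "" {
			return path.Base(name)
		}
	}
	return path.Base(resp.Request.URL.Path)
}
```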
@@ -54,7 +54,7 @@ destination if there is one with the same name.
 Setting |--stdout| or making the output file name |-|
 will cause the output to be written to standard output.
 
-### Troublshooting
+### Troubleshooting
 
 If you can't get |rclone copyurl| to work then here are some things you can try:
 
@@ -22,6 +22,9 @@ include/exclude filters - everything will be removed. Use the
 delete files. To delete empty directories only, use command
 [rmdir](/commands/rclone_rmdir/) or [rmdirs](/commands/rclone_rmdirs/).
 
+The concurrency of this operation is controlled by the ` + "`--checkers`" + ` global flag. However, some backends will
+implement this command directly, in which case ` + "`--checkers`" + ` will be ignored.
+
 **Important**: Since this can cause data loss, test first with the
 ` + "`--dry-run` or the `--interactive`/`-i`" + ` flag.
 `,
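The `--checkers` note added above amounts to a bounded worker pool. A minimal sketch of that pattern (not rclone's actual implementation), assuming `golang.org/x/sync/errgroup`:

```go
import (
	"context"

	"golang.org/x/sync/errgroup"
)

// deleteAll removes paths with at most "checkers" concurrent workers,
// mirroring what the --checkers flag controls for this command.
func deleteAll(ctx context.Context, paths []string, checkers int, rm func(context.Context, string) error) error {
	g, ctx := errgroup.WithContext(ctx)
	g.SetLimit(checkers) // bound the number of concurrent deletions
	for _, p := range paths {
		p := p // capture loop variable (needed before Go 1.22)
		g.Go(func() error { return rm(ctx, p) })
	}
	return g.Wait()
}
```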
@@ -29,6 +29,7 @@ func setSys(fi os.FileInfo) {
 	node, ok := fi.(vfs.Node)
 	if !ok {
 		fs.Errorf(fi, "internal error: %T is not a vfs.Node", fi)
+		return
 	}
 	vfs := node.VFS()
 	// Set the UID and GID for the node passed in from the VFS defaults.
@@ -194,7 +195,7 @@ func (f *FS) Chown(name string, uid, gid int) (err error) {
 	return file.Chown(uid, gid)
 }
 
-// Chtimes changes the acces time and modified time
+// Chtimes changes the access time and modified time
 func (f *FS) Chtimes(name string, atime time.Time, mtime time.Time) (err error) {
 	defer log.Trace(name, "atime=%v, mtime=%v", atime, mtime)("err=%v", &err)
 	return f.vfs.Chtimes(name, atime, mtime)
@@ -145,7 +145,7 @@ that it uses an on disk cache, but the cache entries are held as
 symlinks. Rclone will use the handle of the underlying file as the NFS
 handle which improves performance. This sort of cache can't be backed
 up and restored as the underlying handles will change. This is Linux
-only. It requres running rclone as root or with |CAP_DAC_READ_SEARCH|.
+only. It requires running rclone as root or with |CAP_DAC_READ_SEARCH|.
 You can run rclone with this extra permission by doing this to the
 rclone binary |sudo setcap cap_dac_read_search+ep /path/to/rclone|.
 
@@ -52,7 +52,7 @@ func (b *s3Backend) ListBuckets(ctx context.Context) ([]gofakes3.BucketInfo, err
 	for _, entry := range dirEntries {
 		if entry.IsDir() {
 			response = append(response, gofakes3.BucketInfo{
-				Name:         gofakes3.URLEncode(entry.Name()),
+				Name:         entry.Name(),
 				CreationDate: gofakes3.NewContentTime(entry.ModTime()),
 			})
 		}
@@ -158,7 +158,7 @@ func (b *s3Backend) HeadObject(ctx context.Context, bucketName, objectName strin
 	}, nil
 }
 
-// GetObject fetchs the object from the filesystem.
+// GetObject fetches the object from the filesystem.
 func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string, rangeRequest *gofakes3.ObjectRangeRequest) (obj *gofakes3.Object, err error) {
 	_vfs, err := b.s.getVFS(ctx)
 	if err != nil {
@@ -227,7 +227,7 @@ func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string
 	}
 
 	return &gofakes3.Object{
-		Name:     gofakes3.URLEncode(objectName),
+		Name:     objectName,
 		Hash:     hash,
 		Metadata: meta,
 		Size:     size,
@@ -400,7 +400,7 @@ func (b *s3Backend) deleteObject(ctx context.Context, bucketName, objectName str
 	}
 
 	fp := path.Join(bucketName, objectName)
-	// S3 does not report an error when attemping to delete a key that does not exist, so
+	// S3 does not report an error when attempting to delete a key that does not exist, so
 	// we need to skip IsNotExist errors.
 	if err := _vfs.Remove(fp); err != nil && !os.IsNotExist(err) {
 		return err
@@ -19,7 +19,7 @@ func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPr
 	for _, entry := range dirEntries {
 		object := entry.Name()
 
-		// workround for control-chars detect
+		// workaround for control-chars detect
 		objectPath := path.Join(fdPath, object)
 
 		if !strings.HasPrefix(object, name) {
@@ -28,7 +28,7 @@ func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPr
 
 		if entry.IsDir() {
 			if addPrefix {
-				response.AddPrefix(gofakes3.URLEncode(objectPath))
+				response.AddPrefix(objectPath)
 				continue
 			}
 			err := b.entryListR(_vfs, bucket, path.Join(fdPath, object), "", false, response)
@@ -37,7 +37,7 @@ func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPr
 		}
 	} else {
 		item := &gofakes3.Content{
-			Key:          gofakes3.URLEncode(objectPath),
+			Key:          objectPath,
 			LastModified: gofakes3.NewContentTime(entry.ModTime()),
 			ETag:         getFileHash(entry),
 			Size:         entry.Size(),
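Taken together, the `gofakes3.URLEncode` removals above implement the v1.69.1 fix "serve s3: Fix list objects encoding-type": the backend returns raw key names, and URL-encoding is applied only when the client's ListObjects request actually asks for `encoding-type=url`. A hedged sketch of that response-side rule (simplified; real S3 escapes each path segment), assuming `net/url`:

```go
import "net/url"

// encodeKey applies S3's optional encoding-type to a key at response
// marshalling time, so backends can return raw names unconditionally.
func encodeKey(key, encodingType string) string {
	if encodingType == "url" {
		return url.PathEscape(key)
	}
	return key // no encoding-type requested: return the key verbatim
}
```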
@@ -14,7 +14,7 @@ docs](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html)).
 access.
 
 Please note that some clients may require HTTPS endpoints. See [the
-SSL docs](#ssl-tls) for more information.
+SSL docs](#tls-ssl) for more information.
 
 This command uses the [VFS directory cache](#vfs-virtual-file-system).
 All the functionality will work with `--vfs-cache-mode off`. Using
@@ -69,7 +69,7 @@ secret_access_key = SECRET_ACCESS_KEY
 use_multipart_uploads = false
 ```
 
-Note that setting `disable_multipart_uploads = true` is to work around
+Note that setting `use_multipart_uploads = false` is to work around
 [a bug](#bugs) which will be fixed in due course.
 
 ### Bugs
@@ -809,7 +809,6 @@ put them back in again.` >}}
 * ben-ba <benjamin.brauner@gmx.de>
 * Eli Orzitzer <e_orz@yahoo.com>
 * Anthony Metzidis <anthony.metzidis@gmail.com>
 * emyarod <afw5059@gmail.com>
 * keongalvin <keongalvin@gmail.com>
 * rarspace01 <rarspace01@users.noreply.github.com>
 * Paul Stern <paulstern45@gmail.com>
@@ -487,7 +487,7 @@ See the [bisync filters](#filtering) section and generic
 [--filter-from](/filtering/#filter-from-read-filtering-patterns-from-a-file)
 documentation.
 An [example filters file](#example-filters-file) contains filters for
-non-allowed files for synching with Dropbox.
+non-allowed files for syncing with Dropbox.
 
 If you make changes to your filters file then bisync requires a run
 with `--resync`. This is a safety feature, which prevents existing files
@@ -664,7 +664,7 @@ Using `--check-sync=false` will disable it and may significantly reduce the
 sync run times for very large numbers of files.
 
 The check may be run manually with `--check-sync=only`. It runs only the
-integrity check and terminates without actually synching.
+integrity check and terminates without actually syncing.
 
 Note that currently, `--check-sync` **only checks listing snapshots and NOT the
 actual files on the remotes.** Note also that the listing snapshots will not
@@ -1141,7 +1141,7 @@ The `--include*`, `--exclude*`, and `--filter` flags are also supported.
 
 ### How to filter directories
 
-Filtering portions of the directory tree is a critical feature for synching.
+Filtering portions of the directory tree is a critical feature for syncing.
 
 Examples of directory trees (always beneath the Path1/Path2 root level)
 you may want to exclude from your sync:
@@ -1250,7 +1250,7 @@ quashed by adding `--quiet` to the bisync command line.
 
 ## Example exclude-style filters files for use with Dropbox {#exclude-filters}
 
-- Dropbox disallows synching the listed temporary and configuration/data files.
+- Dropbox disallows syncing the listed temporary and configuration/data files.
   The `- <filename>` filters exclude these files where ever they may occur
   in the sync tree. Consider adding similar exclusions for file types
   you don't need to sync, such as core dump and software build files.
@@ -1584,7 +1584,7 @@ test command flags can be equally prefixed by a single `-` or double dash.
 
 - `go test . -case basic -remote local -remote2 local`
   runs the `test_basic` test case using only the local filesystem,
-  synching one local directory with another local directory.
+  syncing one local directory with another local directory.
   Test script output is to the console, while commands within scenario.txt
   have their output sent to the `.../workdir/test.log` file,
   which is finally compared to the golden copy.
@@ -1815,6 +1815,9 @@ about _Unison_ and synchronization in general.
 
 ## Changelog
 
+### `v1.69.1`
+* Fixed an issue causing listings to not capture concurrent modifications under certain conditions
+
 ### `v1.68`
 * Fixed an issue affecting backends that round modtimes to a lower precision.
 
@@ -1860,4 +1863,4 @@ causing bisync to consider more files than necessary due to overbroad filters du
 * Added [new `--ignore-listing-checksum` flag](https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=6.%20%2D%2Dignore%2Dchecksum%20should%20be%20split%20into%20two%20flags%20for%20separate%20purposes)
   to distinguish from `--ignore-checksum`
 * [Performance improvements](https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=6.%20Deletes%20take%20several%20times%20longer%20than%20copies) for large remotes
 * Documentation and testing improvements
@@ -87,7 +87,7 @@ machine with no Internet browser available.
 Note that rclone runs a webserver on your local machine to collect the
 token as returned from Box. This only runs from the moment it opens
 your browser to the moment you get back the verification code. This
-is on `http://127.0.0.1:53682/` and this it may require you to unblock
+is on `http://127.0.0.1:53682/` and this may require you to unblock
 it temporarily if you are running a host firewall.
 
 Once configured you can then use `rclone` like this,
@@ -5,6 +5,84 @@ description: "Rclone Changelog"
 
 # Changelog
 
 ## v1.69.3 - 2025-05-21
 
 [See commits](https://github.com/rclone/rclone/compare/v1.69.2...v1.69.3)
 
 * Bug Fixes
     * build: Reapply update github.com/golang-jwt/jwt/v5 from 5.2.1 to 5.2.2 to fix CVE-2025-30204 (dependabot[bot])
     * build: Update github.com/ebitengine/purego to work around bug in go1.24.3 (Nick Craig-Wood)
 
 ## v1.69.2 - 2025-05-01
 
 [See commits](https://github.com/rclone/rclone/compare/v1.69.1...v1.69.2)
 
 * Bug fixes
     * accounting: Fix percentDiff calculation (Anagh Kumar Baranwal)
     * build
         * Update github.com/golang-jwt/jwt/v4 from 4.5.1 to 4.5.2 to fix CVE-2025-30204 (dependabot[bot])
         * Update github.com/golang-jwt/jwt/v5 from 5.2.1 to 5.2.2 to fix CVE-2025-30204 (dependabot[bot])
         * Update golang.org/x/crypto to v0.35.0 to fix CVE-2025-22869 (Nick Craig-Wood)
         * Update golang.org/x/net from 0.36.0 to 0.38.0 to fix CVE-2025-22870 (dependabot[bot])
         * Update golang.org/x/net to 0.36.0. to fix CVE-2025-22869 (dependabot[bot])
         * Stop building with go < go1.23 as security updates forbade it (Nick Craig-Wood)
         * Fix docker plugin build (Anagh Kumar Baranwal)
     * cmd: Fix crash if rclone is invoked without any arguments (Janne Hellsten)
     * config: Read configuration passwords from stdin even when terminated with EOF (Samantha Bowen)
     * doc fixes (Andrew Kreimer, Danny Garside, eccoisle, Ed Craig-Wood, emyarod, jack, Jugal Kishore, Markus Gerstel, Michael Kebe, Nick Craig-Wood, simonmcnair, simwai, Zachary Vorhies)
     * fs: Fix corruption of SizeSuffix with "B" suffix in config (eg --min-size) (Nick Craig-Wood)
     * lib/http: Fix race between Serve() and Shutdown() (Nick Craig-Wood)
     * object: Fix memory object out of bounds Seek (Nick Craig-Wood)
     * operations: Fix call fmt.Errorf with wrong err (alingse)
     * rc
         * Disable the metrics server when running `rclone rc` (hiddenmarten)
         * Fix debug/* commands not being available over unix sockets (Nick Craig-Wood)
     * serve nfs: Fix unlikely crash (Nick Craig-Wood)
     * stats: Fix the speed not getting updated after a pause in the processing (Anagh Kumar Baranwal)
     * sync
         * Fix cpu spinning when empty directory finding with leading slashes (Nick Craig-Wood)
         * Copy dir modtimes even when copyEmptySrcDirs is false (ll3006)
 * VFS
     * Fix directory cache serving stale data (Lorenz Brun)
     * Fix inefficient directory caching when directory reads are slow (huanghaojun)
     * Fix integration test failures (Nick Craig-Wood)
 * Drive
     * Metadata: fix error when setting copy-requires-writer-permission on a folder (Nick Craig-Wood)
 * Dropbox
     * Retry link without expiry (Dave Vasilevsky)
 * HTTP
     * Correct root if definitely pointing to a file (nielash)
 * Iclouddrive
     * Fix so created files are writable (Ben Alex)
 * Onedrive
     * Fix metadata ordering in permissions (Nick Craig-Wood)
 
 ## v1.69.1 - 2025-02-14
 
 [See commits](https://github.com/rclone/rclone/compare/v1.69.0...v1.69.1)
 
 * Bug Fixes
     * lib/oauthutil: Fix redirect URL mismatch errors (Nick Craig-Wood)
     * bisync: Fix listings missing concurrent modifications (nielash)
     * serve s3: Fix list objects encoding-type (Nick Craig-Wood)
     * fs: Fix confusing "didn't find section in config file" error (Nick Craig-Wood)
     * doc fixes (Christoph Berger, Dimitri Papadopoulos, Matt Ickstadt, Nick Craig-Wood, Tim White, Zachary Vorhies)
     * build: Added parallel docker builds and caching for go build in the container (Anagh Kumar Baranwal)
 * VFS
     * Fix the cache failing to upload symlinks when `--links` was specified (Nick Craig-Wood)
     * Fix race detected by race detector (Nick Craig-Wood)
     * Close the change notify channel on Shutdown (izouxv)
 * B2
     * Fix "fatal error: concurrent map writes" (Nick Craig-Wood)
 * Iclouddrive
     * Add notes on ADP and Missing PCS cookies (Nick Craig-Wood)
 * Onedrive
     * Mark German (de) region as deprecated (Nick Craig-Wood)
 * S3
     * Added new storage class to magalu provider (Bruno Fernandes)
     * Add DigitalOcean regions SFO2, LON1, TOR1, BLR1 (jkpe)
     * Add latest Linode Object Storage endpoints (jbagwell-akamai)
 
 ## v1.69.0 - 2025-01-12
 
 [See commits](https://github.com/rclone/rclone/compare/v1.68.0...v1.69.0)
@@ -34,7 +112,7 @@ description: "Rclone Changelog"
 * fs: Make `--links` flag global and add new `--local-links` and `--vfs-links` flags (Nick Craig-Wood)
 * http servers: Disable automatic authentication skipping for unix sockets in http servers (Moises Lima)
     * This was making it impossible to use unix sockets with an proxy
-    * This might now cause rclone to need authenticaton where it didn't before
+    * This might now cause rclone to need authentication where it didn't before
 * oauthutil: add support for OAuth client credential flow (Martin Hassack, Nick Craig-Wood)
 * operations: make log messages consistent for mkdir/rmdir at INFO level (Nick Craig-Wood)
 * rc: Add `relative` to [vfs/queue-set-expiry](/rc/#vfs-queue-set-expiry) (Nick Craig-Wood)
@@ -712,7 +790,7 @@ instead of of `--size-only`, when `check` is not available.
 * Update all dependencies (Nick Craig-Wood)
 * Refactor version info and icon resource handling on windows (albertony)
 * doc updates (albertony, alfish2000, asdffdsazqqq, Dimitri Papadopoulos, Herby Gillot, Joda Stößer, Manoj Ghosh, Nick Craig-Wood)
-* Implement `--metadata-mapper` to transform metatadata with a user supplied program (Nick Craig-Wood)
+* Implement `--metadata-mapper` to transform metadata with a user supplied program (Nick Craig-Wood)
 * Add `ChunkWriterDoesntSeek` feature flag and set it for b2 (Nick Craig-Wood)
 * lib/http: Export basic go string functions for use in `--template` (Gabriel Espinoza)
 * makefile: Use POSIX compatible install arguments (Mina Galić)
@@ -827,7 +905,7 @@ instead of of `--size-only`, when `check` is not available.
 * Fix "fatal error: concurrent map writes" (Nick Craig-Wood)
 * B2
     * Fix multipart upload: corrupted on transfer: sizes differ XXX vs 0 (Nick Craig-Wood)
-    * Fix locking window when getting mutipart upload URL (Nick Craig-Wood)
+    * Fix locking window when getting multipart upload URL (Nick Craig-Wood)
     * Fix server side copies greater than 4GB (Nick Craig-Wood)
     * Fix chunked streaming uploads (Nick Craig-Wood)
     * Reduce default `--b2-upload-concurrency` to 4 to reduce memory usage (Nick Craig-Wood)
@@ -965,7 +965,7 @@ rclone [flags]
       --use-json-log            Use json log format
       --use-mmap                Use mmap allocator (see docs)
       --use-server-modtime      Use server modified time instead of object metadata
-      --user-agent string       Set the user-agent to a specified string (default "rclone/v1.69.0")
+      --user-agent string       Set the user-agent to a specified string (default "rclone/v1.69.3")
  -v, --verbose count            Print lots more stuff (repeat for more)
  -V, --version                  Print the version number
       --webdav-auth-redirect    Preserve authentication on redirect
@@ -14,13 +14,18 @@ Remote authorization. Used to authorize a remote or headless
 rclone from a machine with a browser - use as instructed by
 rclone config.
 
+The command requires 1-3 arguments:
+- fs name (e.g., "drive", "s3", etc.)
+- Either a base64 encoded JSON blob obtained from a previous rclone config session
+- Or a client_id and client_secret pair obtained from the remote service
+
 Use --auth-no-open-browser to prevent rclone to open auth
 link in default browser automatically.
 
 Use --template to generate HTML output via a custom Go template. If a blank string is provided as an argument to this flag, the default template is used.
 
 ```
-rclone authorize [flags]
+rclone authorize <fs name> [base64_json_blob | client_id client_secret] [flags]
 ```
 
 ## Options
@@ -21,12 +21,12 @@ password to re-encrypt the config.
 
 When `--password-command` is called to change the password then the
 environment variable `RCLONE_PASSWORD_CHANGE=1` will be set. So if
-changing passwords programatically you can use the environment
+changing passwords programmatically you can use the environment
 variable to distinguish which password you must supply.
 
 Alternatively you can remove the password first (with `rclone config
 encryption remove`), then set it again with this command which may be
-easier if you don't mind the unecrypted config file being on the disk
+easier if you don't mind the unencrypted config file being on the disk
 briefly.
@@ -36,6 +36,8 @@ This doesn't transfer files that are identical on src and dst, testing
 by size and modification time or MD5SUM. It doesn't delete files from
 the destination.
 
+*If you are looking to copy just a byte range of a file, please see 'rclone cat --offset X --count Y'*
+
 **Note**: Use the `-P`/`--progress` flag to view real-time transfer statistics
@@ -17,7 +17,7 @@ Setting `--auto-filename` will attempt to automatically determine the
 filename from the URL (after any redirections) and used in the
 destination path.
 
-With `--auto-filename-header` in addition, if a specific filename is
+With `--header-filename` in addition, if a specific filename is
 set in HTTP headers, it will be used instead of the name from the URL.
 With `--print-filename` in addition, the resulting file name will be
 printed.
@@ -28,7 +28,7 @@ destination if there is one with the same name.
 Setting `--stdout` or making the output file name `-`
 will cause the output to be written to standard output.
 
-## Troublshooting
+## Troubleshooting
 
 If you can't get `rclone copyurl` to work then here are some things you can try:
 
@@ -571,11 +571,11 @@ seconds. If rclone is quit or dies with files that haven't been
 uploaded, these will be uploaded next time rclone is run with the same
 flags.
 
-If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
+If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
 that the cache may exceed these quotas for two reasons. Firstly
 because it is only checked every `--vfs-cache-poll-interval`. Secondly
 because open files cannot be evicted from the cache. When
-`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
+`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
 rclone will attempt to evict the least accessed files from the cache
 first. rclone will start with files that haven't been accessed for the
 longest. This cache flushing strategy is efficient and more relevant
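This paragraph (repeated below for each command that mounts a VFS) describes a polling, least-recently-accessed eviction. A rough sketch of the policy it implies, with hypothetical types standing in for rclone's `vfs/vfscache` internals:

```go
import (
	"sort"
	"time"
)

// cachedFile is a hypothetical stand-in for a vfs cache entry.
type cachedFile struct {
	name       string
	open       bool      // open files cannot be evicted
	lastAccess time.Time // eviction is least-recently-accessed first
}

// evict sketches the documented policy: while the cache is over quota,
// remove closed files, oldest access time first. Open files are skipped,
// which is one reason the quotas can be exceeded.
func evict(files []cachedFile, overQuota func() bool, remove func(string)) {
	sort.Slice(files, func(i, j int) bool {
		return files[i].lastAccess.Before(files[j].lastAccess)
	})
	for _, f := range files {
		if !overQuota() {
			return // back under --vfs-cache-max-size / --vfs-cache-min-free-space
		}
		if f.open {
			continue
		}
		remove(f.name)
	}
}
```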
@@ -572,11 +572,11 @@ seconds. If rclone is quit or dies with files that haven't been
 uploaded, these will be uploaded next time rclone is run with the same
 flags.
 
-If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
+If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
 that the cache may exceed these quotas for two reasons. Firstly
 because it is only checked every `--vfs-cache-poll-interval`. Secondly
 because open files cannot be evicted from the cache. When
-`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
+`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
 rclone will attempt to evict the least accessed files from the cache
 first. rclone will start with files that haven't been accessed for the
 longest. This cache flushing strategy is efficient and more relevant
@@ -15,6 +15,9 @@ include/exclude filters - everything will be removed. Use the
 delete files. To delete empty directories only, use command
 [rmdir](/commands/rclone_rmdir/) or [rmdirs](/commands/rclone_rmdirs/).
 
+The concurrency of this operation is controlled by the `--checkers` global flag. However, some backends will
+implement this command directly, in which case `--checkers` will be ignored.
+
 **Important**: Since this can cause data loss, test first with the
 `--dry-run` or the `--interactive`/`-i` flag.
@@ -134,11 +134,11 @@ seconds. If rclone is quit or dies with files that haven't been
 uploaded, these will be uploaded next time rclone is run with the same
 flags.
 
-If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
+If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
 that the cache may exceed these quotas for two reasons. Firstly
 because it is only checked every `--vfs-cache-poll-interval`. Secondly
 because open files cannot be evicted from the cache. When
-`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
+`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
 rclone will attempt to evict the least accessed files from the cache
 first. rclone will start with files that haven't been accessed for the
 longest. This cache flushing strategy is efficient and more relevant
@@ -146,11 +146,11 @@ seconds. If rclone is quit or dies with files that haven't been
 uploaded, these will be uploaded next time rclone is run with the same
 flags.
 
-If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
+If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
 that the cache may exceed these quotas for two reasons. Firstly
 because it is only checked every `--vfs-cache-poll-interval`. Secondly
 because open files cannot be evicted from the cache. When
-`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
+`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
 rclone will attempt to evict the least accessed files from the cache
 first. rclone will start with files that haven't been accessed for the
 longest. This cache flushing strategy is efficient and more relevant
@@ -127,11 +127,11 @@ seconds. If rclone is quit or dies with files that haven't been
 uploaded, these will be uploaded next time rclone is run with the same
 flags.
 
-If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
+If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
 that the cache may exceed these quotas for two reasons. Firstly
 because it is only checked every `--vfs-cache-poll-interval`. Secondly
 because open files cannot be evicted from the cache. When
-`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
+`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
 rclone will attempt to evict the least accessed files from the cache
 first. rclone will start with files that haven't been accessed for the
 longest. This cache flushing strategy is efficient and more relevant
@@ -245,11 +245,11 @@ seconds. If rclone is quit or dies with files that haven't been
 uploaded, these will be uploaded next time rclone is run with the same
 flags.
 
-If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
+If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
 that the cache may exceed these quotas for two reasons. Firstly
 because it is only checked every `--vfs-cache-poll-interval`. Secondly
 because open files cannot be evicted from the cache. When
-`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
+`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
 rclone will attempt to evict the least accessed files from the cache
 first. rclone will start with files that haven't been accessed for the
 longest. This cache flushing strategy is efficient and more relevant
@@ -53,7 +53,7 @@ that it uses an on disk cache, but the cache entries are held as
 symlinks. Rclone will use the handle of the underlying file as the NFS
 handle which improves performance. This sort of cache can't be backed
 up and restored as the underlying handles will change. This is Linux
-only. It requres running rclone as root or with `CAP_DAC_READ_SEARCH`.
+only. It requires running rclone as root or with `CAP_DAC_READ_SEARCH`.
 You can run rclone with this extra permission by doing this to the
 rclone binary `sudo setcap cap_dac_read_search+ep /path/to/rclone`.
@@ -176,11 +176,11 @@ seconds. If rclone is quit or dies with files that haven't been
 uploaded, these will be uploaded next time rclone is run with the same
 flags.
 
-If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
+If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
 that the cache may exceed these quotas for two reasons. Firstly
 because it is only checked every `--vfs-cache-poll-interval`. Secondly
 because open files cannot be evicted from the cache. When
-`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
+`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
 rclone will attempt to evict the least accessed files from the cache
 first. rclone will start with files that haven't been accessed for the
 longest. This cache flushing strategy is efficient and more relevant
@@ -27,7 +27,7 @@ docs](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html)).
 access.
 
 Please note that some clients may require HTTPS endpoints. See [the
-SSL docs](#ssl-tls) for more information.
+SSL docs](#tls-ssl) for more information.
 
 This command uses the [VFS directory cache](#vfs-virtual-file-system).
 All the functionality will work with `--vfs-cache-mode off`. Using
@@ -82,7 +82,7 @@ secret_access_key = SECRET_ACCESS_KEY
 use_multipart_uploads = false
 ```
 
-Note that setting `disable_multipart_uploads = true` is to work around
+Note that setting `use_multipart_uploads = false` is to work around
 [a bug](#bugs) which will be fixed in due course.
 
 ## Bugs
@@ -334,11 +334,11 @@ seconds. If rclone is quit or dies with files that haven't been
 uploaded, these will be uploaded next time rclone is run with the same
 flags.
 
-If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
+If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
 that the cache may exceed these quotas for two reasons. Firstly
 because it is only checked every `--vfs-cache-poll-interval`. Secondly
 because open files cannot be evicted from the cache. When
-`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
+`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
 rclone will attempt to evict the least accessed files from the cache
 first. rclone will start with files that haven't been accessed for the
 longest. This cache flushing strategy is efficient and more relevant
@@ -170,11 +170,11 @@ seconds. If rclone is quit or dies with files that haven't been
 uploaded, these will be uploaded next time rclone is run with the same
 flags.
 
-If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
+If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
 that the cache may exceed these quotas for two reasons. Firstly
 because it is only checked every `--vfs-cache-poll-interval`. Secondly
 because open files cannot be evicted from the cache. When
-`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
+`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
 rclone will attempt to evict the least accessed files from the cache
 first. rclone will start with files that haven't been accessed for the
 longest. This cache flushing strategy is efficient and more relevant
@@ -288,11 +288,11 @@ seconds. If rclone is quit or dies with files that haven't been
 uploaded, these will be uploaded next time rclone is run with the same
 flags.
 
-If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
+If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
 that the cache may exceed these quotas for two reasons. Firstly
 because it is only checked every `--vfs-cache-poll-interval`. Secondly
 because open files cannot be evicted from the cache. When
-`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
+`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
 rclone will attempt to evict the least accessed files from the cache
 first. rclone will start with files that haven't been accessed for the
 longest. This cache flushing strategy is efficient and more relevant
@@ -741,7 +741,7 @@ strong random number generator. The nonce is incremented for each
 chunk read making sure each nonce is unique for each block written.
 The chance of a nonce being reused is minuscule. If you wrote an
 exabyte of data (10¹⁸ bytes) you would have a probability of
-approximately 2×10⁻³² of re-using a nonce.
+approximately 2×10⁻³² of reusing a nonce.
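That figure is consistent with a quick birthday-style estimate, assuming crypt's 24-byte (192-bit) secretbox nonces and 64 KiB chunks, and treating nonces as uniformly random:

```latex
n \approx \frac{10^{18}}{2^{16}} \approx 1.5 \times 10^{13} \ \text{nonces}, \qquad
P \approx \frac{n^2}{2 \cdot 2^{192}} \approx \frac{(1.5 \times 10^{13})^2}{1.3 \times 10^{58}} \approx 2 \times 10^{-32}
```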
 
 #### Chunk
Some files were not shown because too many files have changed in this diff.