mirror of https://github.com/rclone/rclone.git (synced 2025-12-30 23:23:30 +00:00)

Compare commits: 6858bf242e ... v1.69.1 (27 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 4e77a4ff73 | |
| | b63c42f39b | |
| | 30c9bab35d | |
| | 68bbd8017d | |
| | 259dbbab55 | |
| | 5fa85f66fe | |
| | fb648e4774 | |
| | 9978750a8c | |
| | d953c0c51b | |
| | 9dfce11c9b | |
| | 504f2fb571 | |
| | f79f929e57 | |
| | 83e04ead37 | |
| | 1a95a23fdc | |
| | c4b592e549 | |
| | 642d1415d1 | |
| | 64556d4ca2 | |
| | de69448565 | |
| | ad941655c5 | |
| | 6cbb9fd7cb | |
| | 7988300f50 | |
| | 1b47b7a6bb | |
| | 626bdacd59 | |
| | 1ef2da31a7 | |
| | 376a5b1a83 | |
| | ddaeb07019 | |
| | c72f71bd02 | |
@@ -1,77 +0,0 @@ (workflow "Docker beta build" removed)

name: Docker beta build

on:
  push:
    branches:
      - master

jobs:
  build:
    if: github.repository == 'rclone/rclone'
    runs-on: ubuntu-latest
    name: Build image job
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ghcr.io/${{ github.repository }}
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          # This is the user that triggered the Workflow. In this case, it will
          # either be the user who created the Release or manually triggered
          # the workflow_dispatch.
          username: ${{ github.actor }}
          # `secrets.GITHUB_TOKEN` is a secret that's automatically generated by
          # GitHub Actions at the start of a workflow run to identify the job.
          # This is used to authenticate against GitHub Container Registry.
          # See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
          # for more detailed information.
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Show disk usage
        shell: bash
        run: |
          df -h .
      - name: Build and publish image
        uses: docker/build-push-action@v6
        with:
          file: Dockerfile
          context: .
          push: true  # push the image to ghcr
          tags: |
            ghcr.io/rclone/rclone:beta
            rclone/rclone:beta
          labels: ${{ steps.meta.outputs.labels }}
          platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
          cache-from: type=gha, scope=${{ github.workflow }}
          cache-to: type=gha, mode=max, scope=${{ github.workflow }}
          provenance: false
          # Eventually cache will need to be cleared if builds more frequent than once a week
          # https://github.com/docker/build-push-action/issues/252
      - name: Show disk usage
        shell: bash
        run: |
          df -h .
294  .github/workflows/build_publish_docker_image.yml (vendored, new file)
@@ -0,0 +1,294 @@ (new file)

---
# Github Actions release for rclone
# -*- compile-command: "yamllint -f parsable build_publish_docker_image.yml" -*-

name: Build & Push Docker Images

# Trigger the workflow on push or pull request
on:
  push:
    branches:
      - '**'
    tags:
      - '**'
  workflow_dispatch:
    inputs:
      manual:
        description: Manual run (bypass default conditions)
        type: boolean
        default: true

jobs:
  build-image:
    if: inputs.manual || (github.repository == 'rclone/rclone' && github.event_name != 'pull_request')
    timeout-minutes: 60
    strategy:
      fail-fast: false
      matrix:
        include:
          - platform: linux/amd64
            runs-on: ubuntu-24.04
          - platform: linux/386
            runs-on: ubuntu-24.04
          - platform: linux/arm64
            runs-on: ubuntu-24.04-arm
          - platform: linux/arm/v7
            runs-on: ubuntu-24.04-arm
          - platform: linux/arm/v6
            runs-on: ubuntu-24.04-arm

    name: Build Docker Image for ${{ matrix.platform }}
    runs-on: ${{ matrix.runs-on }}

    steps:
      - name: Free Space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .

      - name: Checkout Repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set REPO_NAME Variable
        run: |
          echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}

      - name: Set PLATFORM Variable
        run: |
          platform=${{ matrix.platform }}
          echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV

      - name: Set CACHE_NAME Variable
        shell: python
        run: |
          import os, re

          def slugify(input_string, max_length=63):
              slug = input_string.lower()
              slug = re.sub(r'[^a-z0-9 -]', ' ', slug)
              slug = slug.strip()
              slug = re.sub(r'\s+', '-', slug)
              slug = re.sub(r'-+', '-', slug)
              slug = slug[:max_length]
              slug = re.sub(r'[-]+$', '', slug)
              return slug

          ref_name_slug = "cache"

          if os.environ.get("GITHUB_REF_NAME") and os.environ['GITHUB_EVENT_NAME'] == "pull_request":
              ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME'])

          with open(os.environ['GITHUB_ENV'], 'a') as env:
              env.write(f"CACHE_NAME={ref_name_slug}\n")

      - name: Get ImageOS
        # There's no way around this, because "ImageOS" is only available to
        # processes, but the setup-go action uses it in its key.
        id: imageos
        uses: actions/github-script@v7
        with:
          result-encoding: string
          script: |
            return process.env.ImageOS

      - name: Extract Metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        env:
          DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,manifest-descriptor  # Important for digest annotation (used by Github packages)
        with:
          images: |
            ghcr.io/${{ env.REPO_NAME }}
          labels: |
            org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
            org.opencontainers.image.vendor=${{ github.repository_owner }}
            org.opencontainers.image.authors=rclone <https://github.com/rclone>
            org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
            org.opencontainers.image.revision=${{ github.sha }}
          tags: |
            type=sha
            type=ref,event=pr
            type=ref,event=branch
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}
            type=semver,pattern={{major}}.{{minor}}
            type=raw,value=beta,enable={{is_default_branch}}

      - name: Setup QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Load Go Build Cache for Docker
        id: go-cache
        uses: actions/cache@v4
        with:
          key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
          # Cache only the go builds, the module download is cached via the docker layer caching
          path: |
            go-build-cache

      - name: Inject Go Build Cache into Docker
        uses: reproducible-containers/buildkit-cache-dance@v3
        with:
          cache-map: |
            {
              "go-build-cache": "/root/.cache/go-build"
            }
          skip-extraction: ${{ steps.go-cache.outputs.cache-hit }}

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          # This is the user that triggered the Workflow. In this case, it will
          # either be the user who created the Release or manually triggered
          # the workflow_dispatch.
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and Publish Image Digest
        id: build
        uses: docker/build-push-action@v6
        with:
          file: Dockerfile
          context: .
          provenance: false
          # don't specify 'tags' here (error "get can't push tagged ref by digest")
          # tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          annotations: ${{ steps.meta.outputs.annotations }}
          platforms: ${{ matrix.platform }}
          outputs: |
            type=image,name=ghcr.io/${{ env.REPO_NAME }},push-by-digest=true,name-canonical=true,push=true
          cache-from: |
            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
          cache-to: |
            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }},image-manifest=true,mode=max,compression=zstd

      - name: Export Image Digest
        run: |
          mkdir -p /tmp/digests
          digest="${{ steps.build.outputs.digest }}"
          touch "/tmp/digests/${digest#sha256:}"

      - name: Upload Image Digest
        uses: actions/upload-artifact@v4
        with:
          name: digests-${{ env.PLATFORM }}
          path: /tmp/digests/*
          retention-days: 1
          if-no-files-found: error

  merge-image:
    name: Merge & Push Final Docker Image
    runs-on: ubuntu-24.04
    needs:
      - build-image

    steps:
      - name: Download Image Digests
        uses: actions/download-artifact@v4
        with:
          path: /tmp/digests
          pattern: digests-*
          merge-multiple: true

      - name: Set REPO_NAME Variable
        run: |
          echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}

      - name: Extract Metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        env:
          DOCKER_METADATA_ANNOTATIONS_LEVELS: index
        with:
          images: |
            ${{ env.REPO_NAME }}
            ghcr.io/${{ env.REPO_NAME }}
          labels: |
            org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
            org.opencontainers.image.vendor=${{ github.repository_owner }}
            org.opencontainers.image.authors=rclone <https://github.com/rclone>
            org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
            org.opencontainers.image.revision=${{ github.sha }}
          tags: |
            type=sha
            type=ref,event=pr
            type=ref,event=branch
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}
            type=semver,pattern={{major}}.{{minor}}
            type=raw,value=beta,enable={{is_default_branch}}

      - name: Extract Tags
        shell: python
        run: |
          import json, os

          metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
          metadata = json.loads(metadata_json)

          tags = [f"--tag '{tag}'" for tag in metadata["tags"]]
          tags_string = " ".join(tags)

          with open(os.environ['GITHUB_ENV'], 'a') as env:
              env.write(f"TAGS={tags_string}\n")

      - name: Extract Annotations
        shell: python
        run: |
          import json, os

          metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
          metadata = json.loads(metadata_json)

          annotations = [f"--annotation '{annotation}'" for annotation in metadata["annotations"]]
          annotations_string = " ".join(annotations)

          with open(os.environ['GITHUB_ENV'], 'a') as env:
              env.write(f"ANNOTATIONS={annotations_string}\n")

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          # This is the user that triggered the Workflow. In this case, it will
          # either be the user who created the Release or manually triggered
          # the workflow_dispatch.
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Create & Push Manifest List
        working-directory: /tmp/digests
        run: |
          docker buildx imagetools create \
            ${{ env.TAGS }} \
            ${{ env.ANNOTATIONS }} \
            $(printf 'ghcr.io/${{ env.REPO_NAME }}@sha256:%s ' *)

      - name: Inspect and Run Multi-Platform Image
        run: |
          docker buildx imagetools inspect --raw ${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
          docker buildx imagetools inspect --raw ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
          docker run --rm ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }} version
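For context, the merge job above boils down to a single `docker buildx imagetools create` call over the per-platform digests. A minimal sketch with placeholder digests (the real values come from the uploaded artifacts):

```bash
# Combine two per-platform images, already pushed by digest, into one
# multi-arch tag (digests below are placeholders).
docker buildx imagetools create \
  --tag ghcr.io/rclone/rclone:beta \
  ghcr.io/rclone/rclone@sha256:<amd64-digest> \
  ghcr.io/rclone/rclone@sha256:<arm64-digest>

# Verify the resulting manifest list.
docker buildx imagetools inspect ghcr.io/rclone/rclone:beta
```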
45  .github/workflows/build_publish_docker_plugin.yml (vendored, new file)
@@ -0,0 +1,45 @@ (new file)

---
# Github Actions release for rclone
# -*- compile-command: "yamllint -f parsable build_publish_docker_plugin.yml" -*-

name: Release Build for Docker Plugin

on:
  release:
    types: [published]

jobs:

  build_docker_volume_plugin:
    if: github.repository == 'rclone/rclone'
    needs: build
    runs-on: ubuntu-latest
    name: Build docker plugin job
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Build and publish docker plugin
        shell: bash
        run: |
          VER=${GITHUB_REF#refs/tags/}
          PLUGIN_USER=rclone
          docker login --username ${{ secrets.DOCKER_HUB_USER }} \
            --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
          for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
            export PLUGIN_USER PLUGIN_ARCH
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
          done
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}
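Once published, the volume plugin built by this workflow can be installed from Docker Hub. A sketch of a typical invocation, assuming the documented plugin name, alias, and args (all illustrative here):

```bash
# Install the rclone Docker volume plugin and alias it as "rclone".
docker plugin install rclone/docker-volume-rclone:amd64 \
  args="-v" --alias rclone --grant-all-permissions
```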
@@ -1,89 +0,0 @@ (workflow "Docker release build" removed)

name: Docker release build

on:
  release:
    types: [published]

jobs:
  build:
    if: github.repository == 'rclone/rclone'
    runs-on: ubuntu-latest
    name: Build image job
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Get actual patch version
        id: actual_patch_version
        run: echo ::set-output name=ACTUAL_PATCH_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g')
      - name: Get actual minor version
        id: actual_minor_version
        run: echo ::set-output name=ACTUAL_MINOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1,2)
      - name: Get actual major version
        id: actual_major_version
        run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKER_HUB_USER }}
          password: ${{ secrets.DOCKER_HUB_PASSWORD }}
      - name: Build and publish image
        uses: docker/build-push-action@v6
        with:
          file: Dockerfile
          context: .
          platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
          push: true
          tags: |
            rclone/rclone:latest
            rclone/rclone:${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }}
            rclone/rclone:${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }}
            rclone/rclone:${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}

  build_docker_volume_plugin:
    if: github.repository == 'rclone/rclone'
    needs: build
    runs-on: ubuntu-latest
    name: Build docker plugin job
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Build and publish docker plugin
        shell: bash
        run: |
          VER=${GITHUB_REF#refs/tags/}
          PLUGIN_USER=rclone
          docker login --username ${{ secrets.DOCKER_HUB_USER }} \
            --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
          for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
            export PLUGIN_USER PLUGIN_ARCH
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
          done
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}
44  Dockerfile
@@ -1,19 +1,47 @@
 FROM golang:alpine AS builder

-COPY . /go/src/github.com/rclone/rclone/
+ARG CGO_ENABLED=0

 WORKDIR /go/src/github.com/rclone/rclone/

-RUN apk add --no-cache make bash gawk git
-RUN \
-  CGO_ENABLED=0 \
-  make
-RUN ./rclone version
+RUN echo "**** Set Go Environment Variables ****" && \
+  go env -w GOCACHE=/root/.cache/go-build
+
+RUN echo "**** Install Dependencies ****" && \
+  apk add --no-cache \
+    make \
+    bash \
+    gawk \
+    git
+
+COPY go.mod .
+COPY go.sum .
+
+RUN echo "**** Download Go Dependencies ****" && \
+  go mod download -x
+
+RUN echo "**** Verify Go Dependencies ****" && \
+  go mod verify
+
+COPY . .
+
+RUN --mount=type=cache,target=/root/.cache/go-build,sharing=locked \
+  echo "**** Build Binary ****" && \
+  make
+
+RUN echo "**** Print Version Binary ****" && \
+  ./rclone version

 # Begin final image
 FROM alpine:latest

-RUN apk --no-cache add ca-certificates fuse3 tzdata && \
-    echo "user_allow_other" >> /etc/fuse.conf
+RUN echo "**** Install Dependencies ****" && \
+  apk add --no-cache \
+    ca-certificates \
+    fuse3 \
+    tzdata && \
+  echo "Enable user_allow_other in fuse" && \
+  echo "user_allow_other" >> /etc/fuse.conf

 COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
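The `--mount=type=cache` instruction in the new builder stage requires BuildKit. A minimal local build sketch (the tag name is illustrative):

```bash
# BuildKit (the default in recent Docker) is needed for cache mounts;
# the flag forces it on older installations.
DOCKER_BUILDKIT=1 docker build -t rclone:dev .
```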
644  MANUAL.html (generated): file diff suppressed because it is too large
230  MANUAL.md (generated)
@@ -1,7 +1,78 @@
 % rclone(1) User Manual
 % Nick Craig-Wood
-% Jan 12, 2025
+% Feb 14, 2025
+
+# NAME
+
+rclone - manage files on cloud storage
+
+# SYNOPSIS
+
+```
+Usage:
+  rclone [flags]
+  rclone [command]
+
+Available commands:
+  about       Get quota information from the remote.
+  authorize   Remote authorization.
+  backend     Run a backend-specific command.
+  bisync      Perform bidirectional synchronization between two paths.
+  cat         Concatenates any files and sends them to stdout.
+  check       Checks the files in the source and destination match.
+  checksum    Checks the files in the destination against a SUM file.
+  cleanup     Clean up the remote if possible.
+  completion  Output completion script for a given shell.
+  config      Enter an interactive configuration session.
+  copy        Copy files from source to dest, skipping identical files.
+  copyto      Copy files from source to dest, skipping identical files.
+  copyurl     Copy the contents of the URL supplied content to dest:path.
+  cryptcheck  Cryptcheck checks the integrity of an encrypted remote.
+  cryptdecode Cryptdecode returns unencrypted file names.
+  dedupe      Interactively find duplicate filenames and delete/rename them.
+  delete      Remove the files in path.
+  deletefile  Remove a single file from remote.
+  gendocs     Output markdown docs for rclone to the directory supplied.
+  gitannex    Speaks with git-annex over stdin/stdout.
+  hashsum     Produces a hashsum file for all the objects in the path.
+  help        Show help for rclone commands, flags and backends.
+  link        Generate public link to file/folder.
+  listremotes List all the remotes in the config file and defined in environment variables.
+  ls          List the objects in the path with size and path.
+  lsd         List all directories/containers/buckets in the path.
+  lsf         List directories and objects in remote:path formatted for parsing.
+  lsjson      List directories and objects in the path in JSON format.
+  lsl         List the objects in path with modification time, size and path.
+  md5sum      Produces an md5sum file for all the objects in the path.
+  mkdir       Make the path if it doesn't already exist.
+  mount       Mount the remote as file system on a mountpoint.
+  move        Move files from source to dest.
+  moveto      Move file or directory from source to dest.
+  ncdu        Explore a remote with a text based user interface.
+  nfsmount    Mount the remote as file system on a mountpoint.
+  obscure     Obscure password for use in the rclone config file.
+  purge       Remove the path and all of its contents.
+  rc          Run a command against a running rclone.
+  rcat        Copies standard input to file on remote.
+  rcd         Run rclone listening to remote control commands only.
+  rmdir       Remove the empty directory at path.
+  rmdirs      Remove empty directories under the path.
+  selfupdate  Update the rclone binary.
+  serve       Serve a remote over a protocol.
+  settier     Changes storage class/tier of objects in remote.
+  sha1sum     Produces an sha1sum file for all the objects in the path.
+  size        Prints the total size and number of objects in remote:path.
+  sync        Make source and dest identical, modifying destination only.
+  test        Run a test command
+  touch       Create new file or change file modification time.
+  tree        List the contents of the remote in a tree like fashion.
+  version     Show the version number.
+
+Use "rclone [command] --help" for more information about a command.
+Use "rclone help flags" to see the global flags.
+Use "rclone help backends" for a list of supported services.
+
+```
+
 # Rclone syncs your files to cloud storage

 <img width="50%" src="https://rclone.org/img/logo_on_light__horizontal_color.svg" alt="rclone logo" style="float:right; padding: 5px;" >
@@ -1690,6 +1761,9 @@ include/exclude filters - everything will be removed. Use the
 [delete](https://rclone.org/commands/rclone_delete/) command if you want to selectively
 delete files. To delete empty directories only, use command
 [rmdir](https://rclone.org/commands/rclone_rmdir/) or [rmdirs](https://rclone.org/commands/rclone_rmdirs/).

+The concurrency of this operation is controlled by the `--checkers` global flag. However, some backends will
+implement this command directly, in which case `--checkers` will be ignored.
+
 **Important**: Since this can cause data loss, test first with the
 `--dry-run` or the `--interactive`/`-i` flag.
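A minimal illustration of the flag described in the added lines (remote name and path are placeholders):

```bash
# Always dry-run first: purge obeys no filters.
rclone purge remote:old-backups --dry-run

# Raise deletion concurrency; this is ignored where the backend
# implements purge natively.
rclone purge remote:old-backups --checkers 16
```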
@@ -3745,12 +3819,12 @@ password to re-encrypt the config.
 When `--password-command` is called to change the password then the
 environment variable `RCLONE_PASSWORD_CHANGE=1` will be set. So if
-changing passwords programatically you can use the environment
+changing passwords programmatically you can use the environment
 variable to distinguish which password you must supply.

 Alternatively you can remove the password first (with `rclone config
 encryption remove`), then set it again with this command which may be
-easier if you don't mind the unecrypted config file being on the disk
+easier if you don't mind the unencrypted config file being on the disk
 briefly.
@@ -4290,7 +4364,7 @@ destination if there is one with the same name.
 Setting `--stdout` or making the output file name `-`
 will cause the output to be written to standard output.

-## Troublshooting
+## Troubleshooting

 If you can't get `rclone copyurl` to work then here are some things you can try:
@@ -10581,7 +10655,7 @@ that it uses an on disk cache, but the cache entries are held as
 symlinks. Rclone will use the handle of the underlying file as the NFS
 handle which improves performance. This sort of cache can't be backed
 up and restored as the underlying handles will change. This is Linux
-only. It requres running rclone as root or with `CAP_DAC_READ_SEARCH`.
+only. It requires running rclone as root or with `CAP_DAC_READ_SEARCH`.
 You can run rclone with this extra permission by doing this to the
 rclone binary `sudo setcap cap_dac_read_search+ep /path/to/rclone`.
@@ -11408,7 +11482,7 @@ secret_access_key = SECRET_ACCESS_KEY
 use_multipart_uploads = false
 ```

-Note that setting `disable_multipart_uploads = true` is to work around
+Note that setting `use_multipart_uploads = false` is to work around
 [a bug](#bugs) which will be fixed in due course.

 ## Bugs
@@ -14444,6 +14518,11 @@ it to `false`. It is also possible to specify `--boolean=false` or
 parsed as `--boolean` and the `false` is parsed as an extra command
 line argument for rclone.

+Options documented to take a `stringArray` parameter accept multiple
+values. To pass more than one value, repeat the option; for example:
+`--include value1 --include value2`.
+
 ### Time or duration options {#time-option}

 TIME or DURATION options can be specified as a duration string or a
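For instance, repeating a `stringArray` flag such as `--include`, per the lines added above (remote and patterns are illustrative):

```bash
# Each occurrence of --include appends one more pattern.
rclone ls remote:photos --include "*.jpg" --include "*.png"
```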
@@ -16755,7 +16834,7 @@ so they take exactly the same form.
 The options set by environment variables can be seen with the `-vv` flag, e.g. `rclone version -vv`.

 Options that can appear multiple times (type `stringArray`) are
-treated slighly differently as environment variables can only be
+treated slightly differently as environment variables can only be
 defined once. In order to allow a simple mechanism for adding one or
 many items, the input is treated as a [CSV encoded](https://godoc.org/encoding/csv)
 string. For example
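A sketch of the CSV mechanism, assuming the usual `RCLONE_<FLAG>` environment variable naming:

```bash
# Equivalent of --include "*.jpg" --include "*.png" in one variable:
# the value is CSV-decoded into multiple stringArray entries.
export RCLONE_INCLUDE="*.jpg,*.png"
rclone ls remote:photos
```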
@@ -19937,7 +20016,7 @@ the `--vfs-cache-mode` is off, it will return an empty result.
     ],
 }

-The `expiry` time is the time until the file is elegible for being
+The `expiry` time is the time until the file is eligible for being
 uploaded in floating point seconds. This may go negative. As rclone
 only transfers `--transfers` files at once, only the lowest
 `--transfers` expiry times will have `uploading` as `true`. So there
@@ -21018,7 +21097,7 @@ Flags for general networking and HTTP stuff.
       --tpslimit float                       Limit HTTP transactions per second to this
       --tpslimit-burst int                   Max burst of transactions for --tpslimit (default 1)
       --use-cookies                          Enable session cookiejar
-      --user-agent string                    Set the user-agent to a specified string (default "rclone/v1.69.0")
+      --user-agent string                    Set the user-agent to a specified string (default "rclone/v1.69.1")
 ```
@@ -23066,7 +23145,7 @@ See the [bisync filters](#filtering) section and generic
 [--filter-from](https://rclone.org/filtering/#filter-from-read-filtering-patterns-from-a-file)
 documentation.
 An [example filters file](#example-filters-file) contains filters for
-non-allowed files for synching with Dropbox.
+non-allowed files for syncing with Dropbox.

 If you make changes to your filters file then bisync requires a run
 with `--resync`. This is a safety feature, which prevents existing files
@@ -23243,7 +23322,7 @@ Using `--check-sync=false` will disable it and may significantly reduce the
 sync run times for very large numbers of files.

 The check may be run manually with `--check-sync=only`. It runs only the
-integrity check and terminates without actually synching.
+integrity check and terminates without actually syncing.

 Note that currently, `--check-sync` **only checks listing snapshots and NOT the
 actual files on the remotes.** Note also that the listing snapshots will not
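For reference, the manual check described above would be invoked along these lines (remote paths are placeholders):

```bash
# Run only the listing integrity check, without syncing anything.
rclone bisync remote1:path1 remote2:path2 --check-sync=only
```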
@@ -23720,7 +23799,7 @@ The `--include*`, `--exclude*`, and `--filter` flags are also supported.

 ### How to filter directories

-Filtering portions of the directory tree is a critical feature for synching.
+Filtering portions of the directory tree is a critical feature for syncing.

 Examples of directory trees (always beneath the Path1/Path2 root level)
 you may want to exclude from your sync:
@@ -23829,7 +23908,7 @@ quashed by adding `--quiet` to the bisync command line.

 ## Example exclude-style filters files for use with Dropbox {#exclude-filters}

-- Dropbox disallows synching the listed temporary and configuration/data files.
+- Dropbox disallows syncing the listed temporary and configuration/data files.
   The `- <filename>` filters exclude these files wherever they may occur
   in the sync tree. Consider adding similar exclusions for file types
   you don't need to sync, such as core dump and software build files.
@@ -24163,7 +24242,7 @@ test command flags can be equally prefixed by a single `-` or double dash.

 - `go test . -case basic -remote local -remote2 local`
   runs the `test_basic` test case using only the local filesystem,
-  synching one local directory with another local directory.
+  syncing one local directory with another local directory.
   Test script output is to the console, while commands within scenario.txt
   have their output sent to the `.../workdir/test.log` file,
   which is finally compared to the golden copy.
@@ -24394,6 +24473,9 @@ about _Unison_ and synchronization in general.

 ## Changelog

+### `v1.69.1`
+* Fixed an issue causing listings to not capture concurrent modifications under certain conditions
+
 ### `v1.68`
 * Fixed an issue affecting backends that round modtimes to a lower precision.
@@ -25680,7 +25762,7 @@ Notes on above:
    that `USER_NAME` has been created.
 2. The Resource entry must include both resource ARNs, as one implies
    the bucket and the other implies the bucket's objects.
-3. When using [s3-no-check-bucket](#s3-no-check-bucket) and the bucket already exsits, the `"arn:aws:s3:::BUCKET_NAME"` doesn't have to be included.
+3. When using [s3-no-check-bucket](#s3-no-check-bucket) and the bucket already exists, the `"arn:aws:s3:::BUCKET_NAME"` doesn't have to be included.

 For reference, [here's an Ansible script](https://gist.github.com/ebridges/ebfc9042dd7c756cd101cfa807b7ae2b)
 that will generate one or more buckets that will work with `rclone sync`.
@@ -28658,7 +28740,7 @@ location_constraint = au-nsw

 ### Rclone Serve S3 {#rclone}

 Rclone can serve any remote over the S3 protocol. For details see the
-[rclone serve s3](https://rclone.org/commands/rclone_serve_http/) documentation.
+[rclone serve s3](https://rclone.org/commands/rclone_serve_s3/) documentation.

 For example, to serve `remote:path` over s3, run the server like this:
@@ -28678,8 +28760,8 @@ secret_access_key = SECRET_ACCESS_KEY
 use_multipart_uploads = false
 ```

-Note that setting `disable_multipart_uploads = true` is to work around
-[a bug](https://rclone.org/commands/rclone_serve_http/#bugs) which will be fixed in due course.
+Note that setting `use_multipart_uploads = false` is to work around
+[a bug](https://rclone.org/commands/rclone_serve_s3/#bugs) which will be fixed in due course.

 ### Scaleway
@@ -29775,27 +29857,49 @@ Option endpoint.
 Endpoint for Linode Object Storage API.
 Choose a number from below, or type in your own value.
 Press Enter to leave empty.
- 1 / Atlanta, GA (USA), us-southeast-1
+ 1 / Amsterdam (Netherlands), nl-ams-1
+   \ (nl-ams-1.linodeobjects.com)
+ 2 / Atlanta, GA (USA), us-southeast-1
    \ (us-southeast-1.linodeobjects.com)
- 2 / Chicago, IL (USA), us-ord-1
+ 3 / Chennai (India), in-maa-1
+   \ (in-maa-1.linodeobjects.com)
+ 4 / Chicago, IL (USA), us-ord-1
    \ (us-ord-1.linodeobjects.com)
- 3 / Frankfurt (Germany), eu-central-1
+ 5 / Frankfurt (Germany), eu-central-1
    \ (eu-central-1.linodeobjects.com)
- 4 / Milan (Italy), it-mil-1
+ 6 / Jakarta (Indonesia), id-cgk-1
+   \ (id-cgk-1.linodeobjects.com)
+ 7 / London 2 (Great Britain), gb-lon-1
+   \ (gb-lon-1.linodeobjects.com)
+ 8 / Los Angeles, CA (USA), us-lax-1
+   \ (us-lax-1.linodeobjects.com)
+ 9 / Madrid (Spain), es-mad-1
+   \ (es-mad-1.linodeobjects.com)
+10 / Melbourne (Australia), au-mel-1
+   \ (au-mel-1.linodeobjects.com)
+11 / Miami, FL (USA), us-mia-1
+   \ (us-mia-1.linodeobjects.com)
+12 / Milan (Italy), it-mil-1
    \ (it-mil-1.linodeobjects.com)
- 5 / Newark, NJ (USA), us-east-1
+13 / Newark, NJ (USA), us-east-1
    \ (us-east-1.linodeobjects.com)
- 6 / Paris (France), fr-par-1
+14 / Osaka (Japan), jp-osa-1
+   \ (jp-osa-1.linodeobjects.com)
+15 / Paris (France), fr-par-1
    \ (fr-par-1.linodeobjects.com)
- 7 / Seattle, WA (USA), us-sea-1
+16 / São Paulo (Brazil), br-gru-1
+   \ (br-gru-1.linodeobjects.com)
+17 / Seattle, WA (USA), us-sea-1
    \ (us-sea-1.linodeobjects.com)
- 8 / Singapore ap-south-1
+18 / Singapore, ap-south-1
    \ (ap-south-1.linodeobjects.com)
- 9 / Stockholm (Sweden), se-sto-1
+19 / Singapore 2, sg-sin-1
+   \ (sg-sin-1.linodeobjects.com)
+20 / Stockholm (Sweden), se-sto-1
    \ (se-sto-1.linodeobjects.com)
-10 / Washington, DC, (USA), us-iad-1
+21 / Washington, DC, (USA), us-iad-1
    \ (us-iad-1.linodeobjects.com)
-endpoint> 3
+endpoint> 5

 Option acl.
 Canned ACL used when creating buckets and storing or copying objects.
@@ -34415,7 +34519,7 @@ strong random number generator. The nonce is incremented for each
 chunk read making sure each nonce is unique for each block written.
 The chance of a nonce being reused is minuscule. If you wrote an
 exabyte of data (10¹⁸ bytes) you would have a probability of
-approximately 2×10⁻³² of re-using a nonce.
+approximately 2×10⁻³² of reusing a nonce.

 #### Chunk
@@ -41561,7 +41665,7 @@ Enter a value.
 config_2fa> 2FACODE
 Remote config
 --------------------
-[koofr]
+[iclouddrive]
 - type: iclouddrive
 - apple_id: APPLEID
 - password: *** ENCRYPTED ***
@@ -41578,6 +41682,20 @@ y/e/d> y

 ADP is currently unsupported and needs to be disabled

+On iPhone, Settings `>` Apple Account `>` iCloud `>` 'Access iCloud Data on the Web' must be ON, and 'Advanced Data Protection' OFF.
+
+## Troubleshooting
+
+### Missing PCS cookies from the request
+
+This means you have Advanced Data Protection (ADP) turned on. This is not supported at the moment. If you want to use rclone you will have to turn it off. See above for how to turn it off.
+
+You will need to clear the `cookies` and the `trust_token` fields in the config. Or you can delete the remote config and start again.
+
+You should then run `rclone reconnect remote:`.
+
+Note that changing the ADP setting may not take effect immediately - you may need to wait a few hours or a day before you can get rclone to work - keep clearing the config entry and running `rclone reconnect remote:` until rclone functions properly.
+
 ### Standard options
@@ -46035,7 +46153,7 @@ Properties:
     - "us"
         - Microsoft Cloud for US Government
     - "de"
-        - Microsoft Cloud Germany
+        - Microsoft Cloud Germany (deprecated - try global region first).
     - "cn"
        - Azure and Office 365 operated by Vnet Group in China
@@ -46652,6 +46770,28 @@ See the [metadata](https://rclone.org/docs/#metadata) docs for more info.

+### Impersonate other users as Admin
+
+Unlike Google Drive and impersonating any domain user via service accounts, OneDrive requires you to authenticate as an admin account, and manually set up a remote per user you wish to impersonate.
+
+1. In [Microsoft 365 Admin Center](https://admin.microsoft.com), open each user you need to "impersonate" and go to the OneDrive section. There is a heading called "Get access to files", you need to click to create the link, this creates the link of the format `https://{tenant}-my.sharepoint.com/personal/{user_name_domain_tld}/` but also changes the permissions so your admin user has access.
+2. Then in PowerShell run the following commands:
+```console
+Install-Module Microsoft.Graph -Scope CurrentUser -Repository PSGallery -Force
+Import-Module Microsoft.Graph.Files
+Connect-MgGraph -Scopes "Files.ReadWrite.All"
+# Follow the steps to allow access to your admin user
+# Then run this for each user you want to impersonate to get the Drive ID
+Get-MgUserDefaultDrive -UserId '{emailaddress}'
+# This will give you output of the format:
+# Name     Id        DriveType CreatedDateTime
+# ----     --        --------- ---------------
+# OneDrive b!XYZ123  business  14/10/2023 1:00:58 pm
+
+```
+3. Then in rclone add a onedrive remote type, and use the `Type in driveID` with the DriveID you got in the previous step. One remote per user. It will then confirm the drive ID, and hopefully give you a message of `Found drive "root" of type "business"` and then include the URL of the format `https://{tenant}-my.sharepoint.com/personal/{user_name_domain_tld}/Documents`
+
 ## Limitations

 If you don't use rclone for 90 days the refresh token will
@@ -56509,6 +56649,32 @@ Options:

 # Changelog

+## v1.69.1 - 2025-02-14
+
+[See commits](https://github.com/rclone/rclone/compare/v1.69.0...v1.69.1)
+
+* Bug Fixes
+    * lib/oauthutil: Fix redirect URL mismatch errors (Nick Craig-Wood)
+    * bisync: Fix listings missing concurrent modifications (nielash)
+    * serve s3: Fix list objects encoding-type (Nick Craig-Wood)
+    * fs: Fix confusing "didn't find section in config file" error (Nick Craig-Wood)
+    * doc fixes (Christoph Berger, Dimitri Papadopoulos, Matt Ickstadt, Nick Craig-Wood, Tim White, Zachary Vorhies)
+    * build: Added parallel docker builds and caching for go build in the container (Anagh Kumar Baranwal)
+* VFS
+    * Fix the cache failing to upload symlinks when `--links` was specified (Nick Craig-Wood)
+    * Fix race detected by race detector (Nick Craig-Wood)
+    * Close the change notify channel on Shutdown (izouxv)
+* B2
+    * Fix "fatal error: concurrent map writes" (Nick Craig-Wood)
+* Iclouddrive
+    * Add notes on ADP and Missing PCS cookies (Nick Craig-Wood)
+* Onedrive
+    * Mark German (de) region as deprecated (Nick Craig-Wood)
+* S3
+    * Added new storage class to magalu provider (Bruno Fernandes)
+    * Add DigitalOcean regions SFO2, LON1, TOR1, BLR1 (jkpe)
+    * Add latest Linode Object Storage endpoints (jbagwell-akamai)
+
 ## v1.69.0 - 2025-01-12

 [See commits](https://github.com/rclone/rclone/compare/v1.68.0...v1.69.0)
253  MANUAL.txt (generated)
@@ -1,6 +1,75 @@
 rclone(1) User Manual
 Nick Craig-Wood
-Jan 12, 2025
+Feb 14, 2025
+
+NAME
+
+rclone - manage files on cloud storage
+
+SYNOPSIS
+
+    Usage:
+      rclone [flags]
+      rclone [command]
+
+    Available commands:
+      about       Get quota information from the remote.
+      authorize   Remote authorization.
+      backend     Run a backend-specific command.
+      bisync      Perform bidirectional synchronization between two paths.
+      cat         Concatenates any files and sends them to stdout.
+      check       Checks the files in the source and destination match.
+      checksum    Checks the files in the destination against a SUM file.
+      cleanup     Clean up the remote if possible.
+      completion  Output completion script for a given shell.
+      config      Enter an interactive configuration session.
+      copy        Copy files from source to dest, skipping identical files.
+      copyto      Copy files from source to dest, skipping identical files.
+      copyurl     Copy the contents of the URL supplied content to dest:path.
+      cryptcheck  Cryptcheck checks the integrity of an encrypted remote.
+      cryptdecode Cryptdecode returns unencrypted file names.
+      dedupe      Interactively find duplicate filenames and delete/rename them.
+      delete      Remove the files in path.
+      deletefile  Remove a single file from remote.
+      gendocs     Output markdown docs for rclone to the directory supplied.
+      gitannex    Speaks with git-annex over stdin/stdout.
+      hashsum     Produces a hashsum file for all the objects in the path.
+      help        Show help for rclone commands, flags and backends.
+      link        Generate public link to file/folder.
+      listremotes List all the remotes in the config file and defined in environment variables.
+      ls          List the objects in the path with size and path.
+      lsd         List all directories/containers/buckets in the path.
+      lsf         List directories and objects in remote:path formatted for parsing.
+      lsjson      List directories and objects in the path in JSON format.
+      lsl         List the objects in path with modification time, size and path.
+      md5sum      Produces an md5sum file for all the objects in the path.
+      mkdir       Make the path if it doesn't already exist.
+      mount       Mount the remote as file system on a mountpoint.
+      move        Move files from source to dest.
+      moveto      Move file or directory from source to dest.
+      ncdu        Explore a remote with a text based user interface.
+      nfsmount    Mount the remote as file system on a mountpoint.
+      obscure     Obscure password for use in the rclone config file.
+      purge       Remove the path and all of its contents.
+      rc          Run a command against a running rclone.
+      rcat        Copies standard input to file on remote.
+      rcd         Run rclone listening to remote control commands only.
+      rmdir       Remove the empty directory at path.
+      rmdirs      Remove empty directories under the path.
+      selfupdate  Update the rclone binary.
+      serve       Serve a remote over a protocol.
+      settier     Changes storage class/tier of objects in remote.
+      sha1sum     Produces an sha1sum file for all the objects in the path.
+      size        Prints the total size and number of objects in remote:path.
+      sync        Make source and dest identical, modifying destination only.
+      test        Run a test command
+      touch       Create new file or change file modification time.
+      tree        List the contents of the remote in a tree like fashion.
+      version     Show the version number.
+
+    Use "rclone [command] --help" for more information about a command.
+    Use "rclone help flags" to see the global flags.
+    Use "rclone help backends" for a list of supported services.
+
 Rclone syncs your files to cloud storage
@@ -1600,6 +1669,10 @@ include/exclude filters - everything will be removed. Use the delete
 command if you want to selectively delete files. To delete empty
 directories only, use command rmdir or rmdirs.

+The concurrency of this operation is controlled by the --checkers global
+flag. However, some backends will implement this command directly, in
+which case --checkers will be ignored.
+
 Important: Since this can cause data loss, test first with the --dry-run
 or the --interactive/-i flag.
@@ -3467,12 +3540,12 @@ re-encrypt the config.
 When --password-command is called to change the password then the
 environment variable RCLONE_PASSWORD_CHANGE=1 will be set. So if
-changing passwords programatically you can use the environment variable
+changing passwords programmatically you can use the environment variable
 to distinguish which password you must supply.

 Alternatively you can remove the password first (with
 rclone config encryption remove), then set it again with this command
-which may be easier if you don't mind the unecrypted config file being
+which may be easier if you don't mind the unencrypted config file being
 on the disk briefly.

     rclone config encryption set [flags]
@@ -3949,7 +4022,7 @@ there is one with the same name.
 Setting --stdout or making the output file name - will cause the output
 to be written to standard output.

-Troublshooting
+Troubleshooting

 If you can't get rclone copyurl to work then here are some things you
 can try:
@@ -10102,7 +10175,7 @@ uses an on disk cache, but the cache entries are held as symlinks.
 Rclone will use the handle of the underlying file as the NFS handle
 which improves performance. This sort of cache can't be backed up and
 restored as the underlying handles will change. This is Linux only. It
-requres running rclone as root or with CAP_DAC_READ_SEARCH. You can run
+requires running rclone as root or with CAP_DAC_READ_SEARCH. You can run
 rclone with this extra permission by doing this to the rclone binary
 sudo setcap cap_dac_read_search+ep /path/to/rclone.
@@ -10903,8 +10976,8 @@ which is defined like this:
     secret_access_key = SECRET_ACCESS_KEY
     use_multipart_uploads = false

-Note that setting disable_multipart_uploads = true is to work around a
-bug which will be fixed in due course.
+Note that setting use_multipart_uploads = false is to work around a bug
+which will be fixed in due course.

 Bugs
@@ -13895,6 +13968,10 @@ also possible to specify --boolean=false or --boolean=true. Note that
 --boolean false is not valid - this is parsed as --boolean and the false
 is parsed as an extra command line argument for rclone.

+Options documented to take a stringArray parameter accept multiple
+values. To pass more than one value, repeat the option; for example:
+--include value1 --include value2.
+
 Time or duration options

 TIME or DURATION options can be specified as a duration string or a time
@@ -16177,7 +16254,7 @@ The options set by environment variables can be seen with the -vv flag,
 e.g. rclone version -vv.

 Options that can appear multiple times (type stringArray) are treated
-slighly differently as environment variables can only be defined once.
+slightly differently as environment variables can only be defined once.
 In order to allow a simple mechanism for adding one or many items, the
 input is treated as a CSV encoded string. For example
@@ -19420,7 +19497,7 @@ This is only useful if --vfs-cache-mode > off. If you call it when the
     ],
 }

-The expiry time is the time until the file is elegible for being
+The expiry time is the time until the file is eligible for being
 uploaded in floating point seconds. This may go negative. As rclone only
 transfers --transfers files at once, only the lowest --transfers expiry
 times will have uploading as true. So there may be files with negative
@@ -20569,7 +20646,7 @@ Flags for general networking and HTTP stuff.
       --tpslimit float                       Limit HTTP transactions per second to this
       --tpslimit-burst int                   Max burst of transactions for --tpslimit (default 1)
       --use-cookies                          Enable session cookiejar
-      --user-agent string                    Set the user-agent to a specified string (default "rclone/v1.69.0")
+      --user-agent string                    Set the user-agent to a specified string (default "rclone/v1.69.1")

 Performance
@@ -22531,7 +22608,7 @@ Also see the all files changed check.
 By using rclone filter features you can exclude file types or directory
 sub-trees from the sync. See the bisync filters section and generic
 --filter-from documentation. An example filters file contains filters
-for non-allowed files for synching with Dropbox.
+for non-allowed files for syncing with Dropbox.

 If you make changes to your filters file then bisync requires a run with
 --resync. This is a safety feature, which prevents existing files on the
@@ -22704,7 +22781,7 @@ of a sync. Using --check-sync=false will disable it and may
 significantly reduce the sync run times for very large numbers of files.

 The check may be run manually with --check-sync=only. It runs only the
-integrity check and terminates without actually synching.
+integrity check and terminates without actually syncing.

 Note that currently, --check-sync only checks listing snapshots and NOT
 the actual files on the remotes. Note also that the listing snapshots
@@ -23237,7 +23314,7 @@ supported.
 How to filter directories

 Filtering portions of the directory tree is a critical feature for
-synching.
+syncing.

 Examples of directory trees (always beneath the Path1/Path2 root level)
 you may want to exclude from your sync: - Directory trees containing
@@ -23348,7 +23425,7 @@ This noise can be quashed by adding --quiet to the bisync command line.

 Example exclude-style filters files for use with Dropbox

-- Dropbox disallows synching the listed temporary and
+- Dropbox disallows syncing the listed temporary and
   configuration/data files. The `- ` filters exclude these files
   wherever they may occur in the sync tree. Consider adding similar
   exclusions for file types you don't need to sync, such as core dump
@@ -23668,7 +23745,7 @@ dash.
 Running tests

 - go test . -case basic -remote local -remote2 local runs the
-  test_basic test case using only the local filesystem, synching one
+  test_basic test case using only the local filesystem, syncing one
   local directory with another local directory. Test script output is
   to the console, while commands within scenario.txt have their output
   sent to the .../workdir/test.log file, which is finally compared to
@@ -23901,6 +23978,11 @@ Unison and synchronization in general.

 Changelog

+v1.69.1
+
+- Fixed an issue causing listings to not capture concurrent
+  modifications under certain conditions
+
 v1.68

 - Fixed an issue affecting backends that round modtimes to a lower
@@ -25192,7 +25274,7 @@ Notes on above:
    that USER_NAME has been created.
 2. The Resource entry must include both resource ARNs, as one implies
    the bucket and the other implies the bucket's objects.
-3. When using s3-no-check-bucket and the bucket already exsits, the
+3. When using s3-no-check-bucket and the bucket already exists, the
    "arn:aws:s3:::BUCKET_NAME" doesn't have to be included.

 For reference, here's an Ansible script that will generate one or more
@@ -28155,8 +28237,8 @@ this:
     secret_access_key = SECRET_ACCESS_KEY
     use_multipart_uploads = false

-Note that setting disable_multipart_uploads = true is to work around a
-bug which will be fixed in due course.
+Note that setting use_multipart_uploads = false is to work around a bug
+which will be fixed in due course.

 Scaleway
@@ -29203,27 +29285,49 @@ This will guide you through an interactive setup process.
 Endpoint for Linode Object Storage API.
 Choose a number from below, or type in your own value.
 Press Enter to leave empty.
- 1 / Atlanta, GA (USA), us-southeast-1
+ 1 / Amsterdam (Netherlands), nl-ams-1
+   \ (nl-ams-1.linodeobjects.com)
+ 2 / Atlanta, GA (USA), us-southeast-1
    \ (us-southeast-1.linodeobjects.com)
- 2 / Chicago, IL (USA), us-ord-1
+ 3 / Chennai (India), in-maa-1
+   \ (in-maa-1.linodeobjects.com)
+ 4 / Chicago, IL (USA), us-ord-1
    \ (us-ord-1.linodeobjects.com)
- 3 / Frankfurt (Germany), eu-central-1
+ 5 / Frankfurt (Germany), eu-central-1
    \ (eu-central-1.linodeobjects.com)
- 4 / Milan (Italy), it-mil-1
+ 6 / Jakarta (Indonesia), id-cgk-1
+   \ (id-cgk-1.linodeobjects.com)
+ 7 / London 2 (Great Britain), gb-lon-1
+   \ (gb-lon-1.linodeobjects.com)
+ 8 / Los Angeles, CA (USA), us-lax-1
+   \ (us-lax-1.linodeobjects.com)
+ 9 / Madrid (Spain), es-mad-1
+   \ (es-mad-1.linodeobjects.com)
+10 / Melbourne (Australia), au-mel-1
+   \ (au-mel-1.linodeobjects.com)
+11 / Miami, FL (USA), us-mia-1
+   \ (us-mia-1.linodeobjects.com)
+12 / Milan (Italy), it-mil-1
    \ (it-mil-1.linodeobjects.com)
- 5 / Newark, NJ (USA), us-east-1
+13 / Newark, NJ (USA), us-east-1
    \ (us-east-1.linodeobjects.com)
- 6 / Paris (France), fr-par-1
+14 / Osaka (Japan), jp-osa-1
+   \ (jp-osa-1.linodeobjects.com)
+15 / Paris (France), fr-par-1
    \ (fr-par-1.linodeobjects.com)
- 7 / Seattle, WA (USA), us-sea-1
+16 / São Paulo (Brazil), br-gru-1
+   \ (br-gru-1.linodeobjects.com)
+17 / Seattle, WA (USA), us-sea-1
    \ (us-sea-1.linodeobjects.com)
- 8 / Singapore ap-south-1
+18 / Singapore, ap-south-1
    \ (ap-south-1.linodeobjects.com)
- 9 / Stockholm (Sweden), se-sto-1
+19 / Singapore 2, sg-sin-1
+   \ (sg-sin-1.linodeobjects.com)
+20 / Stockholm (Sweden), se-sto-1
    \ (se-sto-1.linodeobjects.com)
-10 / Washington, DC, (USA), us-iad-1
+21 / Washington, DC, (USA), us-iad-1
    \ (us-iad-1.linodeobjects.com)
-endpoint> 3
+endpoint> 5

 Option acl.
 Canned ACL used when creating buckets and storing or copying objects.
@@ -33757,7 +33861,7 @@ The initial nonce is generated from the operating systems crypto strong
random number generator. The nonce is incremented for each chunk read
making sure each nonce is unique for each block written. The chance of a
nonce being reused is minuscule. If you wrote an exabyte of data (10¹⁸
-bytes) you would have a probability of approximately 2×10⁻³² of re-using
+bytes) you would have a probability of approximately 2×10⁻³² of reusing
a nonce.

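As an illustration of the counter scheme just described, here is a
minimal Go sketch (not rclone's actual implementation) of a 24-byte
nonce used as a little-endian counter and incremented once per chunk:

    package main

    import (
        "crypto/rand"
        "fmt"
    )

    type nonce [24]byte

    // increment adds one to the nonce, carrying into the higher bytes,
    // so each chunk gets a nonce never used before under this key.
    func (n *nonce) increment() {
        for i := range n {
            n[i]++
            if n[i] != 0 {
                return // no carry needed
            }
        }
    }

    func main() {
        var n nonce
        // initial nonce from the OS crypto strong random number generator
        if _, err := rand.Read(n[:]); err != nil {
            panic(err)
        }
        fmt.Printf("chunk 0 nonce: %x\n", n)
        n.increment()
        fmt.Printf("chunk 1 nonce: %x\n", n)
    }
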
Chunk

@@ -40978,7 +41082,7 @@ This will guide you through an interactive setup process:
config_2fa> 2FACODE
Remote config
--------------------
-[koofr]
+[iclouddrive]
- type: iclouddrive
- apple_id: APPLEID
- password: *** ENCRYPTED ***
@@ -40994,6 +41098,27 @@ Advanced Data Protection

ADP is currently unsupported and needs to be disabled

On iPhone, Settings > Apple Account > iCloud > 'Access iCloud Data on
the Web' must be ON, and 'Advanced Data Protection' OFF.

Troubleshooting

Missing PCS cookies from the request

This means you have Advanced Data Protection (ADP) turned on. This is
not supported at the moment. If you want to use rclone you will have to
turn it off. See above for how to turn it off.

You will need to clear the cookies and the trust_token fields in the
config. Or you can delete the remote config and start again.

You should then run rclone reconnect remote:.

Note that changing the ADP setting may not take effect immediately - you
may need to wait a few hours or a day before you can get rclone to work
- keep clearing the config entry and running rclone reconnect remote:
until rclone functions properly.

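For example, a minimal sketch of the recovery steps above (the remote
name "icloud" here is an assumption - use your own remote name):

    rclone config file      # locate the config file
    # edit the file and clear the cookies and trust_token values in
    # the [icloud] section, then re-authenticate:
    rclone reconnect icloud:
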
Standard options

Here are the Standard options specific to iclouddrive (iCloud Drive).
@@ -45589,7 +45714,8 @@ Properties:
    - "us"
        - Microsoft Cloud for US Government
    - "de"
-       - Microsoft Cloud Germany
+       - Microsoft Cloud Germany (deprecated - try global region
+         first).
    - "cn"
        - Azure and Office 365 operated by Vnet Group in China

@@ -46248,6 +46374,38 @@ Here are the possible system metadata items for the onedrive backend.

See the metadata docs for more info.

Impersonate other users as Admin

Unlike Google Drive and impersonating any domain user via service
accounts, OneDrive requires you to authenticate as an admin account, and
manually set up a remote per user you wish to impersonate.

1.  In Microsoft 365 Admin Center, open each user you need to
    "impersonate" and go to the OneDrive section. There is a heading
    called "Get access to files"; you need to click to create the link.
    This creates a link of the format
    https://{tenant}-my.sharepoint.com/personal/{user_name_domain_tld}/
    but also changes the permissions so that your admin user has access.
2.  Then in PowerShell run the following commands:

    Install-Module Microsoft.Graph -Scope CurrentUser -Repository PSGallery -Force
    Import-Module Microsoft.Graph.Files
    Connect-MgGraph -Scopes "Files.ReadWrite.All"
    # Follow the steps to allow access to your admin user
    # Then run this for each user you want to impersonate to get the Drive ID
    Get-MgUserDefaultDrive -UserId '{emailaddress}'
    # This will give you output of the format:
    # Name     Id       DriveType CreatedDateTime
    # ----     --       --------- ---------------
    # OneDrive b!XYZ123 business  14/10/2023 1:00:58 pm

3.  Then in rclone add a onedrive remote and, at the config step that
    asks for a drive, choose "Type in driveID" and enter the DriveID
    you got in the previous step - one remote per user (a sketch of the
    resulting config is shown below). It will then confirm the drive
    ID, and hopefully give you a message of Found drive "root" of type
    "business" and then include the URL of the format
    https://{tenant}-my.sharepoint.com/personal/{user_name_domain_tld}/Documents

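As an illustration, the resulting per-user remote might look like this
in the rclone config file (the remote name and drive ID here are
placeholders, not values from your tenant):

    [onedrive-user1]
    type = onedrive
    drive_id = b!XYZ123
    drive_type = business
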
Limitations

If you don't use rclone for 90 days the refresh token will expire. This
@@ -56157,6 +56315,37 @@ Options:

Changelog

v1.69.1 - 2025-02-14

See commits

- Bug Fixes
    - lib/oauthutil: Fix redirect URL mismatch errors (Nick Craig-Wood)
    - bisync: Fix listings missing concurrent modifications (nielash)
    - serve s3: Fix list objects encoding-type (Nick Craig-Wood)
    - fs: Fix confusing "didn't find section in config file" error
      (Nick Craig-Wood)
    - doc fixes (Christoph Berger, Dimitri Papadopoulos, Matt Ickstadt,
      Nick Craig-Wood, Tim White, Zachary Vorhies)
    - build: Added parallel docker builds and caching for go build in
      the container (Anagh Kumar Baranwal)
- VFS
    - Fix the cache failing to upload symlinks when --links was
      specified (Nick Craig-Wood)
    - Fix race detected by race detector (Nick Craig-Wood)
    - Close the change notify channel on Shutdown (izouxv)
- B2
    - Fix "fatal error: concurrent map writes" (Nick Craig-Wood)
- Iclouddrive
    - Add notes on ADP and Missing PCS cookies (Nick Craig-Wood)
- Onedrive
    - Mark German (de) region as deprecated (Nick Craig-Wood)
- S3
    - Added new storage class to magalu provider (Bruno Fernandes)
    - Add DigitalOcean regions SFO2, LON1, TOR1, BLR1 (jkpe)
    - Add latest Linode Object Storage endpoints (jbagwell-akamai)

v1.69.0 - 2025-01-12

See commits

@@ -899,7 +899,7 @@ func (o *Object) getMetadata(ctx context.Context) error {

// Hash returns the MD5 of an object returning a lowercase hex string
//
-// May make a network request becaue the [fs.List] method does not
+// May make a network request because the [fs.List] method does not
// return MD5 hashes for DirEntry
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
	if ty != hash.MD5 {

@@ -299,14 +299,13 @@ type Fs struct {

// Object describes a b2 object
type Object struct {
-	fs       *Fs               // what this object is part of
-	remote   string            // The remote path
-	id       string            // b2 id of the file
-	modTime  time.Time         // The modified time of the object if known
-	sha1     string            // SHA-1 hash if known
-	size     int64             // Size of the object
-	mimeType string            // Content-Type of the object
-	meta     map[string]string // The object metadata if known - may be nil - with lower case keys
+	fs       *Fs       // what this object is part of
+	remote   string    // The remote path
+	id       string    // b2 id of the file
+	modTime  time.Time // The modified time of the object if known
+	sha1     string    // SHA-1 hash if known
+	size     int64     // Size of the object
+	mimeType string    // Content-Type of the object
}

// ------------------------------------------------------------
@@ -1598,9 +1597,6 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
	if err != nil {
		return err
	}
-	// For now, just set "mtime" in metadata
-	o.meta = make(map[string]string, 1)
-	o.meta["mtime"] = o.modTime.Format(time.RFC3339Nano)
	return nil
}

@@ -1880,13 +1876,6 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
		Info: Info,
	}

-	// Embryonic metadata support - just mtime
-	o.meta = make(map[string]string, 1)
-	modTime, err := parseTimeStringHelper(info.Info[timeKey])
-	if err == nil {
-		o.meta["mtime"] = modTime.Format(time.RFC3339Nano)
-	}
-
	// When reading files from B2 via cloudflare using
	// --b2-download-url cloudflare strips the Content-Length
	// headers (presumably so it can inject stuff) so use the old

@@ -256,12 +256,6 @@ func (f *Fs) internalTestMetadata(t *testing.T, size string, uploadCutoff string
		assert.Equal(t, v, got, k)
	}

-	// mtime
-	for k, v := range metadata {
-		got := o.meta[k]
-		assert.Equal(t, v, got, k)
-	}
-
	assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")

	// Modification time from the x-bz-info-src_last_modified_millis header

@@ -2480,7 +2480,7 @@ func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte)
	if len(data) > maxMetadataSizeWritten {
		return nil, false, ErrMetaTooBig
	}
-	if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
+	if len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
		return nil, false, errors.New("invalid json")
	}
	var metadata metaSimpleJSON

@@ -203,7 +203,6 @@ func driveScopesContainsAppFolder(scopes []string) bool {
		if scope == scopePrefix+"drive.appfolder" {
			return true
		}
-
	}
	return false
}
@@ -1212,6 +1211,7 @@ func fixMimeType(mimeTypeIn string) string {
	}
	return mimeTypeOut
}

func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
	out = make(map[string][]string, len(in))
	for k, v := range in {
@@ -1222,9 +1222,11 @@ func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
	}
	return out
}

func isInternalMimeType(mimeType string) bool {
	return strings.HasPrefix(mimeType, "application/vnd.google-apps.")
}

func isLinkMimeType(mimeType string) bool {
	return strings.HasPrefix(mimeType, "application/x-link-")
}
@@ -1657,7 +1659,8 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.F
// When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithExportInfo(
	ctx context.Context, remote string, info *drive.File,
-	extension, exportName, exportMimeType string, isDocument bool) (o fs.Object, err error) {
+	extension, exportName, exportMimeType string, isDocument bool,
+) (o fs.Object, err error) {
	// Note that resolveShortcut will have been called already if
	// we are being called from a listing. However the drive.Item
	// will have been resolved so this will do nothing.
@@ -1848,6 +1851,7 @@ func linkTemplate(mt string) *template.Template {
	})
	return _linkTemplates[mt]
}

func (f *Fs) fetchFormats(ctx context.Context) {
	fetchFormatsOnce.Do(func() {
		var about *drive.About
@@ -1893,7 +1897,8 @@ func (f *Fs) importFormats(ctx context.Context) map[string][]string {
// Look through the exportExtensions and find the first format that can be
// converted. If none found then return ("", "", false)
func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string) (
-	extension, mimeType string, isDocument bool) {
+	extension, mimeType string, isDocument bool,
+) {
	exportMimeTypes, isDocument := f.exportFormats(ctx)[itemMimeType]
	if isDocument {
		for _, _extension := range f.exportExtensions {
@@ -2689,7 +2694,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
	if shortcutID != "" {
		return f.delete(ctx, shortcutID, f.opt.UseTrash)
	}
-	var trashedFiles = false
+	trashedFiles := false
	if check {
		found, err := f.list(ctx, []string{directoryID}, "", false, false, f.opt.TrashedOnly, true, func(item *drive.File) bool {
			if !item.Trashed {
@@ -2926,7 +2931,6 @@ func (f *Fs) CleanUp(ctx context.Context) error {
		err := f.svc.Files.EmptyTrash().Context(ctx).Do()
		return f.shouldRetry(ctx, err)
	})
-
	if err != nil {
		return err
	}
@@ -3187,6 +3191,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
		}
	}()
}

func (f *Fs) changeNotifyStartPageToken(ctx context.Context) (pageToken string, err error) {
	var startPageToken *drive.StartPageToken
	err = f.pacer.Call(func() (bool, error) {
@@ -3990,14 +3995,13 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
	case "query":
		if len(arg) == 1 {
			query := arg[0]
-			var results, err = f.query(ctx, query)
+			results, err := f.query(ctx, query)
			if err != nil {
				return nil, fmt.Errorf("failed to execute query: %q, error: %w", query, err)
			}
			return results, nil
-		} else {
-			return nil, errors.New("need a query argument")
		}
+		return nil, errors.New("need a query argument")
	case "rescue":
		dirID := ""
		_, delete := opt["delete"]
@@ -4057,6 +4061,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	}
	return "", hash.ErrUnsupported
}

func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 && t != hash.SHA1 && t != hash.SHA256 {
		return "", hash.ErrUnsupported
@@ -4071,7 +4076,8 @@ func (o *baseObject) Size() int64 {

// getRemoteInfoWithExport returns a drive.File and the export settings for the remote
func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
-	info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) {
+	info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error,
+) {
	leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
	if err != nil {
		if err == fs.ErrorDirNotFound {
@@ -4284,12 +4290,13 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
	}
	return o.baseObject.open(ctx, o.url, options...)
}

func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	// Update the size with what we are reading as it can change from
	// the HEAD in the listing to this GET. This stops rclone marking
	// the transfer as corrupted.
	var offset, end int64 = 0, -1
-	var newOptions = options[:0]
+	newOptions := options[:0]
	for _, o := range options {
		// Note that Range requests don't work on Google docs:
		// https://developers.google.com/drive/v3/web/manage-downloads#partial_download
@@ -4316,9 +4323,10 @@ func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in
	}
	return
}

func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	var offset, limit int64 = 0, -1
-	var data = o.content
+	data := o.content
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
@@ -4343,7 +4351,8 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
}

func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
-	src fs.ObjectInfo) (info *drive.File, err error) {
+	src fs.ObjectInfo,
+) (info *drive.File, err error) {
	// Make the API request to upload metadata and file data.
	size := src.Size()
	if size >= 0 && size < int64(o.fs.opt.UploadCutoff) {
@@ -4421,6 +4430,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

	return nil
}

func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	srcMimeType := fs.MimeType(ctx, src)
	importMimeType := ""
@@ -4516,6 +4526,7 @@ func (o *baseObject) Metadata(ctx context.Context) (metadata fs.Metadata, err er
func (o *documentObject) ext() string {
	return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
}

func (o *linkObject) ext() string {
	return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
}

@@ -2,6 +2,7 @@ package googlephotos

import (
	"context"
+	"errors"
	"fmt"
	"io"
	"net/http"
@@ -35,7 +36,7 @@ func TestIntegration(t *testing.T) {
		*fstest.RemoteName = "TestGooglePhotos:"
	}
	f, err := fs.NewFs(ctx, *fstest.RemoteName)
-	if err == fs.ErrorNotFoundInConfigFile {
+	if errors.Is(err, fs.ErrorNotFoundInConfigFile) {
		t.Skipf("Couldn't create google photos backend - skipping tests: %v", err)
	}
	require.NoError(t, err)

@@ -445,7 +445,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
	}

	// build request
-	// cant use normal rename as file needs to be "activated" first
+	// can't use normal rename as file needs to be "activated" first

	r := api.NewUpdateFileInfo()
	r.DocumentID = doc.DocumentID

@@ -75,7 +75,7 @@ type MoveFolderParam struct {
	DestinationPath string `validate:"nonzero" json:"destinationPath"`
}

-// JobIDResponse respresents response struct with JobID for folder operations
+// JobIDResponse represents response struct with JobID for folder operations
type JobIDResponse struct {
	JobID string `json:"jobId"`
}

@@ -131,7 +131,7 @@ func init() {
			Help: "Microsoft Cloud for US Government",
		}, {
			Value: regionDE,
-			Help:  "Microsoft Cloud Germany",
+			Help:  "Microsoft Cloud Germany (deprecated - try " + regionGlobal + " region first).",
		}, {
			Value: regionCN,
			Help:  "Azure and Office 365 operated by Vnet Group in China",

@@ -424,7 +424,7 @@ func (f *Fs) newSingleConnClient(ctx context.Context) (*rest.Client, error) {
	})
	// Set our own http client in the context
	ctx = oauthutil.Context(ctx, baseClient)
-	// create a new oauth client, re-use the token source
+	// create a new oauth client, reuse the token source
	oAuthClient := oauth2.NewClient(ctx, f.ts)
	return rest.NewClient(oAuthClient).SetRoot("https://" + f.opt.Hostname), nil
}

@@ -934,34 +934,67 @@ func init() {
			Help: "The default endpoint\nIran",
		}},
	}, {
-		// Linode endpoints: https://www.linode.com/docs/products/storage/object-storage/guides/urls/#cluster-url-s3-endpoint
+		// Linode endpoints: https://techdocs.akamai.com/cloud-computing/docs/object-storage-product-limits#supported-endpoint-types-by-region
		Name:     "endpoint",
		Help:     "Endpoint for Linode Object Storage API.",
		Provider: "Linode",
		Examples: []fs.OptionExample{{
			Value: "nl-ams-1.linodeobjects.com",
			Help:  "Amsterdam (Netherlands), nl-ams-1",
		}, {
			Value: "us-southeast-1.linodeobjects.com",
			Help:  "Atlanta, GA (USA), us-southeast-1",
		}, {
			Value: "in-maa-1.linodeobjects.com",
			Help:  "Chennai (India), in-maa-1",
		}, {
			Value: "us-ord-1.linodeobjects.com",
			Help:  "Chicago, IL (USA), us-ord-1",
		}, {
			Value: "eu-central-1.linodeobjects.com",
			Help:  "Frankfurt (Germany), eu-central-1",
		}, {
			Value: "id-cgk-1.linodeobjects.com",
			Help:  "Jakarta (Indonesia), id-cgk-1",
		}, {
			Value: "gb-lon-1.linodeobjects.com",
			Help:  "London 2 (Great Britain), gb-lon-1",
		}, {
			Value: "us-lax-1.linodeobjects.com",
			Help:  "Los Angeles, CA (USA), us-lax-1",
		}, {
			Value: "es-mad-1.linodeobjects.com",
			Help:  "Madrid (Spain), es-mad-1",
		}, {
			Value: "au-mel-1.linodeobjects.com",
			Help:  "Melbourne (Australia), au-mel-1",
		}, {
			Value: "us-mia-1.linodeobjects.com",
			Help:  "Miami, FL (USA), us-mia-1",
		}, {
			Value: "it-mil-1.linodeobjects.com",
			Help:  "Milan (Italy), it-mil-1",
		}, {
			Value: "us-east-1.linodeobjects.com",
			Help:  "Newark, NJ (USA), us-east-1",
		}, {
			Value: "jp-osa-1.linodeobjects.com",
			Help:  "Osaka (Japan), jp-osa-1",
		}, {
			Value: "fr-par-1.linodeobjects.com",
			Help:  "Paris (France), fr-par-1",
		}, {
			Value: "br-gru-1.linodeobjects.com",
			Help:  "São Paulo (Brazil), br-gru-1",
		}, {
			Value: "us-sea-1.linodeobjects.com",
			Help:  "Seattle, WA (USA), us-sea-1",
		}, {
			Value: "ap-south-1.linodeobjects.com",
-			Help:  "Singapore ap-south-1",
+			Help:  "Singapore, ap-south-1",
		}, {
			Value: "sg-sin-1.linodeobjects.com",
			Help:  "Singapore 2, sg-sin-1",
		}, {
			Value: "se-sto-1.linodeobjects.com",
			Help:  "Stockholm (Sweden), se-sto-1",
@@ -1343,7 +1376,7 @@ func init() {
	}, {
		Name: "endpoint",
		Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
-		Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,MagaluCloud,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
+		Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,Magalu,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
		Examples: []fs.OptionExample{{
			Value: "objects-us-east-1.dream.io",
			Help:  "Dream Objects endpoint",
@@ -1356,6 +1389,10 @@ func init() {
			Value:    "sfo3.digitaloceanspaces.com",
			Help:     "DigitalOcean Spaces San Francisco 3",
			Provider: "DigitalOcean",
		}, {
			Value:    "sfo2.digitaloceanspaces.com",
			Help:     "DigitalOcean Spaces San Francisco 2",
			Provider: "DigitalOcean",
		}, {
			Value:    "fra1.digitaloceanspaces.com",
			Help:     "DigitalOcean Spaces Frankfurt 1",
@@ -1372,6 +1409,18 @@ func init() {
			Value:    "sgp1.digitaloceanspaces.com",
			Help:     "DigitalOcean Spaces Singapore 1",
			Provider: "DigitalOcean",
		}, {
			Value:    "lon1.digitaloceanspaces.com",
			Help:     "DigitalOcean Spaces London 1",
			Provider: "DigitalOcean",
		}, {
			Value:    "tor1.digitaloceanspaces.com",
			Help:     "DigitalOcean Spaces Toronto 1",
			Provider: "DigitalOcean",
		}, {
			Value:    "blr1.digitaloceanspaces.com",
			Help:     "DigitalOcean Spaces Bangalore 1",
			Provider: "DigitalOcean",
		}, {
			Value:    "localhost:8333",
			Help:     "SeaweedFS S3 localhost",
@@ -1476,14 +1525,6 @@ func init() {
			Value:    "s3.ir-tbz-sh1.arvanstorage.ir",
			Help:     "ArvanCloud Tabriz Iran (Shahriar) endpoint",
			Provider: "ArvanCloud",
-		}, {
-			Value:    "br-se1.magaluobjects.com",
-			Help:     "Magalu BR Southeast 1 endpoint",
-			Provider: "Magalu",
-		}, {
-			Value:    "br-ne1.magaluobjects.com",
-			Help:     "Magalu BR Northeast 1 endpoint",
-			Provider: "Magalu",
		}},
	}, {
		Name: "location_constraint",
@@ -2122,13 +2163,16 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
			Help: "Standard storage class",
		}},
	}, {
-		// Mapping from here: #todo
+		// Mapping from here: https://docs.magalu.cloud/docs/storage/object-storage/Classes-de-Armazenamento/standard
		Name:     "storage_class",
		Help:     "The storage class to use when storing new objects in Magalu.",
		Provider: "Magalu",
		Examples: []fs.OptionExample{{
			Value: "STANDARD",
			Help:  "Standard storage class",
		}, {
			Value: "GLACIER_IR",
			Help:  "Glacier Instant Retrieval storage class",
		}},
	}, {
		// Mapping from here: https://intl.cloud.tencent.com/document/product/436/30925
@@ -3344,7 +3388,7 @@ func setQuirks(opt *Options) {
	listObjectsV2 = true     // Always use ListObjectsV2 instead of ListObjects
	virtualHostStyle = true  // Use bucket.provider.com instead of putting the bucket in the URL
	urlEncodeListings = true // URL encode the listings to help with control characters
-	useMultipartEtag = true  // Set if Etags for multpart uploads are compatible with AWS
+	useMultipartEtag = true  // Set if Etags for multipart uploads are compatible with AWS
	useAcceptEncodingGzip = true // Set Accept-Encoding: gzip
	mightGzip = true             // assume all providers might use content encoding gzip until proven otherwise
	useAlreadyExists = true      // Set if provider returns AlreadyOwnedByYou or no error if you try to remake your own bucket
@@ -6057,7 +6101,7 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
		if mOut == nil {
			err = fserrors.RetryErrorf("internal error: no info from multipart upload")
		} else if mOut.UploadId == nil {
-			err = fserrors.RetryErrorf("internal error: no UploadId in multpart upload: %#v", *mOut)
+			err = fserrors.RetryErrorf("internal error: no UploadId in multipart upload: %#v", *mOut)
		}
	}
	return f.shouldRetry(ctx, err)

@@ -120,7 +120,7 @@ func init() {
	srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(rootURL) // FIXME

	// FIXME
-	//err = f.pacer.Call(func() (bool, error) {
+	// err = f.pacer.Call(func() (bool, error) {
	resp, err = srv.CallXML(context.Background(), &opts, &authRequest, nil)
	// return shouldRetry(ctx, resp, err)
	//})
@@ -327,7 +327,7 @@ func (f *Fs) readMetaDataForID(ctx context.Context, ID string) (info *api.File,
func (f *Fs) getAuthToken(ctx context.Context) error {
	fs.Debugf(f, "Renewing token")

-	var authRequest = api.TokenAuthRequest{
+	authRequest := api.TokenAuthRequest{
		AccessKeyID:      withDefault(f.opt.AccessKeyID, accessKeyID),
		PrivateAccessKey: withDefault(f.opt.PrivateAccessKey, obscure.MustReveal(encryptedPrivateAccessKey)),
		RefreshToken:     f.opt.RefreshToken,
@@ -509,7 +509,7 @@ func errorHandler(resp *http.Response) (err error) {
		return fmt.Errorf("error reading error out of body: %w", err)
	}
	match := findError.FindSubmatch(body)
-	if match == nil || len(match) < 2 || len(match[1]) == 0 {
+	if len(match) < 2 || len(match[1]) == 0 {
		return fmt.Errorf("HTTP error %v (%v) returned body: %q", resp.StatusCode, resp.Status, body)
	}
	return fmt.Errorf("HTTP error %v (%v): %s", resp.StatusCode, resp.Status, match[1])
@@ -552,7 +552,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {

// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
-	//fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
+	// fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
	// Find the leaf in pathID
	found, err = f.listAll(ctx, pathID, nil, func(item *api.Collection) bool {
		if strings.EqualFold(item.Name, leaf) {

@@ -7,6 +7,7 @@ conversion into man pages etc.
import os
import re
import time
+import subprocess
from datetime import datetime

docpath = "docs/content"

@@ -192,13 +193,23 @@ def main():
    command_docs = read_commands(docpath).replace("\\", "\\\\") # escape \ so we can use command_docs in re.sub
    build_date = datetime.utcfromtimestamp(
        int(os.environ.get('SOURCE_DATE_EPOCH', time.time())))
+    help_output = subprocess.check_output(["rclone", "help"]).decode("utf-8")
    with open(outfile, "w") as out:
        out.write("""\
%% rclone(1) User Manual
%% Nick Craig-Wood
%% %s

-""" % build_date.strftime("%b %d, %Y"))
+# NAME
+
+rclone - manage files on cloud storage
+
+# SYNOPSIS
+
+```
+%s
+```
+""" % (build_date.strftime("%b %d, %Y"), help_output))
    for doc in docs:
        contents = read_doc(doc)
        # Substitute the commands into doc.md

@@ -29,7 +29,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
	cmd := exec.Command("git", "log", "--oneline", from+".."+to)
	out, err := cmd.Output()
	if err != nil {
-		log.Fatalf("failed to run git log %s: %v", from+".."+to, err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
+		log.Fatalf("failed to run git log %s: %v", from+".."+to, err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
	}
	logMap = map[string]string{}
	logs = []string{}
@@ -39,7 +39,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
	}
	match := logRe.FindSubmatch(line)
	if match == nil {
-		log.Fatalf("failed to parse line: %q", line) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
+		log.Fatalf("failed to parse line: %q", line) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
	}
	var hash, logMessage = string(match[1]), string(match[2])
	logMap[logMessage] = hash
@@ -52,12 +52,12 @@ func main() {
	flag.Parse()
	args := flag.Args()
	if len(args) != 0 {
-		log.Fatalf("Syntax: %s", os.Args[0]) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
+		log.Fatalf("Syntax: %s", os.Args[0]) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
	}
	// v1.54.0
	versionBytes, err := os.ReadFile("VERSION")
	if err != nil {
-		log.Fatalf("Failed to read version: %v", err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
+		log.Fatalf("Failed to read version: %v", err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
	}
	if versionBytes[0] == 'v' {
		versionBytes = versionBytes[1:]
@@ -65,7 +65,7 @@ func main() {
	versionBytes = bytes.TrimSpace(versionBytes)
	semver := semver.New(string(versionBytes))
	stable := fmt.Sprintf("v%d.%d", semver.Major, semver.Minor-1)
-	log.Printf("Finding commits in %v not in stable %s", semver, stable) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
+	log.Printf("Finding commits in %v not in stable %s", semver, stable) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
	masterMap, masterLogs := readCommits(stable+".0", "master")
	stableMap, _ := readCommits(stable+".0", stable+"-stable")
	for _, logMessage := range masterLogs {

@@ -746,6 +746,16 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
	case "test-func":
		b.TestFn = testFunc
		return
	case "concurrent-func":
		b.TestFn = func() {
			src := filepath.Join(b.dataDir, "file7.txt")
			dst := "file1.txt"
			err := b.copyFile(ctx, src, b.replaceHex(b.path2), dst)
			if err != nil {
				fs.Errorf(src, "error copying file: %v", err)
			}
		}
		return
	case "fix-names":
		// in case the local os converted any filenames
		ci.NoUnicodeNormalization = true
@@ -871,10 +881,9 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
		if !ok || err != nil {
			fs.Logf(remotePath, "Can't find expected file %s (was it renamed by the os?) %v", args[1], err)
			return
-		} else {
-			// include hash of filename to make unicode form differences easier to see in logs
-			fs.Debugf(remotePath, "verified file exists at correct path. filename hash: %s", stringToHash(leaf))
		}
+		// include hash of filename to make unicode form differences easier to see in logs
+		fs.Debugf(remotePath, "verified file exists at correct path. filename hash: %s", stringToHash(leaf))
		return
	default:
		return fmt.Errorf("unknown command: %q", args[0])

@@ -218,7 +218,7 @@ func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error {
	if b.opt.CompareFlag == "" {
		return nil
	}
-	var CompareFlag CompareOpt // for exlcusions
+	var CompareFlag CompareOpt // for exclusions
	opts := strings.Split(b.opt.CompareFlag, ",")
	for _, opt := range opts {
		switch strings.ToLower(strings.TrimSpace(opt)) {

@@ -161,9 +161,7 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string,
		return
	}

-	if err == nil {
-		err = b.checkListing(now, newListing, "current "+msg)
-	}
+	err = b.checkListing(now, newListing, "current "+msg)
	if err != nil {
		return
	}
@@ -286,7 +284,7 @@
}

// applyDeltas
-func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (changes1, changes2 bool, results2to1, results1to2 []Results, queues queues, err error) {
+func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (results2to1, results1to2 []Results, queues queues, err error) {
	path1 := bilib.FsPath(b.fs1)
	path2 := bilib.FsPath(b.fs2)

@@ -367,7 +365,7 @@
		}
	}

-	//if there are potential conflicts to check, check them all here (outside the loop) in one fell swoop
+	// if there are potential conflicts to check, check them all here (outside the loop) in one fell swoop
	matches, err := b.checkconflicts(ctxCheck, filterCheck, b.fs1, b.fs2)

	for _, file := range ds1.sort() {
@@ -392,7 +390,7 @@
		} else if d2.is(deltaOther) {
			b.indent("!WARNING", file, "New or changed in both paths")

-			//if files are identical, leave them alone instead of renaming
+			// if files are identical, leave them alone instead of renaming
			if (dirs1.has(file) || dirs1.has(alias)) && (dirs2.has(file) || dirs2.has(alias)) {
				fs.Infof(nil, "This is a directory, not a file. Skipping equality check and will not rename: %s", file)
				ls1.getPut(file, skippedDirs1)
@@ -486,7 +484,6 @@

	// Do the batch operation
	if copy2to1.NotEmpty() && !b.InGracefulShutdown {
-		changes1 = true
		b.indent("Path2", "Path1", "Do queued copies to")
		ctx = b.setBackupDir(ctx, 1)
		results2to1, err = b.fastCopy(ctx, b.fs2, b.fs1, copy2to1, "copy2to1")
@@ -498,12 +495,11 @@
			return
		}

-		//copy empty dirs from path2 to path1 (if --create-empty-src-dirs)
+		// copy empty dirs from path2 to path1 (if --create-empty-src-dirs)
		b.syncEmptyDirs(ctx, b.fs1, copy2to1, dirs2, &results2to1, "make")
	}

	if copy1to2.NotEmpty() && !b.InGracefulShutdown {
-		changes2 = true
		b.indent("Path1", "Path2", "Do queued copies to")
		ctx = b.setBackupDir(ctx, 2)
		results1to2, err = b.fastCopy(ctx, b.fs1, b.fs2, copy1to2, "copy1to2")
@@ -515,7 +511,7 @@
			return
		}

-		//copy empty dirs from path1 to path2 (if --create-empty-src-dirs)
+		// copy empty dirs from path1 to path2 (if --create-empty-src-dirs)
		b.syncEmptyDirs(ctx, b.fs2, copy1to2, dirs1, &results1to2, "make")
	}

@@ -523,7 +519,7 @@
		if err = b.saveQueue(delete1, "delete1"); err != nil {
			return
		}
-		//propagate deletions of empty dirs from path2 to path1 (if --create-empty-src-dirs)
+		// propagate deletions of empty dirs from path2 to path1 (if --create-empty-src-dirs)
		b.syncEmptyDirs(ctx, b.fs1, delete1, dirs1, &results2to1, "remove")
	}

@@ -531,7 +527,7 @@
		if err = b.saveQueue(delete2, "delete2"); err != nil {
			return
		}
-		//propagate deletions of empty dirs from path1 to path2 (if --create-empty-src-dirs)
+		// propagate deletions of empty dirs from path1 to path2 (if --create-empty-src-dirs)
		b.syncEmptyDirs(ctx, b.fs2, delete2, dirs2, &results1to2, "remove")
	}

@@ -394,7 +394,7 @@ func parseHash(str string) (string, string, error) {
	return "", "", fmt.Errorf("invalid hash %q", str)
}

-// checkListing verifies that listing is not empty (unless resynching)
+// checkListing verifies that listing is not empty (unless resyncing)
func (b *bisyncRun) checkListing(ls *fileList, listing, msg string) error {
	if b.opt.Resync || !ls.empty() {
		return nil

@@ -359,8 +359,6 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {

	// Determine and apply changes to Path1 and Path2
	noChanges := ds1.empty() && ds2.empty()
-	changes1 := false // 2to1
-	changes2 := false // 1to2
	results2to1 := []Results{}
	results1to2 := []Results{}

@@ -370,7 +368,7 @@
		fs.Infof(nil, "No changes found")
	} else {
		fs.Infof(nil, "Applying changes")
-		changes1, changes2, results2to1, results1to2, queues, err = b.applyDeltas(octx, ds1, ds2)
+		results2to1, results1to2, queues, err = b.applyDeltas(octx, ds1, ds2)
		if err != nil {
			if b.InGracefulShutdown && (err == context.Canceled || err == accounting.ErrorMaxTransferLimitReachedGraceful || strings.Contains(err.Error(), "context canceled")) {
				fs.Infof(nil, "Ignoring sync error due to Graceful Shutdown: %v", err)
@@ -395,21 +393,11 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
	}
	b.saveOldListings()
	// save new listings
-	// NOTE: "changes" in this case does not mean this run vs. last run, it means start of this run vs. end of this run.
-	// i.e. whether we can use the March lst-new as this side's lst without modifying it.
	if noChanges {
		b.replaceCurrentListings()
	} else {
-		if changes1 || b.InGracefulShutdown { // 2to1
-			err1 = b.modifyListing(fctx, b.fs2, b.fs1, results2to1, queues, false)
-		} else {
-			err1 = bilib.CopyFileIfExists(b.newListing1, b.listing1)
-		}
-		if changes2 || b.InGracefulShutdown { // 1to2
-			err2 = b.modifyListing(fctx, b.fs1, b.fs2, results1to2, queues, true)
-		} else {
-			err2 = bilib.CopyFileIfExists(b.newListing2, b.listing2)
-		}
+		err1 = b.modifyListing(fctx, b.fs2, b.fs1, results2to1, queues, false) // 2to1
+		err2 = b.modifyListing(fctx, b.fs1, b.fs2, results1to2, queues, true)  // 1to2
	}
	if b.DebugName != "" {
		l1, _ := b.loadListing(b.listing1)

1 cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.copy2to1.que vendored Normal file
@@ -0,0 +1 @@
"file1.txt"

10 cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path1.lst vendored Normal file
@@ -0,0 +1,10 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"

10 cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path1.lst-new vendored Normal file
@@ -0,0 +1,10 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"

10 cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path1.lst-old vendored Normal file
@@ -0,0 +1,10 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"

10 cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path2.lst vendored Normal file
@@ -0,0 +1,10 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"

10 cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path2.lst-new vendored Normal file
@@ -0,0 +1,10 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"

10 cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path2.lst-old vendored Normal file
@@ -0,0 +1,10 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"
73 cmd/bisync/testdata/test_concurrent/golden/test.log vendored Normal file
@@ -0,0 +1,73 @@
(01) : test concurrent

(02) : test initial bisync
(03) : bisync resync
INFO : Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.
INFO : Bisyncing with Comparison Settings:
{
	"Modtime": true,
	"Size": true,
	"Checksum": false,
	"NoSlowHash": false,
	"SlowHashSyncOnly": false,
	"DownloadHash": false
}
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2    Resync is copying files to    - Path1
INFO : - Path1    Resync is copying files to    - Path2
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

(04) : test changed on one path - file1
(05) : touch-glob 2001-01-02 {datadir/} file5R.txt
(06) : touch-glob 2023-08-26 {datadir/} file7.txt
(07) : copy-as {datadir/}file5R.txt {path2/} file1.txt

(08) : test bisync with file changed during
(09) : concurrent-func
(10) : bisync
INFO : Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.
INFO : Bisyncing with Comparison Settings:
{
	"Modtime": true,
	"Size": true,
	"Checksum": false,
	"NoSlowHash": false,
	"SlowHashSyncOnly": false,
	"DownloadHash": false
}
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Building Path1 and Path2 listings
INFO : Path1 checking for diffs
INFO : Path2 checking for diffs
INFO : - Path2    File changed: size (larger), time (newer)    - file1.txt
INFO : Path2:    1 changes:    0 new,    1 modified,    0 deleted
INFO : (Modified:    1 newer,    0 older,    1 larger,    0 smaller)
INFO : Applying changes
INFO : - Path2    Queue copy to Path1    - {path1/}file1.txt
INFO : - Path2    Do queued copies to    - Path1
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

(11) : bisync
INFO : Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.
INFO : Bisyncing with Comparison Settings:
{
	"Modtime": true,
	"Size": true,
	"Checksum": false,
	"NoSlowHash": false,
	"SlowHashSyncOnly": false,
	"DownloadHash": false
}
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Building Path1 and Path2 listings
INFO : Path1 checking for diffs
INFO : Path2 checking for diffs
INFO : No changes found
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
1 cmd/bisync/testdata/test_concurrent/initial/RCLONE_TEST vendored Normal file
@@ -0,0 +1 @@
This file is used for testing the health of rclone accesses to the local/remote file system. Do not delete.

0 cmd/bisync/testdata/test_concurrent/initial/file1.txt vendored Normal file
0 cmd/bisync/testdata/test_concurrent/initial/file2.txt vendored Normal file
0 cmd/bisync/testdata/test_concurrent/initial/file3.txt vendored Normal file
0 cmd/bisync/testdata/test_concurrent/initial/file4.txt vendored Normal file
0 cmd/bisync/testdata/test_concurrent/initial/file5.txt vendored Normal file
0 cmd/bisync/testdata/test_concurrent/initial/file6.txt vendored Normal file
0 cmd/bisync/testdata/test_concurrent/initial/file7.txt vendored Normal file
0 cmd/bisync/testdata/test_concurrent/initial/file8.txt vendored Normal file
0 cmd/bisync/testdata/test_concurrent/modfiles/dummy.txt vendored Normal file

1 cmd/bisync/testdata/test_concurrent/modfiles/file1.txt vendored Normal file
@@ -0,0 +1 @@
This file is newer

1 cmd/bisync/testdata/test_concurrent/modfiles/file10.txt vendored Normal file
@@ -0,0 +1 @@
This file is newer

1 cmd/bisync/testdata/test_concurrent/modfiles/file11.txt vendored Normal file
@@ -0,0 +1 @@
This file is newer

1 cmd/bisync/testdata/test_concurrent/modfiles/file2.txt vendored Normal file
@@ -0,0 +1 @@
Newer version

1 cmd/bisync/testdata/test_concurrent/modfiles/file5L.txt vendored Normal file
@@ -0,0 +1 @@
This file is newer and not equal to 5R

1 cmd/bisync/testdata/test_concurrent/modfiles/file5R.txt vendored Normal file
@@ -0,0 +1 @@
This file is newer and not equal to 5L

1 cmd/bisync/testdata/test_concurrent/modfiles/file6.txt vendored Normal file
@@ -0,0 +1 @@
This file is newer

1 cmd/bisync/testdata/test_concurrent/modfiles/file7.txt vendored Normal file
@@ -0,0 +1 @@
This file is newer
15 cmd/bisync/testdata/test_concurrent/scenario.txt vendored Normal file
@@ -0,0 +1,15 @@
test concurrent

test initial bisync
bisync resync

test changed on one path - file1
touch-glob 2001-01-02 {datadir/} file5R.txt
touch-glob 2023-08-26 {datadir/} file7.txt
copy-as {datadir/}file5R.txt {path2/} file1.txt

test bisync with file changed during
concurrent-func
bisync

bisync
@@ -23,7 +23,7 @@ INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
(05) : move-listings empty-path1

-(06) : test 2. resync with empty path2, resulting in synching all content to path2.
+(06) : test 2. resync with empty path2, resulting in syncing all content to path2.
(07) : purge-children {path2/}
(08) : bisync resync
INFO : Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.

4 cmd/bisync/testdata/test_resync/scenario.txt vendored
@@ -1,6 +1,6 @@
test resync
# 1. Resync with empty Path1, resulting in copying all content FROM Path2
-# 2. Resync with empty Path2, resulting in synching all content TO Path2
+# 2. Resync with empty Path2, resulting in syncing all content TO Path2
# 3. Exercise all of the various file difference scenarios during a resync:
#    File        Path1   Path2    Expected action    Who wins
#    - file1.txt Exists  Missing  Sync Path1 >Path2  Path1
@@ -17,7 +17,7 @@ purge-children {path1/}
bisync resync
move-listings empty-path1

-test 2. resync with empty path2, resulting in synching all content to path2.
+test 2. resync with empty path2, resulting in syncing all content to path2.
purge-children {path2/}
bisync resync
move-listings empty-path2

@@ -549,12 +549,12 @@ password to re-encrypt the config.

When |--password-command| is called to change the password then the
environment variable |RCLONE_PASSWORD_CHANGE=1| will be set. So if
-changing passwords programatically you can use the environment
+changing passwords programmatically you can use the environment
variable to distinguish which password you must supply.

Alternatively you can remove the password first (with |rclone config
encryption remove|), then set it again with this command which may be
-easier if you don't mind the unecrypted config file being on the disk
+easier if you don't mind the unencrypted config file being on the disk
briefly.
`, "|", "`"),
	RunE: func(command *cobra.Command, args []string) error {

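As an illustration of the RCLONE_PASSWORD_CHANGE behaviour described in
the hunk above, a password-command script can branch on that variable;
a minimal sketch (the script path and secret file names here are
assumptions):

    #!/bin/sh
    # invoked by rclone via --password-command
    if [ "$RCLONE_PASSWORD_CHANGE" = "1" ]; then
        cat /secrets/rclone-new-password   # supply the new password
    else
        cat /secrets/rclone-password       # supply the current password
    fi
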
@@ -54,7 +54,7 @@ destination if there is one with the same name.

Setting |--stdout| or making the output file name |-|
will cause the output to be written to standard output.

-### Troublshooting
+### Troubleshooting

If you can't get |rclone copyurl| to work then here are some things you can try:

@@ -22,6 +22,9 @@ include/exclude filters - everything will be removed. Use the
delete files. To delete empty directories only, use command
[rmdir](/commands/rclone_rmdir/) or [rmdirs](/commands/rclone_rmdirs/).

The concurrency of this operation is controlled by the ` + "`--checkers`" + ` global flag. However, some backends will
implement this command directly, in which case ` + "`--checkers`" + ` will be ignored.

**Important**: Since this can cause data loss, test first with the
` + "`--dry-run` or the `--interactive`/`-i`" + ` flag.
`,

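As a usage illustration of the flag mentioned in the added paragraph
(the remote name and path here are assumptions):

    rclone delete remote:old-logs --checkers 16 --dry-run
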
@@ -194,7 +194,7 @@ func (f *FS) Chown(name string, uid, gid int) (err error) {
	return file.Chown(uid, gid)
}

-// Chtimes changes the acces time and modified time
+// Chtimes changes the access time and modified time
func (f *FS) Chtimes(name string, atime time.Time, mtime time.Time) (err error) {
	defer log.Trace(name, "atime=%v, mtime=%v", atime, mtime)("err=%v", &err)
	return f.vfs.Chtimes(name, atime, mtime)

@@ -145,7 +145,7 @@ that it uses an on disk cache, but the cache entries are held as
symlinks. Rclone will use the handle of the underlying file as the NFS
handle which improves performance. This sort of cache can't be backed
up and restored as the underlying handles will change. This is Linux
only. It requres running rclone as root or with |CAP_DAC_READ_SEARCH|.
only. It requires running rclone as root or with |CAP_DAC_READ_SEARCH|.
You can run rclone with this extra permission by doing this to the
rclone binary |sudo setcap cap_dac_read_search+ep /path/to/rclone|.

@@ -52,7 +52,7 @@ func (b *s3Backend) ListBuckets(ctx context.Context) ([]gofakes3.BucketInfo, err
	for _, entry := range dirEntries {
		if entry.IsDir() {
			response = append(response, gofakes3.BucketInfo{
				Name:         gofakes3.URLEncode(entry.Name()),
				Name:         entry.Name(),
				CreationDate: gofakes3.NewContentTime(entry.ModTime()),
			})
		}
@@ -158,7 +158,7 @@ func (b *s3Backend) HeadObject(ctx context.Context, bucketName, objectName strin
	}, nil
}

// GetObject fetchs the object from the filesystem.
// GetObject fetches the object from the filesystem.
func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string, rangeRequest *gofakes3.ObjectRangeRequest) (obj *gofakes3.Object, err error) {
	_vfs, err := b.s.getVFS(ctx)
	if err != nil {
@@ -227,7 +227,7 @@ func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string
	}

	return &gofakes3.Object{
		Name:     gofakes3.URLEncode(objectName),
		Name:     objectName,
		Hash:     hash,
		Metadata: meta,
		Size:     size,
@@ -400,7 +400,7 @@ func (b *s3Backend) deleteObject(ctx context.Context, bucketName, objectName str
	}

	fp := path.Join(bucketName, objectName)
	// S3 does not report an error when attemping to delete a key that does not exist, so
	// S3 does not report an error when attempting to delete a key that does not exist, so
	// we need to skip IsNotExist errors.
	if err := _vfs.Remove(fp); err != nil && !os.IsNotExist(err) {
		return err

@@ -19,7 +19,7 @@ func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPr
	for _, entry := range dirEntries {
		object := entry.Name()

		// workround for control-chars detect
		// workaround for control-chars detect
		objectPath := path.Join(fdPath, object)

		if !strings.HasPrefix(object, name) {
@@ -28,7 +28,7 @@ func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPr

		if entry.IsDir() {
			if addPrefix {
				response.AddPrefix(gofakes3.URLEncode(objectPath))
				response.AddPrefix(objectPath)
				continue
			}
			err := b.entryListR(_vfs, bucket, path.Join(fdPath, object), "", false, response)
@@ -37,7 +37,7 @@ func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPr
			}
		} else {
			item := &gofakes3.Content{
				Key:          gofakes3.URLEncode(objectPath),
				Key:          objectPath,
				LastModified: gofakes3.NewContentTime(entry.ModTime()),
				ETag:         getFileHash(entry),
				Size:         entry.Size(),

@@ -69,7 +69,7 @@ secret_access_key = SECRET_ACCESS_KEY
use_multipart_uploads = false
```

Note that setting `disable_multipart_uploads = true` is to work around
Note that setting `use_multipart_uploads = false` is to work around
[a bug](#bugs) which will be fixed in due course.

### Bugs

@@ -487,7 +487,7 @@ See the [bisync filters](#filtering) section and generic
[--filter-from](/filtering/#filter-from-read-filtering-patterns-from-a-file)
documentation.
An [example filters file](#example-filters-file) contains filters for
non-allowed files for synching with Dropbox.
non-allowed files for syncing with Dropbox.

If you make changes to your filters file then bisync requires a run
with `--resync`. This is a safety feature, which prevents existing files
@@ -664,7 +664,7 @@ Using `--check-sync=false` will disable it and may significantly reduce the
sync run times for very large numbers of files.

The check may be run manually with `--check-sync=only`. It runs only the
integrity check and terminates without actually synching.
integrity check and terminates without actually syncing.

Note that currently, `--check-sync` **only checks listing snapshots and NOT the
actual files on the remotes.** Note also that the listing snapshots will not
@@ -1141,7 +1141,7 @@ The `--include*`, `--exclude*`, and `--filter` flags are also supported.

### How to filter directories

Filtering portions of the directory tree is a critical feature for synching.
Filtering portions of the directory tree is a critical feature for syncing.

Examples of directory trees (always beneath the Path1/Path2 root level)
you may want to exclude from your sync:
@@ -1250,7 +1250,7 @@ quashed by adding `--quiet` to the bisync command line.

## Example exclude-style filters files for use with Dropbox {#exclude-filters}

- Dropbox disallows synching the listed temporary and configuration/data files.
- Dropbox disallows syncing the listed temporary and configuration/data files.
  The `- <filename>` filters exclude these files where ever they may occur
  in the sync tree. Consider adding similar exclusions for file types
  you don't need to sync, such as core dump and software build files.
@@ -1584,7 +1584,7 @@ test command flags can be equally prefixed by a single `-` or double dash.

- `go test . -case basic -remote local -remote2 local`
  runs the `test_basic` test case using only the local filesystem,
  synching one local directory with another local directory.
  syncing one local directory with another local directory.
  Test script output is to the console, while commands within scenario.txt
  have their output sent to the `.../workdir/test.log` file,
  which is finally compared to the golden copy.
@@ -1815,6 +1815,9 @@ about _Unison_ and synchronization in general.

## Changelog

### `v1.69.1`
* Fixed an issue causing listings to not capture concurrent modifications under certain conditions

### `v1.68`
* Fixed an issue affecting backends that round modtimes to a lower precision.

@@ -1860,4 +1863,4 @@ causing bisync to consider more files than necessary due to overbroad filters du
* Added [new `--ignore-listing-checksum` flag](https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=6.%20%2D%2Dignore%2Dchecksum%20should%20be%20split%20into%20two%20flags%20for%20separate%20purposes)
to distinguish from `--ignore-checksum`
* [Performance improvements](https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=6.%20Deletes%20take%20several%20times%20longer%20than%20copies) for large remotes
* Documentation and testing improvements

@@ -5,6 +5,32 @@ description: "Rclone Changelog"

# Changelog

## v1.69.1 - 2025-02-14

[See commits](https://github.com/rclone/rclone/compare/v1.69.0...v1.69.1)

* Bug Fixes
    * lib/oauthutil: Fix redirect URL mismatch errors (Nick Craig-Wood)
    * bisync: Fix listings missing concurrent modifications (nielash)
    * serve s3: Fix list objects encoding-type (Nick Craig-Wood)
    * fs: Fix confusing "didn't find section in config file" error (Nick Craig-Wood)
    * doc fixes (Christoph Berger, Dimitri Papadopoulos, Matt Ickstadt, Nick Craig-Wood, Tim White, Zachary Vorhies)
    * build: Added parallel docker builds and caching for go build in the container (Anagh Kumar Baranwal)
* VFS
    * Fix the cache failing to upload symlinks when `--links` was specified (Nick Craig-Wood)
    * Fix race detected by race detector (Nick Craig-Wood)
    * Close the change notify channel on Shutdown (izouxv)
* B2
    * Fix "fatal error: concurrent map writes" (Nick Craig-Wood)
* Iclouddrive
    * Add notes on ADP and Missing PCS cookies (Nick Craig-Wood)
* Onedrive
    * Mark German (de) region as deprecated (Nick Craig-Wood)
* S3
    * Added new storage class to magalu provider (Bruno Fernandes)
    * Add DigitalOcean regions SFO2, LON1, TOR1, BLR1 (jkpe)
    * Add latest Linode Object Storage endpoints (jbagwell-akamai)

## v1.69.0 - 2025-01-12

[See commits](https://github.com/rclone/rclone/compare/v1.68.0...v1.69.0)

@@ -965,7 +965,7 @@ rclone [flags]
      --use-json-log                Use json log format
      --use-mmap                    Use mmap allocator (see docs)
      --use-server-modtime          Use server modified time instead of object metadata
      --user-agent string           Set the user-agent to a specified string (default "rclone/v1.69.0")
      --user-agent string           Set the user-agent to a specified string (default "rclone/v1.69.1")
  -v, --verbose count               Print lots more stuff (repeat for more)
  -V, --version                     Print the version number
      --webdav-auth-redirect        Preserve authentication on redirect

@@ -21,12 +21,12 @@ password to re-encrypt the config.

When `--password-command` is called to change the password then the
environment variable `RCLONE_PASSWORD_CHANGE=1` will be set. So if
changing passwords programatically you can use the environment
changing passwords programmatically you can use the environment
variable to distinguish which password you must supply.

Alternatively you can remove the password first (with `rclone config
encryption remove`), then set it again with this command which may be
easier if you don't mind the unecrypted config file being on the disk
easier if you don't mind the unencrypted config file being on the disk
briefly.

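To make the `RCLONE_PASSWORD_CHANGE` behaviour concrete, here is a minimal sketch of a helper that could be used as `--password-command`. The helper itself and its `CURRENT_PASS`/`NEW_PASS` variable names are illustrative assumptions, not part of this diff:

```go
// password-helper: a hypothetical --password-command program.
// rclone runs it to obtain the config password; during a password
// change it runs it again with RCLONE_PASSWORD_CHANGE=1 set.
package main

import (
	"fmt"
	"os"
)

func main() {
	if os.Getenv("RCLONE_PASSWORD_CHANGE") == "1" {
		// rclone is re-encrypting the config: supply the new password.
		fmt.Println(os.Getenv("NEW_PASS"))
		return
	}
	// Normal operation: supply the current password.
	fmt.Println(os.Getenv("CURRENT_PASS"))
}
```

Built as a binary, the same helper would then serve both the decrypt path and the re-encrypt path of a password change.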
@@ -28,7 +28,7 @@ destination if there is one with the same name.
Setting `--stdout` or making the output file name `-`
will cause the output to be written to standard output.

## Troublshooting
## Troubleshooting

If you can't get `rclone copyurl` to work then here are some things you can try:

@@ -15,6 +15,9 @@ include/exclude filters - everything will be removed. Use the
delete files. To delete empty directories only, use command
[rmdir](/commands/rclone_rmdir/) or [rmdirs](/commands/rclone_rmdirs/).

The concurrency of this operation is controlled by the `--checkers` global flag. However, some backends will
implement this command directly, in which case `--checkers` will be ignored.

**Important**: Since this can cause data loss, test first with the
`--dry-run` or the `--interactive`/`-i` flag.

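As an illustrative invocation (not part of this diff): `rclone delete remote:dir --dry-run` previews what would be removed, and `rclone delete remote:dir --checkers 16` raises the delete concurrency on backends that honour the flag.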
@@ -53,7 +53,7 @@ that it uses an on disk cache, but the cache entries are held as
symlinks. Rclone will use the handle of the underlying file as the NFS
handle which improves performance. This sort of cache can't be backed
up and restored as the underlying handles will change. This is Linux
only. It requres running rclone as root or with `CAP_DAC_READ_SEARCH`.
only. It requires running rclone as root or with `CAP_DAC_READ_SEARCH`.
You can run rclone with this extra permission by doing this to the
rclone binary `sudo setcap cap_dac_read_search+ep /path/to/rclone`.

@@ -82,7 +82,7 @@ secret_access_key = SECRET_ACCESS_KEY
use_multipart_uploads = false
```

Note that setting `disable_multipart_uploads = true` is to work around
Note that setting `use_multipart_uploads = false` is to work around
[a bug](#bugs) which will be fixed in due course.

## Bugs

@@ -741,7 +741,7 @@ strong random number generator. The nonce is incremented for each
chunk read making sure each nonce is unique for each block written.
The chance of a nonce being reused is minuscule. If you wrote an
exabyte of data (10¹⁸ bytes) you would have a probability of
approximately 2×10⁻³² of re-using a nonce.
approximately 2×10⁻³² of reusing a nonce.

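The quoted figure is consistent with a simple birthday-bound estimate. As a sketch, assuming the 64 KiB chunk size and 24-byte (192-bit) nonces the crypt format uses, and simplifying by treating every chunk nonce as an independent uniform draw:

```latex
n \approx \frac{10^{18}}{2^{16}} \approx 1.5 \times 10^{13}\ \text{chunks},
\qquad
P(\text{reuse}) \approx \frac{n^{2}}{2 \cdot 2^{192}}
\approx \frac{2.3 \times 10^{26}}{1.3 \times 10^{58}}
\approx 2 \times 10^{-32}.
```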
#### Chunk

@@ -619,6 +619,11 @@ it to `false`. It is also possible to specify `--boolean=false` or
parsed as `--boolean` and the `false` is parsed as an extra command
line argument for rclone.

Options documented to take a `stringArray` parameter accept multiple
values. To pass more than one value, repeat the option; for example:
`--include value1 --include value2`.

### Time or duration options {#time-option}

TIME or DURATION options can be specified as a duration string or a
@@ -2930,7 +2935,7 @@ so they take exactly the same form.
The options set by environment variables can be seen with the `-vv` flag, e.g. `rclone version -vv`.

Options that can appear multiple times (type `stringArray`) are
treated slighly differently as environment variables can only be
treated slightly differently as environment variables can only be
defined once. In order to allow a simple mechanism for adding one or
many items, the input is treated as a [CSV encoded](https://godoc.org/encoding/csv)
string. For example

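(An illustrative case, assuming the standard `RCLONE_*` mapping of flags to environment variables: `RCLONE_EXCLUDE="*.jpg,*.png"` is parsed as CSV and behaves like `--exclude "*.jpg" --exclude "*.png"`.)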
@@ -116,7 +116,7 @@ Flags for general networking and HTTP stuff.
      --tpslimit float          Limit HTTP transactions per second to this
      --tpslimit-burst int      Max burst of transactions for --tpslimit (default 1)
      --use-cookies             Enable session cookiejar
      --user-agent string       Set the user-agent to a specified string (default "rclone/v1.69.0")
      --user-agent string       Set the user-agent to a specified string (default "rclone/v1.69.1")
```

@@ -61,7 +61,7 @@ Enter a value.
config_2fa> 2FACODE
Remote config
--------------------
[koofr]
[iclouddrive]
- type: iclouddrive
- apple_id: APPLEID
- password: *** ENCRYPTED ***
@@ -78,6 +78,20 @@ y/e/d> y

ADP is currently unsupported and need to be disabled

On iPhone, Settings `>` Apple Account `>` iCloud `>` 'Access iCloud Data on the Web' must be ON, and 'Advanced Data Protection' OFF.

## Troubleshooting

### Missing PCS cookies from the request

This means you have Advanced Data Protection (ADP) turned on. This is not supported at the moment. If you want to use rclone you will have to turn it off. See above for how to turn it off.

You will need to clear the `cookies` and the `trust_token` fields in the config. Or you can delete the remote config and start again.

You should then run `rclone reconnect remote:`.

Note that changing the ADP setting may not take effect immediately - you may need to wait a few hours or a day before you can get rclone to work - keep clearing the config entry and running `rclone reconnect remote:` until rclone functions properly.

{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/iclouddrive/iclouddrive.go then run make backenddocs" >}}
### Standard options

@@ -319,7 +319,7 @@ Properties:
    - "us"
        - Microsoft Cloud for US Government
    - "de"
        - Microsoft Cloud Germany
        - Microsoft Cloud Germany (deprecated - try global region first).
    - "cn"
        - Azure and Office 365 operated by Vnet Group in China

@@ -936,6 +936,28 @@ See the [metadata](/docs/#metadata) docs for more info.

{{< rem autogenerated options stop >}}

### Impersonate other users as Admin

Unlike Google Drive and impersonating any domain user via service accounts, OneDrive requires you to authenticate as an admin account, and manually setup a remote per user you wish to impersonate.

1. In [Microsoft 365 Admin Center](https://admin.microsoft.com), open each user you need to "impersonate" and go to the OneDrive section. There is a heading called "Get access to files", you need to click to create the link, this creates the link of the format `https://{tenant}-my.sharepoint.com/personal/{user_name_domain_tld}/` but also changes the permissions so you your admin user has access.
2. Then in powershell run the following commands:
```console
Install-Module Microsoft.Graph -Scope CurrentUser -Repository PSGallery -Force
Import-Module Microsoft.Graph.Files
Connect-MgGraph -Scopes "Files.ReadWrite.All"
# Follow the steps to allow access to your admin user
# Then run this for each user you want to impersonate to get the Drive ID
Get-MgUserDefaultDrive -UserId '{emailaddress}'
# This will give you output of the format:
# Name Id DriveType CreatedDateTime
# ---- -- --------- ---------------
# OneDrive b!XYZ123 business 14/10/2023 1:00:58 pm

```
3. Then in rclone add a onedrive remote type, and use the `Type in driveID` with the DriveID you got in the previous step. One remote per user. It will then confirm the drive ID, and hopefully give you a message of `Found drive "root" of type "business"` and then include the URL of the format `https://{tenant}-my.sharepoint.com/personal/{user_name_domain_tld}/Documents`

## Limitations

If you don't use rclone for 90 days the refresh token will

@@ -2068,7 +2068,7 @@ the `--vfs-cache-mode` is off, it will return an empty result.
    ],
}

The `expiry` time is the time until the file is elegible for being
The `expiry` time is the time until the file is eligible for being
uploaded in floating point seconds. This may go negative. As rclone
only transfers `--transfers` files at once, only the lowest
`--transfers` expiry times will have `uploading` as `true`. So there

@@ -31,7 +31,7 @@ The S3 backend can be used with a number of different providers:
{{< provider name="Petabox" home="https://petabox.io/" config="/s3/#petabox" >}}
{{< provider name="Qiniu Cloud Object Storage (Kodo)" home="https://www.qiniu.com/en/products/kodo" config="/s3/#qiniu" >}}
{{< provider name="RackCorp Object Storage" home="https://www.rackcorp.com/" config="/s3/#RackCorp" >}}
{{< provider name="Rclone Serve S3" home="/commands/rclone_serve_http/" config="/s3/#rclone" >}}
{{< provider name="Rclone Serve S3" home="/commands/rclone_serve_s3/" config="/s3/#rclone" >}}
{{< provider name="Scaleway" home="https://www.scaleway.com/en/object-storage/" config="/s3/#scaleway" >}}
{{< provider name="Seagate Lyve Cloud" home="https://www.seagate.com/gb/en/services/cloud/storage/" config="/s3/#lyve" >}}
{{< provider name="SeaweedFS" home="https://github.com/chrislusf/seaweedfs/" config="/s3/#seaweedfs" >}}
@@ -750,7 +750,7 @@ Notes on above:
   that `USER_NAME` has been created.
2. The Resource entry must include both resource ARNs, as one implies
   the bucket and the other implies the bucket's objects.
3. When using [s3-no-check-bucket](#s3-no-check-bucket) and the bucket already exsits, the `"arn:aws:s3:::BUCKET_NAME"` doesn't have to be included.
3. When using [s3-no-check-bucket](#s3-no-check-bucket) and the bucket already exists, the `"arn:aws:s3:::BUCKET_NAME"` doesn't have to be included.

For reference, [here's an Ansible script](https://gist.github.com/ebridges/ebfc9042dd7c756cd101cfa807b7ae2b)
that will generate one or more buckets that will work with `rclone sync`.
@@ -3728,7 +3728,7 @@ location_constraint = au-nsw

### Rclone Serve S3 {#rclone}

Rclone can serve any remote over the S3 protocol. For details see the
[rclone serve s3](/commands/rclone_serve_http/) documentation.
[rclone serve s3](/commands/rclone_serve_s3/) documentation.

For example, to serve `remote:path` over s3, run the server like this:

@@ -3748,8 +3748,8 @@ secret_access_key = SECRET_ACCESS_KEY
use_multipart_uploads = false
```

Note that setting `disable_multipart_uploads = true` is to work around
[a bug](/commands/rclone_serve_http/#bugs) which will be fixed in due course.
Note that setting `use_multipart_uploads = false` is to work around
[a bug](/commands/rclone_serve_s3/#bugs) which will be fixed in due course.

### Scaleway

@@ -4845,27 +4845,49 @@ Option endpoint.
Endpoint for Linode Object Storage API.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
 1 / Atlanta, GA (USA), us-southeast-1
 1 / Amsterdam (Netherlands), nl-ams-1
   \ (nl-ams-1.linodeobjects.com)
 2 / Atlanta, GA (USA), us-southeast-1
   \ (us-southeast-1.linodeobjects.com)
 2 / Chicago, IL (USA), us-ord-1
 3 / Chennai (India), in-maa-1
   \ (in-maa-1.linodeobjects.com)
 4 / Chicago, IL (USA), us-ord-1
   \ (us-ord-1.linodeobjects.com)
 3 / Frankfurt (Germany), eu-central-1
 5 / Frankfurt (Germany), eu-central-1
   \ (eu-central-1.linodeobjects.com)
 4 / Milan (Italy), it-mil-1
 6 / Jakarta (Indonesia), id-cgk-1
   \ (id-cgk-1.linodeobjects.com)
 7 / London 2 (Great Britain), gb-lon-1
   \ (gb-lon-1.linodeobjects.com)
 8 / Los Angeles, CA (USA), us-lax-1
   \ (us-lax-1.linodeobjects.com)
 9 / Madrid (Spain), es-mad-1
   \ (es-mad-1.linodeobjects.com)
10 / Melbourne (Australia), au-mel-1
   \ (au-mel-1.linodeobjects.com)
11 / Miami, FL (USA), us-mia-1
   \ (us-mia-1.linodeobjects.com)
12 / Milan (Italy), it-mil-1
   \ (it-mil-1.linodeobjects.com)
 5 / Newark, NJ (USA), us-east-1
13 / Newark, NJ (USA), us-east-1
   \ (us-east-1.linodeobjects.com)
 6 / Paris (France), fr-par-1
14 / Osaka (Japan), jp-osa-1
   \ (jp-osa-1.linodeobjects.com)
15 / Paris (France), fr-par-1
   \ (fr-par-1.linodeobjects.com)
 7 / Seattle, WA (USA), us-sea-1
16 / São Paulo (Brazil), br-gru-1
   \ (br-gru-1.linodeobjects.com)
17 / Seattle, WA (USA), us-sea-1
   \ (us-sea-1.linodeobjects.com)
 8 / Singapore ap-south-1
18 / Singapore, ap-south-1
   \ (ap-south-1.linodeobjects.com)
 9 / Stockholm (Sweden), se-sto-1
19 / Singapore 2, sg-sin-1
   \ (sg-sin-1.linodeobjects.com)
20 / Stockholm (Sweden), se-sto-1
   \ (se-sto-1.linodeobjects.com)
10 / Washington, DC, (USA), us-iad-1
21 / Washington, DC, (USA), us-iad-1
   \ (us-iad-1.linodeobjects.com)
endpoint> 3
endpoint> 5

Option acl.
Canned ACL used when creating buckets and storing or copying objects.

@@ -57,7 +57,8 @@ off donation.
Thank you very much to our sponsors:

{{< sponsor src="/img/logos/idrive_e2.svg" width="300" height="200" title="Visit our sponsor IDrive e2" link="https://www.idrive.com/e2/?refer=rclone">}}
{{< sponsor src="/img/logos/warp.svg" width="300" height="200" title="Visit our sponsor warp.dev" link="https://www.warp.dev/?utm_source=rclone&utm_medium=referral&utm_campaign=rclone_20231103">}}
{{< sponsor src="/img/logos/warp.svg" width="285" height="200" title="Visit our sponsor warp.dev" link="https://www.warp.dev/?utm_source=rclone&utm_medium=referral&utm_campaign=rclone_20231103">}}
{{< sponsor src="/img/logos/sia.svg" width="200" height="200" title="Visit our sponsor sia" link="https://sia.tech">}}
{{< sponsor src="/img/logos/route4me.svg" width="400" height="200" title="Visit our sponsor Route4Me" link="https://route4me.com/">}}
{{< sponsor src="/img/logos/rcloneview.svg" width="300" height="200" title="Visit our sponsor RcloneView" link="https://rcloneview.com/">}}
{{< sponsor src="/img/logos/filelu-rclone.svg" width="330" height="200" title="Visit our sponsor FileLu" link="https://filelu.com/">}}

@@ -1 +1 @@
v1.69.0
v1.69.1
@@ -1,3 +1,3 @@
<a href="{{ .Get "link" }}" target="_blank" >
<img width="{{ .Get "width" }}" src="{{ .Get "src" }}" title="{{ .Get "title" }}" style="{{ .Get "style" | safeCSS }}">
<img width="{{ .Get "width" }}" src="{{ .Get "src" }}" title="{{ .Get "title" }}" styleX="{{ .Get "style" | safeCSS }}" style="margin: 2px; padding: 1px; border: 1px solid #ddd; border-radius: 4px;">
</a>

4
fs/cache/cache_test.go
vendored
@@ -131,7 +131,7 @@ func TestPutErr(t *testing.T) {
	assert.Equal(t, 1, Entries())

	fNew, err := GetFn(context.Background(), "mock:/", create)
	require.Equal(t, fs.ErrorNotFoundInConfigFile, err)
	require.True(t, errors.Is(err, fs.ErrorNotFoundInConfigFile))
	require.Equal(t, f, fNew)

	assert.Equal(t, 1, Entries())
@@ -141,7 +141,7 @@ func TestPutErr(t *testing.T) {
	PutErr("mock:/file.txt", f, fs.ErrorNotFoundInConfigFile)

	fNew, err = GetFn(context.Background(), "mock:/file.txt", create)
	require.Equal(t, fs.ErrorNotFoundInConfigFile, err)
	require.True(t, errors.Is(err, fs.ErrorNotFoundInConfigFile))
	require.Equal(t, f, fNew)

	assert.Equal(t, 1, Entries())

@@ -133,7 +133,7 @@ func TestCertificates(t *testing.T) {
	assert.Fail(t, "Certificate expired", "Certificate expires at %s, current time is %s", cert[0].NotAfter.Sub(startTime), time.Since(startTime))
}

// Write some test data to fullfil the request
// Write some test data to fulfill the request
w.Header().Set("Content-Type", "text/plain")
_, _ = fmt.Fprintln(w, "test data")
}))

@@ -95,7 +95,7 @@ func LogValueHide(key string, value interface{}) LogValueItem {
	return LogValueItem{key: key, value: value, render: false}
}

// String returns the representation of value. If render is fals this
// String returns the representation of value. If render is false this
// is an empty string so LogValueItem entries won't show in the
// textual representation of logs.
func (j LogValueItem) String() string {

@@ -6,6 +6,7 @@ import (
	"context"
	"crypto/md5"
	"encoding/base64"
	"fmt"
	"os"
	"path/filepath"
	"strings"
@@ -104,7 +105,7 @@ func ParseRemote(path string) (fsInfo *RegInfo, configName, fsPath string, conne
		m := ConfigMap("", nil, configName, parsed.Config)
		fsName, ok = m.Get("type")
		if !ok {
			return nil, "", "", nil, ErrorNotFoundInConfigFile
			return nil, "", "", nil, fmt.Errorf("%w (%q)", ErrorNotFoundInConfigFile, configName)
		}
	}
} else {

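The `%w` verb is what keeps this change backwards compatible: the error message gains the section name, while callers that match the sentinel keep working as long as they use `errors.Is` rather than `==`, which is exactly what the cache test change above switches to. A standalone sketch (hypothetical names, not rclone code) of the pattern:

```go
package main

import (
	"errors"
	"fmt"
)

// Sentinel error, analogous to fs.ErrorNotFoundInConfigFile.
var ErrNotFoundInConfigFile = errors.New("didn't find section in config file")

// lookup wraps the sentinel with %w so the message carries context.
func lookup(section string) error {
	return fmt.Errorf("%w (%q)", ErrNotFoundInConfigFile, section)
}

func main() {
	err := lookup("notfoundremote")
	fmt.Println(err)                                     // didn't find section in config file ("notfoundremote")
	fmt.Println(err == ErrNotFoundInConfigFile)          // false: direct comparison breaks once wrapped
	fmt.Println(errors.Is(err, ErrNotFoundInConfigFile)) // true: errors.Is unwraps the chain
}
```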
@@ -297,7 +297,7 @@ func (o *MemoryObject) Open(ctx context.Context, options ...fs.OpenOption) (io.R

// Update in to the object with the modTime given of the given size
//
// This re-uses the internal buffer if at all possible.
// This reuses the internal buffer if at all possible.
func (o *MemoryObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	size := src.Size()
	if size == 0 {

@@ -142,7 +142,7 @@ func TestMemoryObject(t *testing.T) {
	assert.NoError(t, err)
	checkContent(o, "Rutabaga")
	assert.Equal(t, newNow, o.ModTime(context.Background()))
	assert.Equal(t, "Rutaba", string(content)) // check we re-used the buffer
	assert.Equal(t, "Rutaba", string(content)) // check we reused the buffer

	// not within the buffer
	newStr := "0123456789"

@@ -358,7 +358,7 @@ func TestRemoteServing(t *testing.T) {
	URL:    "[notfoundremote:]/",
	Status: http.StatusInternalServerError,
	Expected: `{
	"error": "failed to make Fs: didn't find section in config file",
	"error": "failed to make Fs: didn't find section in config file (\"notfoundremote\")",
	"input": null,
	"path": "/",
	"status": 500

@@ -1,4 +1,4 @@
package fs

// VersionTag of rclone
var VersionTag = "v1.69.0"
var VersionTag = "v1.69.1"

@@ -459,7 +459,7 @@ func Run(t *testing.T, opt *Opt) {
	subRemoteName, subRemoteLeaf, err = fstest.RandomRemoteName(remoteName)
	require.NoError(t, err)
	f, err = fs.NewFs(context.Background(), subRemoteName)
	if err == fs.ErrorNotFoundInConfigFile {
	if errors.Is(err, fs.ErrorNotFoundInConfigFile) {
		t.Logf("Didn't find %q in config file - skipping tests", remoteName)
		return
	}
@@ -2391,7 +2391,7 @@ func Run(t *testing.T, opt *Opt) {
	var itemCopy = item
	itemCopy.Path += ".copy"

	// Set copy cutoff to mininum value so we make chunks
	// Set copy cutoff to minimum value so we make chunks
	origCutoff, err := do.SetCopyCutoff(minChunkSize)
	require.NoError(t, err)
	defer func() {

3
go.mod
@@ -60,7 +60,7 @@ require (
	github.com/prometheus/client_golang v1.20.5
	github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8
	github.com/quasilyte/go-ruleguard/dsl v0.3.22
	github.com/rclone/gofakes3 v0.0.3
	github.com/rclone/gofakes3 v0.0.4
	github.com/rfjakob/eme v1.1.2
	github.com/rivo/uniseg v0.4.7
	github.com/rogpeppe/go-internal v1.12.0

@@ -177,6 +177,7 @@ require (
	github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed // indirect
	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/minio/md5-simd v1.1.2 // indirect
	github.com/minio/xxml v0.0.3 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/onsi/ginkgo v1.16.5 // indirect
	github.com/panjf2000/ants/v2 v2.9.1 // indirect

6
go.sum
@@ -461,6 +461,8 @@ github.com/minio/minio-go/v6 v6.0.46/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tB
github.com/minio/minio-go/v7 v7.0.74 h1:fTo/XlPBTSpo3BAMshlwKL5RspXRv9us5UeHEGYCFe0=
github.com/minio/minio-go/v7 v7.0.74/go.mod h1:qydcVzV8Hqtj1VtEocfxbmVFa2siu6HGa+LDEPogjD8=
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/minio/xxml v0.0.3 h1:ZIpPQpfyG5uZQnqqC0LZuWtPk/WT8G/qkxvO6jb7zMU=
github.com/minio/xxml v0.0.3/go.mod h1:wcXErosl6IezQIMEWSK/LYC2VS7LJ1dAkgvuyIN3aH4=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
@@ -529,8 +531,8 @@ github.com/quic-go/quic-go v0.40.1 h1:X3AGzUNFs0jVuO3esAGnTfvdgvL4fq655WaOi1snv1
github.com/quic-go/quic-go v0.40.1/go.mod h1:PeN7kuVJ4xZbxSv/4OX6S1USOX8MJvydwpTx31vx60c=
github.com/rasky/go-xdr v0.0.0-20170124162913-1a41d1a06c93 h1:UVArwN/wkKjMVhh2EQGC0tEc1+FqiLlvYXY5mQ2f8Wg=
github.com/rasky/go-xdr v0.0.0-20170124162913-1a41d1a06c93/go.mod h1:Nfe4efndBz4TibWycNE+lqyJZiMX4ycx+QKV8Ta0f/o=
github.com/rclone/gofakes3 v0.0.3 h1:0sKCxJ8TUUAG5KXGuc/fcDKGnzB/j6IjNQui9ntIZPo=
github.com/rclone/gofakes3 v0.0.3/go.mod h1:z7+o2VUwitO0WuVHReQlOW9jZ03LpeJ0PUFSULyTIds=
github.com/rclone/gofakes3 v0.0.4 h1:LswpC49VY/UJ1zucoL5ktnOEX6lq3qK7e1aFIAfqCbk=
github.com/rclone/gofakes3 v0.0.4/go.mod h1:j/UoS+2/Mr7xAlfKhyVC58YyFQmh9uoQA5YZQXQUqmg=
github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4=
github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA=
github.com/relvacode/iso8601 v1.3.0 h1:HguUjsGpIMh/zsTczGN3DVJFxTU/GX+MMmzcKoMO7ko=

@@ -108,7 +108,7 @@ func (conf *Config) MakeOauth2Config() *oauth2.Config {
	return &oauth2.Config{
		ClientID:     conf.ClientID,
		ClientSecret: conf.ClientSecret,
		RedirectURL:  RedirectLocalhostURL,
		RedirectURL:  conf.RedirectURL,
		Scopes:       conf.Scopes,
		Endpoint: oauth2.Endpoint{
			AuthURL: conf.AuthURL,

294
rclone.1
generated
@@ -1,8 +1,79 @@
|
||||
.\"t
|
||||
.\" Automatically generated by Pandoc 2.9.2.1
|
||||
.\"
|
||||
.TH "rclone" "1" "Jan 12, 2025" "User Manual" ""
|
||||
.TH "rclone" "1" "Feb 14, 2025" "User Manual" ""
|
||||
.hy
|
||||
.SH NAME
|
||||
.PP
|
||||
rclone - manage files on cloud storage
|
||||
.SH SYNOPSIS
|
||||
.IP
|
||||
.nf
|
||||
\f[C]
|
||||
Usage:
|
||||
rclone [flags]
|
||||
rclone [command]
|
||||
|
||||
Available commands:
|
||||
about Get quota information from the remote.
|
||||
authorize Remote authorization.
|
||||
backend Run a backend-specific command.
|
||||
bisync Perform bidirectional synchronization between two paths.
|
||||
cat Concatenates any files and sends them to stdout.
|
||||
check Checks the files in the source and destination match.
|
||||
checksum Checks the files in the destination against a SUM file.
|
||||
cleanup Clean up the remote if possible.
|
||||
completion Output completion script for a given shell.
|
||||
config Enter an interactive configuration session.
|
||||
copy Copy files from source to dest, skipping identical files.
|
||||
copyto Copy files from source to dest, skipping identical files.
|
||||
copyurl Copy the contents of the URL supplied content to dest:path.
|
||||
cryptcheck Cryptcheck checks the integrity of an encrypted remote.
|
||||
cryptdecode Cryptdecode returns unencrypted file names.
|
||||
dedupe Interactively find duplicate filenames and delete/rename them.
|
||||
delete Remove the files in path.
|
||||
deletefile Remove a single file from remote.
|
||||
gendocs Output markdown docs for rclone to the directory supplied.
|
||||
gitannex Speaks with git-annex over stdin/stdout.
|
||||
hashsum Produces a hashsum file for all the objects in the path.
|
||||
help Show help for rclone commands, flags and backends.
|
||||
link Generate public link to file/folder.
|
||||
listremotes List all the remotes in the config file and defined in environment variables.
|
||||
ls List the objects in the path with size and path.
|
||||
lsd List all directories/containers/buckets in the path.
|
||||
lsf List directories and objects in remote:path formatted for parsing.
|
||||
lsjson List directories and objects in the path in JSON format.
|
||||
lsl List the objects in path with modification time, size and path.
|
||||
md5sum Produces an md5sum file for all the objects in the path.
|
||||
mkdir Make the path if it doesn\[aq]t already exist.
|
||||
mount Mount the remote as file system on a mountpoint.
|
||||
move Move files from source to dest.
|
||||
moveto Move file or directory from source to dest.
|
||||
ncdu Explore a remote with a text based user interface.
|
||||
nfsmount Mount the remote as file system on a mountpoint.
|
||||
obscure Obscure password for use in the rclone config file.
|
||||
purge Remove the path and all of its contents.
|
||||
rc Run a command against a running rclone.
|
||||
rcat Copies standard input to file on remote.
|
||||
rcd Run rclone listening to remote control commands only.
|
||||
rmdir Remove the empty directory at path.
|
||||
rmdirs Remove empty directories under the path.
|
||||
selfupdate Update the rclone binary.
|
||||
serve Serve a remote over a protocol.
|
||||
settier Changes storage class/tier of objects in remote.
|
||||
sha1sum Produces an sha1sum file for all the objects in the path.
|
||||
size Prints the total size and number of objects in remote:path.
|
||||
sync Make source and dest identical, modifying destination only.
|
||||
test Run a test command
|
||||
touch Create new file or change file modification time.
|
||||
tree List the contents of the remote in a tree like fashion.
|
||||
version Show the version number.
|
||||
|
||||
Use \[dq]rclone [command] --help\[dq] for more information about a command.
|
||||
Use \[dq]rclone help flags\[dq] for to see the global flags.
|
||||
Use \[dq]rclone help backends\[dq] for a list of supported services.
|
||||
\f[R]
|
||||
.fi
|
||||
.SH Rclone syncs your files to cloud storage
|
||||
.PP
|
||||
.IP \[bu] 2
|
||||
@@ -2238,6 +2309,11 @@ To delete empty directories only, use command
rmdir (https://rclone.org/commands/rclone_rmdir/) or
rmdirs (https://rclone.org/commands/rclone_rmdirs/).
.PP
The concurrency of this operation is controlled by the
\f[C]--checkers\f[R] global flag.
However, some backends will implement this command directly, in which
case \f[C]--checkers\f[R] will be ignored.
.PP
\f[B]Important\f[R]: Since this can cause data loss, test first with the
\f[C]--dry-run\f[R] or the \f[C]--interactive\f[R]/\f[C]-i\f[R] flag.
.IP
@@ -4652,12 +4728,12 @@ password to re-encrypt the config.
.PP
When \f[C]--password-command\f[R] is called to change the password then
the environment variable \f[C]RCLONE_PASSWORD_CHANGE=1\f[R] will be set.
So if changing passwords programatically you can use the environment
So if changing passwords programmatically you can use the environment
variable to distinguish which password you must supply.
.PP
Alternatively you can remove the password first (with
\f[C]rclone config encryption remove\f[R]), then set it again with this
command which may be easier if you don\[aq]t mind the unecrypted config
command which may be easier if you don\[aq]t mind the unencrypted config
file being on the disk briefly.
.IP
.nf
@@ -5273,7 +5349,7 @@ destination if there is one with the same name.
.PP
Setting \f[C]--stdout\f[R] or making the output file name \f[C]-\f[R]
will cause the output to be written to standard output.
.SS Troublshooting
.SS Troubleshooting
.PP
If you can\[aq]t get \f[C]rclone copyurl\f[R] to work then here are some
things you can try:
@@ -12993,7 +13069,8 @@ which improves performance.
This sort of cache can\[aq]t be backed up and restored as the underlying
handles will change.
This is Linux only.
It requres running rclone as root or with \f[C]CAP_DAC_READ_SEARCH\f[R].
It requires running rclone as root or with
\f[C]CAP_DAC_READ_SEARCH\f[R].
You can run rclone with this extra permission by doing this to the
rclone binary
\f[C]sudo setcap cap_dac_read_search+ep /path/to/rclone\f[R].
@@ -13973,7 +14050,7 @@ use_multipart_uploads = false
\f[R]
.fi
.PP
Note that setting \f[C]disable_multipart_uploads = true\f[R] is to work
Note that setting \f[C]use_multipart_uploads = false\f[R] is to work
around a bug which will be fixed in due course.
.SS Bugs
.PP
@@ -17806,6 +17883,11 @@ It is also possible to specify \f[C]--boolean=false\f[R] or
Note that \f[C]--boolean false\f[R] is not valid - this is parsed as
\f[C]--boolean\f[R] and the \f[C]false\f[R] is parsed as an extra
command line argument for rclone.
.PP
Options documented to take a \f[C]stringArray\f[R] parameter accept
multiple values.
To pass more than one value, repeat the option; for example:
\f[C]--include value1 --include value2\f[R].
.SS Time or duration options
.PP
TIME or DURATION options can be specified as a duration string or a time
@@ -20455,8 +20537,8 @@ The options set by environment variables can be seen with the
\f[C]rclone version -vv\f[R].
.PP
Options that can appear multiple times (type \f[C]stringArray\f[R]) are
treated slighly differently as environment variables can only be defined
once.
treated slightly differently as environment variables can only be
defined once.
In order to allow a simple mechanism for adding one or many items, the
input is treated as a CSV encoded (https://godoc.org/encoding/csv)
string.
@@ -24731,7 +24813,7 @@ return an empty result.
\f[R]
.fi
.PP
The \f[C]expiry\f[R] time is the time until the file is elegible for
The \f[C]expiry\f[R] time is the time until the file is eligible for
being uploaded in floating point seconds.
This may go negative.
As rclone only transfers \f[C]--transfers\f[R] files at once, only the
@@ -28442,7 +28524,7 @@ Flags for general networking and HTTP stuff.
      --tpslimit float          Limit HTTP transactions per second to this
      --tpslimit-burst int      Max burst of transactions for --tpslimit (default 1)
      --use-cookies             Enable session cookiejar
      --user-agent string       Set the user-agent to a specified string (default \[dq]rclone/v1.69.0\[dq])
      --user-agent string       Set the user-agent to a specified string (default \[dq]rclone/v1.69.1\[dq])
\f[R]
.fi
.SS Performance
@@ -30761,7 +30843,7 @@ See the bisync filters section and generic
--filter-from (https://rclone.org/filtering/#filter-from-read-filtering-patterns-from-a-file)
documentation.
An example filters file contains filters for non-allowed files for
synching with Dropbox.
syncing with Dropbox.
.PP
If you make changes to your filters file then bisync requires a run with
\f[C]--resync\f[R].
@@ -30987,7 +31069,7 @@ reduce the sync run times for very large numbers of files.
.PP
The check may be run manually with \f[C]--check-sync=only\f[R].
It runs only the integrity check and terminates without actually
synching.
syncing.
.PP
Note that currently, \f[C]--check-sync\f[R] \f[B]only checks listing
snapshots and NOT the actual files on the remotes.\f[R] Note also that
@@ -31701,7 +31783,7 @@ flags are also supported.
.SS How to filter directories
.PP
Filtering portions of the directory tree is a critical feature for
synching.
syncing.
.PP
Examples of directory trees (always beneath the Path1/Path2 root level)
you may want to exclude from your sync: - Directory trees containing
@@ -31859,7 +31941,7 @@ This noise can be quashed by adding \f[C]--quiet\f[R] to the bisync
command line.
.SS Example exclude-style filters files for use with Dropbox
.IP \[bu] 2
Dropbox disallows synching the listed temporary and configuration/data
Dropbox disallows syncing the listed temporary and configuration/data
files.
The \[ga]- \[ga] filters exclude these files where ever they may occur
in the sync tree.
@@ -32246,7 +32328,7 @@ single \f[C]-\f[R] or double dash.
.SS Running tests
.IP \[bu] 2
\f[C]go test . -case basic -remote local -remote2 local\f[R] runs the
\f[C]test_basic\f[R] test case using only the local filesystem, synching
\f[C]test_basic\f[R] test case using only the local filesystem, syncing
one local directory with another local directory.
Test script output is to the console, while commands within scenario.txt
have their output sent to the \f[C].../workdir/test.log\f[R] file, which
@@ -32579,6 +32661,10 @@ Also note a number of academic publications by Benjamin
Pierce (http://www.cis.upenn.edu/%7Ebcpierce/papers/index.shtml#File%20Synchronization)
about \f[I]Unison\f[R] and synchronization in general.
.SS Changelog
.SS \f[C]v1.69.1\f[R]
.IP \[bu] 2
Fixed an issue causing listings to not capture concurrent modifications
under certain conditions
.SS \f[C]v1.68\f[R]
.IP \[bu] 2
Fixed an issue affecting backends that round modtimes to a lower
@@ -34293,7 +34379,7 @@ It assumes that \f[C]USER_NAME\f[R] has been created.
The Resource entry must include both resource ARNs, as one implies the
bucket and the other implies the bucket\[aq]s objects.
.IP "3." 3
When using s3-no-check-bucket and the bucket already exsits, the
When using s3-no-check-bucket and the bucket already exists, the
\f[C]\[dq]arn:aws:s3:::BUCKET_NAME\[dq]\f[R] doesn\[aq]t have to be
included.
.PP
@@ -38469,7 +38555,7 @@ location_constraint = au-nsw
.PP
Rclone can serve any remote over the S3 protocol.
For details see the rclone serve
s3 (https://rclone.org/commands/rclone_serve_http/) documentation.
s3 (https://rclone.org/commands/rclone_serve_s3/) documentation.
.PP
For example, to serve \f[C]remote:path\f[R] over s3, run the server like
this:
@@ -38495,8 +38581,8 @@ use_multipart_uploads = false
\f[R]
.fi
.PP
Note that setting \f[C]disable_multipart_uploads = true\f[R] is to work
around a bug (https://rclone.org/commands/rclone_serve_http/#bugs) which
Note that setting \f[C]use_multipart_uploads = false\f[R] is to work
around a bug (https://rclone.org/commands/rclone_serve_s3/#bugs) which
will be fixed in due course.
.SS Scaleway
.PP
@@ -39689,27 +39775,49 @@ Option endpoint.
Endpoint for Linode Object Storage API.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
 1 / Atlanta, GA (USA), us-southeast-1
 1 / Amsterdam (Netherlands), nl-ams-1
   \[rs] (nl-ams-1.linodeobjects.com)
 2 / Atlanta, GA (USA), us-southeast-1
   \[rs] (us-southeast-1.linodeobjects.com)
 2 / Chicago, IL (USA), us-ord-1
 3 / Chennai (India), in-maa-1
   \[rs] (in-maa-1.linodeobjects.com)
 4 / Chicago, IL (USA), us-ord-1
   \[rs] (us-ord-1.linodeobjects.com)
 3 / Frankfurt (Germany), eu-central-1
 5 / Frankfurt (Germany), eu-central-1
   \[rs] (eu-central-1.linodeobjects.com)
 4 / Milan (Italy), it-mil-1
 6 / Jakarta (Indonesia), id-cgk-1
   \[rs] (id-cgk-1.linodeobjects.com)
 7 / London 2 (Great Britain), gb-lon-1
   \[rs] (gb-lon-1.linodeobjects.com)
 8 / Los Angeles, CA (USA), us-lax-1
   \[rs] (us-lax-1.linodeobjects.com)
 9 / Madrid (Spain), es-mad-1
   \[rs] (es-mad-1.linodeobjects.com)
10 / Melbourne (Australia), au-mel-1
   \[rs] (au-mel-1.linodeobjects.com)
11 / Miami, FL (USA), us-mia-1
   \[rs] (us-mia-1.linodeobjects.com)
12 / Milan (Italy), it-mil-1
   \[rs] (it-mil-1.linodeobjects.com)
 5 / Newark, NJ (USA), us-east-1
13 / Newark, NJ (USA), us-east-1
   \[rs] (us-east-1.linodeobjects.com)
 6 / Paris (France), fr-par-1
14 / Osaka (Japan), jp-osa-1
   \[rs] (jp-osa-1.linodeobjects.com)
15 / Paris (France), fr-par-1
   \[rs] (fr-par-1.linodeobjects.com)
 7 / Seattle, WA (USA), us-sea-1
16 / S\[~a]o Paulo (Brazil), br-gru-1
   \[rs] (br-gru-1.linodeobjects.com)
17 / Seattle, WA (USA), us-sea-1
   \[rs] (us-sea-1.linodeobjects.com)
 8 / Singapore ap-south-1
18 / Singapore, ap-south-1
   \[rs] (ap-south-1.linodeobjects.com)
 9 / Stockholm (Sweden), se-sto-1
19 / Singapore 2, sg-sin-1
   \[rs] (sg-sin-1.linodeobjects.com)
20 / Stockholm (Sweden), se-sto-1
   \[rs] (se-sto-1.linodeobjects.com)
10 / Washington, DC, (USA), us-iad-1
21 / Washington, DC, (USA), us-iad-1
   \[rs] (us-iad-1.linodeobjects.com)
endpoint> 3
endpoint> 5

Option acl.
Canned ACL used when creating buckets and storing or copying objects.

@@ -45295,7 +45403,7 @@ The nonce is incremented for each chunk read making sure each nonce is
unique for each block written.
The chance of a nonce being reused is minuscule.
If you wrote an exabyte of data (10\[S1]\[u2078] bytes) you would have a
probability of approximately 2\[tmu]10\[u207B]\[S3]\[S2] of re-using a
probability of approximately 2\[tmu]10\[u207B]\[S3]\[S2] of reusing a
nonce.
.SS Chunk
.PP
@@ -54838,7 +54946,7 @@ Enter a value.
config_2fa> 2FACODE
Remote config
--------------------
[koofr]
[iclouddrive]
- type: iclouddrive
- apple_id: APPLEID
- password: *** ENCRYPTED ***
@@ -54854,6 +54962,28 @@ y/e/d> y
.SS Advanced Data Protection
.PP
ADP is currently unsupported and need to be disabled
.PP
On iPhone, Settings \f[C]>\f[R] Apple Account \f[C]>\f[R] iCloud
\f[C]>\f[R] \[aq]Access iCloud Data on the Web\[aq] must be ON, and
\[aq]Advanced Data Protection\[aq] OFF.
.SS Troubleshooting
.SS Missing PCS cookies from the request
.PP
This means you have Advanced Data Protection (ADP) turned on.
This is not supported at the moment.
If you want to use rclone you will have to turn it off.
See above for how to turn it off.
.PP
You will need to clear the \f[C]cookies\f[R] and the
\f[C]trust_token\f[R] fields in the config.
Or you can delete the remote config and start again.
.PP
You should then run \f[C]rclone reconnect remote:\f[R].
.PP
Note that changing the ADP setting may not take effect immediately - you
may need to wait a few hours or a day before you can get rclone to work
- keep clearing the config entry and running
\f[C]rclone reconnect remote:\f[R] until rclone functions properly.
.SS Standard options
.PP
Here are the Standard options specific to iclouddrive (iCloud Drive).
@@ -60946,7 +61076,7 @@ Microsoft Cloud for US Government
\[dq]de\[dq]
.RS 2
.IP \[bu] 2
Microsoft Cloud Germany
Microsoft Cloud Germany (deprecated - try global region first).
.RE
.IP \[bu] 2
\[dq]cn\[dq]
|
||||
.TE
|
||||
.PP
|
||||
See the metadata (https://rclone.org/docs/#metadata) docs for more info.
|
||||
.SS Impersonate other users as Admin
|
||||
.PP
|
||||
Unlike Google Drive and impersonating any domain user via service
|
||||
accounts, OneDrive requires you to authenticate as an admin account, and
|
||||
manually setup a remote per user you wish to impersonate.
|
||||
.IP "1." 3
|
||||
In Microsoft 365 Admin Center (https://admin.microsoft.com), open each
|
||||
user you need to \[dq]impersonate\[dq] and go to the OneDrive section.
|
||||
There is a heading called \[dq]Get access to files\[dq], you need to
|
||||
click to create the link, this creates the link of the format
|
||||
\f[C]https://{tenant}-my.sharepoint.com/personal/{user_name_domain_tld}/\f[R]
|
||||
but also changes the permissions so you your admin user has access.
|
||||
.IP "2." 3
|
||||
Then in powershell run the following commands:
|
||||
.IP
|
||||
.nf
|
||||
\f[C]
|
||||
Install-Module Microsoft.Graph -Scope CurrentUser -Repository PSGallery -Force
|
||||
Import-Module Microsoft.Graph.Files
|
||||
Connect-MgGraph -Scopes \[dq]Files.ReadWrite.All\[dq]
|
||||
# Follow the steps to allow access to your admin user
|
||||
# Then run this for each user you want to impersonate to get the Drive ID
|
||||
Get-MgUserDefaultDrive -UserId \[aq]{emailaddress}\[aq]
|
||||
# This will give you output of the format:
|
||||
# Name Id DriveType CreatedDateTime
|
||||
# ---- -- --------- ---------------
|
||||
# OneDrive b!XYZ123 business 14/10/2023 1:00:58\[u202F]pm
|
||||
\f[R]
|
||||
.fi
|
||||
.IP "3." 3
|
||||
Then in rclone add a onedrive remote type, and use the
|
||||
\f[C]Type in driveID\f[R] with the DriveID you got in the previous step.
|
||||
One remote per user.
|
||||
It will then confirm the drive ID, and hopefully give you a message of
|
||||
\f[C]Found drive \[dq]root\[dq] of type \[dq]business\[dq]\f[R] and then
|
||||
include the URL of the format
|
||||
\f[C]https://{tenant}-my.sharepoint.com/personal/{user_name_domain_tld}/Documents\f[R]
|
||||
.SS Limitations
|
||||
.PP
|
||||
If you don\[aq]t use rclone for 90 days the refresh token will expire.
|
||||
@@ -74872,6 +75039,67 @@ Options:
.IP \[bu] 2
\[dq]error\[dq]: return an error based on option value
.SH Changelog
.SS v1.69.1 - 2025-02-14
.PP
See commits (https://github.com/rclone/rclone/compare/v1.69.0...v1.69.1)
.IP \[bu] 2
Bug Fixes
.RS 2
.IP \[bu] 2
lib/oauthutil: Fix redirect URL mismatch errors (Nick Craig-Wood)
.IP \[bu] 2
bisync: Fix listings missing concurrent modifications (nielash)
.IP \[bu] 2
serve s3: Fix list objects encoding-type (Nick Craig-Wood)
.IP \[bu] 2
fs: Fix confusing \[dq]didn\[aq]t find section in config file\[dq] error
(Nick Craig-Wood)
.IP \[bu] 2
doc fixes (Christoph Berger, Dimitri Papadopoulos, Matt Ickstadt, Nick
Craig-Wood, Tim White, Zachary Vorhies)
.IP \[bu] 2
build: Added parallel docker builds and caching for go build in the
container (Anagh Kumar Baranwal)
.RE
.IP \[bu] 2
VFS
.RS 2
.IP \[bu] 2
Fix the cache failing to upload symlinks when \f[C]--links\f[R] was
specified (Nick Craig-Wood)
.IP \[bu] 2
Fix race detected by race detector (Nick Craig-Wood)
.IP \[bu] 2
Close the change notify channel on Shutdown (izouxv)
.RE
.IP \[bu] 2
B2
.RS 2
.IP \[bu] 2
Fix \[dq]fatal error: concurrent map writes\[dq] (Nick Craig-Wood)
.RE
.IP \[bu] 2
Iclouddrive
.RS 2
.IP \[bu] 2
Add notes on ADP and Missing PCS cookies (Nick Craig-Wood)
.RE
.IP \[bu] 2
Onedrive
.RS 2
.IP \[bu] 2
Mark German (de) region as deprecated (Nick Craig-Wood)
.RE
.IP \[bu] 2
S3
.RS 2
.IP \[bu] 2
Added new storage class to magalu provider (Bruno Fernandes)
.IP \[bu] 2
Add DigitalOcean regions SFO2, LON1, TOR1, BLR1 (jkpe)
.IP \[bu] 2
Add latest Linode Object Storage endpoints (jbagwell-akamai)
.RE
.SS v1.69.0 - 2025-01-12
.PP
See commits (https://github.com/rclone/rclone/compare/v1.68.0...v1.69.0)

@@ -66,7 +66,10 @@ func newDir(vfs *VFS, f fs.Fs, parent *Dir, fsDir fs.Directory) *Dir {
		inode: newInode(),
		items: make(map[string]Node),
	}
	d.cleanupTimer = time.AfterFunc(time.Duration(vfs.Opt.DirCacheTime*2), d.cacheCleanup)
	// Set timer up like this to avoid race of d.cacheCleanup being called
	// before d.cleanupTimer is assigned to
	d.cleanupTimer = time.AfterFunc(time.Hour, d.cacheCleanup)
	d.cleanupTimer.Reset(time.Duration(vfs.Opt.DirCacheTime * 2))
	return d
}

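The two-step construction matters: `time.AfterFunc` with a short duration can fire its callback before the returned `*time.Timer` has been stored, so a callback that touches the field may observe it unassigned (the race the comment describes). Arming the timer far in the future first and then `Reset`-ing it closes that window. A standalone sketch of the same pattern (hypothetical names, not the rclone code itself):

```go
package main

import (
	"fmt"
	"time"
)

type dir struct{ cleanupTimer *time.Timer }

func newDirSafe(interval time.Duration) *dir {
	d := &dir{}
	// Arm with a long duration first so the assignment below always
	// completes before the callback can possibly run...
	d.cleanupTimer = time.AfterFunc(time.Hour, func() {
		// Safe: d.cleanupTimer is guaranteed non-nil here.
		fmt.Println("cleanup tick")
		d.cleanupTimer.Reset(interval)
	})
	// ...then bring the deadline forward to the real interval.
	d.cleanupTimer.Reset(interval)
	return d
}

func main() {
	newDirSafe(10 * time.Millisecond)
	time.Sleep(50 * time.Millisecond)
}
```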
@@ -464,7 +464,7 @@ the |--vfs-cache-mode| is off, it will return an empty result.
    ],
}

The |expiry| time is the time until the file is elegible for being
The |expiry| time is the time until the file is eligible for being
uploaded in floating point seconds. This may go negative. As rclone
only transfers |--transfers| files at once, only the lowest
|--transfers| expiry times will have |uploading| as |true|. So there

@@ -216,7 +216,7 @@ func New(f fs.Fs, opt *vfscommon.Options) *VFS {
	configName := fs.ConfigString(f)
	for _, activeVFS := range active[configName] {
		if vfs.Opt == activeVFS.Opt {
			fs.Debugf(f, "Re-using VFS from active cache")
			fs.Debugf(f, "Reusing VFS from active cache")
			activeVFS.inUse.Add(1)
			return activeVFS
		}
@@ -365,6 +365,11 @@ func (vfs *VFS) Shutdown() {
	activeMu.Unlock()

	vfs.shutdownCache()

	if vfs.pollChan != nil {
		close(vfs.pollChan)
		vfs.pollChan = nil
	}
}

// CleanUp deletes the contents of the on disk cache

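Closing the change-notify channel lets any goroutine blocked on it unwind, and the nil guard plus reset makes the close idempotent, since in Go both closing a nil channel and closing an already-closed channel panic. A standalone sketch of the pattern (hypothetical names, not the rclone code itself):

```go
package main

import "fmt"

type server struct{ pollChan chan struct{} }

// Shutdown closes pollChan exactly once; a second call is a no-op
// rather than a panic, because the field is nilled after closing.
func (s *server) Shutdown() {
	if s.pollChan != nil {
		close(s.pollChan)
		s.pollChan = nil
	}
}

func main() {
	s := &server{pollChan: make(chan struct{})}
	done := make(chan struct{})
	go func() {
		<-s.pollChan // unblocks when the channel is closed
		fmt.Println("poller stopped")
		close(done)
	}()
	s.Shutdown()
	s.Shutdown() // safe: second call does nothing
	<-done
}
```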
@@ -227,7 +227,10 @@ func (c *Cache) createItemDir(name string) (string, error) {

// getBackend gets a backend for a cache root dir
func getBackend(ctx context.Context, parentPath string, name string, relativeDirPath string) (fs.Fs, error) {
	path := fmt.Sprintf(":local,encoding='%v':%s/%s/%s", encoder.OS, parentPath, name, relativeDirPath)
	// Make sure we turn off the global links flag as it overrides the backend specific one
	ctx, ci := fs.AddConfig(ctx)
	ci.Links = false
	path := fmt.Sprintf(":local,encoding='%v',links=false:%s/%s/%s", encoder.OS, parentPath, name, relativeDirPath)
	return fscache.Get(ctx, path)
}

Some files were not shown because too many files have changed in this diff.