Mirror of https://github.com/rclone/rclone.git (synced 2025-12-24 04:04:37 +00:00)

Compare commits: jwt-v5-com...fix-8379-h (61 commits)
| Author | SHA1 | Date |
|---|---|---|
| | d7f23b6b97 | |
| | 539e96cc1f | |
| | 5086aad0b2 | |
| | c1b414e2cf | |
| | 2ff8aa1c20 | |
| | 6d2a72367a | |
| | 9df751d4ec | |
| | e175c863aa | |
| | 64cd8ae0f0 | |
| | 46b498b86a | |
| | b76cd74087 | |
| | 3b49fd24d4 | |
| | c0515a51a5 | |
| | dc9c87279b | |
| | 057fdb3a9d | |
| | 3daf62cf3d | |
| | 0ef495fa76 | |
| | 722c567504 | |
| | 0ebe1c0f81 | |
| | 2dc06b2548 | |
| | b52aabd8fe | |
| | 6494ac037f | |
| | 5c3a1bbf30 | |
| | c837664653 | |
| | 77429b154e | |
| | 39b8f17ebb | |
| | 81ecfb0f64 | |
| | 656e789c5b | |
| | fe19184084 | |
| | b4990cd858 | |
| | 8e955c6b13 | |
| | 3a5ddfcd3c | |
| | ac3f7a87c3 | |
| | 4e9b63e141 | |
| | 7fd7fe3c82 | |
| | 9dff45563d | |
| | 83cf8fb821 | |
| | 32e79a5c5c | |
| | fc44a8114e | |
| | 657172ef77 | |
| | 71eb4199c3 | |
| | ac3c21368d | |
| | db71b2bd5f | |
| | 8cfe42d09f | |
| | e673a28a72 | |
| | 59889ce46b | |
| | 62e8a01e7e | |
| | 87eaf37629 | |
| | 7c7606a6cf | |
| | dbb21165d4 | |
| | 375953cba3 | |
| | af5385b344 | |
| | 347be176af | |
| | bf5a4774c6 | |
| | 0275d3edf2 | |
| | be53ae98f8 | |
| | 0d9fe51632 | |
| | 03bd795221 | |
| | 5a4026ccb4 | |
| | b1d4de69c2 | |
| | 5316acd046 | |
@@ -1,77 +0,0 @@
name: Docker beta build

on:
  push:
    branches:
      - master
jobs:
  build:
    if: github.repository == 'rclone/rclone'
    runs-on: ubuntu-latest
    name: Build image job
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ghcr.io/${{ github.repository }}
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          # This is the user that triggered the Workflow. In this case, it will
          # either be the user whom created the Release or manually triggered
          # the workflow_dispatch.
          username: ${{ github.actor }}
          # `secrets.GITHUB_TOKEN` is a secret that's automatically generated by
          # GitHub Actions at the start of a workflow run to identify the job.
          # This is used to authenticate against GitHub Container Registry.
          # See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
          # for more detailed information.
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Show disk usage
        shell: bash
        run: |
          df -h .
      - name: Build and publish image
        uses: docker/build-push-action@v6
        with:
          file: Dockerfile
          context: .
          push: true # push the image to ghcr
          tags: |
            ghcr.io/rclone/rclone:beta
            rclone/rclone:beta
          labels: ${{ steps.meta.outputs.labels }}
          platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
          cache-from: type=gha, scope=${{ github.workflow }}
          cache-to: type=gha, mode=max, scope=${{ github.workflow }}
          provenance: false
          # Eventually cache will need to be cleared if builds more frequent than once a week
          # https://github.com/docker/build-push-action/issues/252
      - name: Show disk usage
        shell: bash
        run: |
          df -h .
.github/workflows/build_publish_docker_image.yml (vendored, new file, 294 lines)
@@ -0,0 +1,294 @@
---
# Github Actions release for rclone
# -*- compile-command: "yamllint -f parsable build_publish_docker_image.yml" -*-

name: Build & Push Docker Images

# Trigger the workflow on push or pull request
on:
  push:
    branches:
      - '**'
    tags:
      - '**'
  workflow_dispatch:
    inputs:
      manual:
        description: Manual run (bypass default conditions)
        type: boolean
        default: true

jobs:
  build-image:
    if: inputs.manual || (github.repository == 'rclone/rclone' && github.event_name != 'pull_request')
    timeout-minutes: 60
    strategy:
      fail-fast: false
      matrix:
        include:
          - platform: linux/amd64
            runs-on: ubuntu-24.04
          - platform: linux/386
            runs-on: ubuntu-24.04
          - platform: linux/arm64
            runs-on: ubuntu-24.04-arm
          - platform: linux/arm/v7
            runs-on: ubuntu-24.04-arm
          - platform: linux/arm/v6
            runs-on: ubuntu-24.04-arm

    name: Build Docker Image for ${{ matrix.platform }}
    runs-on: ${{ matrix.runs-on }}

    steps:
      - name: Free Space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .

      - name: Checkout Repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set REPO_NAME Variable
        run: |
          echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}

      - name: Set PLATFORM Variable
        run: |
          platform=${{ matrix.platform }}
          echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV

      - name: Set CACHE_NAME Variable
        shell: python
        run: |
          import os, re

          def slugify(input_string, max_length=63):
              slug = input_string.lower()
              slug = re.sub(r'[^a-z0-9 -]', ' ', slug)
              slug = slug.strip()
              slug = re.sub(r'\s+', '-', slug)
              slug = re.sub(r'-+', '-', slug)
              slug = slug[:max_length]
              slug = re.sub(r'[-]+$', '', slug)
              return slug

          ref_name_slug = "cache"

          if os.environ.get("GITHUB_REF_NAME") and os.environ['GITHUB_EVENT_NAME'] == "pull_request":
              ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME'])

          with open(os.environ['GITHUB_ENV'], 'a') as env:
              env.write(f"CACHE_NAME={ref_name_slug}\n")

      - name: Get ImageOS
        # There's no way around this, because "ImageOS" is only available to
        # processes, but the setup-go action uses it in its key.
        id: imageos
        uses: actions/github-script@v7
        with:
          result-encoding: string
          script: |
            return process.env.ImageOS

      - name: Extract Metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        env:
          DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,manifest-descriptor # Important for digest annotation (used by Github packages)
        with:
          images: |
            ghcr.io/${{ env.REPO_NAME }}
          labels: |
            org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
            org.opencontainers.image.vendor=${{ github.repository_owner }}
            org.opencontainers.image.authors=rclone <https://github.com/rclone>
            org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
            org.opencontainers.image.revision=${{ github.sha }}
          tags: |
            type=sha
            type=ref,event=pr
            type=ref,event=branch
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}
            type=semver,pattern={{major}}.{{minor}}
            type=raw,value=beta,enable={{is_default_branch}}

      - name: Setup QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Load Go Build Cache for Docker
        id: go-cache
        uses: actions/cache@v4
        with:
          key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
          # Cache only the go builds, the module download is cached via the docker layer caching
          path: |
            go-build-cache

      - name: Inject Go Build Cache into Docker
        uses: reproducible-containers/buildkit-cache-dance@v3
        with:
          cache-map: |
            {
              "go-build-cache": "/root/.cache/go-build"
            }
          skip-extraction: ${{ steps.go-cache.outputs.cache-hit }}

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          # This is the user that triggered the Workflow. In this case, it will
          # either be the user whom created the Release or manually triggered
          # the workflow_dispatch.
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and Publish Image Digest
        id: build
        uses: docker/build-push-action@v6
        with:
          file: Dockerfile
          context: .
          provenance: false
          # don't specify 'tags' here (error "get can't push tagged ref by digest")
          # tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          annotations: ${{ steps.meta.outputs.annotations }}
          platforms: ${{ matrix.platform }}
          outputs: |
            type=image,name=ghcr.io/${{ env.REPO_NAME }},push-by-digest=true,name-canonical=true,push=true
          cache-from: |
            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
          cache-to: |
            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }},image-manifest=true,mode=max,compression=zstd

      - name: Export Image Digest
        run: |
          mkdir -p /tmp/digests
          digest="${{ steps.build.outputs.digest }}"
          touch "/tmp/digests/${digest#sha256:}"

      - name: Upload Image Digest
        uses: actions/upload-artifact@v4
        with:
          name: digests-${{ env.PLATFORM }}
          path: /tmp/digests/*
          retention-days: 1
          if-no-files-found: error

  merge-image:
    name: Merge & Push Final Docker Image
    runs-on: ubuntu-24.04
    needs:
      - build-image

    steps:
      - name: Download Image Digests
        uses: actions/download-artifact@v4
        with:
          path: /tmp/digests
          pattern: digests-*
          merge-multiple: true

      - name: Set REPO_NAME Variable
        run: |
          echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}

      - name: Extract Metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        env:
          DOCKER_METADATA_ANNOTATIONS_LEVELS: index
        with:
          images: |
            ${{ env.REPO_NAME }}
            ghcr.io/${{ env.REPO_NAME }}
          labels: |
            org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
            org.opencontainers.image.vendor=${{ github.repository_owner }}
            org.opencontainers.image.authors=rclone <https://github.com/rclone>
            org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
            org.opencontainers.image.revision=${{ github.sha }}
          tags: |
            type=sha
            type=ref,event=pr
            type=ref,event=branch
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}
            type=semver,pattern={{major}}.{{minor}}
            type=raw,value=beta,enable={{is_default_branch}}

      - name: Extract Tags
        shell: python
        run: |
          import json, os

          metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
          metadata = json.loads(metadata_json)

          tags = [f"--tag '{tag}'" for tag in metadata["tags"]]
          tags_string = " ".join(tags)

          with open(os.environ['GITHUB_ENV'], 'a') as env:
              env.write(f"TAGS={tags_string}\n")

      - name: Extract Annotations
        shell: python
        run: |
          import json, os

          metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
          metadata = json.loads(metadata_json)

          annotations = [f"--annotation '{annotation}'" for annotation in metadata["annotations"]]
          annotations_string = " ".join(annotations)

          with open(os.environ['GITHUB_ENV'], 'a') as env:
              env.write(f"ANNOTATIONS={annotations_string}\n")

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          # This is the user that triggered the Workflow. In this case, it will
          # either be the user whom created the Release or manually triggered
          # the workflow_dispatch.
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Create & Push Manifest List
        working-directory: /tmp/digests
        run: |
          docker buildx imagetools create \
            ${{ env.TAGS }} \
            ${{ env.ANNOTATIONS }} \
            $(printf 'ghcr.io/${{ env.REPO_NAME }}@sha256:%s ' *)

      - name: Inspect and Run Multi-Platform Image
        run: |
          docker buildx imagetools inspect --raw ${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
          docker buildx imagetools inspect --raw ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
          docker run --rm ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }} version
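For illustration, here is a rough Go translation of the `slugify()` helper embedded in the "Set CACHE_NAME Variable" step above; the workflow itself runs the inline Python shown in the hunk, and the ref name used in `main` below is an invented example.

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// slugify mirrors the Python helper used to derive CACHE_NAME: lower-case the
// input, replace anything outside [a-z0-9 -] with spaces, collapse whitespace
// and repeated dashes to single dashes, truncate to maxLength, then strip
// trailing dashes.
func slugify(s string, maxLength int) string {
	s = strings.ToLower(s)
	s = regexp.MustCompile(`[^a-z0-9 -]`).ReplaceAllString(s, " ")
	s = strings.TrimSpace(s)
	s = regexp.MustCompile(`\s+`).ReplaceAllString(s, "-")
	s = regexp.MustCompile(`-+`).ReplaceAllString(s, "-")
	if len(s) > maxLength {
		s = s[:maxLength]
	}
	return strings.TrimRight(s, "-")
}

func main() {
	// A pull request ref name becomes part of the registry cache tag,
	// e.g. "feature/JWT v5!" -> "cache-pr-feature-jwt-v5".
	fmt.Println("cache-pr-" + slugify("feature/JWT v5!", 63))
}
```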
.github/workflows/build_publish_docker_plugin.yml (vendored, new file, 45 lines)
@@ -0,0 +1,45 @@
---
# Github Actions release for rclone
# -*- compile-command: "yamllint -f parsable build_publish_docker_plugin.yml" -*-

name: Release Build for Docker Plugin

on:
  release:
    types: [published]

jobs:

  build_docker_volume_plugin:
    if: github.repository == 'rclone/rclone'
    needs: build
    runs-on: ubuntu-latest
    name: Build docker plugin job
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Build and publish docker plugin
        shell: bash
        run: |
          VER=${GITHUB_REF#refs/tags/}
          PLUGIN_USER=rclone
          docker login --username ${{ secrets.DOCKER_HUB_USER }} \
            --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
          for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
            export PLUGIN_USER PLUGIN_ARCH
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
          done
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}
@@ -1,89 +0,0 @@
name: Docker release build

on:
  release:
    types: [published]

jobs:
  build:
    if: github.repository == 'rclone/rclone'
    runs-on: ubuntu-latest
    name: Build image job
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Get actual patch version
        id: actual_patch_version
        run: echo ::set-output name=ACTUAL_PATCH_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g')
      - name: Get actual minor version
        id: actual_minor_version
        run: echo ::set-output name=ACTUAL_MINOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1,2)
      - name: Get actual major version
        id: actual_major_version
        run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKER_HUB_USER }}
          password: ${{ secrets.DOCKER_HUB_PASSWORD }}
      - name: Build and publish image
        uses: docker/build-push-action@v6
        with:
          file: Dockerfile
          context: .
          platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
          push: true
          tags: |
            rclone/rclone:latest
            rclone/rclone:${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }}
            rclone/rclone:${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }}
            rclone/rclone:${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}

  build_docker_volume_plugin:
    if: github.repository == 'rclone/rclone'
    needs: build
    runs-on: ubuntu-latest
    name: Build docker plugin job
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Build and publish docker plugin
        shell: bash
        run: |
          VER=${GITHUB_REF#refs/tags/}
          PLUGIN_USER=rclone
          docker login --username ${{ secrets.DOCKER_HUB_USER }} \
            --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
          for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
            export PLUGIN_USER PLUGIN_ARCH
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
          done
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}
Dockerfile (44 lines changed)
@@ -1,19 +1,47 @@
FROM golang:alpine AS builder

COPY . /go/src/github.com/rclone/rclone/
ARG CGO_ENABLED=0

WORKDIR /go/src/github.com/rclone/rclone/

RUN apk add --no-cache make bash gawk git
RUN \
  CGO_ENABLED=0 \
  make
RUN ./rclone version
RUN echo "**** Set Go Environment Variables ****" && \
  go env -w GOCACHE=/root/.cache/go-build

RUN echo "**** Install Dependencies ****" && \
  apk add --no-cache \
    make \
    bash \
    gawk \
    git

COPY go.mod .
COPY go.sum .

RUN echo "**** Download Go Dependencies ****" && \
  go mod download -x

RUN echo "**** Verify Go Dependencies ****" && \
  go mod verify

COPY . .

RUN --mount=type=cache,target=/root/.cache/go-build,sharing=locked \
  echo "**** Build Binary ****" && \
  make

RUN echo "**** Print Version Binary ****" && \
  ./rclone version

# Begin final image
FROM alpine:latest

RUN apk --no-cache add ca-certificates fuse3 tzdata && \
  echo "user_allow_other" >> /etc/fuse.conf
RUN echo "**** Install Dependencies ****" && \
  apk add --no-cache \
    ca-certificates \
    fuse3 \
    tzdata && \
  echo "Enable user_allow_other in fuse" && \
  echo "user_allow_other" >> /etc/fuse.conf

COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
File diff suppressed because it is too large
@@ -3,16 +3,149 @@
package azureblob

import (
    "context"
    "encoding/base64"
    "strings"
    "testing"

    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fstest"
    "github.com/rclone/rclone/fstest/fstests"
    "github.com/rclone/rclone/lib/random"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func (f *Fs) InternalTest(t *testing.T) {
    // Check first feature flags are set on this
    // remote
func TestBlockIDCreator(t *testing.T) {
    // Check creation and random number
    bic, err := newBlockIDCreator()
    require.NoError(t, err)
    bic2, err := newBlockIDCreator()
    require.NoError(t, err)
    assert.NotEqual(t, bic.random, bic2.random)
    assert.NotEqual(t, bic.random, [8]byte{})

    // Set random to known value for tests
    bic.random = [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
    chunkNumber := uint64(0xFEDCBA9876543210)

    // Check creation of ID
    want := base64.StdEncoding.EncodeToString([]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10, 1, 2, 3, 4, 5, 6, 7, 8})
    assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", want)
    got := bic.newBlockID(chunkNumber)
    assert.Equal(t, want, got)
    assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", got)

    // Test checkID is working
    assert.NoError(t, bic.checkID(chunkNumber, got))
    assert.ErrorContains(t, bic.checkID(chunkNumber, "$"+got), "illegal base64")
    assert.ErrorContains(t, bic.checkID(chunkNumber, "AAAA"+got), "bad block ID length")
    assert.ErrorContains(t, bic.checkID(chunkNumber+1, got), "expecting decoded")
    assert.ErrorContains(t, bic2.checkID(chunkNumber, got), "random bytes")
}

func (f *Fs) testFeatures(t *testing.T) {
    // Check first feature flags are set on this remote
    enabled := f.Features().SetTier
    assert.True(t, enabled)
    enabled = f.Features().GetTier
    assert.True(t, enabled)
}

type ReadSeekCloser struct {
    *strings.Reader
}

func (r *ReadSeekCloser) Close() error {
    return nil
}

// Stage a block at remote but don't commit it
func (f *Fs) stageBlockWithoutCommit(ctx context.Context, t *testing.T, remote string) {
    var (
        containerName, blobPath = f.split(remote)
        containerClient         = f.cntSVC(containerName)
        blobClient              = containerClient.NewBlockBlobClient(blobPath)
        data                    = "uncommitted data"
        blockID                 = "1"
        blockIDBase64           = base64.StdEncoding.EncodeToString([]byte(blockID))
    )
    r := &ReadSeekCloser{strings.NewReader(data)}
    _, err := blobClient.StageBlock(ctx, blockIDBase64, r, nil)
    require.NoError(t, err)

    // Verify the block is staged but not committed
    blockList, err := blobClient.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
    require.NoError(t, err)
    found := false
    for _, block := range blockList.UncommittedBlocks {
        if *block.Name == blockIDBase64 {
            found = true
            break
        }
    }
    require.True(t, found, "Block ID not found in uncommitted blocks")
}

// This tests uploading a blob where it has uncommitted blocks with a different ID size.
//
// https://gauravmantri.com/2013/05/18/windows-azure-blob-storage-dealing-with-the-specified-blob-or-block-content-is-invalid-error/
//
// TestIntegration/FsMkdir/FsPutFiles/Internal/WriteUncommittedBlocks
func (f *Fs) testWriteUncommittedBlocks(t *testing.T) {
    var (
        ctx    = context.Background()
        remote = "testBlob"
    )

    // Multipart copy the blob please
    oldUseCopyBlob, oldCopyCutoff := f.opt.UseCopyBlob, f.opt.CopyCutoff
    f.opt.UseCopyBlob = false
    f.opt.CopyCutoff = f.opt.ChunkSize
    defer func() {
        f.opt.UseCopyBlob, f.opt.CopyCutoff = oldUseCopyBlob, oldCopyCutoff
    }()

    // Create a blob with uncommitted blocks
    f.stageBlockWithoutCommit(ctx, t, remote)

    // Now attempt to overwrite the block with a different sized block ID to provoke this error

    // Check the object does not exist
    _, err := f.NewObject(ctx, remote)
    require.Equal(t, fs.ErrorObjectNotFound, err)

    // Upload a multipart file over the block with uncommitted chunks of a different ID size
    size := 4*int(f.opt.ChunkSize) - 1
    contents := random.String(size)
    item := fstest.NewItem(remote, contents, fstest.Time("2001-05-06T04:05:06.499Z"))
    o := fstests.PutTestContents(ctx, t, f, &item, contents, true)

    // Check size
    assert.Equal(t, int64(size), o.Size())

    // Create a new blob with uncommitted blocks
    newRemote := "testBlob2"
    f.stageBlockWithoutCommit(ctx, t, newRemote)

    // Copy over that block
    dst, err := f.Copy(ctx, o, newRemote)
    require.NoError(t, err)

    // Check basics
    assert.Equal(t, int64(size), dst.Size())
    assert.Equal(t, newRemote, dst.Remote())

    // Check contents
    gotContents := fstests.ReadObject(ctx, t, dst, -1)
    assert.Equal(t, contents, gotContents)

    // Remove the object
    require.NoError(t, dst.Remove(ctx))
}

func (f *Fs) InternalTest(t *testing.T) {
    t.Run("Features", f.testFeatures)
    t.Run("WriteUncommittedBlocks", f.testWriteUncommittedBlocks)
}
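The TestBlockIDCreator hunk above pins down the block ID layout used by the azureblob backend's multipart uploads: 8 big-endian bytes of chunk number followed by the creator's 8 random bytes, standard-base64 encoded. Below is a self-contained sketch of just that encoding, assuming the layout implied by the test; the function name is illustrative and not the backend's newBlockID implementation.

```go
package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

// makeBlockID builds a block ID as the test expects it: the chunk number in
// big-endian order, then the per-creator random bytes, base64 encoded.
func makeBlockID(chunkNumber uint64, random [8]byte) string {
	var raw [16]byte
	binary.BigEndian.PutUint64(raw[:8], chunkNumber)
	copy(raw[8:], random[:])
	return base64.StdEncoding.EncodeToString(raw[:])
}

func main() {
	random := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
	id := makeBlockID(0xFEDCBA9876543210, random)
	fmt.Println(id) // /ty6mHZUMhABAgMEBQYHCA== as asserted in the test
}
```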
@@ -15,13 +15,17 @@ import (

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    name := "TestAzureBlob"
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestAzureBlob:",
        RemoteName:  name + ":",
        NilObject:   (*Object)(nil),
        TiersToTest: []string{"Hot", "Cool", "Cold"},
        ChunkedUpload: fstests.ChunkedUploadConfig{
            MinChunkSize: defaultChunkSize,
        },
        ExtraConfig: []fstests.ExtraConfigItem{
            {Name: name, Key: "use_copy_blob", Value: "false"},
        },
    })
}
@@ -40,6 +44,7 @@ func TestIntegration2(t *testing.T) {
        },
        ExtraConfig: []fstests.ExtraConfigItem{
            {Name: name, Key: "directory_markers", Value: "true"},
            {Name: name, Key: "use_copy_blob", Value: "false"},
        },
    })
}
@@ -48,8 +53,13 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
    return f.setUploadChunkSize(cs)
}

func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
    return f.setCopyCutoff(cs)
}

var (
    _ fstests.SetUploadChunkSizer = (*Fs)(nil)
    _ fstests.SetCopyCutoffer    = (*Fs)(nil)
)

func TestValidateAccessTier(t *testing.T) {
@@ -237,6 +237,30 @@ msi_client_id, or msi_mi_res_id parameters.`,
            Help:      "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
            Advanced:  true,
            Sensitive: true,
        }, {
            Name: "disable_instance_discovery",
            Help: `Skip requesting Microsoft Entra instance metadata
This should be set true only by applications authenticating in
disconnected clouds, or private clouds such as Azure Stack.
It determines whether rclone requests Microsoft Entra instance
metadata from ` + "`https://login.microsoft.com/`" + ` before
authenticating.
Setting this to true will skip this request, making you responsible
for ensuring the configured authority is valid and trustworthy.
`,
            Default:  false,
            Advanced: true,
        }, {
            Name: "use_az",
            Help: `Use Azure CLI tool az for authentication
Set to use the [Azure CLI tool az](https://learn.microsoft.com/en-us/cli/azure/)
as the sole means of authentication.
Setting this can be useful if you wish to use the az CLI on a host with
a System Managed Identity that you do not want to use.
Don't set env_auth at the same time.
`,
            Default:  false,
            Advanced: true,
        }, {
            Name: "endpoint",
            Help: "Endpoint for the service.\n\nLeave blank normally.",
@@ -319,10 +343,12 @@ type Options struct {
    Username                 string        `config:"username"`
    Password                 string        `config:"password"`
    ServicePrincipalFile     string        `config:"service_principal_file"`
    DisableInstanceDiscovery bool          `config:"disable_instance_discovery"`
    UseMSI                   bool          `config:"use_msi"`
    MSIObjectID              string        `config:"msi_object_id"`
    MSIClientID              string        `config:"msi_client_id"`
    MSIResourceID            string        `config:"msi_mi_res_id"`
    UseAZ                    bool          `config:"use_az"`
    Endpoint                 string        `config:"endpoint"`
    ChunkSize                fs.SizeSuffix `config:"chunk_size"`
    MaxStreamSize            fs.SizeSuffix `config:"max_stream_size"`
@@ -414,7 +440,8 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
    }
    // Read credentials from the environment
    options := azidentity.DefaultAzureCredentialOptions{
        ClientOptions: policyClientOptions,
        ClientOptions:            policyClientOptions,
        DisableInstanceDiscovery: opt.DisableInstanceDiscovery,
    }
    cred, err = azidentity.NewDefaultAzureCredential(&options)
    if err != nil {
@@ -425,6 +452,13 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
        if err != nil {
            return nil, fmt.Errorf("create new shared key credential failed: %w", err)
        }
    case opt.UseAZ:
        var options = azidentity.AzureCLICredentialOptions{}
        cred, err = azidentity.NewAzureCLICredential(&options)
        fmt.Println(cred)
        if err != nil {
            return nil, fmt.Errorf("failed to create Azure CLI credentials: %w", err)
        }
    case opt.SASURL != "":
        client, err = service.NewClientWithNoCredential(opt.SASURL, &clientOpt)
        if err != nil {
@@ -899,7 +933,7 @@ func (o *Object) getMetadata(ctx context.Context) error {

// Hash returns the MD5 of an object returning a lowercase hex string
//
// May make a network request becaue the [fs.List] method does not
// May make a network request because the [fs.List] method does not
// return MD5 hashes for DirEntry
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
    if ty != hash.MD5 {
@@ -30,6 +30,7 @@ import (
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/fs/fshttp"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/fs/walk"
    "github.com/rclone/rclone/lib/bucket"
    "github.com/rclone/rclone/lib/encoder"
@@ -299,14 +300,13 @@ type Fs struct {

// Object describes a b2 object
type Object struct {
    fs       *Fs               // what this object is part of
    remote   string            // The remote path
    id       string            // b2 id of the file
    modTime  time.Time         // The modified time of the object if known
    sha1     string            // SHA-1 hash if known
    size     int64             // Size of the object
    mimeType string            // Content-Type of the object
    meta     map[string]string // The object metadata if known - may be nil - with lower case keys
    fs       *Fs       // what this object is part of
    remote   string    // The remote path
    id       string    // b2 id of the file
    modTime  time.Time // The modified time of the object if known
    sha1     string    // SHA-1 hash if known
    size     int64     // Size of the object
    mimeType string    // Content-Type of the object
}

// ------------------------------------------------------------
@@ -1318,16 +1318,22 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
                // Check current version of the file
                if deleteHidden && object.Action == "hide" {
                    fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
                    toBeDeleted <- object
                    if !operations.SkipDestructive(ctx, object.Name, "remove hide marker") {
                        toBeDeleted <- object
                    }
                } else if deleteUnfinished && object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
                    fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
                    toBeDeleted <- object
                    if !operations.SkipDestructive(ctx, object.Name, "remove pending upload") {
                        toBeDeleted <- object
                    }
                } else {
                    fs.Debugf(remote, "Not deleting current version (id %q) %q dated %v (%v ago)", object.ID, object.Action, time.Time(object.UploadTimestamp).Local(), time.Since(time.Time(object.UploadTimestamp)))
                }
            } else {
                fs.Debugf(remote, "Deleting (id %q)", object.ID)
                toBeDeleted <- object
                if !operations.SkipDestructive(ctx, object.Name, "delete") {
                    toBeDeleted <- object
                }
            }
            last = remote
            tr.Done(ctx, nil)
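The purge changes above route every queued delete through operations.SkipDestructive, so --dry-run and --interactive can veto the channel send instead of the object being deleted unconditionally. A minimal sketch of that gating pattern, assuming the rclone module is on the import path; the object name below is made up.

```go
package main

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/operations"
)

func main() {
	// Turn on dry-run in the config attached to the context, as the
	// DryRun subtests in the diff do.
	ctx, ci := fs.AddConfig(context.Background())
	ci.DryRun = true

	toBeDeleted := make(chan string, 1)
	name := "bucket/stale-upload"

	// SkipDestructive logs what would happen and returns true under
	// --dry-run, so nothing is queued for deletion.
	if !operations.SkipDestructive(ctx, name, "remove pending upload") {
		toBeDeleted <- name
	}
	fmt.Println("queued for deletion:", len(toBeDeleted)) // 0 under --dry-run
}
```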
@@ -1598,9 +1604,6 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
    if err != nil {
        return err
    }
    // For now, just set "mtime" in metadata
    o.meta = make(map[string]string, 1)
    o.meta["mtime"] = o.modTime.Format(time.RFC3339Nano)
    return nil
}

@@ -1880,13 +1883,6 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
        Info:    Info,
    }

    // Embryonic metadata support - just mtime
    o.meta = make(map[string]string, 1)
    modTime, err := parseTimeStringHelper(info.Info[timeKey])
    if err == nil {
        o.meta["mtime"] = modTime.Format(time.RFC3339Nano)
    }

    // When reading files from B2 via cloudflare using
    // --b2-download-url cloudflare strips the Content-Length
    // headers (presumably so it can inject stuff) so use the old
@@ -2293,8 +2289,10 @@ func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, op

    }

    skip := operations.SkipDestructive(ctx, name, "update lifecycle rules")

    var bucket *api.Bucket
    if newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil || newRule.DaysFromStartingToCancelingUnfinishedLargeFiles != nil {
    if !skip && (newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil || newRule.DaysFromStartingToCancelingUnfinishedLargeFiles != nil) {
        bucketID, err := f.getBucketID(ctx, bucketName)
        if err != nil {
            return nil, err
@@ -5,6 +5,7 @@ import (
    "crypto/sha1"
    "fmt"
    "path"
    "sort"
    "strings"
    "testing"
    "time"
@@ -13,6 +14,7 @@ import (
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/cache"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/object"
    "github.com/rclone/rclone/fstest"
    "github.com/rclone/rclone/fstest/fstests"
    "github.com/rclone/rclone/lib/bucket"
@@ -256,12 +258,6 @@ func (f *Fs) internalTestMetadata(t *testing.T, size string, uploadCutoff string
        assert.Equal(t, v, got, k)
    }

    // mtime
    for k, v := range metadata {
        got := o.meta[k]
        assert.Equal(t, v, got, k)
    }

    assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")

    // Modification time from the x-bz-info-src_last_modified_millis header
@@ -463,24 +459,161 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
    })

    t.Run("Cleanup", func(t *testing.T) {
        require.NoError(t, f.cleanUp(ctx, true, false, 0))
        items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
        fstest.CheckListing(t, f, items)
        // Set --b2-versions for this test
        f.opt.Versions = true
        defer func() {
            f.opt.Versions = false
        }()
        fstest.CheckListing(t, f, items)
        t.Run("DryRun", func(t *testing.T) {
            f.opt.Versions = true
            defer func() {
                f.opt.Versions = false
            }()
            // Listing should be unchanged after dry run
            before := listAllFiles(ctx, t, f, dirName)
            ctx, ci := fs.AddConfig(ctx)
            ci.DryRun = true
            require.NoError(t, f.cleanUp(ctx, true, false, 0))
            after := listAllFiles(ctx, t, f, dirName)
            assert.Equal(t, before, after)
        })

        t.Run("RealThing", func(t *testing.T) {
            f.opt.Versions = true
            defer func() {
                f.opt.Versions = false
            }()
            // Listing should reflect current state after cleanup
            require.NoError(t, f.cleanUp(ctx, true, false, 0))
            items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
            fstest.CheckListing(t, f, items)
        })
    })

    // Purge gets tested later
}

func (f *Fs) InternalTestCleanupUnfinished(t *testing.T) {
    ctx := context.Background()

    // B2CleanupHidden tests cleaning up hidden files
    t.Run("CleanupUnfinished", func(t *testing.T) {
        dirName := "unfinished"
        fileCount := 5
        expectedFiles := []string{}
        for i := 1; i < fileCount; i++ {
            fileName := fmt.Sprintf("%s/unfinished-%d", dirName, i)
            expectedFiles = append(expectedFiles, fileName)
            obj := &Object{
                fs:     f,
                remote: fileName,
            }
            objInfo := object.NewStaticObjectInfo(fileName, fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil)
            _, err := f.newLargeUpload(ctx, obj, nil, objInfo, f.opt.ChunkSize, false, nil)
            require.NoError(t, err)
        }
        checkListing(ctx, t, f, dirName, expectedFiles)

        t.Run("DryRun", func(t *testing.T) {
            // Listing should not change after dry run
            ctx, ci := fs.AddConfig(ctx)
            ci.DryRun = true
            require.NoError(t, f.cleanUp(ctx, false, true, 0))
            checkListing(ctx, t, f, dirName, expectedFiles)
        })

        t.Run("RealThing", func(t *testing.T) {
            // Listing should be empty after real cleanup
            require.NoError(t, f.cleanUp(ctx, false, true, 0))
            checkListing(ctx, t, f, dirName, []string{})
        })
    })
}

func listAllFiles(ctx context.Context, t *testing.T, f *Fs, dirName string) []string {
    bucket, directory := f.split(dirName)
    foundFiles := []string{}
    require.NoError(t, f.list(ctx, bucket, directory, "", false, true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
        if !isDirectory {
            foundFiles = append(foundFiles, object.Name)
        }
        return nil
    }))
    sort.Strings(foundFiles)
    return foundFiles
}

func checkListing(ctx context.Context, t *testing.T, f *Fs, dirName string, expectedFiles []string) {
    foundFiles := listAllFiles(ctx, t, f, dirName)
    sort.Strings(expectedFiles)
    assert.Equal(t, expectedFiles, foundFiles)
}

func (f *Fs) InternalTestLifecycleRules(t *testing.T) {
    ctx := context.Background()

    opt := map[string]string{}

    t.Run("InitState", func(t *testing.T) {
        // There should be no lifecycle rules at the outset
        lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
        lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
        require.NoError(t, err)
        assert.Equal(t, 0, len(lifecycleRules))
    })

    t.Run("DryRun", func(t *testing.T) {
        // There should still be no lifecycle rules after each dry run operation
        ctx, ci := fs.AddConfig(ctx)
        ci.DryRun = true

        opt["daysFromHidingToDeleting"] = "30"
        lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
        lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
        require.NoError(t, err)
        assert.Equal(t, 0, len(lifecycleRules))

        delete(opt, "daysFromHidingToDeleting")
        opt["daysFromUploadingToHiding"] = "40"
        lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
        lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
        require.NoError(t, err)
        assert.Equal(t, 0, len(lifecycleRules))

        opt["daysFromHidingToDeleting"] = "30"
        lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
        lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
        require.NoError(t, err)
        assert.Equal(t, 0, len(lifecycleRules))
    })

    t.Run("RealThing", func(t *testing.T) {
        opt["daysFromHidingToDeleting"] = "30"
        lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
        lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
        require.NoError(t, err)
        assert.Equal(t, 1, len(lifecycleRules))
        assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)

        delete(opt, "daysFromHidingToDeleting")
        opt["daysFromUploadingToHiding"] = "40"
        lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
        lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
        require.NoError(t, err)
        assert.Equal(t, 1, len(lifecycleRules))
        assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)

        opt["daysFromHidingToDeleting"] = "30"
        lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
        lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
        require.NoError(t, err)
        assert.Equal(t, 1, len(lifecycleRules))
        assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
        assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
    })
}

// -run TestIntegration/FsMkdir/FsPutFiles/Internal
func (f *Fs) InternalTest(t *testing.T) {
    t.Run("Metadata", f.InternalTestMetadata)
    t.Run("Versions", f.InternalTestVersions)
    t.Run("CleanupUnfinished", f.InternalTestCleanupUnfinished)
    t.Run("LifecycleRules", f.InternalTestLifecycleRules)
}

var _ fstests.InternalTester = (*Fs)(nil)
@@ -27,6 +27,7 @@ import (
    "sync/atomic"
    "time"

    "github.com/golang-jwt/jwt/v4"
    "github.com/rclone/rclone/backend/box/api"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config"
@@ -74,7 +75,7 @@ var (
)

type boxCustomClaims struct {
    jwtutil.LegacyStandardClaims
    jwt.StandardClaims
    BoxSubType string `json:"box_sub_type,omitempty"`
}
@@ -222,8 +223,10 @@ func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomC
    }

    claims = &boxCustomClaims{
        LegacyStandardClaims: jwtutil.LegacyStandardClaims{
            ID: val,
        //lint:ignore SA1019 since we need to use jwt.StandardClaims even if deprecated in jwt-go v4 until a more permanent solution is ready in time before jwt-go v5 where it is removed entirely
        //nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1019
        StandardClaims: jwt.StandardClaims{
            Id:       val,
            Issuer:   boxConfig.BoxAppSettings.ClientID,
            Subject:  boxConfig.EnterpriseID,
            Audience: tokenURL,
@@ -2480,7 +2480,7 @@ func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte)
    if len(data) > maxMetadataSizeWritten {
        return nil, false, ErrMetaTooBig
    }
    if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
    if len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
        return nil, false, errors.New("invalid json")
    }
    var metadata metaSimpleJSON
@@ -203,7 +203,6 @@ func driveScopesContainsAppFolder(scopes []string) bool {
        if scope == scopePrefix+"drive.appfolder" {
            return true
        }

    }
    return false
}
@@ -1212,6 +1211,7 @@ func fixMimeType(mimeTypeIn string) string {
    }
    return mimeTypeOut
}

func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
    out = make(map[string][]string, len(in))
    for k, v := range in {
@@ -1222,9 +1222,11 @@ func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
    }
    return out
}

func isInternalMimeType(mimeType string) bool {
    return strings.HasPrefix(mimeType, "application/vnd.google-apps.")
}

func isLinkMimeType(mimeType string) bool {
    return strings.HasPrefix(mimeType, "application/x-link-")
}
@@ -1657,7 +1659,8 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.F
// When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithExportInfo(
    ctx context.Context, remote string, info *drive.File,
    extension, exportName, exportMimeType string, isDocument bool) (o fs.Object, err error) {
    extension, exportName, exportMimeType string, isDocument bool,
) (o fs.Object, err error) {
    // Note that resolveShortcut will have been called already if
    // we are being called from a listing. However the drive.Item
    // will have been resolved so this will do nothing.
@@ -1848,6 +1851,7 @@ func linkTemplate(mt string) *template.Template {
    })
    return _linkTemplates[mt]
}

func (f *Fs) fetchFormats(ctx context.Context) {
    fetchFormatsOnce.Do(func() {
        var about *drive.About
@@ -1893,7 +1897,8 @@ func (f *Fs) importFormats(ctx context.Context) map[string][]string {
// Look through the exportExtensions and find the first format that can be
// converted. If none found then return ("", "", false)
func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string) (
    extension, mimeType string, isDocument bool) {
    extension, mimeType string, isDocument bool,
) {
    exportMimeTypes, isDocument := f.exportFormats(ctx)[itemMimeType]
    if isDocument {
        for _, _extension := range f.exportExtensions {
@@ -2689,7 +2694,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
    if shortcutID != "" {
        return f.delete(ctx, shortcutID, f.opt.UseTrash)
    }
    var trashedFiles = false
    trashedFiles := false
    if check {
        found, err := f.list(ctx, []string{directoryID}, "", false, false, f.opt.TrashedOnly, true, func(item *drive.File) bool {
            if !item.Trashed {
@@ -2926,7 +2931,6 @@ func (f *Fs) CleanUp(ctx context.Context) error {
        err := f.svc.Files.EmptyTrash().Context(ctx).Do()
        return f.shouldRetry(ctx, err)
    })

    if err != nil {
        return err
    }
@@ -3187,6 +3191,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
        }
    }()
}

func (f *Fs) changeNotifyStartPageToken(ctx context.Context) (pageToken string, err error) {
    var startPageToken *drive.StartPageToken
    err = f.pacer.Call(func() (bool, error) {
@@ -4018,14 +4023,13 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
    case "query":
        if len(arg) == 1 {
            query := arg[0]
            var results, err = f.query(ctx, query)
            results, err := f.query(ctx, query)
            if err != nil {
                return nil, fmt.Errorf("failed to execute query: %q, error: %w", query, err)
            }
            return results, nil
        } else {
            return nil, errors.New("need a query argument")
        }
        return nil, errors.New("need a query argument")
    case "rescue":
        dirID := ""
        _, delete := opt["delete"]
@@ -4085,6 +4089,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
    }
    return "", hash.ErrUnsupported
}

func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
    if t != hash.MD5 && t != hash.SHA1 && t != hash.SHA256 {
        return "", hash.ErrUnsupported
@@ -4099,7 +4104,8 @@ func (o *baseObject) Size() int64 {

// getRemoteInfoWithExport returns a drive.File and the export settings for the remote
func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
    info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) {
    info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error,
) {
    leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
    if err != nil {
        if err == fs.ErrorDirNotFound {
@@ -4312,12 +4318,13 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
    }
    return o.baseObject.open(ctx, o.url, options...)
}

func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
    // Update the size with what we are reading as it can change from
    // the HEAD in the listing to this GET. This stops rclone marking
    // the transfer as corrupted.
    var offset, end int64 = 0, -1
    var newOptions = options[:0]
    newOptions := options[:0]
    for _, o := range options {
        // Note that Range requests don't work on Google docs:
        // https://developers.google.com/drive/v3/web/manage-downloads#partial_download
@@ -4344,9 +4351,10 @@ func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in
    }
    return
}

func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
    var offset, limit int64 = 0, -1
    var data = o.content
    data := o.content
    for _, option := range options {
        switch x := option.(type) {
        case *fs.SeekOption:
@@ -4371,7 +4379,8 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
}

func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
    src fs.ObjectInfo) (info *drive.File, err error) {
    src fs.ObjectInfo,
) (info *drive.File, err error) {
    // Make the API request to upload metadata and file data.
    size := src.Size()
    if size >= 0 && size < int64(o.fs.opt.UploadCutoff) {
@@ -4449,6 +4458,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

    return nil
}

func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
    srcMimeType := fs.MimeType(ctx, src)
    importMimeType := ""
@@ -4544,6 +4554,7 @@ func (o *baseObject) Metadata(ctx context.Context) (metadata fs.Metadata, err er
func (o *documentObject) ext() string {
    return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
}

func (o *linkObject) ext() string {
    return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
}
@@ -2,6 +2,7 @@ package googlephotos

import (
    "context"
    "errors"
    "fmt"
    "io"
    "net/http"
@@ -35,7 +36,7 @@ func TestIntegration(t *testing.T) {
        *fstest.RemoteName = "TestGooglePhotos:"
    }
    f, err := fs.NewFs(ctx, *fstest.RemoteName)
    if err == fs.ErrorNotFoundInConfigFile {
    if errors.Is(err, fs.ErrorNotFoundInConfigFile) {
        t.Skipf("Couldn't create google photos backend - skipping tests: %v", err)
    }
    require.NoError(t, err)
@@ -445,7 +445,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
    }

    // build request
    // cant use normal rename as file needs to be "activated" first
    // can't use normal rename as file needs to be "activated" first

    r := api.NewUpdateFileInfo()
    r.DocumentID = doc.DocumentID
@@ -75,7 +75,7 @@ type MoveFolderParam struct {
    DestinationPath string `validate:"nonzero" json:"destinationPath"`
}

// JobIDResponse respresents response struct with JobID for folder operations
// JobIDResponse represents response struct with JobID for folder operations
type JobIDResponse struct {
    JobID string `json:"jobId"`
}
@@ -151,6 +151,19 @@ Owner is able to add custom keys. Metadata feature grabs all the keys including
            Help:     "Host of InternetArchive Frontend.\n\nLeave blank for default value.",
            Default:  "https://archive.org",
            Advanced: true,
        }, {
            Name: "item_metadata",
            Help: `Metadata to be set on the IA item, this is different from file-level metadata that can be set using --metadata-set.
Format is key=value and the 'x-archive-meta-' prefix is automatically added.`,
            Default:  []string{},
            Hide:     fs.OptionHideConfigurator,
            Advanced: true,
        }, {
            Name: "item_derive",
            Help: `Whether to trigger derive on the IA item or not. If set to false, the item will not be derived by IA upon upload.
The derive process produces a number of secondary files from an upload to make an upload more usable on the web.
Setting this to false is useful for uploading files that are already in a format that IA can display or reduce burden on IA's infrastructure.`,
            Default: true,
        }, {
            Name: "disable_checksum",
            Help: `Don't ask the server to test against MD5 checksum calculated by rclone.
@@ -201,6 +214,8 @@ type Options struct {
    Endpoint        string               `config:"endpoint"`
    FrontEndpoint   string               `config:"front_endpoint"`
    DisableChecksum bool                 `config:"disable_checksum"`
    ItemMetadata    []string             `config:"item_metadata"`
    ItemDerive      bool                 `config:"item_derive"`
    WaitArchive     fs.Duration          `config:"wait_archive"`
    Enc             encoder.MultiEncoder `config:"encoding"`
}
@@ -790,17 +805,23 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
        "x-amz-filemeta-rclone-update-track": updateTracker,

        // we add some more headers for intuitive actions
        "x-amz-auto-make-bucket":      "1",    // create an item if does not exist, do nothing if already
        "x-archive-auto-make-bucket":  "1",    // same as above in IAS3 original way
        "x-archive-keep-old-version":  "0",    // do not keep old versions (a.k.a. trashes in other clouds)
        "x-archive-meta-mediatype":    "data", // mark media type of the uploading file as "data"
        "x-archive-queue-derive":      "0",    // skip derivation process (e.g. encoding to smaller files, OCR on PDFs)
        "x-archive-cascade-delete":    "1",    // enable "cascate delete" (delete all derived files in addition to the file itself)
        "x-amz-auto-make-bucket":     "1", // create an item if does not exist, do nothing if already
        "x-archive-auto-make-bucket": "1", // same as above in IAS3 original way
        "x-archive-keep-old-version": "0", // do not keep old versions (a.k.a. trashes in other clouds)
        "x-archive-cascade-delete":   "1", // enable "cascate delete" (delete all derived files in addition to the file itself)
    }

    if size >= 0 {
        headers["Content-Length"] = fmt.Sprintf("%d", size)
        headers["x-archive-size-hint"] = fmt.Sprintf("%d", size)
    }

    // This is IA's ITEM metadata, not file metadata
    headers, err = o.appendItemMetadataHeaders(headers, o.fs.opt)
    if err != nil {
        return err
    }

    var mdata fs.Metadata
    mdata, err = fs.GetMetadataOptions(ctx, o.fs, src, options)
    if err == nil && mdata != nil {
@@ -863,6 +884,51 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return err
|
||||
}
|
||||
|
||||
func (o *Object) appendItemMetadataHeaders(headers map[string]string, options Options) (newHeaders map[string]string, err error) {
|
||||
metadataCounter := make(map[string]int)
|
||||
metadataValues := make(map[string][]string)
|
||||
|
||||
// First pass: count occurrences and collect values
|
||||
for _, v := range options.ItemMetadata {
|
||||
parts := strings.SplitN(v, "=", 2)
|
||||
if len(parts) != 2 {
|
||||
return newHeaders, errors.New("item metadata key=value should be in the form key=value")
|
||||
}
|
||||
key, value := parts[0], parts[1]
|
||||
metadataCounter[key]++
|
||||
metadataValues[key] = append(metadataValues[key], value)
|
||||
}
|
||||
|
||||
// Second pass: add headers with appropriate prefixes
|
||||
for key, count := range metadataCounter {
|
||||
if count == 1 {
|
||||
// Only one occurrence, use x-archive-meta-
|
||||
headers[fmt.Sprintf("x-archive-meta-%s", key)] = metadataValues[key][0]
|
||||
} else {
|
||||
// Multiple occurrences, use x-archive-meta01-, x-archive-meta02-, etc.
|
||||
for i, value := range metadataValues[key] {
|
||||
headers[fmt.Sprintf("x-archive-meta%02d-%s", i+1, key)] = value
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if o.fs.opt.ItemDerive {
|
||||
headers["x-archive-queue-derive"] = "1"
|
||||
} else {
|
||||
headers["x-archive-queue-derive"] = "0"
|
||||
}
|
||||
|
||||
fs.Debugf(o, "Setting IA item derive: %t", o.fs.opt.ItemDerive)
|
||||
|
||||
for k, v := range headers {
|
||||
if strings.HasPrefix(k, "x-archive-meta") {
|
||||
fs.Debugf(o, "Setting IA item metadata: %s=%s", k, v)
|
||||
}
|
||||
}
|
||||
|
||||
return headers, nil
|
||||
}
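To make the header-prefixing rules above concrete, here is a minimal test-style sketch (not part of the change; it assumes it lives in the internetarchive backend package with the testify require/assert packages available, and that the Object/Fs/Options field names match the code shown above). A key given once gets the plain x-archive-meta- prefix, a repeated key gets numbered x-archive-meta01-/x-archive-meta02- prefixes, and item_derive controls x-archive-queue-derive:

func TestAppendItemMetadataHeadersSketch(t *testing.T) {
	// Hypothetical options: one single-valued key and one repeated key.
	o := &Object{fs: &Fs{opt: Options{
		ItemMetadata: []string{"collection=test", "subject=a", "subject=b"},
		ItemDerive:   true,
	}}}
	headers, err := o.appendItemMetadataHeaders(map[string]string{}, o.fs.opt)
	require.NoError(t, err)
	assert.Equal(t, "test", headers["x-archive-meta-collection"]) // single occurrence, plain prefix
	assert.Equal(t, "a", headers["x-archive-meta01-subject"])     // repeated key, numbered prefix
	assert.Equal(t, "b", headers["x-archive-meta02-subject"])
	assert.Equal(t, "1", headers["x-archive-queue-derive"]) // item_derive=true requests derivation
}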
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove(ctx context.Context) (err error) {
|
||||
bucket, bucketPath := o.split()
|
||||
|
||||
@@ -131,7 +131,7 @@ func init() {
|
||||
Help: "Microsoft Cloud for US Government",
|
||||
}, {
|
||||
Value: regionDE,
|
||||
Help: "Microsoft Cloud Germany",
|
||||
Help: "Microsoft Cloud Germany (deprecated - try " + regionGlobal + " region first).",
|
||||
}, {
|
||||
Value: regionCN,
|
||||
Help: "Azure and Office 365 operated by Vnet Group in China",
|
||||
|
||||
@@ -92,6 +92,21 @@ Note that these chunks are buffered in memory so increasing them will
|
||||
increase memory use.`,
|
||||
Default: 10 * fs.Mebi,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "access",
|
||||
Help: "Files and folders will be uploaded with this access permission (default private)",
|
||||
Default: "private",
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "private",
|
||||
Help: "The file or folder access can be granted in a way that will allow select users to view, read or write what is absolutely essential for them.",
|
||||
}, {
|
||||
Value: "public",
|
||||
Help: "The file or folder can be downloaded by anyone from a web browser. The link can be shared in any way,",
|
||||
}, {
|
||||
Value: "hidden",
|
||||
Help: "The file or folder can be accessed has the same restrictions as Public if the user knows the URL of the file or folder link in order to access the contents",
|
||||
}},
|
||||
}},
|
||||
})
|
||||
}
|
||||
@@ -102,6 +117,7 @@ type Options struct {
|
||||
Password string `config:"password"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
Access string `config:"access"`
|
||||
}
|
||||
|
||||
// Fs represents a remote server
|
||||
@@ -735,6 +751,23 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
|
||||
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
|
||||
|
||||
// getAccessLevel is a helper function to determine access level integer
|
||||
func getAccessLevel(access string) int64 {
|
||||
var accessLevel int64
|
||||
switch access {
|
||||
case "private":
|
||||
accessLevel = 0
|
||||
case "public":
|
||||
accessLevel = 1
|
||||
case "hidden":
|
||||
accessLevel = 2
|
||||
default:
|
||||
accessLevel = 0
|
||||
fs.Errorf(nil, "Invalid access: %s, defaulting to private", access)
|
||||
}
|
||||
return accessLevel
|
||||
}
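A quick illustration of the mapping (a sketch only, assuming the testify assert package is available to this package's tests):

func TestGetAccessLevelSketch(t *testing.T) {
	assert.Equal(t, int64(0), getAccessLevel("private"))
	assert.Equal(t, int64(1), getAccessLevel("public"))
	assert.Equal(t, int64(2), getAccessLevel("hidden"))
	// Unknown values fall back to private (0) and log an error.
	assert.Equal(t, int64(0), getAccessLevel("unknown"))
}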
|
||||
|
||||
// DirCacher methods
|
||||
|
||||
// CreateDir makes a directory with pathID as parent and name leaf
|
||||
@@ -747,7 +780,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
|
||||
SessionID: f.session.SessionID,
|
||||
FolderName: f.opt.Enc.FromStandardName(leaf),
|
||||
FolderSubParent: pathID,
|
||||
FolderIsPublic: 0,
|
||||
FolderIsPublic: getAccessLevel(f.opt.Access),
|
||||
FolderPublicUpl: 0,
|
||||
FolderPublicDisplay: 0,
|
||||
FolderPublicDnl: 0,
|
||||
@@ -1080,7 +1113,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
|
||||
// Set permissions
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
update := permissions{SessionID: o.fs.session.SessionID, FileID: o.id, FileIsPublic: 0}
|
||||
update := permissions{SessionID: o.fs.session.SessionID, FileID: o.id, FileIsPublic: getAccessLevel(o.fs.opt.Access)}
|
||||
// fs.Debugf(nil, "Permissions : %#v", update)
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
|
||||
@@ -424,7 +424,7 @@ func (f *Fs) newSingleConnClient(ctx context.Context) (*rest.Client, error) {
|
||||
})
|
||||
// Set our own http client in the context
|
||||
ctx = oauthutil.Context(ctx, baseClient)
|
||||
// create a new oauth client, re-use the token source
|
||||
// create a new oauth client, reuse the token source
|
||||
oAuthClient := oauth2.NewClient(ctx, f.ts)
|
||||
return rest.NewClient(oAuthClient).SetRoot("https://" + f.opt.Hostname), nil
|
||||
}
|
||||
|
||||
59
backend/s3/ibm_signer.go
Normal file
@@ -0,0 +1,59 @@
|
||||
package s3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/IBM/go-sdk-core/v5/core"
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
v4signer "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
|
||||
)
|
||||
|
||||
// Authenticator defines an interface for obtaining an IAM token.
|
||||
type Authenticator interface {
|
||||
GetToken() (string, error)
|
||||
}
|
||||
|
||||
// IbmIamSigner is a structure for signing requests using IBM IAM.
|
||||
// Requires APIKey and Resource InstanceID
|
||||
type IbmIamSigner struct {
|
||||
APIKey string
|
||||
InstanceID string
|
||||
Auth Authenticator
|
||||
}
|
||||
|
||||
// SignHTTP signs requests using IBM IAM token.
|
||||
func (signer *IbmIamSigner) SignHTTP(ctx context.Context, credentials aws.Credentials, req *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4signer.SignerOptions)) error {
|
||||
var authenticator Authenticator
|
||||
if signer.Auth != nil {
|
||||
authenticator = signer.Auth
|
||||
} else {
|
||||
authenticator = &core.IamAuthenticator{ApiKey: signer.APIKey}
|
||||
}
|
||||
token, err := authenticator.GetToken()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Set("Authorization", "Bearer "+token)
|
||||
req.Header.Set("ibm-service-instance-id", signer.InstanceID)
|
||||
return nil
|
||||
}
|
||||
|
||||
// NoOpCredentialsProvider is needed since the S3 SDK requires having credentials, even though authentication is happening via IBM IAM.
|
||||
type NoOpCredentialsProvider struct{}
|
||||
|
||||
// Retrieve returns mock credentials for the NoOpCredentialsProvider.
|
||||
func (n *NoOpCredentialsProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {
|
||||
return aws.Credentials{
|
||||
AccessKeyID: "NoOpAccessKey",
|
||||
SecretAccessKey: "NoOpSecretKey",
|
||||
SessionToken: "",
|
||||
Source: "NoOpCredentialsProvider",
|
||||
}, nil
|
||||
}
|
||||
|
||||
// IsExpired always returns false
|
||||
func (n *NoOpCredentialsProvider) IsExpired() bool {
|
||||
return false
|
||||
}
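A minimal sketch of how the signer and the no-op credentials provider are meant to be combined when constructing an S3 client for IBM COS (illustrative only; the real wiring appears further down in s3.go, and the endpoint, API key and instance ID below are placeholders):

cfg := aws.Config{
	Region:      "us-south",
	Credentials: &NoOpCredentialsProvider{}, // satisfies the SDK; real auth is the IAM bearer token
}
client := s3.NewFromConfig(cfg, func(o *s3.Options) {
	o.BaseEndpoint = aws.String("https://s3.us-south.cloud-object-storage.appdomain.cloud")
	o.HTTPSignerV4 = &IbmIamSigner{APIKey: "my-api-key", InstanceID: "my-instance-id"}
})
_ = client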
|
||||
47
backend/s3/ibm_signer_test.go
Normal file
@@ -0,0 +1,47 @@
|
||||
package s3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type MockAuthenticator struct {
|
||||
Token string
|
||||
Error error
|
||||
}
|
||||
|
||||
func (m *MockAuthenticator) GetToken() (string, error) {
|
||||
return m.Token, m.Error
|
||||
}
|
||||
|
||||
func TestSignHTTP(t *testing.T) {
|
||||
apiKey := "mock-api-key"
|
||||
instanceID := "mock-instance-id"
|
||||
token := "mock-iam-token"
|
||||
mockAuth := &MockAuthenticator{
|
||||
Token: token,
|
||||
Error: nil,
|
||||
}
|
||||
signer := &IbmIamSigner{
|
||||
APIKey: apiKey,
|
||||
InstanceID: instanceID,
|
||||
Auth: mockAuth,
|
||||
}
|
||||
req, err := http.NewRequest("GET", "https://example.com", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create HTTP request: %v", err)
|
||||
}
|
||||
credentials := aws.Credentials{
|
||||
AccessKeyID: "mock-access-key",
|
||||
SecretAccessKey: "mock-secret-key",
|
||||
}
|
||||
err = signer.SignHTTP(context.TODO(), credentials, req, "payload-hash", "service", "region", time.Now())
|
||||
assert.NoError(t, err, "Expected no error")
|
||||
assert.Equal(t, "Bearer "+token, req.Header.Get("Authorization"), "Authorization header should be set correctly")
|
||||
assert.Equal(t, instanceID, req.Header.Get("ibm-service-instance-id"), "ibm-service-instance-id header should be set correctly")
|
||||
}
|
||||
185
backend/s3/s3.go
@@ -36,8 +36,8 @@ import (
|
||||
"github.com/aws/smithy-go/logging"
|
||||
"github.com/aws/smithy-go/middleware"
|
||||
smithyhttp "github.com/aws/smithy-go/transport/http"
|
||||
|
||||
"github.com/ncw/swift/v2"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/chunksize"
|
||||
@@ -1343,7 +1343,7 @@ func init() {
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
|
||||
Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,MagaluCloud,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
|
||||
Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,Magalu,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "objects-us-east-1.dream.io",
|
||||
Help: "Dream Objects endpoint",
|
||||
@@ -1356,6 +1356,10 @@ func init() {
|
||||
Value: "sfo3.digitaloceanspaces.com",
|
||||
Help: "DigitalOcean Spaces San Francisco 3",
|
||||
Provider: "DigitalOcean",
|
||||
}, {
|
||||
Value: "sfo2.digitaloceanspaces.com",
|
||||
Help: "DigitalOcean Spaces San Francisco 2",
|
||||
Provider: "DigitalOcean",
|
||||
}, {
|
||||
Value: "fra1.digitaloceanspaces.com",
|
||||
Help: "DigitalOcean Spaces Frankfurt 1",
|
||||
@@ -1372,6 +1376,18 @@ func init() {
|
||||
Value: "sgp1.digitaloceanspaces.com",
|
||||
Help: "DigitalOcean Spaces Singapore 1",
|
||||
Provider: "DigitalOcean",
|
||||
}, {
|
||||
Value: "lon1.digitaloceanspaces.com",
|
||||
Help: "DigitalOcean Spaces London 1",
|
||||
Provider: "DigitalOcean",
|
||||
}, {
|
||||
Value: "tor1.digitaloceanspaces.com",
|
||||
Help: "DigitalOcean Spaces Toronto 1",
|
||||
Provider: "DigitalOcean",
|
||||
}, {
|
||||
Value: "blr1.digitaloceanspaces.com",
|
||||
Help: "DigitalOcean Spaces Bangalore 1",
|
||||
Provider: "DigitalOcean",
|
||||
}, {
|
||||
Value: "localhost:8333",
|
||||
Help: "SeaweedFS S3 localhost",
|
||||
@@ -1476,14 +1492,6 @@ func init() {
|
||||
Value: "s3.ir-tbz-sh1.arvanstorage.ir",
|
||||
Help: "ArvanCloud Tabriz Iran (Shahriar) endpoint",
|
||||
Provider: "ArvanCloud",
|
||||
}, {
|
||||
Value: "br-se1.magaluobjects.com",
|
||||
Help: "Magalu BR Southeast 1 endpoint",
|
||||
Provider: "Magalu",
|
||||
}, {
|
||||
Value: "br-ne1.magaluobjects.com",
|
||||
Help: "Magalu BR Northeast 1 endpoint",
|
||||
Provider: "Magalu",
|
||||
}},
|
||||
}, {
|
||||
Name: "location_constraint",
|
||||
@@ -2122,13 +2130,16 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
|
||||
Help: "Standard storage class",
|
||||
}},
|
||||
}, {
|
||||
// Mapping from here: #todo
|
||||
// Mapping from here: https://docs.magalu.cloud/docs/storage/object-storage/Classes-de-Armazenamento/standard
|
||||
Name: "storage_class",
|
||||
Help: "The storage class to use when storing new objects in Magalu.",
|
||||
Provider: "Magalu",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "STANDARD",
|
||||
Help: "Standard storage class",
|
||||
}, {
|
||||
Value: "GLACIER_IR",
|
||||
Help: "Glacier Instant Retrieval storage class",
|
||||
}},
|
||||
}, {
|
||||
// Mapping from here: https://intl.cloud.tencent.com/document/product/436/30925
|
||||
@@ -2669,6 +2680,34 @@ knows about - please make a bug report if not.
|
||||
You can change this if you want to disable the use of multipart uploads.
|
||||
This shouldn't be necessary in normal operation.
|
||||
|
||||
This should be automatically set correctly for all providers rclone
|
||||
knows about - please make a bug report if not.
|
||||
`,
|
||||
Default: fs.Tristate{},
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "use_x_id",
|
||||
Help: `Set if rclone should add x-id URL parameters.
|
||||
|
||||
You can change this if you want to disable the AWS SDK from
|
||||
adding x-id URL parameters.
|
||||
|
||||
This shouldn't be necessary in normal operation.
|
||||
|
||||
This should be automatically set correctly for all providers rclone
|
||||
knows about - please make a bug report if not.
|
||||
`,
|
||||
Default: fs.Tristate{},
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "sign_accept_encoding",
|
||||
Help: `Set if rclone should include Accept-Encoding as part of the signature.
|
||||
|
||||
You can change this if you want to stop rclone including
|
||||
Accept-Encoding as part of the signature.
|
||||
|
||||
This shouldn't be necessary in normal operation.
|
||||
|
||||
This should be automatically set correctly for all providers rclone
|
||||
knows about - please make a bug report if not.
|
||||
`,
|
||||
@@ -2725,6 +2764,16 @@ use |-vv| to see the debug level logs.
|
||||
Default: sdkLogMode(0),
|
||||
Advanced: true,
|
||||
},
|
||||
{
|
||||
Name: "ibm_api_key",
|
||||
Help: "IBM API Key to be used to obtain IAM token",
|
||||
Provider: "IBMCOS",
|
||||
},
|
||||
{
|
||||
Name: "ibm_resource_instance_id",
|
||||
Help: "IBM service instance id",
|
||||
Provider: "IBMCOS",
|
||||
},
|
||||
}})
|
||||
}
|
||||
|
||||
@@ -2878,6 +2927,10 @@ type Options struct {
|
||||
UseUnsignedPayload fs.Tristate `config:"use_unsigned_payload"`
|
||||
SDKLogMode sdkLogMode `config:"sdk_log_mode"`
|
||||
DirectoryBucket bool `config:"directory_bucket"`
|
||||
IBMAPIKey string `config:"ibm_api_key"`
|
||||
IBMInstanceID string `config:"ibm_resource_instance_id"`
|
||||
UseXID fs.Tristate `config:"use_x_id"`
|
||||
SignAcceptEncoding fs.Tristate `config:"sign_accept_encoding"`
|
||||
}
|
||||
|
||||
// Fs represents a remote s3 server
|
||||
@@ -3062,59 +3115,67 @@ func getClient(ctx context.Context, opt *Options) *http.Client {
|
||||
}
|
||||
}
|
||||
|
||||
// Fixup the request if needed.
|
||||
//
|
||||
// Google Cloud Storage alters the Accept-Encoding header, which
|
||||
// breaks the v2 request signature
|
||||
// breaks the v2 request signature. This is set with opt.SignAcceptEncoding.
|
||||
//
|
||||
// It also doesn't like the x-id URL parameter SDKv2 puts in so we
|
||||
// remove that too.
|
||||
// remove that too. This is set with opt.UseXID.Value.
|
||||
//
|
||||
// See https://github.com/aws/aws-sdk-go-v2/issues/1816.
|
||||
// Adapted from: https://github.com/aws/aws-sdk-go-v2/issues/1816#issuecomment-1927281540
|
||||
func fixupGCS(o *s3.Options) {
|
||||
func fixupRequest(o *s3.Options, opt *Options) {
|
||||
type ignoredHeadersKey struct{}
|
||||
headers := []string{"Accept-Encoding"}
|
||||
|
||||
fixup := middleware.FinalizeMiddlewareFunc(
|
||||
"FixupGCS",
|
||||
"FixupRequest",
|
||||
func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (out middleware.FinalizeOutput, metadata middleware.Metadata, err error) {
|
||||
req, ok := in.Request.(*smithyhttp.Request)
|
||||
if !ok {
|
||||
return out, metadata, fmt.Errorf("fixupGCS: unexpected request middleware type %T", in.Request)
|
||||
return out, metadata, fmt.Errorf("fixupRequest: unexpected request middleware type %T", in.Request)
|
||||
}
|
||||
|
||||
// Delete headers from being signed - will restore later
|
||||
ignored := make(map[string]string, len(headers))
|
||||
for _, h := range headers {
|
||||
ignored[h] = req.Header.Get(h)
|
||||
req.Header.Del(h)
|
||||
if !opt.SignAcceptEncoding.Value {
|
||||
// Delete headers from being signed - will restore later
|
||||
ignored := make(map[string]string, len(headers))
|
||||
for _, h := range headers {
|
||||
ignored[h] = req.Header.Get(h)
|
||||
req.Header.Del(h)
|
||||
}
|
||||
|
||||
// Store ignored on context
|
||||
ctx = middleware.WithStackValue(ctx, ignoredHeadersKey{}, ignored)
|
||||
}
|
||||
|
||||
// Remove x-id because Google doesn't like them
|
||||
if query := req.URL.Query(); query.Has("x-id") {
|
||||
query.Del("x-id")
|
||||
req.URL.RawQuery = query.Encode()
|
||||
if !opt.UseXID.Value {
|
||||
// Remove x-id
|
||||
if query := req.URL.Query(); query.Has("x-id") {
|
||||
query.Del("x-id")
|
||||
req.URL.RawQuery = query.Encode()
|
||||
}
|
||||
}
|
||||
|
||||
// Store ignored on context
|
||||
ctx = middleware.WithStackValue(ctx, ignoredHeadersKey{}, ignored)
|
||||
|
||||
return next.HandleFinalize(ctx, in)
|
||||
},
|
||||
)
|
||||
|
||||
// Restore headers if necessary
|
||||
restore := middleware.FinalizeMiddlewareFunc(
|
||||
"FixupGCSRestoreHeaders",
|
||||
"FixupRequestRestoreHeaders",
|
||||
func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (out middleware.FinalizeOutput, metadata middleware.Metadata, err error) {
|
||||
req, ok := in.Request.(*smithyhttp.Request)
|
||||
if !ok {
|
||||
return out, metadata, fmt.Errorf("fixupGCS: unexpected request middleware type %T", in.Request)
|
||||
return out, metadata, fmt.Errorf("fixupRequest: unexpected request middleware type %T", in.Request)
|
||||
}
|
||||
|
||||
// Restore ignored from ctx
|
||||
ignored, _ := middleware.GetStackValue(ctx, ignoredHeadersKey{}).(map[string]string)
|
||||
for k, v := range ignored {
|
||||
req.Header.Set(k, v)
|
||||
if !opt.SignAcceptEncoding.Value {
|
||||
// Restore ignored from ctx
|
||||
ignored, _ := middleware.GetStackValue(ctx, ignoredHeadersKey{}).(map[string]string)
|
||||
for k, v := range ignored {
|
||||
req.Header.Set(k, v)
|
||||
}
|
||||
}
|
||||
|
||||
return next.HandleFinalize(ctx, in)
|
||||
@@ -3160,6 +3221,7 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Cli
|
||||
|
||||
// Try to fill in the config from the environment if env_auth=true
|
||||
if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" {
|
||||
|
||||
configOpts := []func(*awsconfig.LoadOptions) error{}
|
||||
// Set the name of the profile if supplied
|
||||
if opt.Profile != "" {
|
||||
@@ -3173,8 +3235,12 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Cli
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't load configuration with env_auth=true: %w", err)
|
||||
}
|
||||
|
||||
} else {
|
||||
switch {
|
||||
case opt.Provider == "IBMCOS" && opt.V2Auth:
|
||||
awsConfig.Credentials = &NoOpCredentialsProvider{}
|
||||
fs.Debugf(nil, "Using IBM IAM")
|
||||
case opt.AccessKeyID == "" && opt.SecretAccessKey == "":
|
||||
// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
|
||||
awsConfig.Credentials = aws.AnonymousCredentials{}
|
||||
@@ -3228,14 +3294,21 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Cli
|
||||
|
||||
if opt.V2Auth || opt.Region == "other-v2-signature" {
|
||||
fs.Debugf(nil, "Using v2 auth")
|
||||
options = append(options, func(s3Opt *s3.Options) {
|
||||
s3Opt.HTTPSignerV4 = &v2Signer{opt: opt}
|
||||
})
|
||||
if opt.Provider == "IBMCOS" && opt.IBMAPIKey != "" && opt.IBMInstanceID != "" {
|
||||
options = append(options, func(s3Opt *s3.Options) {
|
||||
s3Opt.HTTPSignerV4 = &IbmIamSigner{APIKey: opt.IBMAPIKey, InstanceID: opt.IBMInstanceID}
|
||||
})
|
||||
} else {
|
||||
options = append(options, func(s3Opt *s3.Options) {
|
||||
s3Opt.HTTPSignerV4 = &v2Signer{opt: opt}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if opt.Provider == "GCS" {
|
||||
// Fixup the request if needed
|
||||
if !opt.UseXID.Value || !opt.SignAcceptEncoding.Value {
|
||||
options = append(options, func(o *s3.Options) {
|
||||
fixupGCS(o)
|
||||
fixupRequest(o, opt)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -3344,12 +3417,14 @@ func setQuirks(opt *Options) {
|
||||
listObjectsV2 = true // Always use ListObjectsV2 instead of ListObjects
|
||||
virtualHostStyle = true // Use bucket.provider.com instead of putting the bucket in the URL
|
||||
urlEncodeListings = true // URL encode the listings to help with control characters
|
||||
useMultipartEtag = true // Set if Etags for multpart uploads are compatible with AWS
|
||||
useMultipartEtag = true // Set if Etags for multipart uploads are compatible with AWS
|
||||
useAcceptEncodingGzip = true // Set Accept-Encoding: gzip
|
||||
mightGzip = true // assume all providers might use content encoding gzip until proven otherwise
|
||||
useAlreadyExists = true // Set if provider returns AlreadyOwnedByYou or no error if you try to remake your own bucket
|
||||
useMultipartUploads = true // Set if provider supports multipart uploads
|
||||
useUnsignedPayload = true // Do we need to use unsigned payloads to avoid seeking in PutObject
|
||||
useXID = true // Add x-id URL parameter into requests
|
||||
signAcceptEncoding = true // If we should include AcceptEncoding in the signature
|
||||
)
|
||||
switch opt.Provider {
|
||||
case "AWS":
|
||||
@@ -3494,11 +3569,14 @@ func setQuirks(opt *Options) {
|
||||
// Google break request Signature by mutating accept-encoding HTTP header
|
||||
// https://github.com/rclone/rclone/issues/6670
|
||||
useAcceptEncodingGzip = false
|
||||
signAcceptEncoding = false
|
||||
useAlreadyExists = true // returns BucketNameUnavailable instead of BucketAlreadyExists but good enough!
|
||||
// GCS S3 doesn't support multi-part server side copy:
|
||||
// See: https://issuetracker.google.com/issues/323465186
|
||||
// So make cutoff very large which it does seem to support
|
||||
opt.CopyCutoff = math.MaxInt64
|
||||
// GCS doesn't like the x-id URL parameter the SDKv2 inserts
|
||||
useXID = false
|
||||
default: //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid defaultCaseOrder: consider to make `default` case as first or as last case
|
||||
fs.Logf("s3", "s3 provider %q not known - please set correctly", opt.Provider)
|
||||
fallthrough
|
||||
@@ -3568,6 +3646,18 @@ func setQuirks(opt *Options) {
|
||||
opt.UseUnsignedPayload.Valid = true
|
||||
opt.UseUnsignedPayload.Value = useUnsignedPayload
|
||||
}
|
||||
|
||||
// Set the correct use UseXID if not manually set
|
||||
if !opt.UseXID.Valid {
|
||||
opt.UseXID.Valid = true
|
||||
opt.UseXID.Value = useXID
|
||||
}
|
||||
|
||||
// Set the correct SignAcceptEncoding if not manually set
|
||||
if !opt.SignAcceptEncoding.Valid {
|
||||
opt.SignAcceptEncoding.Valid = true
|
||||
opt.SignAcceptEncoding.Value = signAcceptEncoding
|
||||
}
|
||||
}
|
||||
|
||||
// setRoot changes the root of the Fs
|
||||
@@ -3682,6 +3772,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if opt.Provider == "IDrive" {
|
||||
f.features.SetTier = false
|
||||
}
|
||||
if opt.Provider == "AWS" {
|
||||
f.features.DoubleSlash = true
|
||||
}
|
||||
if opt.DirectoryMarkers {
|
||||
f.features.CanHaveEmptyDirectories = true
|
||||
}
|
||||
@@ -4153,7 +4246,7 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
|
||||
opt.prefix += "/"
|
||||
}
|
||||
if !opt.findFile {
|
||||
if opt.directory != "" {
|
||||
if opt.directory != "" && (opt.prefix == "" && !bucket.IsAllSlashes(opt.directory) || opt.prefix != "" && !strings.HasSuffix(opt.directory, "/")) {
|
||||
opt.directory += "/"
|
||||
}
|
||||
}
|
||||
@@ -4250,14 +4343,18 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
|
||||
}
|
||||
remote = f.opt.Enc.ToStandardPath(remote)
|
||||
if !strings.HasPrefix(remote, opt.prefix) {
|
||||
fs.Logf(f, "Odd name received %q", remote)
|
||||
fs.Logf(f, "Odd directory name received %q", remote)
|
||||
continue
|
||||
}
|
||||
remote = remote[len(opt.prefix):]
|
||||
// Trim one slash off the remote name
|
||||
remote, _ = strings.CutSuffix(remote, "/")
|
||||
if remote == "" || bucket.IsAllSlashes(remote) {
|
||||
remote += "/"
|
||||
}
|
||||
if opt.addBucket {
|
||||
remote = bucket.Join(opt.bucket, remote)
|
||||
}
|
||||
remote = strings.TrimSuffix(remote, "/")
|
||||
err = fn(remote, &types.Object{Key: &remote}, nil, true)
|
||||
if err != nil {
|
||||
if err == errEndList {
|
||||
@@ -6057,7 +6154,7 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
|
||||
if mOut == nil {
|
||||
err = fserrors.RetryErrorf("internal error: no info from multipart upload")
|
||||
} else if mOut.UploadId == nil {
|
||||
err = fserrors.RetryErrorf("internal error: no UploadId in multpart upload: %#v", *mOut)
|
||||
err = fserrors.RetryErrorf("internal error: no UploadId in multipart upload: %#v", *mOut)
|
||||
}
|
||||
}
|
||||
return f.shouldRetry(ctx, err)
|
||||
|
||||
@@ -2,8 +2,10 @@ package smb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
smb2 "github.com/cloudsoda/go-smb2"
|
||||
@@ -11,14 +13,17 @@ import (
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// dial starts a client connection to the given SMB server. It is a
|
||||
// convenience function that connects to the given network address,
|
||||
// initiates the SMB handshake, and then sets up a Client.
|
||||
//
|
||||
// The context is only used for establishing the connection, not after.
|
||||
func (f *Fs) dial(ctx context.Context, network, addr string) (*conn, error) {
|
||||
dialer := fshttp.NewDialer(ctx)
|
||||
tconn, err := dialer.Dial(network, addr)
|
||||
tconn, err := dialer.DialContext(ctx, network, addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -89,15 +94,7 @@ func (c *conn) close() (err error) {
|
||||
|
||||
// True if it's closed
|
||||
func (c *conn) closed() bool {
|
||||
var nopErr error
|
||||
if c.smbShare != nil {
|
||||
// stat the current directory
|
||||
_, nopErr = c.smbShare.Stat(".")
|
||||
} else {
|
||||
// list the shares
|
||||
_, nopErr = c.smbSession.ListSharenames()
|
||||
}
|
||||
return nopErr != nil
|
||||
return c.smbSession.Echo() != nil
|
||||
}
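The liveness probe now uses an SMB ECHO request instead of a Stat or share-listing round trip, which is cheaper and works whether or not a share is mounted. A minimal sketch of the pattern (assuming a connected *smb2.Session from github.com/cloudsoda/go-smb2, as used above):

// isAlive reports whether the SMB session still responds to an ECHO request.
func isAlive(sess *smb2.Session) bool {
	return sess.Echo() == nil
}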
|
||||
|
||||
// Show that we are using a SMB session
|
||||
@@ -118,23 +115,20 @@ func (f *Fs) getSessions() int32 {
|
||||
}
|
||||
|
||||
// Open a new connection to the SMB server.
|
||||
//
|
||||
// The context is only used for establishing the connection, not after.
|
||||
func (f *Fs) newConnection(ctx context.Context, share string) (c *conn, err error) {
|
||||
// As we are pooling these connections we need to decouple
|
||||
// them from the current context
|
||||
bgCtx := context.Background()
|
||||
|
||||
c, err = f.dial(bgCtx, "tcp", f.opt.Host+":"+f.opt.Port)
|
||||
c, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't connect SMB: %w", err)
|
||||
}
|
||||
if share != "" {
|
||||
// mount the specified share as well if user requested
|
||||
c.smbShare, err = c.smbSession.Mount(share)
|
||||
err = c.mountShare(share)
|
||||
if err != nil {
|
||||
_ = c.smbSession.Logoff()
|
||||
return nil, fmt.Errorf("couldn't initialize SMB: %w", err)
|
||||
}
|
||||
c.smbShare = c.smbShare.WithContext(bgCtx)
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
@@ -192,23 +186,30 @@ func (f *Fs) getConnection(ctx context.Context, share string) (c *conn, err erro
|
||||
// Return a SMB connection to the pool
|
||||
//
|
||||
// It nils the pointed to connection out so it can't be reused
|
||||
func (f *Fs) putConnection(pc **conn) {
|
||||
c := *pc
|
||||
*pc = nil
|
||||
|
||||
var nopErr error
|
||||
if c.smbShare != nil {
|
||||
// stat the current directory
|
||||
_, nopErr = c.smbShare.Stat(".")
|
||||
} else {
|
||||
// list the shares
|
||||
_, nopErr = c.smbSession.ListSharenames()
|
||||
}
|
||||
if nopErr != nil {
|
||||
fs.Debugf(f, "Connection failed, closing: %v", nopErr)
|
||||
_ = c.close()
|
||||
//
|
||||
// if err is not nil then it checks the connection is alive using an
|
||||
// ECHO request
|
||||
func (f *Fs) putConnection(pc **conn, err error) {
|
||||
if pc == nil {
|
||||
return
|
||||
}
|
||||
c := *pc
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
*pc = nil
|
||||
if err != nil {
|
||||
// If not a regular SMB error then check the connection
|
||||
if !(errors.Is(err, os.ErrNotExist) || errors.Is(err, os.ErrExist) || errors.Is(err, os.ErrPermission)) {
|
||||
echoErr := c.smbSession.Echo()
|
||||
if echoErr != nil {
|
||||
fs.Debugf(f, "Connection failed, closing: %v", echoErr)
|
||||
_ = c.close()
|
||||
return
|
||||
}
|
||||
fs.Debugf(f, "Connection OK after error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
f.poolMu.Lock()
|
||||
f.pool = append(f.pool, c)
|
||||
@@ -235,15 +236,18 @@ func (f *Fs) drainPool(ctx context.Context) (err error) {
|
||||
if len(f.pool) != 0 {
|
||||
fs.Debugf(f, "Closing %d unused connections", len(f.pool))
|
||||
}
|
||||
|
||||
g, _ := errgroup.WithContext(ctx)
|
||||
for i, c := range f.pool {
|
||||
if !c.closed() {
|
||||
cErr := c.close()
|
||||
if cErr != nil {
|
||||
err = cErr
|
||||
g.Go(func() (err error) {
|
||||
if !c.closed() {
|
||||
err = c.close()
|
||||
}
|
||||
}
|
||||
f.pool[i] = nil
|
||||
f.pool[i] = nil
|
||||
return err
|
||||
})
|
||||
}
|
||||
err = g.Wait()
|
||||
f.pool = nil
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -25,7 +25,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
minSleep = 100 * time.Millisecond
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
)
|
||||
@@ -207,7 +207,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
return nil, err
|
||||
}
|
||||
stat, err := cn.smbShare.Stat(f.toSambaPath(dir))
|
||||
f.putConnection(&cn)
|
||||
f.putConnection(&cn, err)
|
||||
if err != nil {
|
||||
// ignore stat error here
|
||||
return f, nil
|
||||
@@ -268,7 +268,7 @@ func (f *Fs) findObjectSeparate(ctx context.Context, share, path string) (fs.Obj
|
||||
return nil, err
|
||||
}
|
||||
stat, err := cn.smbShare.Stat(f.toSambaPath(path))
|
||||
f.putConnection(&cn)
|
||||
f.putConnection(&cn, err)
|
||||
if err != nil {
|
||||
return nil, translateError(err, false)
|
||||
}
|
||||
@@ -290,7 +290,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
|
||||
return err
|
||||
}
|
||||
err = cn.smbShare.MkdirAll(f.toSambaPath(path), 0o755)
|
||||
f.putConnection(&cn)
|
||||
f.putConnection(&cn, err)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -305,7 +305,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
return err
|
||||
}
|
||||
err = cn.smbShare.Remove(f.toSambaPath(path))
|
||||
f.putConnection(&cn)
|
||||
f.putConnection(&cn, err)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -375,7 +375,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (_ fs.Objec
|
||||
return nil, err
|
||||
}
|
||||
err = cn.smbShare.Rename(f.toSambaPath(srcPath), f.toSambaPath(dstPath))
|
||||
f.putConnection(&cn)
|
||||
f.putConnection(&cn, err)
|
||||
if err != nil {
|
||||
return nil, translateError(err, false)
|
||||
}
|
||||
@@ -412,7 +412,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.putConnection(&cn)
|
||||
defer f.putConnection(&cn, err)
|
||||
|
||||
_, err = cn.smbShare.Stat(dstPath)
|
||||
if os.IsNotExist(err) {
|
||||
@@ -430,7 +430,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.putConnection(&cn)
|
||||
defer f.putConnection(&cn, err)
|
||||
|
||||
if share == "" {
|
||||
shares, err := cn.smbSession.ListSharenames()
|
||||
@@ -474,7 +474,7 @@ func (f *Fs) About(ctx context.Context) (_ *fs.Usage, err error) {
|
||||
return nil, err
|
||||
}
|
||||
stat, err := cn.smbShare.Statfs(dir)
|
||||
f.putConnection(&cn)
|
||||
f.putConnection(&cn, err)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -556,7 +556,7 @@ func (f *Fs) ensureDirectory(ctx context.Context, share, _path string) error {
|
||||
return err
|
||||
}
|
||||
err = cn.smbShare.MkdirAll(f.toSambaPath(dir), 0o755)
|
||||
f.putConnection(&cn)
|
||||
f.putConnection(&cn, err)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -604,7 +604,7 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer o.fs.putConnection(&cn)
|
||||
defer o.fs.putConnection(&cn, err)
|
||||
|
||||
err = cn.smbShare.Chtimes(reqDir, t, t)
|
||||
if err != nil {
|
||||
@@ -650,24 +650,25 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
}
|
||||
fl, err := cn.smbShare.OpenFile(filename, os.O_RDONLY, 0)
|
||||
if err != nil {
|
||||
o.fs.putConnection(&cn)
|
||||
o.fs.putConnection(&cn, err)
|
||||
return nil, fmt.Errorf("failed to open: %w", err)
|
||||
}
|
||||
pos, err := fl.Seek(offset, io.SeekStart)
|
||||
if err != nil {
|
||||
o.fs.putConnection(&cn)
|
||||
o.fs.putConnection(&cn, err)
|
||||
return nil, fmt.Errorf("failed to seek: %w", err)
|
||||
}
|
||||
if pos != offset {
|
||||
o.fs.putConnection(&cn)
|
||||
return nil, fmt.Errorf("failed to seek: wrong position (expected=%d, reported=%d)", offset, pos)
|
||||
err = fmt.Errorf("failed to seek: wrong position (expected=%d, reported=%d)", offset, pos)
|
||||
o.fs.putConnection(&cn, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
in = readers.NewLimitedReadCloser(fl, limit)
|
||||
in = &boundReadCloser{
|
||||
rc: in,
|
||||
close: func() error {
|
||||
o.fs.putConnection(&cn)
|
||||
o.fs.putConnection(&cn, nil)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
@@ -697,7 +698,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
o.fs.putConnection(&cn)
|
||||
o.fs.putConnection(&cn, err)
|
||||
}()
|
||||
|
||||
fl, err := cn.smbShare.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
|
||||
@@ -757,7 +758,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
|
||||
}
|
||||
|
||||
err = cn.smbShare.Remove(filename)
|
||||
o.fs.putConnection(&cn)
|
||||
o.fs.putConnection(&cn, err)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -120,7 +120,7 @@ func init() {
|
||||
srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(rootURL) // FIXME
|
||||
|
||||
// FIXME
|
||||
//err = f.pacer.Call(func() (bool, error) {
|
||||
// err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = srv.CallXML(context.Background(), &opts, &authRequest, nil)
|
||||
// return shouldRetry(ctx, resp, err)
|
||||
//})
|
||||
@@ -327,7 +327,7 @@ func (f *Fs) readMetaDataForID(ctx context.Context, ID string) (info *api.File,
|
||||
func (f *Fs) getAuthToken(ctx context.Context) error {
|
||||
fs.Debugf(f, "Renewing token")
|
||||
|
||||
var authRequest = api.TokenAuthRequest{
|
||||
authRequest := api.TokenAuthRequest{
|
||||
AccessKeyID: withDefault(f.opt.AccessKeyID, accessKeyID),
|
||||
PrivateAccessKey: withDefault(f.opt.PrivateAccessKey, obscure.MustReveal(encryptedPrivateAccessKey)),
|
||||
RefreshToken: f.opt.RefreshToken,
|
||||
@@ -509,7 +509,7 @@ func errorHandler(resp *http.Response) (err error) {
|
||||
return fmt.Errorf("error reading error out of body: %w", err)
|
||||
}
|
||||
match := findError.FindSubmatch(body)
|
||||
if match == nil || len(match) < 2 || len(match[1]) == 0 {
|
||||
if len(match) < 2 || len(match[1]) == 0 {
|
||||
return fmt.Errorf("HTTP error %v (%v) returned body: %q", resp.StatusCode, resp.Status, body)
|
||||
}
|
||||
return fmt.Errorf("HTTP error %v (%v): %s", resp.StatusCode, resp.Status, match[1])
|
||||
@@ -552,7 +552,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
|
||||
// FindLeaf finds a directory of name leaf in the folder with ID pathID
|
||||
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
|
||||
//fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
|
||||
// fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
|
||||
// Find the leaf in pathID
|
||||
found, err = f.listAll(ctx, pathID, nil, func(item *api.Collection) bool {
|
||||
if strings.EqualFold(item.Name, leaf) {
|
||||
|
||||
@@ -29,7 +29,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
|
||||
cmd := exec.Command("git", "log", "--oneline", from+".."+to)
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
log.Fatalf("failed to run git log %s: %v", from+".."+to, err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
|
||||
log.Fatalf("failed to run git log %s: %v", from+".."+to, err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
|
||||
}
|
||||
logMap = map[string]string{}
|
||||
logs = []string{}
|
||||
@@ -39,7 +39,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
|
||||
}
|
||||
match := logRe.FindSubmatch(line)
|
||||
if match == nil {
|
||||
log.Fatalf("failed to parse line: %q", line) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
|
||||
log.Fatalf("failed to parse line: %q", line) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
|
||||
}
|
||||
var hash, logMessage = string(match[1]), string(match[2])
|
||||
logMap[logMessage] = hash
|
||||
@@ -52,12 +52,12 @@ func main() {
|
||||
flag.Parse()
|
||||
args := flag.Args()
|
||||
if len(args) != 0 {
|
||||
log.Fatalf("Syntax: %s", os.Args[0]) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
|
||||
log.Fatalf("Syntax: %s", os.Args[0]) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
|
||||
}
|
||||
// v1.54.0
|
||||
versionBytes, err := os.ReadFile("VERSION")
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to read version: %v", err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
|
||||
log.Fatalf("Failed to read version: %v", err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
|
||||
}
|
||||
if versionBytes[0] == 'v' {
|
||||
versionBytes = versionBytes[1:]
|
||||
@@ -65,7 +65,7 @@ func main() {
|
||||
versionBytes = bytes.TrimSpace(versionBytes)
|
||||
semver := semver.New(string(versionBytes))
|
||||
stable := fmt.Sprintf("v%d.%d", semver.Major, semver.Minor-1)
|
||||
log.Printf("Finding commits in %v not in stable %s", semver, stable) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
|
||||
log.Printf("Finding commits in %v not in stable %s", semver, stable) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
|
||||
masterMap, masterLogs := readCommits(stable+".0", "master")
|
||||
stableMap, _ := readCommits(stable+".0", stable+"-stable")
|
||||
for _, logMessage := range masterLogs {
|
||||
|
||||
@@ -108,6 +108,8 @@ var logReplacements = []string{
|
||||
`^.*?Can't compare hashes, so using check --download.*?$`, dropMe,
|
||||
// ignore timestamps in directory time updates
|
||||
`^(INFO : .*?: (Made directory with|Set directory) (metadata|modification time)).*$`, dropMe,
|
||||
// ignore equivalent log for backends lacking dir modtime support
|
||||
`^(INFO : .*?: Making directory).*$`, dropMe,
|
||||
// ignore sizes in directory time updates
|
||||
`^(NOTICE: .*?: Skipped set directory modification time as --dry-run is set).*$`, dropMe,
|
||||
// ignore sizes in directory metadata updates
|
||||
@@ -746,6 +748,16 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
|
||||
case "test-func":
|
||||
b.TestFn = testFunc
|
||||
return
|
||||
case "concurrent-func":
|
||||
b.TestFn = func() {
|
||||
src := filepath.Join(b.dataDir, "file7.txt")
|
||||
dst := "file1.txt"
|
||||
err := b.copyFile(ctx, src, b.replaceHex(b.path2), dst)
|
||||
if err != nil {
|
||||
fs.Errorf(src, "error copying file: %v", err)
|
||||
}
|
||||
}
|
||||
return
|
||||
case "fix-names":
|
||||
// in case the local os converted any filenames
|
||||
ci.NoUnicodeNormalization = true
|
||||
@@ -871,10 +883,9 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
|
||||
if !ok || err != nil {
|
||||
fs.Logf(remotePath, "Can't find expected file %s (was it renamed by the os?) %v", args[1], err)
|
||||
return
|
||||
} else {
|
||||
// include hash of filename to make unicode form differences easier to see in logs
|
||||
fs.Debugf(remotePath, "verified file exists at correct path. filename hash: %s", stringToHash(leaf))
|
||||
}
|
||||
// include hash of filename to make unicode form differences easier to see in logs
|
||||
fs.Debugf(remotePath, "verified file exists at correct path. filename hash: %s", stringToHash(leaf))
|
||||
return
|
||||
default:
|
||||
return fmt.Errorf("unknown command: %q", args[0])
|
||||
|
||||
@@ -218,7 +218,7 @@ func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error {
|
||||
if b.opt.CompareFlag == "" {
|
||||
return nil
|
||||
}
|
||||
var CompareFlag CompareOpt // for exlcusions
|
||||
var CompareFlag CompareOpt // for exclusions
|
||||
opts := strings.Split(b.opt.CompareFlag, ",")
|
||||
for _, opt := range opts {
|
||||
switch strings.ToLower(strings.TrimSpace(opt)) {
|
||||
|
||||
@@ -161,9 +161,7 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string,
|
||||
return
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
err = b.checkListing(now, newListing, "current "+msg)
|
||||
}
|
||||
err = b.checkListing(now, newListing, "current "+msg)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -286,7 +284,7 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string,
|
||||
}
|
||||
|
||||
// applyDeltas
|
||||
func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (changes1, changes2 bool, results2to1, results1to2 []Results, queues queues, err error) {
|
||||
func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (results2to1, results1to2 []Results, queues queues, err error) {
|
||||
path1 := bilib.FsPath(b.fs1)
|
||||
path2 := bilib.FsPath(b.fs2)
|
||||
|
||||
@@ -367,7 +365,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
}
|
||||
}
|
||||
|
||||
//if there are potential conflicts to check, check them all here (outside the loop) in one fell swoop
|
||||
// if there are potential conflicts to check, check them all here (outside the loop) in one fell swoop
|
||||
matches, err := b.checkconflicts(ctxCheck, filterCheck, b.fs1, b.fs2)
|
||||
|
||||
for _, file := range ds1.sort() {
|
||||
@@ -392,7 +390,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
} else if d2.is(deltaOther) {
|
||||
b.indent("!WARNING", file, "New or changed in both paths")
|
||||
|
||||
//if files are identical, leave them alone instead of renaming
|
||||
// if files are identical, leave them alone instead of renaming
|
||||
if (dirs1.has(file) || dirs1.has(alias)) && (dirs2.has(file) || dirs2.has(alias)) {
|
||||
fs.Infof(nil, "This is a directory, not a file. Skipping equality check and will not rename: %s", file)
|
||||
ls1.getPut(file, skippedDirs1)
|
||||
@@ -486,7 +484,6 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
|
||||
// Do the batch operation
|
||||
if copy2to1.NotEmpty() && !b.InGracefulShutdown {
|
||||
changes1 = true
|
||||
b.indent("Path2", "Path1", "Do queued copies to")
|
||||
ctx = b.setBackupDir(ctx, 1)
|
||||
results2to1, err = b.fastCopy(ctx, b.fs2, b.fs1, copy2to1, "copy2to1")
|
||||
@@ -498,12 +495,11 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
return
|
||||
}
|
||||
|
||||
//copy empty dirs from path2 to path1 (if --create-empty-src-dirs)
|
||||
// copy empty dirs from path2 to path1 (if --create-empty-src-dirs)
|
||||
b.syncEmptyDirs(ctx, b.fs1, copy2to1, dirs2, &results2to1, "make")
|
||||
}
|
||||
|
||||
if copy1to2.NotEmpty() && !b.InGracefulShutdown {
|
||||
changes2 = true
|
||||
b.indent("Path1", "Path2", "Do queued copies to")
|
||||
ctx = b.setBackupDir(ctx, 2)
|
||||
results1to2, err = b.fastCopy(ctx, b.fs1, b.fs2, copy1to2, "copy1to2")
|
||||
@@ -515,7 +511,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
return
|
||||
}
|
||||
|
||||
//copy empty dirs from path1 to path2 (if --create-empty-src-dirs)
|
||||
// copy empty dirs from path1 to path2 (if --create-empty-src-dirs)
|
||||
b.syncEmptyDirs(ctx, b.fs2, copy1to2, dirs1, &results1to2, "make")
|
||||
}
|
||||
|
||||
@@ -523,7 +519,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
if err = b.saveQueue(delete1, "delete1"); err != nil {
|
||||
return
|
||||
}
|
||||
//propagate deletions of empty dirs from path2 to path1 (if --create-empty-src-dirs)
|
||||
// propagate deletions of empty dirs from path2 to path1 (if --create-empty-src-dirs)
|
||||
b.syncEmptyDirs(ctx, b.fs1, delete1, dirs1, &results2to1, "remove")
|
||||
}
|
||||
|
||||
@@ -531,7 +527,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
if err = b.saveQueue(delete2, "delete2"); err != nil {
|
||||
return
|
||||
}
|
||||
//propagate deletions of empty dirs from path1 to path2 (if --create-empty-src-dirs)
|
||||
// propagate deletions of empty dirs from path1 to path2 (if --create-empty-src-dirs)
|
||||
b.syncEmptyDirs(ctx, b.fs2, delete2, dirs2, &results1to2, "remove")
|
||||
}
|
||||
|
||||
|
||||
@@ -394,7 +394,7 @@ func parseHash(str string) (string, string, error) {
|
||||
return "", "", fmt.Errorf("invalid hash %q", str)
|
||||
}
|
||||
|
||||
// checkListing verifies that listing is not empty (unless resynching)
|
||||
// checkListing verifies that listing is not empty (unless resyncing)
|
||||
func (b *bisyncRun) checkListing(ls *fileList, listing, msg string) error {
|
||||
if b.opt.Resync || !ls.empty() {
|
||||
return nil
|
||||
|
||||
@@ -359,8 +359,6 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
|
||||
|
||||
// Determine and apply changes to Path1 and Path2
|
||||
noChanges := ds1.empty() && ds2.empty()
|
||||
changes1 := false // 2to1
|
||||
changes2 := false // 1to2
|
||||
results2to1 := []Results{}
|
||||
results1to2 := []Results{}
|
||||
|
||||
@@ -370,7 +368,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
|
||||
fs.Infof(nil, "No changes found")
|
||||
} else {
|
||||
fs.Infof(nil, "Applying changes")
|
||||
changes1, changes2, results2to1, results1to2, queues, err = b.applyDeltas(octx, ds1, ds2)
|
||||
results2to1, results1to2, queues, err = b.applyDeltas(octx, ds1, ds2)
|
||||
if err != nil {
|
||||
if b.InGracefulShutdown && (err == context.Canceled || err == accounting.ErrorMaxTransferLimitReachedGraceful || strings.Contains(err.Error(), "context canceled")) {
|
||||
fs.Infof(nil, "Ignoring sync error due to Graceful Shutdown: %v", err)
|
||||
@@ -395,21 +393,11 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
|
||||
}
|
||||
b.saveOldListings()
|
||||
// save new listings
|
||||
// NOTE: "changes" in this case does not mean this run vs. last run, it means start of this run vs. end of this run.
|
||||
// i.e. whether we can use the March lst-new as this side's lst without modifying it.
|
||||
if noChanges {
|
||||
b.replaceCurrentListings()
|
||||
} else {
|
||||
if changes1 || b.InGracefulShutdown { // 2to1
|
||||
err1 = b.modifyListing(fctx, b.fs2, b.fs1, results2to1, queues, false)
|
||||
} else {
|
||||
err1 = bilib.CopyFileIfExists(b.newListing1, b.listing1)
|
||||
}
|
||||
if changes2 || b.InGracefulShutdown { // 1to2
|
||||
err2 = b.modifyListing(fctx, b.fs1, b.fs2, results1to2, queues, true)
|
||||
} else {
|
||||
err2 = bilib.CopyFileIfExists(b.newListing2, b.listing2)
|
||||
}
|
||||
err1 = b.modifyListing(fctx, b.fs2, b.fs1, results2to1, queues, false) // 2to1
|
||||
err2 = b.modifyListing(fctx, b.fs1, b.fs2, results1to2, queues, true) // 1to2
|
||||
}
|
||||
if b.DebugName != "" {
|
||||
l1, _ := b.loadListing(b.listing1)
|
||||
|
||||
1
cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.copy2to1.que
vendored
Normal file
@@ -0,0 +1 @@
|
||||
"file1.txt"
|
||||
10
cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path1.lst
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
# bisync listing v1 from test
|
||||
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
|
||||
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"
|
||||
10
cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path1.lst-new
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
# bisync listing v1 from test
|
||||
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
|
||||
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"
|
||||
10
cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path1.lst-old
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
# bisync listing v1 from test
|
||||
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
|
||||
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"
|
||||
10
cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path2.lst
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
# bisync listing v1 from test
|
||||
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
|
||||
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"
|
||||
10
cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path2.lst-new
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
# bisync listing v1 from test
|
||||
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
|
||||
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"
|
||||
10
cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path2.lst-old
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
# bisync listing v1 from test
|
||||
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
|
||||
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"
|
||||
73
cmd/bisync/testdata/test_concurrent/golden/test.log
vendored
Normal file
@@ -0,0 +1,73 @@
|
||||
[36m(01) :[0m [34mtest concurrent[0m
|
||||
|
||||
[36m(02) :[0m [34mtest initial bisync[0m
|
||||
[36m(03) :[0m [34mbisync resync[0m
|
||||
INFO : [2mSetting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.[0m
|
||||
INFO : Bisyncing with Comparison Settings:
|
||||
{
|
||||
"Modtime": true,
|
||||
"Size": true,
|
||||
"Checksum": false,
|
||||
"NoSlowHash": false,
|
||||
"SlowHashSyncOnly": false,
|
||||
"DownloadHash": false
|
||||
}
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Copying Path2 files to Path1
|
||||
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
|
||||
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
|
||||
INFO : Resync updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
||||
|
||||
[36m(04) :[0m [34mtest changed on one path - file1[0m
|
||||
[36m(05) :[0m [34mtouch-glob 2001-01-02 {datadir/} file5R.txt[0m
|
||||
[36m(06) :[0m [34mtouch-glob 2023-08-26 {datadir/} file7.txt[0m
|
||||
[36m(07) :[0m [34mcopy-as {datadir/}file5R.txt {path2/} file1.txt[0m
|
||||
|
||||
[36m(08) :[0m [34mtest bisync with file changed during[0m
|
||||
[36m(09) :[0m [34mconcurrent-func[0m
|
||||
[36m(10) :[0m [34mbisync[0m
|
||||
INFO : [2mSetting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.[0m
|
||||
INFO : Bisyncing with Comparison Settings:
|
||||
{
|
||||
"Modtime": true,
|
||||
"Size": true,
|
||||
"Checksum": false,
|
||||
"NoSlowHash": false,
|
||||
"SlowHashSyncOnly": false,
|
||||
"DownloadHash": false
|
||||
}
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Building Path1 and Path2 listings
|
||||
INFO : Path1 checking for diffs
|
||||
INFO : Path2 checking for diffs
|
||||
INFO : - [34mPath2[0m [35m[33mFile changed: [35msize (larger)[0m, [35mtime (newer)[0m[0m[0m - [36mfile1.txt[0m
|
||||
INFO : Path2: 1 changes: [32m 0 new[0m, [33m 1 modified[0m, [31m 0 deleted[0m
|
||||
INFO : ([33mModified[0m: [36m 1 newer[0m, [34m 0 older[0m, [36m 1 larger[0m, [34m 0 smaller[0m)
|
||||
INFO : Applying changes
|
||||
INFO : - [34mPath2[0m [35m[32mQueue copy to[0m Path1[0m - [36m{path1/}file1.txt[0m
|
||||
INFO : - [34mPath2[0m [35mDo queued copies to[0m - [36mPath1[0m
|
||||
INFO : Updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
||||
|
||||
[36m(11) :[0m [34mbisync[0m
|
||||
INFO : [2mSetting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.[0m
|
||||
INFO : Bisyncing with Comparison Settings:
|
||||
{
|
||||
"Modtime": true,
|
||||
"Size": true,
|
||||
"Checksum": false,
|
||||
"NoSlowHash": false,
|
||||
"SlowHashSyncOnly": false,
|
||||
"DownloadHash": false
|
||||
}
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Building Path1 and Path2 listings
|
||||
INFO : Path1 checking for diffs
|
||||
INFO : Path2 checking for diffs
|
||||
INFO : No changes found
|
||||
INFO : Updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
||||
1
cmd/bisync/testdata/test_concurrent/initial/RCLONE_TEST
vendored
Normal file
1
cmd/bisync/testdata/test_concurrent/initial/RCLONE_TEST
vendored
Normal file
@@ -0,0 +1 @@
|
||||
This file is used for testing the health of rclone accesses to the local/remote file system. Do not delete.
|
||||
0
cmd/bisync/testdata/test_concurrent/initial/file1.txt
vendored
Normal file
0
cmd/bisync/testdata/test_concurrent/initial/file2.txt
vendored
Normal file
0
cmd/bisync/testdata/test_concurrent/initial/file3.txt
vendored
Normal file
0
cmd/bisync/testdata/test_concurrent/initial/file4.txt
vendored
Normal file
0
cmd/bisync/testdata/test_concurrent/initial/file5.txt
vendored
Normal file
0
cmd/bisync/testdata/test_concurrent/initial/file6.txt
vendored
Normal file
0
cmd/bisync/testdata/test_concurrent/initial/file7.txt
vendored
Normal file
0
cmd/bisync/testdata/test_concurrent/initial/file8.txt
vendored
Normal file
0
cmd/bisync/testdata/test_concurrent/modfiles/dummy.txt
vendored
Normal file
1
cmd/bisync/testdata/test_concurrent/modfiles/file1.txt
vendored
Normal file
@@ -0,0 +1 @@
This file is newer
1
cmd/bisync/testdata/test_concurrent/modfiles/file10.txt
vendored
Normal file
@@ -0,0 +1 @@
This file is newer
1
cmd/bisync/testdata/test_concurrent/modfiles/file11.txt
vendored
Normal file
@@ -0,0 +1 @@
This file is newer
1
cmd/bisync/testdata/test_concurrent/modfiles/file2.txt
vendored
Normal file
@@ -0,0 +1 @@
Newer version
1
cmd/bisync/testdata/test_concurrent/modfiles/file5L.txt
vendored
Normal file
@@ -0,0 +1 @@
This file is newer and not equal to 5R
1
cmd/bisync/testdata/test_concurrent/modfiles/file5R.txt
vendored
Normal file
@@ -0,0 +1 @@
This file is newer and not equal to 5L
1
cmd/bisync/testdata/test_concurrent/modfiles/file6.txt
vendored
Normal file
@@ -0,0 +1 @@
This file is newer
1
cmd/bisync/testdata/test_concurrent/modfiles/file7.txt
vendored
Normal file
@@ -0,0 +1 @@
This file is newer
15
cmd/bisync/testdata/test_concurrent/scenario.txt
vendored
Normal file
@@ -0,0 +1,15 @@
test concurrent

test initial bisync
bisync resync

test changed on one path - file1
touch-glob 2001-01-02 {datadir/} file5R.txt
touch-glob 2023-08-26 {datadir/} file7.txt
copy-as {datadir/}file5R.txt {path2/} file1.txt

test bisync with file changed during
concurrent-func
bisync

bisync
@@ -23,7 +23,7 @@ INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
(05) : move-listings empty-path1

(06) : test 2. resync with empty path2, resulting in synching all content to path2.
(06) : test 2. resync with empty path2, resulting in syncing all content to path2.
(07) : purge-children {path2/}
(08) : bisync resync
INFO : Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.

4
cmd/bisync/testdata/test_resync/scenario.txt
vendored
@@ -1,6 +1,6 @@
test resync
# 1. Resync with empty Path1, resulting in copying all content FROM Path2
# 2. Resync with empty Path2, resulting in synching all content TO Path2
# 2. Resync with empty Path2, resulting in syncing all content TO Path2
# 3. Exercise all of the various file difference scenarios during a resync:
# File Path1 Path2 Expected action Who wins
# - file1.txt Exists Missing Sync Path1 >Path2 Path1
@@ -17,7 +17,7 @@ purge-children {path1/}
bisync resync
move-listings empty-path1

test 2. resync with empty path2, resulting in synching all content to path2.
test 2. resync with empty path2, resulting in syncing all content to path2.
purge-children {path2/}
bisync resync
move-listings empty-path2

11
cmd/cmd.go
@@ -429,11 +429,12 @@ func initConfig() {
fs.Fatalf(nil, "Failed to start remote control: %v", err)
}

// Start the metrics server if configured
_, err = rcserver.MetricsStart(ctx, &rc.Opt)
if err != nil {
fs.Fatalf(nil, "Failed to start metrics server: %v", err)

// Start the metrics server if configured and not running the "rc" command
if len(os.Args) >= 2 && os.Args[1] != "rc" {
_, err = rcserver.MetricsStart(ctx, &rc.Opt)
if err != nil {
fs.Fatalf(nil, "Failed to start metrics server: %v", err)
}
}

// Setup CPU profiling if desired

@@ -303,6 +303,9 @@ func doConfig(name string, in rc.Params, do func(config.UpdateRemoteOpt) (*fs.Co
if err != nil {
return err
}
if updateRemoteOpt.NoOutput {
return nil
}
if !(updateRemoteOpt.NonInteractive || updateRemoteOpt.Continue) {
config.ShowRemote(name)
} else {
@@ -323,6 +326,7 @@ func init() {
for _, cmdFlags := range []*pflag.FlagSet{configCreateCommand.Flags(), configUpdateCommand.Flags()} {
flags.BoolVarP(cmdFlags, &updateRemoteOpt.Obscure, "obscure", "", false, "Force any passwords to be obscured", "Config")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.NoObscure, "no-obscure", "", false, "Force any passwords not to be obscured", "Config")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.NoOutput, "no-output", "", false, "Don't provide any output", "Config")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.NonInteractive, "non-interactive", "", false, "Don't interact with user and return questions", "Config")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.Continue, "continue", "", false, "Continue the configuration process with an answer", "Config")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.All, "all", "", false, "Ask the full set of config questions", "Config")
@@ -549,12 +553,12 @@ password to re-encrypt the config.

When |--password-command| is called to change the password then the
environment variable |RCLONE_PASSWORD_CHANGE=1| will be set. So if
changing passwords programatically you can use the environment
changing passwords programmatically you can use the environment
variable to distinguish which password you must supply.

Alternatively you can remove the password first (with |rclone config
encryption remove|), then set it again with this command which may be
easier if you don't mind the unecrypted config file being on the disk
easier if you don't mind the unencrypted config file being on the disk
briefly.
`, "|", "`"),
RunE: func(command *cobra.Command, args []string) error {

@@ -54,7 +54,7 @@ destination if there is one with the same name.
Setting |--stdout| or making the output file name |-|
will cause the output to be written to standard output.

### Troublshooting
### Troubleshooting

If you can't get |rclone copyurl| to work then here are some things you can try:

@@ -194,7 +194,7 @@ func (f *FS) Chown(name string, uid, gid int) (err error) {
return file.Chown(uid, gid)
}

// Chtimes changes the acces time and modified time
// Chtimes changes the access time and modified time
func (f *FS) Chtimes(name string, atime time.Time, mtime time.Time) (err error) {
defer log.Trace(name, "atime=%v, mtime=%v", atime, mtime)("err=%v", &err)
return f.vfs.Chtimes(name, atime, mtime)

@@ -145,7 +145,7 @@ that it uses an on disk cache, but the cache entries are held as
symlinks. Rclone will use the handle of the underlying file as the NFS
handle which improves performance. This sort of cache can't be backed
up and restored as the underlying handles will change. This is Linux
only. It requres running rclone as root or with |CAP_DAC_READ_SEARCH|.
only. It requires running rclone as root or with |CAP_DAC_READ_SEARCH|.
You can run rclone with this extra permission by doing this to the
rclone binary |sudo setcap cap_dac_read_search+ep /path/to/rclone|.

@@ -158,7 +158,7 @@ func (b *s3Backend) HeadObject(ctx context.Context, bucketName, objectName strin
}, nil
}

// GetObject fetchs the object from the filesystem.
// GetObject fetches the object from the filesystem.
func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string, rangeRequest *gofakes3.ObjectRangeRequest) (obj *gofakes3.Object, err error) {
_vfs, err := b.s.getVFS(ctx)
if err != nil {
@@ -400,7 +400,7 @@ func (b *s3Backend) deleteObject(ctx context.Context, bucketName, objectName str
}

fp := path.Join(bucketName, objectName)
// S3 does not report an error when attemping to delete a key that does not exist, so
// S3 does not report an error when attempting to delete a key that does not exist, so
// we need to skip IsNotExist errors.
if err := _vfs.Remove(fp); err != nil && !os.IsNotExist(err) {
return err

@@ -19,7 +19,7 @@ func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPr
for _, entry := range dirEntries {
object := entry.Name()

// workround for control-chars detect
// workaround for control-chars detect
objectPath := path.Join(fdPath, object)

if !strings.HasPrefix(object, name) {

@@ -928,3 +928,17 @@ put them back in again.` >}}
* Matt Ickstadt <mattico8@gmail.com> <matt@beckenterprises.com>
* Spencer McCullough <mccullough.spencer@gmail.com>
* Jonathan Giannuzzi <jonathan@giannuzzi.me>
* Christoph Berger <github@christophberger.com>
* Tim White <tim.white@su.org.au>
* Robin Schneider <robin.schneider@stackit.cloud>
* izouxv <izouxv@users.noreply.github.com>
* Moises Lima <mozlima@users.noreply.github.com>
* Bruno Fernandes <bruno.fernandes1996@hotmail.com>
* Corentin Barreau <corentin@archive.org>
* hiddenmarten <hiddenmarten@gmail.com>
* Trevor Starick <trevor.starick@gmail.com>
* b-wimmer <132347192+b-wimmer@users.noreply.github.com>
* Jess <jess@jessie.cafe>
* Zachary Vorhies <zachvorhies@protonmail.com>
* Alexander Minbaev <minbaev@gmail.com>
* Joel K Biju <joelkbiju18@gmail.com>

@@ -938,8 +938,9 @@ You can set custom upload headers with the `--header-upload` flag.
- Content-Encoding
- Content-Language
- Content-Type
- X-MS-Tags

Eg `--header-upload "Content-Type: text/potato"`
Eg `--header-upload "Content-Type: text/potato"` or `--header-upload "X-MS-Tags: foo=bar"`

## Limitations

@@ -206,6 +206,13 @@ If the resource has multiple user-assigned identities you will need to
unset `env_auth` and set `use_msi` instead. See the [`use_msi`
section](#use_msi).

If you are operating in disconnected clouds, or private clouds such as
Azure Stack you may want to set `disable_instance_discovery = true`.
This determines whether rclone requests Microsoft Entra instance
metadata from `https://login.microsoft.com/` before authenticating.
Setting this to `true` will skip this request, making you responsible
for ensuring the configured authority is valid and trustworthy.

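For illustration, a minimal Go sketch of the SDK knob this option corresponds to; rclone builds on the Azure SDK's `azidentity` package, though the exact wiring inside the backend is an assumption here, not shown in this hunk:

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// DisableInstanceDiscovery skips the instance-metadata request to
	// login.microsoft.com before authenticating, which is what you want
	// in disconnected/private clouds such as Azure Stack.
	cred, err := azidentity.NewDefaultAzureCredential(&azidentity.DefaultAzureCredentialOptions{
		DisableInstanceDiscovery: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = cred // hand the credential to the storage client as usual
}
```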
##### Env Auth: 3. Azure CLI credentials (as used by the az tool)

Credentials created with the `az` tool can be picked up using `env_auth`.
@@ -288,6 +295,13 @@ be explicitly specified using exactly one of the `msi_object_id`,

If none of `msi_object_id`, `msi_client_id`, or `msi_mi_res_id` is
set, this is equivalent to using `env_auth`.

#### Azure CLI tool `az` {#use_az}
Set to use the [Azure CLI tool `az`](https://learn.microsoft.com/en-us/cli/azure/)
as the sole means of authentication.
Setting this can be useful if you wish to use the `az` CLI on a host with
a System Managed Identity that you do not want to use.
Don't set `env_auth` at the same time.

{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/azurefiles/azurefiles.go then run make backenddocs" >}}
### Standard options

@@ -487,7 +487,7 @@ See the [bisync filters](#filtering) section and generic
[--filter-from](/filtering/#filter-from-read-filtering-patterns-from-a-file)
documentation.
An [example filters file](#example-filters-file) contains filters for
non-allowed files for synching with Dropbox.
non-allowed files for syncing with Dropbox.

If you make changes to your filters file then bisync requires a run
with `--resync`. This is a safety feature, which prevents existing files
@@ -664,7 +664,7 @@ Using `--check-sync=false` will disable it and may significantly reduce the
sync run times for very large numbers of files.

The check may be run manually with `--check-sync=only`. It runs only the
integrity check and terminates without actually synching.
integrity check and terminates without actually syncing.

Note that currently, `--check-sync` **only checks listing snapshots and NOT the
actual files on the remotes.** Note also that the listing snapshots will not
@@ -1141,7 +1141,7 @@ The `--include*`, `--exclude*`, and `--filter` flags are also supported.

### How to filter directories

Filtering portions of the directory tree is a critical feature for synching.
Filtering portions of the directory tree is a critical feature for syncing.

Examples of directory trees (always beneath the Path1/Path2 root level)
you may want to exclude from your sync:
@@ -1250,7 +1250,7 @@ quashed by adding `--quiet` to the bisync command line.

## Example exclude-style filters files for use with Dropbox {#exclude-filters}

- Dropbox disallows synching the listed temporary and configuration/data files.
- Dropbox disallows syncing the listed temporary and configuration/data files.
The `- <filename>` filters exclude these files where ever they may occur
in the sync tree. Consider adding similar exclusions for file types
you don't need to sync, such as core dump and software build files.
@@ -1584,7 +1584,7 @@ test command flags can be equally prefixed by a single `-` or double dash.

- `go test . -case basic -remote local -remote2 local`
runs the `test_basic` test case using only the local filesystem,
synching one local directory with another local directory.
syncing one local directory with another local directory.
Test script output is to the console, while commands within scenario.txt
have their output sent to the `.../workdir/test.log` file,
which is finally compared to the golden copy.
@@ -1815,6 +1815,9 @@ about _Unison_ and synchronization in general.

## Changelog

### `v1.69.1`
* Fixed an issue causing listings to not capture concurrent modifications under certain conditions

### `v1.68`
* Fixed an issue affecting backends that round modtimes to a lower precision.

@@ -1860,4 +1863,4 @@ causing bisync to consider more files than necessary due to overbroad filters du
* Added [new `--ignore-listing-checksum` flag](https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=6.%20%2D%2Dignore%2Dchecksum%20should%20be%20split%20into%20two%20flags%20for%20separate%20purposes)
to distinguish from `--ignore-checksum`
* [Performance improvements](https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=6.%20Deletes%20take%20several%20times%20longer%20than%20copies) for large remotes
* Documentation and testing improvements
* Documentation and testing improvements

@@ -7,6 +7,8 @@ versionIntroduced: v1.65
---
# rclone serve nfs

*Not available in Windows.*

Serve the remote as an NFS mount

## Synopsis

@@ -741,7 +741,7 @@ strong random number generator. The nonce is incremented for each
chunk read making sure each nonce is unique for each block written.
The chance of a nonce being reused is minuscule. If you wrote an
exabyte of data (10¹⁸ bytes) you would have a probability of
approximately 2×10⁻³² of re-using a nonce.
approximately 2×10⁻³² of reusing a nonce.
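As a sanity check on that figure: assuming crypt's 64 KiB blocks and a 24-byte (192-bit) starting nonce, a birthday-bound estimate reproduces it:

```latex
n = \frac{10^{18}\,\text{bytes}}{2^{16}\,\text{bytes/block}} \approx 1.5\times10^{13}\ \text{blocks},
\qquad
P(\text{reuse}) \approx \frac{n^{2}}{2\cdot 2^{192}} \approx 1.9\times10^{-32} \approx 2\times10^{-32}.
```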

#### Chunk

@@ -619,6 +619,11 @@ it to `false`. It is also possible to specify `--boolean=false` or
parsed as `--boolean` and the `false` is parsed as an extra command
line argument for rclone.

Options documented to take a `stringArray` parameter accept multiple
values. To pass more than one value, repeat the option; for example:
`--include value1 --include value2`.

### Time or duration options {#time-option}

TIME or DURATION options can be specified as a duration string or a
@@ -2930,7 +2935,7 @@ so they take exactly the same form.
The options set by environment variables can be seen with the `-vv` flag, e.g. `rclone version -vv`.

Options that can appear multiple times (type `stringArray`) are
treated slighly differently as environment variables can only be
treated slightly differently as environment variables can only be
defined once. In order to allow a simple mechanism for adding one or
many items, the input is treated as a [CSV encoded](https://godoc.org/encoding/csv)
string. For example

@@ -9,6 +9,8 @@ description: "Rclone Global Flags"
This describes the global flags available to every rclone command
split into groups.

See the [Options section](/docs/#options) for syntax and usage advice.

## Copy

@@ -384,7 +384,7 @@ Use the gphotosdl proxy for downloading the full resolution images
The Google API will deliver images and video which aren't full
resolution, and/or have EXIF data missing.

However if you ue the gphotosdl proxy tnen you can download original,
However if you use the gphotosdl proxy then you can download original,
unchanged images.

This runs a headless browser in the background.

@@ -61,7 +61,7 @@ Enter a value.
config_2fa> 2FACODE
Remote config
--------------------
[koofr]
[iclouddrive]
- type: iclouddrive
- apple_id: APPLEID
- password: *** ENCRYPTED ***
@@ -78,6 +78,20 @@ y/e/d> y

ADP is currently unsupported and needs to be disabled

On iPhone, Settings `>` Apple Account `>` iCloud `>` 'Access iCloud Data on the Web' must be ON, and 'Advanced Data Protection' OFF.

## Troubleshooting

### Missing PCS cookies from the request

This means you have Advanced Data Protection (ADP) turned on. This is not supported at the moment. If you want to use rclone you will have to turn it off. See above for how to turn it off.

You will need to clear the `cookies` and the `trust_token` fields in the config. Or you can delete the remote config and start again.

You should then run `rclone reconnect remote:`.

Note that changing the ADP setting may not take effect immediately - you may need to wait a few hours or a day before you can get rclone to work - keep clearing the config entry and running `rclone reconnect remote:` until rclone functions properly.

{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/iclouddrive/iclouddrive.go then run make backenddocs" >}}
### Standard options

@@ -936,6 +936,28 @@ See the [metadata](/docs/#metadata) docs for more info.

{{< rem autogenerated options stop >}}

### Impersonate other users as Admin

Unlike Google Drive and impersonating any domain user via service accounts, OneDrive requires you to authenticate as an admin account, and manually set up a remote per user you wish to impersonate.

1. In [Microsoft 365 Admin Center](https://admin.microsoft.com), open each user you need to "impersonate" and go to the OneDrive section. There is a heading called "Get access to files", you need to click to create the link, this creates the link of the format `https://{tenant}-my.sharepoint.com/personal/{user_name_domain_tld}/` but also changes the permissions so your admin user has access.
2. Then in PowerShell run the following commands:
```console
Install-Module Microsoft.Graph -Scope CurrentUser -Repository PSGallery -Force
Import-Module Microsoft.Graph.Files
Connect-MgGraph -Scopes "Files.ReadWrite.All"
# Follow the steps to allow access to your admin user
# Then run this for each user you want to impersonate to get the Drive ID
Get-MgUserDefaultDrive -UserId '{emailaddress}'
# This will give you output of the format:
# Name     Id       DriveType CreatedDateTime
# ----     --       --------- ---------------
# OneDrive b!XYZ123 business  14/10/2023 1:00:58 pm
```
3. Then in rclone add a onedrive remote type, and use the `Type in driveID` with the DriveID you got in the previous step. One remote per user. It will then confirm the drive ID, and hopefully give you a message of `Found drive "root" of type "business"` and then include the URL of the format `https://{tenant}-my.sharepoint.com/personal/{user_name_domain_tld}/Documents`

## Limitations

If you don't use rclone for 90 days the refresh token will

@@ -2068,7 +2068,7 @@ the `--vfs-cache-mode` is off, it will return an empty result.
],
}

The `expiry` time is the time until the file is elegible for being
The `expiry` time is the time until the file is eligible for being
uploaded in floating point seconds. This may go negative. As rclone
only transfers `--transfers` files at once, only the lowest
`--transfers` expiry times will have `uploading` as `true`. So there

@@ -750,7 +750,7 @@ Notes on above:
that `USER_NAME` has been created.
2. The Resource entry must include both resource ARNs, as one implies
the bucket and the other implies the bucket's objects.
3. When using [s3-no-check-bucket](#s3-no-check-bucket) and the bucket already exsits, the `"arn:aws:s3:::BUCKET_NAME"` doesn't have to be included.
3. When using [s3-no-check-bucket](#s3-no-check-bucket) and the bucket already exists, the `"arn:aws:s3:::BUCKET_NAME"` doesn't have to be included.

For reference, [here's an Ansible script](https://gist.github.com/ebridges/ebfc9042dd7c756cd101cfa807b7ae2b)
that will generate one or more buckets that will work with `rclone sync`.
@@ -2958,7 +2958,7 @@ Choose a number from below, or type in your own value
location_constraint>1
```

9. Specify a canned ACL. IBM Cloud (Storage) supports "public-read" and "private". IBM Cloud(Infra) supports all the canned ACLs. On-Premise COS supports all the canned ACLs.
8. Specify a canned ACL. IBM Cloud (Storage) supports "public-read" and "private". IBM Cloud(Infra) supports all the canned ACLs. On-Premise COS supports all the canned ACLs.
```
Canned ACL used when creating buckets and/or storing objects in S3.
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
@@ -2974,8 +2974,7 @@ Choose a number from below, or type in your own value
acl> 1
```

12. Review the displayed configuration and accept to save the "remote" then quit. The config file should look like this
9. Review the displayed configuration and accept to save the "remote" then quit. The config file should look like this
```
[xxx]
type = s3
@@ -2987,7 +2986,7 @@ acl> 1
acl = private
```

13. Execute rclone commands
10. Execute rclone commands
```
1) Create a bucket.
rclone mkdir IBM-COS-XREGION:newbucket
@@ -3006,6 +3005,35 @@ acl> 1
rclone delete IBM-COS-XREGION:newbucket/file.txt
```

#### IBM IAM authentication
If using IBM IAM authentication with IBM API KEY you need to fill in these additional parameters
1. Select false for env_auth
2. Leave `access_key_id` and `secret_access_key` blank
3. Paste your `ibm_api_key`
```
Option ibm_api_key.
IBM API Key to be used to obtain IAM token
Enter a value of type string. Press Enter for the default (1).
ibm_api_key>
```
4. Paste your `ibm_resource_instance_id`
```
Option ibm_resource_instance_id.
IBM service instance id
Enter a value of type string. Press Enter for the default (2).
ibm_resource_instance_id>
```
5. In advanced settings type true for `v2_auth`
```
Option v2_auth.
If true use v2 authentication.
If this is false (the default) then rclone will use v4 authentication.
If it is set then rclone will use v2 authentication.
Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH.
Enter a boolean value (true or false). Press Enter for the default (true).
v2_auth>
```

### IDrive e2 {#idrive-e2}

Here is an example of making an [IDrive e2](https://www.idrive.com/e2/)

@@ -65,28 +65,29 @@ type StatsInfo struct {
}

type averageValues struct {
mu sync.Mutex
lpBytes int64
lpTime time.Time
speed float64
stop chan bool
stopped sync.WaitGroup
startOnce sync.Once
stopOnce sync.Once
mu sync.Mutex
lpBytes int64
lpTime time.Time
speed float64
stop chan bool
stopped sync.WaitGroup
started bool
}

// NewStats creates an initialised StatsInfo
func NewStats(ctx context.Context) *StatsInfo {
ci := fs.GetConfig(ctx)
return &StatsInfo{
s := &StatsInfo{
ctx: ctx,
ci: ci,
checking: newTransferMap(ci.Checkers, "checking"),
transferring: newTransferMap(ci.Transfers, "transferring"),
inProgress: newInProgress(ctx),
startTime: time.Now(),
average: averageValues{stop: make(chan bool)},
average: averageValues{},
}
s.startAverageLoop()
return s
}

// RemoteStats returns stats for rc
@@ -328,61 +329,96 @@ func (s *StatsInfo) averageLoop() {
ticker := time.NewTicker(averagePeriodLength)
defer ticker.Stop()

startTime := time.Now()
a := &s.average
defer a.stopped.Done()

shouldRun := false

for {
select {
case now := <-ticker.C:
a.mu.Lock()
var elapsed float64
if a.lpTime.IsZero() {
elapsed = now.Sub(startTime).Seconds()
} else {
elapsed = now.Sub(a.lpTime).Seconds()

if !shouldRun {
a.mu.Unlock()
continue
}

avg := 0.0
elapsed := now.Sub(a.lpTime).Seconds()
if elapsed > 0 {
avg = float64(a.lpBytes) / elapsed
}

if period < averagePeriod {
period++
}

a.speed = (avg + a.speed*(period-1)) / period
a.lpBytes = 0
a.lpTime = now

a.mu.Unlock()

case stop, ok := <-a.stop:
if !ok {
return // Channel closed, exit the loop
}

a.mu.Lock()

// If we are resuming, store the current time
if !shouldRun && !stop {
a.lpTime = time.Now()
}
shouldRun = !stop

a.mu.Unlock()
case <-a.stop:
return
}
}
}

// Resume the average loop
func (s *StatsInfo) resumeAverageLoop() {
s.mu.Lock()
defer s.mu.Unlock()
s.average.stop <- false
}

// Pause the average loop
func (s *StatsInfo) pauseAverageLoop() {
s.mu.Lock()
defer s.mu.Unlock()
s.average.stop <- true
}

// Start the average loop
//
// Call with the mutex held
func (s *StatsInfo) _startAverageLoop() {
if !s.average.started {
s.average.stop = make(chan bool)
s.average.started = true
s.average.stopped.Add(1)
go s.averageLoop()
}
}

// Start the average loop
func (s *StatsInfo) startAverageLoop() {
s.mu.Lock()
defer s.mu.Unlock()
s.average.startOnce.Do(func() {
s.average.stopped.Add(1)
go s.averageLoop()
})
s._startAverageLoop()
}

// Stop the average loop
//
// Call with the mutex held
func (s *StatsInfo) _stopAverageLoop() {
s.average.stopOnce.Do(func() {
if s.average.started {
close(s.average.stop)
s.average.stopped.Wait()
})
}
}

// Stop the average loop
func (s *StatsInfo) stopAverageLoop() {
s.mu.Lock()
defer s.mu.Unlock()
s._stopAverageLoop()
}
}

// String convert the StatsInfo to a string for printing
@@ -564,9 +600,9 @@ func (s *StatsInfo) GetBytesWithPending() int64 {
pending := int64(0)
for _, tr := range s.startedTransfers {
if tr.acc != nil {
bytes, size := tr.acc.progress()
if bytes < size {
pending += size - bytes
bytesRead, size := tr.acc.progress()
if bytesRead < size {
pending += size - bytesRead
}
}
}
@@ -699,7 +735,8 @@ func (s *StatsInfo) ResetCounters() {
s.oldDuration = 0

s._stopAverageLoop()
s.average = averageValues{stop: make(chan bool)}
s.average = averageValues{}
s._startAverageLoop()
}

// ResetErrors sets the errors count to 0 and resets lastError, fatalError and retryError
@@ -788,7 +825,7 @@ func (s *StatsInfo) NewTransfer(obj fs.DirEntry, dstFs fs.Fs) *Transfer {
}
tr := newTransfer(s, obj, srcFs, dstFs)
s.transferring.add(tr)
s.startAverageLoop()
s.resumeAverageLoop()
return tr
}

@@ -796,7 +833,7 @@ func (s *StatsInfo) NewTransfer(obj fs.DirEntry, dstFs fs.Fs) *Transfer {
func (s *StatsInfo) NewTransferRemoteSize(remote string, size int64, srcFs, dstFs fs.Fs) *Transfer {
tr := newTransferRemoteSize(s, remote, size, false, "", srcFs, dstFs)
s.transferring.add(tr)
s.startAverageLoop()
s.resumeAverageLoop()
return tr
}

@@ -811,7 +848,7 @@ func (s *StatsInfo) DoneTransferring(remote string, ok bool) {
s.mu.Unlock()
}
if s.transferring.empty() && s.checking.empty() {
time.AfterFunc(averageStopAfter, s.stopAverageLoop)
s.pauseAverageLoop()
}
}

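The protocol behind `pauseAverageLoop`/`resumeAverageLoop` above is just a bool sent down the `stop` channel: `true` pauses the periodic work, `false` resumes it, and closing the channel shuts the goroutine down. A stripped-down, self-contained sketch of the same pattern (illustrative names, not rclone's API):

```go
package main

import (
	"fmt"
	"time"
)

// pausableLoop does periodic work until its control channel is closed.
// Sending true pauses the work; sending false resumes it.
func pausableLoop(ctl chan bool, done chan struct{}) {
	defer close(done)
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	running := false
	for {
		select {
		case <-ticker.C:
			if !running {
				continue // paused: skip the periodic work
			}
			fmt.Println("tick: updating moving average")
		case pause, ok := <-ctl:
			if !ok {
				return // channel closed: shut down
			}
			running = !pause
		}
	}
}

func main() {
	ctl := make(chan bool)
	done := make(chan struct{})
	go pausableLoop(ctl, done)
	ctl <- false                       // resume (start doing work)
	time.Sleep(350 * time.Millisecond) // let a few ticks happen
	ctl <- true                        // pause
	close(ctl)                         // stop the goroutine
	<-done
}
```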
4
fs/cache/cache_test.go
vendored
@@ -131,7 +131,7 @@ func TestPutErr(t *testing.T) {
assert.Equal(t, 1, Entries())

fNew, err := GetFn(context.Background(), "mock:/", create)
require.Equal(t, fs.ErrorNotFoundInConfigFile, err)
require.True(t, errors.Is(err, fs.ErrorNotFoundInConfigFile))
require.Equal(t, f, fNew)

assert.Equal(t, 1, Entries())
@@ -141,7 +141,7 @@ func TestPutErr(t *testing.T) {
PutErr("mock:/file.txt", f, fs.ErrorNotFoundInConfigFile)

fNew, err = GetFn(context.Background(), "mock:/file.txt", create)
require.Equal(t, fs.ErrorNotFoundInConfigFile, err)
require.True(t, errors.Is(err, fs.ErrorNotFoundInConfigFile))
require.Equal(t, f, fNew)

assert.Equal(t, 1, Entries())

@@ -514,6 +514,8 @@ type UpdateRemoteOpt struct {
Obscure bool `json:"obscure"`
// Treat all passwords as obscured
NoObscure bool `json:"noObscure"`
// Don't provide any output
NoOutput bool `json:"noOutput"`
// Don't interact with the user - return questions
NonInteractive bool `json:"nonInteractive"`
// If set then supply state and result parameters to continue the process

@@ -120,6 +120,7 @@ func init() {
extraHelp += `- opt - a dictionary of options to control the configuration
- obscure - declare passwords are plain and need obscuring
- noObscure - declare passwords are already obscured and don't need obscuring
- noOutput - don't print anything to stdout
- nonInteractive - don't interact with a user, return questions
- continue - continue the config process with an answer
- all - ask all the config questions not just the post config ones

@@ -39,6 +39,7 @@ type Features struct {
NoMultiThreading bool // set if can't have multiple threads on one download open
Overlay bool // this wraps one or more backends to add functionality
ChunkWriterDoesntSeek bool // set if the chunk writer doesn't need to read the data more than once
DoubleSlash bool // set if backend supports double slashes in paths

// Purge all files in the directory specified
//
@@ -383,6 +384,8 @@ func (ft *Features) Mask(ctx context.Context, f Fs) *Features {
ft.PartialUploads = ft.PartialUploads && mask.PartialUploads
ft.NoMultiThreading = ft.NoMultiThreading && mask.NoMultiThreading
// ft.Overlay = ft.Overlay && mask.Overlay don't propagate Overlay
ft.ChunkWriterDoesntSeek = ft.ChunkWriterDoesntSeek && mask.ChunkWriterDoesntSeek
ft.DoubleSlash = ft.DoubleSlash && mask.DoubleSlash

if mask.Purge == nil {
ft.Purge = nil

@@ -17,6 +17,7 @@ import (

"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/structs"
"golang.org/x/net/http2"
"golang.org/x/net/publicsuffix"
)

@@ -120,6 +121,29 @@ func NewTransportCustom(ctx context.Context, customize func(*http.Transport)) ht

if ci.DisableHTTP2 {
t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
} else {
// Enable HTTP/2
err := http2.ConfigureTransport(t)
if err != nil {
fs.Errorf(nil, "Failed to configure HTTP2: %v", err)
} else {
// Create a new HTTP/2 transport - use HTTP/1 defaults where appropriate
t2 := &http2.Transport{
DisableCompression: t.DisableCompression,
IdleConnTimeout: t.IdleConnTimeout,
TLSClientConfig: t.TLSClientConfig,
MaxReadFrameSize: 8 * 1024 * 1024, // 16k (default) to 16M.
//ReadIdleTimeout: 10 * time.Second, // Check idle connections every 10s
//WriteByteTimeout: 5 * time.Second, // Custom setting
//AllowHTTP: false, // Ensure HTTPS-only HTTP/2
//StrictMaxConcurrentStreams: true, // Example setting
}

// Override HTTP/2 handling while keeping existing HTTP/1 support
t.TLSNextProto[http2.NextProtoTLS] = func(authority string, c *tls.Conn) http.RoundTripper {
return t2
}
}
}

// customize the transport if required

@@ -133,7 +133,7 @@ func TestCertificates(t *testing.T) {
assert.Fail(t, "Certificate expired", "Certificate expires at %s, current time is %s", cert[0].NotAfter.Sub(startTime), time.Since(startTime))
}

// Write some test data to fullfil the request
// Write some test data to fulfill the request
w.Header().Set("Content-Type", "text/plain")
_, _ = fmt.Fprintln(w, "test data")
}))

@@ -9,6 +9,7 @@ import (

"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/lib/bucket"
)

// DirSorted reads Object and *Dir into entries for the given Fs.
@@ -43,7 +44,10 @@ func filterAndSortDir(ctx context.Context, entries fs.DirEntries, includeAll boo
newEntries = entries[:0] // in place filter
prefix := ""
if dir != "" {
prefix = dir + "/"
prefix = dir
if !bucket.IsAllSlashes(dir) {
prefix += "/"
}
}
for _, entry := range entries {
ok := true
@@ -77,10 +81,10 @@ func filterAndSortDir(ctx context.Context, entries fs.DirEntries, includeAll boo
case !strings.HasPrefix(remote, prefix):
ok = false
fs.Errorf(entry, "Entry doesn't belong in directory %q (too short) - ignoring", dir)
case remote == prefix:
case remote == dir:
ok = false
fs.Errorf(entry, "Entry doesn't belong in directory %q (same as directory) - ignoring", dir)
case strings.ContainsRune(remote[len(prefix):], '/'):
case strings.ContainsRune(remote[len(prefix):], '/') && !bucket.IsAllSlashes(remote[len(prefix):]):
ok = false
fs.Errorf(entry, "Entry doesn't belong in directory %q (contains subdir) - ignoring", dir)
default:

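To see why the `IsAllSlashes` special case matters: for an ordinary directory the listing prefix is the name plus `/`, but for a directory whose name is entirely slashes (possible on bucket-based remotes) appending another slash would stop its own entries matching. A self-contained sketch of the prefix rule, with a local helper standing in for `bucket.IsAllSlashes`:

```go
package main

import (
	"fmt"
	"strings"
)

// isAllSlashes reports whether remote consists entirely of '/' characters
// (a stand-in here for rclone's bucket.IsAllSlashes).
func isAllSlashes(remote string) bool {
	return remote != "" && strings.Count(remote, "/") == len(remote)
}

// listPrefix mirrors the rule in filterAndSortDir: append "/" to the
// directory name unless the name is already nothing but slashes.
func listPrefix(dir string) string {
	if dir == "" {
		return ""
	}
	if isAllSlashes(dir) {
		return dir
	}
	return dir + "/"
}

func main() {
	fmt.Println(listPrefix("dir")) // "dir/"
	fmt.Println(listPrefix("/"))   // "/"  (not "//", which would miss "/file")
	fmt.Println(listPrefix("//"))  // "//"
}
```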
@@ -49,7 +49,8 @@ func TestFilterAndSortIncludeAll(t *testing.T) {

func TestFilterAndSortCheckDir(t *testing.T) {
// Check the different kinds of error when listing "dir"
da := mockdir.New("dir/")
da := mockdir.New("dir")
da2 := mockdir.New("dir/") // double slash dir - allowed for bucket based remotes
oA := mockobject.Object("diR/a")
db := mockdir.New("dir/b")
oB := mockobject.Object("dir/B/sub")
@@ -57,18 +58,19 @@ func TestFilterAndSortCheckDir(t *testing.T) {
oC := mockobject.Object("dir/C")
dd := mockdir.New("dir/d")
oD := mockobject.Object("dir/D")
entries := fs.DirEntries{da, oA, db, oB, dc, oC, dd, oD}
entries := fs.DirEntries{da, da2, oA, db, oB, dc, oC, dd, oD}
newEntries, err := filterAndSortDir(context.Background(), entries, true, "dir", nil, nil)
require.NoError(t, err)
assert.Equal(t,
fs.DirEntries{da2, oC, oD, db, dc, dd},
newEntries,
fs.DirEntries{oC, oD, db, dc, dd},
)
}

func TestFilterAndSortCheckDirRoot(t *testing.T) {
// Check the different kinds of error when listing the root ""
da := mockdir.New("")
da2 := mockdir.New("/") // doubleslash dir allowed on bucket based remotes
oA := mockobject.Object("A")
db := mockdir.New("b")
oB := mockobject.Object("B/sub")
@@ -76,12 +78,12 @@ func TestFilterAndSortCheckDirRoot(t *testing.T) {
oC := mockobject.Object("C")
dd := mockdir.New("d")
oD := mockobject.Object("D")
entries := fs.DirEntries{da, oA, db, oB, dc, oC, dd, oD}
entries := fs.DirEntries{da, da2, oA, db, oB, dc, oC, dd, oD}
newEntries, err := filterAndSortDir(context.Background(), entries, true, "", nil, nil)
require.NoError(t, err)
assert.Equal(t,
fs.DirEntries{da2, oA, oC, oD, db, dc, dd},
newEntries,
fs.DirEntries{oA, oC, oD, db, dc, dd},
)
}

@@ -95,7 +95,7 @@ func LogValueHide(key string, value interface{}) LogValueItem {
return LogValueItem{key: key, value: value, render: false}
}

// String returns the representation of value. If render is fals this
// String returns the representation of value. If render is false this
// is an empty string so LogValueItem entries won't show in the
// textual representation of logs.
func (j LogValueItem) String() string {

@@ -6,6 +6,7 @@ import (
"context"
"crypto/md5"
"encoding/base64"
"fmt"
"os"
"path/filepath"
"strings"
@@ -104,7 +105,7 @@ func ParseRemote(path string) (fsInfo *RegInfo, configName, fsPath string, conne
m := ConfigMap("", nil, configName, parsed.Config)
fsName, ok = m.Get("type")
if !ok {
return nil, "", "", nil, ErrorNotFoundInConfigFile
return nil, "", "", nil, fmt.Errorf("%w (%q)", ErrorNotFoundInConfigFile, configName)
}
}
} else {

@@ -297,7 +297,7 @@ func (o *MemoryObject) Open(ctx context.Context, options ...fs.OpenOption) (io.R

// Update in to the object with the modTime given of the given size
//
// This re-uses the internal buffer if at all possible.
// This reuses the internal buffer if at all possible.
func (o *MemoryObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
size := src.Size()
if size == 0 {

@@ -142,7 +142,7 @@ func TestMemoryObject(t *testing.T) {
assert.NoError(t, err)
checkContent(o, "Rutabaga")
assert.Equal(t, newNow, o.ModTime(context.Background()))
assert.Equal(t, "Rutaba", string(content)) // check we re-used the buffer
assert.Equal(t, "Rutaba", string(content)) // check we reused the buffer

// not within the buffer
newStr := "0123456789"

@@ -358,7 +358,7 @@ func TestRemoteServing(t *testing.T) {
URL: "[notfoundremote:]/",
Status: http.StatusInternalServerError,
Expected: `{
"error": "failed to make Fs: didn't find section in config file",
"error": "failed to make Fs: didn't find section in config file (\"notfoundremote\")",
"input": null,
"path": "/",
"status": 500

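The thread running through these hunks: once `ErrorNotFoundInConfigFile` is wrapped with `%w` to carry the remote name, exact comparisons no longer match, so the tests above switch to `errors.Is`. A minimal sketch of the behaviour (the sentinel here is illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

// errNotFound is an illustrative sentinel, standing in for
// fs.ErrorNotFoundInConfigFile.
var errNotFound = errors.New("didn't find section in config file")

func main() {
	// Wrapping with %w keeps the sentinel in the error chain
	// while adding the offending remote name to the message.
	err := fmt.Errorf("%w (%q)", errNotFound, "notfoundremote")

	fmt.Println(err == errNotFound)          // false: not the same value any more
	fmt.Println(errors.Is(err, errNotFound)) // true: sentinel found via Unwrap
}
```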
@@ -726,7 +726,7 @@ func (s *syncCopyMove) markParentNotEmpty(entry fs.DirEntry) {
parentDir = ""
}
delete(s.srcEmptyDirs, parentDir)
if parentDir == "" {
if parentDir == "" || parentDir == "/" {
break
}
parentDir = path.Dir(parentDir)
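The added `"/"` check guards against an infinite loop: `path.Dir` is a fixed point at `/`, so a walk up the tree that only stops at the empty string never terminates for rooted paths. A quick demonstration of the stdlib behaviour:

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	fmt.Println(path.Dir("a/b/c")) // "a/b"
	fmt.Println(path.Dir("a"))     // "."
	fmt.Println(path.Dir("/a"))    // "/"
	fmt.Println(path.Dir("/"))     // "/"  <- fixed point: the loop must also break here
}
```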
@@ -1109,23 +1109,17 @@ func (s *syncCopyMove) copyDirMetadata(ctx context.Context, f fs.Fs, dst fs.Dire
if !s.setDirModTimeAfter && equal {
return nil
}
if s.setDirModTimeAfter && equal {
newDst = dst
} else if s.copyEmptySrcDirs {
if s.setDirMetadata {
newDst = dst
if !equal {
if s.setDirMetadata && s.copyEmptySrcDirs {
newDst, err = operations.CopyDirMetadata(ctx, f, dst, dir, src)
} else if s.setDirModTime {
if dst == nil {
newDst, err = operations.MkdirModTime(ctx, f, dir, src.ModTime(ctx))
} else {
newDst, err = operations.SetDirModTime(ctx, f, dst, dir, src.ModTime(ctx))
}
} else if dst == nil {
// Create the directory if it doesn't exist
} else if dst == nil && s.setDirModTime && s.copyEmptySrcDirs {
newDst, err = operations.MkdirModTime(ctx, f, dir, src.ModTime(ctx))
} else if dst == nil && s.copyEmptySrcDirs {
err = operations.Mkdir(ctx, f, dir)
} else if dst != nil && s.setDirModTime {
newDst, err = operations.SetDirModTime(ctx, f, dst, dir, src.ModTime(ctx))
}
} else {
newDst = dst
}
// If we need to set modtime after and we created a dir, then save it for later
if s.setDirModTime && s.setDirModTimeAfter && err == nil {

@@ -2762,6 +2762,81 @@ func TestSyncConcurrentTruncate(t *testing.T) {
testSyncConcurrent(t, "truncate")
}

// Test that sync replaces dir modtimes in dst if they've changed
func testSyncReplaceDirModTime(t *testing.T, copyEmptySrcDirs bool) {
accounting.GlobalStats().ResetCounters()
ctx, _ := fs.AddConfig(context.Background())
r := fstest.NewRun(t)

file1 := r.WriteFile("file1", "file1", t2)
file2 := r.WriteFile("test_dir1/file2", "file2", t2)
file3 := r.WriteFile("test_dir2/sub_dir/file3", "file3", t2)
r.CheckLocalItems(t, file1, file2, file3)

_, err := operations.MkdirModTime(ctx, r.Flocal, "empty_dir", t2)
require.NoError(t, err)

// A directory that's empty on both src and dst
_, err = operations.MkdirModTime(ctx, r.Flocal, "empty_on_remote", t2)
require.NoError(t, err)
_, err = operations.MkdirModTime(ctx, r.Fremote, "empty_on_remote", t2)
require.NoError(t, err)

// set logging
// (this checks log output as DirModtime operations do not yet have stats, and r.CheckDirectoryModTimes also does not tell us what actions were taken)
oldLogLevel := fs.GetConfig(context.Background()).LogLevel
defer func() { fs.GetConfig(context.Background()).LogLevel = oldLogLevel }() // reset to old val after test
// need to do this as fs.Infof only respects the globalConfig
fs.GetConfig(context.Background()).LogLevel = fs.LogLevelInfo

// First run
accounting.GlobalStats().ResetCounters()
ctx = predictDstFromLogger(ctx)
output := bilib.CaptureOutput(func() {
err := CopyDir(ctx, r.Fremote, r.Flocal, copyEmptySrcDirs)
require.NoError(t, err)
})
require.NotNil(t, output)
testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)

// Save all dirs
dirs := []string{"test_dir1", "test_dir2", "test_dir2/sub_dir", "empty_on_remote"}
if copyEmptySrcDirs {
dirs = append(dirs, "empty_dir")
}

// Change dir modtimes
for _, dir := range dirs {
_, err := operations.SetDirModTime(ctx, r.Flocal, nil, dir, t1)
if err != nil && !errors.Is(err, fs.ErrorNotImplemented) {
require.NoError(t, err)
}
}

// Run again
accounting.GlobalStats().ResetCounters()
ctx = predictDstFromLogger(ctx)
output = bilib.CaptureOutput(func() {
err := CopyDir(ctx, r.Fremote, r.Flocal, copyEmptySrcDirs)
require.NoError(t, err)
})
require.NotNil(t, output)
testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
r.CheckLocalItems(t, file1, file2, file3)
r.CheckRemoteItems(t, file1, file2, file3)

// Check that the modtimes of the directories are as expected
r.CheckDirectoryModTimes(t, dirs...)
}

func TestSyncReplaceDirModTime(t *testing.T) {
testSyncReplaceDirModTime(t, false)
}

func TestSyncReplaceDirModTimeWithEmptyDirs(t *testing.T) {
testSyncReplaceDirModTime(t, true)
}

// Tests that nothing is transferred when src and dst already match
// Run the same sync twice, ensure no action is taken the second time
func testNothingToTransfer(t *testing.T, copyEmptySrcDirs bool) {

@@ -459,7 +459,7 @@ func Run(t *testing.T, opt *Opt) {
subRemoteName, subRemoteLeaf, err = fstest.RandomRemoteName(remoteName)
require.NoError(t, err)
f, err = fs.NewFs(context.Background(), subRemoteName)
if err == fs.ErrorNotFoundInConfigFile {
if errors.Is(err, fs.ErrorNotFoundInConfigFile) {
t.Logf("Didn't find %q in config file - skipping tests", remoteName)
return
}
@@ -2121,6 +2121,144 @@ func Run(t *testing.T, opt *Opt) {
}
})

// Run tests for bucket based Fs
// TestIntegration/FsMkdir/FsPutFiles/Bucket
t.Run("Bucket", func(t *testing.T) {
// Test if this Fs is bucket based - this test won't work for wrapped bucket based backends.
if !f.Features().BucketBased {
t.Skip("Not a bucket based backend")
}
if f.Features().CanHaveEmptyDirectories {
t.Skip("Can have empty directories")
}
if !f.Features().DoubleSlash {
t.Skip("Can't have // in paths")
}
// Create some troublesome file names
fileNames := []string{
file1.Path,
file2.Path,
".leadingdot",
"/.leadingdot",
"///tripleslash",
"//doubleslash",
"dir/.leadingdot",
"dir///tripleslash",
"dir//doubleslash",
}
dirNames := []string{
"hello? sausage",
"hello? sausage/êé",
"hello? sausage/êé/Hello, 世界",
"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
"/",
"//",
"///",
"dir",
"dir/",
"dir//",
}
t1 := fstest.Time("2003-02-03T04:05:06.499999999Z")
var objs []fs.Object
for _, fileName := range fileNames[2:] {
contents := "bad file name: " + fileName
file := fstest.NewItem(fileName, contents, t1)
objs = append(objs, PutTestContents(ctx, t, f, &file, contents, true))
}

// Check they arrived
// This uses walk.Walk with a max size set to make sure we don't use ListR
check := func(f fs.Fs, dir string, wantFileNames, wantDirNames []string) {
t.Helper()
var gotFileNames, gotDirNames []string
require.NoError(t, walk.Walk(ctx, f, dir, true, 100, func(path string, entries fs.DirEntries, err error) error {
if err != nil {
return err
}
for _, entry := range entries {
if _, isObj := entry.(fs.Object); isObj {
gotFileNames = append(gotFileNames, entry.Remote())
} else {
gotDirNames = append(gotDirNames, entry.Remote())
}
}
return nil
}))
sort.Strings(wantDirNames)
sort.Strings(wantFileNames)
sort.Strings(gotDirNames)
sort.Strings(gotFileNames)
assert.Equal(t, wantFileNames, gotFileNames)
assert.Equal(t, wantDirNames, gotDirNames)
}
check(f, "", fileNames, dirNames)
check(f, "/", []string{
"/.leadingdot",
"///tripleslash",
"//doubleslash",
}, []string{
"//",
"///",
})
check(f, "//", []string{
"///tripleslash",
"//doubleslash",
}, []string{
"///",
})
check(f, "dir", []string{
"dir/.leadingdot",
"dir///tripleslash",
"dir//doubleslash",
}, []string{
"dir/",
"dir//",
})
check(f, "dir/", []string{
"dir///tripleslash",
"dir//doubleslash",
}, []string{
"dir//",
})
check(f, "dir//", []string{
"dir///tripleslash",
}, nil)

// Now create a backend not at the root of a bucket
f2, err := fs.NewFs(ctx, subRemoteName+"/dir")
require.NoError(t, err)
check(f2, "", []string{
".leadingdot",
"//tripleslash",
"/doubleslash",
}, []string{
"/",
"//",
})
check(f2, "/", []string{
"//tripleslash",
"/doubleslash",
}, []string{
"//",
})
check(f2, "//", []string{
"//tripleslash",
}, []string(nil))

// Remove the objects
for _, obj := range objs {
assert.NoError(t, obj.Remove(ctx))
}

// Check they are gone
fstest.CheckListingWithPrecision(t, f, []fstest.Item{file1, file2}, []string{
"hello? sausage",
"hello? sausage/êé",
"hello? sausage/êé/Hello, 世界",
"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
}, fs.GetModifyWindow(ctx, f))
})

// State of remote at the moment the internal tests are called
InternalTestFiles = []fstest.Item{file1, file2}

@@ -2391,7 +2529,7 @@ func Run(t *testing.T, opt *Opt) {
var itemCopy = item
itemCopy.Path += ".copy"

// Set copy cutoff to mininum value so we make chunks
// Set copy cutoff to minimum value so we make chunks
origCutoff, err := do.SetCopyCutoff(minChunkSize)
require.NoError(t, err)
defer func() {

18
go.mod
@@ -25,7 +25,7 @@ require (
	github.com/aws/smithy-go v1.22.1
	github.com/buengese/sgzip v0.1.1
	github.com/cloudinary/cloudinary-go/v2 v2.9.0
	github.com/cloudsoda/go-smb2 v0.0.0-20241223203758-52b943b88fd6
	github.com/cloudsoda/go-smb2 v0.0.0-20250124173933-e6bbeea507ed
	github.com/colinmarc/hdfs/v2 v2.4.0
	github.com/coreos/go-semver v0.3.1
	github.com/coreos/go-systemd/v22 v22.5.0
@@ -36,7 +36,6 @@ require (
	github.com/go-chi/chi/v5 v5.2.0
	github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348
	github.com/go-git/go-billy/v5 v5.6.2
	github.com/golang-jwt/jwt/v5 v5.2.1
	github.com/google/uuid v1.6.0
	github.com/hanwen/go-fuse/v2 v2.7.2
	github.com/henrybear327/Proton-API-Bridge v1.0.0
@@ -92,6 +91,7 @@ require (
	gopkg.in/validator.v2 v2.0.1
	gopkg.in/yaml.v3 v3.0.1
	storj.io/uplink v1.13.1

)

require (
@@ -110,6 +110,7 @@ require (
	github.com/anacrolix/generics v0.0.1 // indirect
	github.com/andybalholm/cascadia v1.3.2 // indirect
	github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc // indirect
	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect
	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.23 // indirect
	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.27 // indirect
@@ -147,10 +148,16 @@ require (
	github.com/go-logr/logr v1.4.2 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/go-ole/go-ole v1.3.0 // indirect
	github.com/go-openapi/errors v0.21.0 // indirect
	github.com/go-openapi/strfmt v0.22.1 // indirect
	github.com/go-playground/locales v0.14.1 // indirect
	github.com/go-playground/universal-translator v0.18.1 // indirect
	github.com/go-playground/validator/v10 v10.20.0 // indirect
	github.com/go-resty/resty/v2 v2.11.0 // indirect
	github.com/goccy/go-json v0.10.4 // indirect
	github.com/gofrs/flock v0.8.1 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
	github.com/google/s2a-go v0.1.8 // indirect
	github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
	github.com/googleapis/gax-go/v2 v2.14.1 // indirect
@@ -172,14 +179,16 @@ require (
	github.com/klauspost/cpuid/v2 v2.2.9 // indirect
	github.com/kr/fs v0.1.0 // indirect
	github.com/kylelemons/godebug v1.1.0 // indirect
	github.com/leodido/go-urn v1.4.0 // indirect
	github.com/lpar/date v1.0.0 // indirect
	github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
	github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed // indirect
	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/minio/md5-simd v1.1.2 // indirect
	github.com/minio/xxml v0.0.3 // indirect
	github.com/mitchellh/mapstructure v1.5.0 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/onsi/ginkgo v1.16.5 // indirect
	github.com/oklog/ulid v1.3.1 // indirect
	github.com/panjf2000/ants/v2 v2.9.1 // indirect
	github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 // indirect
	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
@@ -203,6 +212,7 @@ require (
	github.com/willscott/go-nfs-client v0.0.0-20240104095149-b44639837b00 // indirect
	github.com/yusufpapurcu/wmi v1.2.4 // indirect
	github.com/zeebo/errs v1.3.0 // indirect
	go.mongodb.org/mongo-driver v1.14.0 // indirect
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
	go.opentelemetry.io/otel v1.31.0 // indirect
	go.opentelemetry.io/otel/metric v1.31.0 // indirect
@@ -223,8 +233,10 @@ require (
)

require (
	github.com/IBM/go-sdk-core/v5 v5.17.5
	github.com/Microsoft/go-winio v0.6.1 // indirect
	github.com/ProtonMail/go-crypto v1.1.4
	github.com/golang-jwt/jwt/v4 v4.5.1
	github.com/pkg/xattr v0.4.10
	golang.org/x/mobile v0.0.0-20250106192035-c31d5b91ecc3
	golang.org/x/term v0.28.0
Some files were not shown because too many files have changed in this diff